ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
1f41e247-d429-4883-ac69-1d498174b3ed | cpp | tensorflow/tensorflow | quantized_instance_norm | tensorflow/core/kernels/quantized_instance_norm.cc | tensorflow/core/kernels/quantized_instance_norm_test.cc | #define EIGEN_USE_THREADS
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define USE_NEON
#include <arm_neon.h>
#endif
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/numeric_op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#ifdef USE_NEON
namespace {
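// Computes the per-channel (column) mean and variance of a rows x cols uint8
// matrix, 16 channels at a time. Rows are consumed in chunks of 256 so the
// uint32 sums of squares cannot overflow; each chunk's statistics are folded
// into the running mean (xA) and sum of squared deviations (M2A) with the
// standard parallel variance-merge formula.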
void ColMeanAndVariance(const uint8_t* input, const uint32_t rows,
const uint32_t cols, float* mean, float* variance) {
for (uint32_t col_offset = 0; col_offset < cols; col_offset += 16) {
uint32x4_t sum[4] = {0};
float nA = 0.0f;
float32x4_t xA[4] = {0.0f};
float32x4_t M2A[4] = {0.0f};
const uint8_t* inp_ptr = input + col_offset;
for (uint32_t row = 0; row < rows; row += 256) {
uint32x4_t sub_sum[4] = {0};
uint32x4_t sub_sq_sum[4] = {0};
const uint32_t limit = std::min(rows, row + 256);
const float nB = limit - row;
for (uint32_t subrow = row; subrow < limit; ++subrow) {
const uint8x16_t v = vld1q_u8(inp_ptr);
inp_ptr += cols;
const uint8x8_t v_high = vget_high_u8(v);
const uint8x8_t v_low = vget_low_u8(v);
const uint16x8_t v_high_u16 = vmovl_u8(v_high);
const uint16x8_t v_low_u16 = vmovl_u8(v_low);
const uint16x4_t v_high_high = vget_high_u16(v_high_u16);
const uint16x4_t v_high_low = vget_low_u16(v_high_u16);
const uint16x4_t v_low_high = vget_high_u16(v_low_u16);
const uint16x4_t v_low_low = vget_low_u16(v_low_u16);
sub_sum[0] = vaddw_u16(sub_sum[0], v_high_high);
sub_sum[1] = vaddw_u16(sub_sum[1], v_high_low);
sub_sum[2] = vaddw_u16(sub_sum[2], v_low_high);
sub_sum[3] = vaddw_u16(sub_sum[3], v_low_low);
sub_sq_sum[0] = vmlal_u16(sub_sq_sum[0], v_high_high, v_high_high);
sub_sq_sum[1] = vmlal_u16(sub_sq_sum[1], v_high_low, v_high_low);
sub_sq_sum[2] = vmlal_u16(sub_sq_sum[2], v_low_high, v_low_high);
sub_sq_sum[3] = vmlal_u16(sub_sq_sum[3], v_low_low, v_low_low);
}
for (int i = 0; i < 4; ++i) {
sum[i] = vaddq_u32(sum[i], sub_sum[i]);
const float nX = nA + nB;
const float32x4_t xB =
vmulq_n_f32(vcvtq_f32_u32(sub_sum[i]), 1.0f / nB);
const float32x4_t delta = vsubq_f32(xB, xA[i]);
xA[i] = vmulq_n_f32(
vaddq_f32(vmulq_n_f32(xA[i], nA), vmulq_n_f32(xB, nB)), 1.0f / nX);
const float32x4_t sub_sum_f32 = vcvtq_f32_u32(sub_sum[i]);
const float32x4_t sub_sum_sq = vmulq_f32(sub_sum_f32, sub_sum_f32);
const float32x4_t M2B = vsubq_f32(vcvtq_f32_u32(sub_sq_sum[i]),
vmulq_n_f32(sub_sum_sq, 1.0f / nB));
const float32x4_t last_term =
vmulq_n_f32(vmulq_f32(delta, delta), nA * nB / nX);
M2A[i] = vaddq_f32(vaddq_f32(M2A[i], M2B), last_term);
}
nA += limit;
}
const float inv_rows = 1.0f / static_cast<float>(rows);
vst1q_f32(mean + col_offset, vmulq_n_f32(vcvtq_f32_u32(sum[3]), inv_rows));
vst1q_f32(mean + col_offset + 4,
vmulq_n_f32(vcvtq_f32_u32(sum[2]), inv_rows));
vst1q_f32(mean + col_offset + 8,
vmulq_n_f32(vcvtq_f32_u32(sum[1]), inv_rows));
vst1q_f32(mean + col_offset + 12,
vmulq_n_f32(vcvtq_f32_u32(sum[0]), inv_rows));
vst1q_f32(variance + col_offset, vmulq_n_f32(M2A[3], inv_rows));
vst1q_f32(variance + col_offset + 4, vmulq_n_f32(M2A[2], inv_rows));
vst1q_f32(variance + col_offset + 8, vmulq_n_f32(M2A[1], inv_rows));
vst1q_f32(variance + col_offset + 12, vmulq_n_f32(M2A[0], inv_rows));
}
}
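// Scans the whole input and returns the minimum and maximum of the
// instance-normalized values (x - mean) * rsqrt(variance + epsilon), using the
// NEON reciprocal-sqrt estimate. Used to pick the output quantization range
// when it is not supplied by the op.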
void MinAndMax(const uint8_t* input, const uint32_t rows, const uint32_t cols,
const float* mean_ptr, const float* variance_ptr,
float variance_epsilon, float* minimum, float* maximum) {
float v_maximum = std::numeric_limits<float>::min();
float v_minimum = std::numeric_limits<float>::max();
const float32x4_t eps = vdupq_n_f32(variance_epsilon);
for (uint32_t col_offset = 0; col_offset < cols; col_offset += 16) {
const float32x4_t mean[4] = {vld1q_f32(mean_ptr + col_offset),
vld1q_f32(mean_ptr + col_offset + 4),
vld1q_f32(mean_ptr + col_offset + 8),
vld1q_f32(mean_ptr + col_offset + 12)};
const float32x4_t variance[4] = {vld1q_f32(variance_ptr + col_offset),
vld1q_f32(variance_ptr + col_offset + 4),
vld1q_f32(variance_ptr + col_offset + 8),
vld1q_f32(variance_ptr + col_offset + 12)};
const float32x4_t inv_stddev[4] = {
vrsqrteq_f32(vaddq_f32(variance[0], eps)),
vrsqrteq_f32(vaddq_f32(variance[1], eps)),
vrsqrteq_f32(vaddq_f32(variance[2], eps)),
vrsqrteq_f32(vaddq_f32(variance[3], eps))};
const uint8_t* inp_ptr = input + col_offset;
for (uint32_t row = 0; row < rows; ++row) {
const uint8x16_t v = vld1q_u8(inp_ptr);
inp_ptr += cols;
const uint16x8_t v_high = vmovl_u8(vget_high_u8(v));
const uint16x8_t v_low = vmovl_u8(vget_low_u8(v));
const float32x4_t v_float[4] = {
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_high))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_high))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_low))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_low)))};
for (int i = 0; i < 4; ++i) {
const float32x4_t normed =
vmulq_f32(vsubq_f32(v_float[i], mean[i]), inv_stddev[i]);
const float32x2_t high = vget_high_f32(normed);
const float32x2_t low = vget_low_f32(normed);
float32x2_t tmp_max = vpmax_f32(low, high);
tmp_max = vpmax_f32(tmp_max, tmp_max);
v_maximum = std::max(v_maximum, vget_lane_f32(tmp_max, 0));
float32x2_t tmp_min = vpmin_f32(low, high);
tmp_min = vpmin_f32(tmp_min, tmp_min);
v_minimum = std::min(v_minimum, vget_lane_f32(tmp_min, 0));
}
}
}
*minimum = v_minimum;
*maximum = v_maximum;
}
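// Normalizes the input with the precomputed per-channel mean and variance and
// requantizes the result to uint8 over the [minimum, maximum] output range.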
void InstanceNorm(const uint8_t* input, const uint32_t rows,
const uint32_t cols, const float* mean_ptr,
const float* variance_ptr, float variance_epsilon,
float minimum, float maximum, uint8_t* output) {
const float32x4_t eps = vdupq_n_f32(variance_epsilon);
const float32x4_t out_min = vdupq_n_f32(minimum);
const float out_scale = 255.0f / (maximum - minimum);
for (uint32_t col_offset = 0; col_offset < cols; col_offset += 16) {
const float32x4_t mean[4] = {vld1q_f32(mean_ptr + col_offset + 12),
vld1q_f32(mean_ptr + col_offset + 8),
vld1q_f32(mean_ptr + col_offset + 4),
vld1q_f32(mean_ptr + col_offset)};
const float32x4_t variance[4] = {vld1q_f32(variance_ptr + col_offset + 12),
vld1q_f32(variance_ptr + col_offset + 8),
vld1q_f32(variance_ptr + col_offset + 4),
vld1q_f32(variance_ptr + col_offset)};
const float32x4_t inv_stddev[4] = {
vrsqrteq_f32(vaddq_f32(variance[0], eps)),
vrsqrteq_f32(vaddq_f32(variance[1], eps)),
vrsqrteq_f32(vaddq_f32(variance[2], eps)),
vrsqrteq_f32(vaddq_f32(variance[3], eps))};
const uint8_t* inp_ptr = input + col_offset;
uint8_t* out_ptr = output + col_offset;
for (uint32_t row = 0; row < rows; ++row) {
const uint8x16_t v = vld1q_u8(inp_ptr);
inp_ptr += cols;
const uint16x8_t v_high = vmovl_u8(vget_high_u8(v));
const uint16x8_t v_low = vmovl_u8(vget_low_u8(v));
const float32x4_t v_float[4] = {
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_high))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_high))),
vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_low))),
vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_low)))};
uint16x4_t normed_uint16[4];
for (int i = 0; i < 4; ++i) {
const float32x4_t normed =
vmulq_f32(vsubq_f32(v_float[i], mean[i]), inv_stddev[i]);
const int32x4_t normed_int32 =
vcvtq_s32_f32(vmulq_n_f32(vsubq_f32(normed, out_min), out_scale));
normed_uint16[i] = vqmovun_s32(normed_int32);
}
vst1_u8(out_ptr,
vqmovn_u16(vcombine_u16(normed_uint16[3], normed_uint16[2])));
vst1_u8(out_ptr + 8,
vqmovn_u16(vcombine_u16(normed_uint16[1], normed_uint16[0])));
out_ptr += cols;
}
}
}
}
#endif
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
class QuantizedInstanceNorm : public OpKernel {
public:
explicit QuantizedInstanceNorm(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context,
context->GetAttr("variance_epsilon", &variance_epsilon_));
OP_REQUIRES_OK(context,
context->GetAttr("min_separation", &min_separation_));
OP_REQUIRES_OK(
context, context->GetAttr("output_range_given", &output_range_given_));
if (output_range_given_) {
OP_REQUIRES_OK(context, context->GetAttr("given_y_min", &given_y_min_));
OP_REQUIRES_OK(context, context->GetAttr("given_y_max", &given_y_max_));
OP_REQUIRES(context, given_y_min_ < given_y_max_,
errors::InvalidArgument(
"given_y_min must be less than given_y_max : ",
given_y_min_, " >= ", given_y_max_));
}
}
void Compute(OpKernelContext* context) override {
const Tensor& input = context->input(0);
const Tensor& x_min = context->input(1);
const Tensor& x_max = context->input(2);
OP_REQUIRES(context, TensorShapeUtils::IsScalar(x_min.shape()),
errors::InvalidArgument("`x_min` must be rank 0 but is rank ",
x_min.dims()));
OP_REQUIRES(context, TensorShapeUtils::IsScalar(x_max.shape()),
errors::InvalidArgument("`x_max` must be rank 0 but is rank ",
x_max.dims()));
float input_min = x_min.scalar<float>()();
float input_max = x_max.scalar<float>()();
float input_scale = (input_max - input_min) / 255.0f;
OP_REQUIRES(context, input_min < input_max,
errors::InvalidArgument(
"input_min must be less than input_max : ", input_min,
" >= ", input_max));
auto input_tensor = input.tensor<quint8, 4>();
auto N = input_tensor.dimension(0);
auto H = input_tensor.dimension(1);
auto W = input_tensor.dimension(2);
auto C = input_tensor.dimension(3);
Tensor* output = nullptr;
OP_REQUIRES_OK(context,
context->allocate_output(0, input.shape(), &output));
Tensor* output_min = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min));
Tensor* output_max = nullptr;
OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max));
typedef TTypes<float>::Tensor::Index Index;
const Eigen::IndexList<Eigen::type2index<1>, Eigen::type2index<2>>
reduction_indices;
Eigen::IndexList<Eigen::type2index<1>, Index, Index, Eigen::type2index<1>>
broadcast_spec;
broadcast_spec.set(1, H);
broadcast_spec.set(2, W);
Eigen::IndexList<Index, Eigen::type2index<1>, Eigen::type2index<1>, Index>
expand_spec;
expand_spec.set(0, N);
expand_spec.set(3, C);
Eigen::Tensor<float, 2, Eigen::RowMajor> float_mean(N, C);
Eigen::Tensor<float, 2, Eigen::RowMajor> float_variance(N, C);
#ifdef USE_NEON
if (N == 1 && (C % 16 == 0)) {
VLOG(2) << "Calling optimized";
ColMeanAndVariance(reinterpret_cast<const uint8_t*>(input_tensor.data()),
H * W, C, float_mean.data(), float_variance.data());
float minimum = given_y_min_, maximum = given_y_max_;
if (!output_range_given_) {
MinAndMax(reinterpret_cast<const uint8_t*>(input_tensor.data()), H * W,
C, float_mean.data(), float_variance.data(),
variance_epsilon_, &minimum, &maximum);
}
if (maximum - minimum < min_separation_) {
maximum = minimum + min_separation_;
}
InstanceNorm(reinterpret_cast<const uint8_t*>(input_tensor.data()), H * W,
C, float_mean.data(), float_variance.data(),
variance_epsilon_, minimum, maximum,
reinterpret_cast<uint8_t*>(output->flat<quint8>().data()));
output_min->scalar<float>()() = minimum;
output_max->scalar<float>()() = maximum;
} else
#endif
{
VLOG(2) << "Calling unoptimized";
float_mean = input_tensor.cast<float>().reduce(
reduction_indices, Eigen::internal::MeanReducer<float>());
float_variance =
(input_scale *
((input_tensor.cast<float>() -
float_mean.reshape(expand_spec).broadcast(broadcast_spec))))
.square()
.reduce(reduction_indices, Eigen::internal::MeanReducer<float>());
Eigen::Tensor<float, 4, Eigen::RowMajor> instance_normed =
input_scale *
(input_tensor.cast<float>() -
float_mean.reshape(expand_spec).broadcast(broadcast_spec)) *
(float_variance + variance_epsilon_)
.rsqrt()
.reshape(expand_spec)
.broadcast(broadcast_spec);
Eigen::Tensor<float, 0, Eigen::RowMajor> normed_min;
Eigen::Tensor<float, 0, Eigen::RowMajor> normed_max;
if (!output_range_given_) {
normed_min = instance_normed.minimum();
normed_max = instance_normed.maximum();
} else {
normed_min() = given_y_min_;
normed_max() = given_y_max_;
}
if (normed_max() - normed_min() < min_separation_) {
normed_max() = normed_min() + min_separation_;
}
FloatToQuantizedStruct<quint8> output_f2q(normed_min(), normed_max());
auto instance_normed_quantized =
QUANTIZE_WITH_EIGEN(instance_normed, output_f2q, quint8);
output->tensor<quint8, 4>().device(
context->template eigen_device<CPUDevice>()) =
instance_normed_quantized;
output_min->flat<float>()(0) = normed_min();
output_max->flat<float>()(0) = normed_max();
}
}
private:
float variance_epsilon_;
float min_separation_;
bool output_range_given_;
float given_y_min_;
float given_y_max_;
};
REGISTER_KERNEL_BUILDER(Name("QuantizedInstanceNorm")
.Device(DEVICE_CPU)
.TypeConstraint<quint8>("T"),
QuantizedInstanceNorm);
} | #define EIGEN_USE_THREADS
#include <vector>
#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
namespace tensorflow {
namespace ops {
namespace {
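// Dequantizes the input and computes float instance normalization directly,
// serving as the reference against which the quantized kernel is compared.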
void ReferenceImpl(const quint8* inp, float inp_min, float inp_max,
const TensorShape& shape, float var_eps, float* out) {
int N = shape.dim_size(0);
int H = shape.dim_size(1);
int W = shape.dim_size(2);
int C = shape.dim_size(3);
int total = N * H * W * C;
float inp_scale = (inp_max - inp_min) / 255.0f;
std::unique_ptr<float[]> dequantized(new float[total]);
for (int i = 0; i < total; ++i) {
dequantized[i] = inp_min + inp_scale * static_cast<float>(inp[i]);
}
std::unique_ptr<float[]> inp_mean(new float[N * C]);
std::unique_ptr<float[]> inp_var(new float[N * C]);
float img_size = static_cast<float>(H) * static_cast<float>(W);
for (int n = 0; n < N; ++n) {
for (int c = 0; c < C; ++c) {
float sum = 0.0;
for (int i = 0; i < H * W; ++i) {
sum += dequantized[n * H * W * C + i * C + c];
}
inp_mean[n * C + c] = sum / img_size;
}
}
for (int n = 0; n < N; ++n) {
for (int c = 0; c < C; ++c) {
float sum = 0.0;
for (int i = 0; i < H * W; ++i) {
float tmp =
dequantized[n * H * W * C + i * C + c] - inp_mean[n * C + c];
sum += tmp * tmp;
}
inp_var[n * C + c] = sum / img_size;
}
}
for (int n = 0; n < N; ++n) {
for (int c = 0; c < C; ++c) {
for (int i = 0; i < H * W; ++i) {
out[n * H * W * C + i * C + c] =
(dequantized[n * H * W * C + i * C + c] - inp_mean[n * C + c]) /
std::sqrt(inp_var[n * C + c] + var_eps);
}
}
}
}
void Expect(const Tensor& input, float x_min, float x_max,
bool output_range_given, float give_y_min, float given_y_max) {
Scope root = Scope::NewRootScope();
auto input_ph = Placeholder(root, DT_QUINT8);
const float variance_eps = 1e-5;
auto instance_norm = QuantizedInstanceNorm(
root, input_ph, x_min, x_max,
QuantizedInstanceNorm::Attrs().VarianceEpsilon(variance_eps));
Status s = root.status();
EXPECT_TRUE(s.ok());
ClientSession session(root);
std::vector<Tensor> outputs;
s = session.Run({{input_ph, input}},
{instance_norm.y, instance_norm.y_min, instance_norm.y_max},
&outputs);
EXPECT_TRUE(s.ok());
Tensor expected(DT_FLOAT, input.shape());
ReferenceImpl(input.flat<quint8>().data(), x_min, x_max, input.shape(),
variance_eps, expected.flat<float>().data());
auto out = outputs[0].flat<quint8>();
float out_min = outputs[1].flat<float>()(0);
float out_max = outputs[2].flat<float>()(0);
float out_scale = (out_max - out_min) / 255.0f;
Eigen::Tensor<float, 0, Eigen::RowMajor> max_diff =
(expected.flat<float>() - (out_min + out_scale * out.cast<float>()))
.abs()
.maximum();
EXPECT_LE(max_diff(), 0.1);
LOG(INFO) << "max diff " << max_diff();
}
void TestBasic() {
Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32});
auto input = input_tensor.flat<quint8>();
input = input.random(Eigen::internal::UniformRandomGenerator<quint8>());
Expect(input_tensor, 0.0f, 1.0f, false, 0.0f, 0.0f);
}
void TestZeroInput() {
Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32});
auto input = input_tensor.flat<quint8>();
input = input.setConstant(0);
Expect(input_tensor, 2.0f, 3.0f, false, 0.0f, 0.0f);
}
void TestMaxInput() {
Tensor input_tensor(DT_QUINT8, {1, 1, 2, 16});
auto input = input_tensor.flat<quint8>();
input = input.setConstant(255);
Expect(input_tensor, 0.0f,
std::numeric_limits<float>::max() / static_cast<float>(2 * 16), false,
0.0f, 0.0f);
}
void TestOutputRangeGiven() {
Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32});
auto input = input_tensor.flat<quint8>();
input = input.random(Eigen::internal::UniformRandomGenerator<quint8>());
Expect(input_tensor, -10.0f, 10.0f, true, -1.0f, 1.0f);
}
void TestClamp() {
GTEST_SKIP() << "TODO(b/339058131): Fix test failure.";
Tensor input_tensor(DT_QUINT8, {1, 4, 4, 32});
auto input = input_tensor.flat<quint8>();
input = input.random(Eigen::internal::UniformRandomGenerator<quint8>());
Expect(input_tensor, -10.0f, 10.0f, true, 0.0f, 1.0f);
}
}
}
}
#define RUN_TEST(t) \
TEST(QuantizedInstanceNormTest, t) { tensorflow::ops::t(); }
RUN_TEST(TestBasic);
RUN_TEST(TestZeroInput);
RUN_TEST(TestMaxInput);
RUN_TEST(TestOutputRangeGiven);
RUN_TEST(TestClamp);
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_instance_norm.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/quantized_instance_norm_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1f3c6aef-7f7d-42a7-8cfa-9656e271f9f0 | cpp | tensorflow/tensorflow | remote_mgr | tensorflow/core/distributed_runtime/eager/remote_mgr.cc | tensorflow/core/distributed_runtime/eager/remote_mgr_test.cc | #include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/distributed_runtime/eager/remote_tensor_handle.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/util/env_var.h"
namespace tensorflow {
namespace {
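// Attaches an EAGER_REMOTE_MGR error-source payload so callers can attribute
// the failure to the remote tensor handle manager.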
Status WithErrorSourcePayload(Status error) {
core::platform::ErrorSourceProto error_source_proto;
error_source_proto.set_error_source(
core::platform::ErrorSourceProto::EAGER_REMOTE_MGR);
error.SetPayload(tensorflow::kErrorSource,
absl::Cord(error_source_proto.SerializeAsString()));
return error;
}
}
namespace eager {
void RemoteMgr::AddOperationOutputs(
const absl::Span<tensorflow::TensorHandle* const> handles,
int64_t operation_id) {
mutex_lock l(remote_tensor_handle_mu_);
for (int i = 0, end = handles.size(); i < end; i++) {
remote_tensor_handle_map_.emplace(
RemoteTensorHandleInternal(operation_id, i), handles[i]);
}
}
void RemoteMgr::AddOperationOutput(tensorflow::TensorHandle* handle,
int64_t operation_id, int32_t output_num) {
mutex_lock l(remote_tensor_handle_mu_);
remote_tensor_handle_map_.emplace(
RemoteTensorHandleInternal(operation_id, output_num), handle);
}
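// Looks up a handle by (op_id, output_num). Callers must hold
// remote_tensor_handle_mu_.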
Status RemoteMgr::GetTensorHandleImpl(
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
auto iter = remote_tensor_handle_map_.find(remote_handle);
if (iter == remote_tensor_handle_map_.end()) {
std::string error_message = absl::StrCat(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num,
". One possible cause is that the tensor was accessed after "
"deallocation in a distributed worker setup.");
bool result;
TF_CHECK_OK(ReadBoolFromEnvVar("TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE",
true, &result));
if (result) {
std::string error_message_ext;
absl::StrAppend(
&error_message_ext, error_message,
"Try setting "
"`os.environ['TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE']='False'` in "
"your client to disable async streaming behavior to see if it fixes "
"the problem.");
return WithErrorSourcePayload(
absl::InvalidArgumentError(error_message_ext));
}
return WithErrorSourcePayload(absl::InvalidArgumentError(error_message));
}
*handle = iter->second;
return absl::OkStatus();
}
Status RemoteMgr::GetTensorHandle(
const RemoteTensorHandleInternal& remote_handle,
tensorflow::TensorHandle** handle) {
tf_shared_lock l(remote_tensor_handle_mu_);
return GetTensorHandleImpl(remote_handle, handle);
}
Status RemoteMgr::GetMirroredResourceShape(
const RemoteTensorHandleInternal& remote_handle,
std::vector<DtypeAndPartialTensorShape>* handle) {
tf_shared_lock l(mirrored_resource_shape_mu_);
auto iter = mirrored_resource_shape_map_.find(remote_handle);
if (iter == mirrored_resource_shape_map_.end()) {
return WithErrorSourcePayload(errors::InvalidArgument(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num,
". One possible cause is that the tensor was accessed after "
"deallocation in a distributed worker setup. Try setting "
"`os.environ['TF_ENABLE_EAGER_CLIENT_STREAMING_ENQUEUE']='False'` in "
"your client to disable async streaming behavior to see if it fixes "
"the problem."));
}
*handle = iter->second;
return absl::OkStatus();
}
Status RemoteMgr::GetRemoteTensorHandle(const tensorflow::TensorHandle* handle,
const bool wait_until_ready,
int64_t* op_id, int32* output_num) {
TF_RETURN_IF_ERROR(handle->RemoteAddress(handle->device(), wait_until_ready,
op_id, output_num));
tensorflow::TensorHandle* h;
TF_RETURN_IF_ERROR(
GetTensorHandleImpl(RemoteTensorHandleInternal(*op_id, *output_num), &h));
if (handle != h) {
return WithErrorSourcePayload(errors::Internal(
"Found two different tensor handles with the same op_id:", *op_id,
" and output_num:", *output_num));
}
return absl::OkStatus();
}
Status RemoteMgr::DeleteTensorHandle(
const RemoteTensorHandleInternal& remote_handle) {
{
mutex_lock l(remote_tensor_handle_mu_);
auto iter = remote_tensor_handle_map_.find(remote_handle);
if (iter != remote_tensor_handle_map_.end()) {
iter->second->Unref();
remote_tensor_handle_map_.erase(iter);
return absl::OkStatus();
}
}
{
mutex_lock l(mirrored_resource_shape_mu_);
auto iter = mirrored_resource_shape_map_.find(remote_handle);
if (iter != mirrored_resource_shape_map_.end()) {
mirrored_resource_shape_map_.erase(iter);
return absl::OkStatus();
}
}
return WithErrorSourcePayload(errors::InvalidArgument(
"Unable to find the relevant tensor remote_handle: Op ID: ",
remote_handle.op_id, ", Output num: ", remote_handle.output_num));
}
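// Fills `out` with the remote address (op_id/output_num), device and dtype of
// `in`; if the handle has no remote address on `device`, falls back to the
// op-output handles registered with this RemoteMgr.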
Status RemoteMgr::SerializeRemoteTensorHandle(
TensorHandle* in, const bool wait_until_ready, RemoteTensorHandle* out,
Device* device, absl::string_view device_name,
const bool serialize_resource_dtype_and_shape) {
int64_t op_id;
int32_t output_num;
auto status =
in->RemoteAddress(device, wait_until_ready, &op_id, &output_num);
if (!status.ok()) {
LOG(ERROR)
<< "Failed to get remote address for tensor handle with given device "
<< device->name() << " error " << status.message();
tf_shared_lock l(remote_tensor_handle_mu_);
TF_RETURN_IF_ERROR(
GetRemoteTensorHandle(in, wait_until_ready, &op_id, &output_num));
}
out->Clear();
out->set_op_id(op_id);
out->set_output_num(output_num);
out->set_op_device(in->op_device() ? in->op_device()->name() : "");
out->set_device(device_name.empty()
? std::string(in->DeviceOrHostCPU(*parent_)->name())
: std::string(device_name));
out->set_dtype(in->dtype);
if (serialize_resource_dtype_and_shape) {
std::vector<DtypeAndPartialTensorShape> resource_dtypes_and_shapes;
TF_RETURN_IF_ERROR(
in->GetResourceHandleDtypesAndShapes(&resource_dtypes_and_shapes));
for (const auto& dtype_and_shape : resource_dtypes_and_shapes) {
ResourceDtypeAndShape* dtype_and_shape_proto =
out->add_resource_dtypes_and_shapes();
dtype_and_shape_proto->set_dtype(dtype_and_shape.dtype);
dtype_and_shape.shape.AsProto(dtype_and_shape_proto->mutable_shape());
}
}
return absl::OkStatus();
}
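// Resolves a RemoteTensorHandle proto either to a handle owned by this
// RemoteMgr or to a new lazy remote handle on the named device, caching any
// mirrored resource dtypes and shapes.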
Status RemoteMgr::DeserializeRemoteTensorHandle(const RemoteTensorHandle& in,
TensorHandle** out) {
Device* device;
if (parent_->local_device_mgr()->LookupDevice(in.op_device(), &device).ok() ||
parent_->local_device_mgr()->LookupDevice(in.device(), &device).ok()) {
TF_RETURN_IF_ERROR(GetTensorHandle(RemoteTensorHandleInternal(in), out));
(*out)->Ref();
} else {
const string& device_name =
in.op_device().empty() ? in.device() : in.op_device();
TF_RETURN_IF_ERROR(
parent_->FindDeviceFromName(device_name.c_str(), &device));
*out = TensorHandle::CreateLazyRemoteHandle(in.op_id(), in.output_num(),
in.dtype(), device,
true, parent_);
std::vector<DtypeAndPartialTensorShape> dtypes_and_shapes;
if (!GetMirroredResourceShape(RemoteTensorHandleInternal(in),
&dtypes_and_shapes)
.ok()) {
for (const auto& dtype_and_shape_proto :
in.resource_dtypes_and_shapes()) {
dtypes_and_shapes.push_back(DtypeAndPartialTensorShape{
dtype_and_shape_proto.dtype(),
TensorShape(dtype_and_shape_proto.shape())});
}
mutex_lock l(mirrored_resource_shape_mu_);
mirrored_resource_shape_map_.emplace(
RemoteTensorHandleInternal(in.op_id(), in.output_num()),
dtypes_and_shapes);
}
(*out)->SetResourceHandleDtypeAndShape(std::move(dtypes_and_shapes));
}
return absl::OkStatus();
}
EagerExecutor& RemoteMgr::GetOrCreateExecutorForStream(uint64 stream_id) {
mutex_lock l(executor_map_mu_);
auto it = executor_map_.find(stream_id);
if (it == executor_map_.end()) {
auto it_and_bool = executor_map_.emplace(
std::piecewise_construct, std::forward_as_tuple(stream_id),
std::forward_as_tuple(true));
DCHECK(it_and_bool.second);
it = it_and_bool.first;
}
return it->second;
}
void RemoteMgr::DeleteExecutorForStream(uint64 stream_id) {
mutex_lock l(executor_map_mu_);
auto it = executor_map_.find(stream_id);
if (it == executor_map_.end()) {
return;
}
Status s = it->second.ShutDown();
if (!s.ok()) {
LOG(ERROR) << "EagerExecutor shutdown with error " << s.message();
}
executor_map_.erase(it);
}
}
} | #include "tensorflow/core/distributed_runtime/eager/remote_mgr.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/eager/tensor_handle.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/error_payloads.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/remote_tensor_handle.pb.h"
namespace tensorflow {
namespace eager {
namespace {
class TestRemoteMgr : public RemoteMgr {
public:
TestRemoteMgr(bool is_master, EagerContext* ctx)
: RemoteMgr(is_master, ctx) {}
uint64 OpId() {
tf_shared_lock l(next_id_mutex_);
return next_op_id_;
}
};
class RemoteMgrTest : public ::testing::Test {
public:
RemoteMgrTest() {
std::vector<std::unique_ptr<Device>> devices;
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:localhost/replica:0/task:0"));
local_device_ = devices.back().get();
devices.push_back(
DeviceFactory::NewDevice("CPU", {}, "/job:worker/replica:0/task:0"));
remote_device_ = devices.back().get();
auto device_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
auto rendezvous = tsl::core::RefCountPtr<tensorflow::Rendezvous>(
new tensorflow::IntraProcessRendezvous(device_mgr.get()));
ctx_ = new tensorflow::EagerContext(
SessionOptions(),
tensorflow::ContextDevicePlacementPolicy::DEVICE_PLACEMENT_SILENT,
false, device_mgr.release(), true, std::move(rendezvous),
nullptr, nullptr, true);
}
~RemoteMgrTest() override { ctx_->Unref(); }
Device* local_device_;
Device* remote_device_;
EagerContext* ctx_;
};
TEST_F(RemoteMgrTest, SerializeLocalTensorHandleWithRemoteMirror) {
RemoteMgr remote_mgr(false, ctx_);
const TensorShape shape({0});
Tensor t(DT_FLOAT, shape);
TensorHandle* handle = TensorHandle::CreateLocalHandle(
std::move(t), local_device_, local_device_, ctx_);
const uint64 op_id = 2;
const int output_num = 3;
TF_ASSERT_OK(handle->AddUnshapedRemoteMirror(remote_device_, op_id,
output_num, "", ctx_));
TF_ASSERT_OK(
handle->SetRemoteShape(shape, remote_device_, ctx_->GetContextViewId()));
RemoteTensorHandle remote_handle;
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_,
remote_device_->name()));
EXPECT_EQ(op_id, remote_handle.op_id());
EXPECT_EQ(output_num, remote_handle.output_num());
EXPECT_EQ(remote_device_->name(), remote_handle.device());
handle->Unref();
}
TEST_F(RemoteMgrTest, SerializeRemoteTensorHandle) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateLazyRemoteHandle(
op_id, output_num, DT_FLOAT, remote_device_, true, ctx_);
RemoteTensorHandle remote_handle;
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_));
EXPECT_EQ(op_id, remote_handle.op_id());
EXPECT_EQ(output_num, remote_handle.output_num());
EXPECT_EQ(remote_device_->name(), remote_handle.device());
handle->Unref();
}
TEST_F(RemoteMgrTest, InvalidateRemoteMirrorWithClusterUpdate) {
RemoteMgr remote_mgr(false, ctx_);
Tensor t(DT_FLOAT, TensorShape({0}));
TensorHandle* handle = TensorHandle::CreateLocalHandle(
std::move(t), local_device_, local_device_, ctx_);
const uint64 op_id = 2;
const int output_num = 3;
TF_ASSERT_OK(handle->AddUnshapedRemoteMirror(remote_device_, op_id,
output_num, "", ctx_));
EXPECT_TRUE(
handle->HasRemoteMirror(remote_device_, ctx_->GetContextViewId()));
ctx_->IncrementContextViewId();
EXPECT_FALSE(
handle->HasRemoteMirror(remote_device_, ctx_->GetContextViewId()));
EXPECT_FALSE(handle
->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId())
.ok());
handle->Unref();
}
TEST_F(RemoteMgrTest, SetRemoteShapeWithClusterUpdate) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateUnshapedRemoteHandle(
op_id, output_num,
"", DT_FLOAT, remote_device_, ctx_);
TF_ASSERT_OK(handle->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId()));
handle->Unref();
handle = TensorHandle::CreateUnshapedRemoteHandle(
op_id, output_num,
"", DT_FLOAT, remote_device_, ctx_);
ctx_->IncrementContextViewId();
TF_ASSERT_OK(handle->SetRemoteShape(TensorShape({0}), remote_device_,
ctx_->GetContextViewId()));
handle->Unref();
}
TEST_F(RemoteMgrTest, ErrorSourcesShouldExist) {
RemoteMgr remote_mgr(false, ctx_);
const uint64 op_id = 3;
const int output_num = 1;
TensorHandle* handle = TensorHandle::CreateLazyRemoteHandle(
op_id, output_num, DT_FLOAT, remote_device_, true, ctx_);
RemoteTensorHandle remote_handle;
remote_mgr.AddOperationOutput(handle, op_id, output_num);
TF_ASSERT_OK(remote_mgr.SerializeRemoteTensorHandle(
handle, true, &remote_handle, remote_device_));
auto remote_handle_internal = RemoteTensorHandleInternal(remote_handle);
TF_ASSERT_OK(remote_mgr.DeleteTensorHandle(remote_handle_internal));
Status s = remote_mgr.DeleteTensorHandle(remote_handle_internal);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
TensorHandle* out;
s = remote_mgr.GetTensorHandle(remote_handle_internal, &out);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
s = remote_mgr.DeserializeRemoteTensorHandle(remote_handle, &out);
EXPECT_FALSE(s.ok());
EXPECT_TRUE(s.GetPayload(kErrorSource).has_value());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/remote_mgr.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/eager/remote_mgr_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
548583df-a9ce-42fe-a3f2-1f92da313ff7 | cpp | tensorflow/tensorflow | conditional_simplifier | third_party/xla/xla/service/conditional_simplifier.cc | third_party/xla/xla/service/conditional_simplifier_test.cc | #include "xla/service/conditional_simplifier.h"
#include <iterator>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/call_graph.h"
#include "xla/service/call_inliner.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace {
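// Returns true if the computation only forwards values (parameters, tuples and
// get-tuple-elements) and its root shape contains at least one array.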
bool ComputationIsEmptyWithArrayRoot(const HloComputation* computation) {
bool empty_operations = absl::c_all_of(
computation->MakeInstructionPostOrder(),
HloPredicateIsOp<HloOpcode::kTuple, HloOpcode::kGetTupleElement,
HloOpcode::kParameter>);
bool contains_array = false;
ShapeUtil::ForEachSubshape(computation->root_instruction()->shape(),
[&](const Shape& shape, const ShapeIndex& index) {
if (shape.IsArray()) {
contains_array = true;
}
});
return empty_operations && contains_array;
}
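// Shrinks the tuple parameter of a branch computation (and the corresponding
// conditional operands) down to the elements that are actually read.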
absl::StatusOr<bool> TryRemoveUnusedConditionalOperands(
HloComputation* computation,
const absl::flat_hash_set<HloInstruction*>& calling_conditionals) {
HloInstruction* param = computation->parameter_instruction(0);
if (param == computation->root_instruction()) {
return false;
}
if (!param->shape().IsTuple()) {
return false;
}
std::set<int64_t> tuple_indices_to_keep;
for (HloInstruction* user : param->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
return false;
}
tuple_indices_to_keep.insert(user->tuple_index());
}
int64_t old_tuple_element_count =
ShapeUtil::TupleElementCount(param->shape());
if (tuple_indices_to_keep.size() == old_tuple_element_count) {
return false;
}
std::vector<const Shape*> new_tuple_shapes;
new_tuple_shapes.reserve(tuple_indices_to_keep.size());
std::vector<int64_t> map(old_tuple_element_count, -1);
for (int64_t i : tuple_indices_to_keep) {
map[i] = new_tuple_shapes.size();
new_tuple_shapes.push_back(¶m->shape().tuple_shapes(i));
}
Shape tuple_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_tuple_shapes);
HloComputation* new_computation =
computation->parent()->AddEmbeddedComputation(computation->Clone());
param = new_computation->parameter_instruction(0);
*param->mutable_shape() = tuple_shape;
for (HloInstruction* user : param->users()) {
user->set_tuple_index(map[user->tuple_index()]);
}
for (HloInstruction* conditional : calling_conditionals) {
if (conditional->has_sharding()) {
continue;
}
for (int64_t branch = 0; branch < conditional->branch_count(); ++branch) {
if (conditional->branch_computation(branch) != computation) {
continue;
}
conditional->set_branch_computation(branch, new_computation);
const Shape& old_shape = conditional->operand(branch + 1)->shape();
std::vector<HloInstruction*> new_tuple_operands;
new_tuple_operands.reserve(tuple_indices_to_keep.size());
for (int64_t i : tuple_indices_to_keep) {
new_tuple_operands.push_back(conditional->parent()->AddInstruction(
HloInstruction::CreateGetTupleElement(
old_shape.tuple_shapes(i),
conditional->mutable_operand(branch + 1), i)));
}
HloInstruction* new_tuple = conditional->parent()->AddInstruction(
HloInstruction::CreateTuple(new_tuple_operands));
TF_RETURN_IF_ERROR(
conditional->ReplaceOperandWithDifferentShape(branch + 1, new_tuple));
CHECK(ShapeUtil::Compatible(conditional->operand(branch + 1)->shape(),
conditional->branch_computation(branch)
->parameter_instruction(0)
->shape()));
CHECK(ShapeUtil::Compatible(
conditional->shape(),
conditional->branch_computation(branch)->root_instruction()->shape()))
<< conditional->branch_computation(branch)->ToString();
}
}
return true;
}
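// If the conditional's result is never used and it is not the computation
// root, rewrites every branch root and the conditional's shape to an empty
// tuple.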
bool ReplaceRootWithEmptyTupleIfNoUsers(HloInstruction* conditional_op) {
const Shape empty_tuple = ShapeUtil::MakeTupleShape({});
if (conditional_op->user_count() == 0 &&
conditional_op != conditional_op->parent()->root_instruction() &&
!ShapeUtil::Compatible(empty_tuple, conditional_op->shape())) {
for (int64_t branch_id = 0; branch_id < conditional_op->branch_count();
++branch_id) {
auto branch_computation =
conditional_op->GetModule()->AddEmbeddedComputation(
conditional_op->branch_computation(branch_id)->Clone());
conditional_op->set_branch_computation(branch_id, branch_computation);
auto new_empty_root =
branch_computation->AddInstruction(HloInstruction::CreateTuple({}));
branch_computation->set_root_instruction(new_empty_root,
true);
}
*conditional_op->mutable_shape() = empty_tuple;
return true;
}
return false;
}
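// Drops result tuple elements that no get-tuple-element user reads, cloning
// each branch and rebuilding its root tuple from the surviving elements.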
bool RemoveUnusedTupleElements(HloInstruction* conditional_op) {
if (conditional_op->user_count() == 0 ||
conditional_op == conditional_op->parent()->root_instruction() ||
!conditional_op->shape().IsTuple()) {
VLOG(3) << "Skip RemoveUnusedTupleElements due to non-tuple result:\n"
<< conditional_op->ToShortString();
return false;
}
const int old_tuple_shapes_size = conditional_op->shape().tuple_shapes_size();
std::vector<bool> used_indices(old_tuple_shapes_size, false);
for (const HloInstruction* user : conditional_op->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(3) << "Skip RemoveUnusedTupleElements due to non-GTE user:\n"
<< user->ToShortString();
return false;
}
used_indices[user->tuple_index()] = true;
}
const int new_tuple_shapes_size =
std::count(used_indices.begin(), used_indices.end(), true);
if (new_tuple_shapes_size == old_tuple_shapes_size) {
VLOG(3) << "Skip RemoveUnusedTupleElements due to every index is in use.";
return false;
}
absl::flat_hash_map<int, int> new_to_old_mapping, old_to_new_mapping;
auto old_iter = used_indices.begin();
for (int new_index = 0; new_index < new_tuple_shapes_size; ++new_index) {
old_iter = std::find(old_iter, used_indices.end(), true);
const int old_index = std::distance(used_indices.begin(), old_iter);
new_to_old_mapping[new_index] = old_index;
old_to_new_mapping[old_index] = new_index;
++old_iter;
}
const Shape old_shape = conditional_op->shape();
std::vector<const Shape*> new_tuple_shapes;
new_tuple_shapes.reserve(new_tuple_shapes_size);
for (int new_index = 0; new_index < new_tuple_shapes_size; ++new_index) {
new_tuple_shapes.push_back(
&old_shape.tuple_shapes(new_to_old_mapping[new_index]));
}
const Shape new_shape = ShapeUtil::MakeTupleShapeWithPtrs(new_tuple_shapes);
for (HloComputation* branch : conditional_op->branch_computations()) {
const HloInstruction* root = branch->root_instruction();
if (!root->shape().IsTuple() ||
!ShapeUtil::Compatible(branch->root_instruction()->shape(),
old_shape)) {
VLOG(3) << "Skip RemoveUnusedTupleElements due to some branch "
<< branch->name() << " has in-compatible root shape, expect "
<< old_shape.ToString() << ", but got "
<< root->shape().ToString() << "\n"
<< conditional_op->ToString();
return false;
}
}
for (int branch_id = 0; branch_id < conditional_op->branch_count();
++branch_id) {
HloComputation* old_branch = conditional_op->branch_computation(branch_id);
HloComputation* cloned_branch =
conditional_op->GetModule()->AddEmbeddedComputation(
old_branch->Clone());
conditional_op->set_branch_computation(branch_id, cloned_branch);
HloInstruction* old_root = cloned_branch->root_instruction();
std::vector<HloInstruction*> new_tuple_root_operands;
for (int old_index = 0; old_index < old_tuple_shapes_size; ++old_index) {
if (used_indices[old_index]) {
new_tuple_root_operands.push_back(
cloned_branch->AddInstruction(HloInstruction::CreateGetTupleElement(
old_shape.tuple_shapes(old_index), old_root, old_index)));
}
}
HloInstruction* new_tuple_root = cloned_branch->AddInstruction(
HloInstruction::CreateTuple(new_tuple_root_operands));
cloned_branch->set_root_instruction(new_tuple_root,
true);
}
*conditional_op->mutable_shape() = new_shape;
for (HloInstruction* user : conditional_op->users()) {
const int old_index = user->tuple_index();
const int new_index = old_to_new_mapping[old_index];
user->set_tuple_index(new_index);
}
return true;
}
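// If several result tuple indices are produced by identical operands in every
// branch, redirects get-tuple-element users of the later indices to the first
// equivalent one.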
bool MergeDuplicateTupleElements(HloInstruction* conditional) {
if (conditional->user_count() == 0 ||
conditional == conditional->parent()->root_instruction() ||
!conditional->shape().IsTuple()) {
VLOG(3) << "Skip MergeDuplicateTupleElements due not tuple shape nor root "
"instruction:\n"
<< conditional->ToShortString();
return false;
}
for (const HloInstruction* user : conditional->users()) {
if (user->opcode() != HloOpcode::kGetTupleElement) {
VLOG(3) << "Skip MergeDuplicateTupleElements due not all users are "
"kGetTupleElement:\n"
<< conditional->ToShortString();
return false;
}
}
for (const HloComputation* branch : conditional->branch_computations()) {
if (branch->root_instruction()->opcode() != HloOpcode::kTuple) {
VLOG(3) << "Skip MergeDuplicateTupleElements due not all branch roots "
"are kTuple:\n"
<< conditional->ToShortString();
return false;
}
}
auto vectorize_branches_root_tuple_ith_operand = [conditional](int64_t i) {
std::vector<const HloInstruction*> operands;
absl::c_transform(conditional->branch_computations(),
std::back_inserter(operands),
[i](const HloComputation* branch) {
return branch->root_instruction()->operand(i);
});
return operands;
};
auto replace_root_user_gte_jth_with_gte_ith = [conditional](int64_t i,
int64_t j) {
bool changed = false;
for (HloInstruction* user : conditional->users()) {
if (user->tuple_index() == j) {
user->set_tuple_index(i);
changed |= true;
}
}
return changed;
};
bool changed = false;
absl::flat_hash_map<std::vector<const HloInstruction*>, int64_t>
index_collision_table;
for (int i = 0; i < conditional->shape().tuple_shapes_size(); ++i) {
const std::vector<const HloInstruction*> ith_operands_vector =
vectorize_branches_root_tuple_ith_operand(i);
const auto emplace_res =
index_collision_table.emplace(ith_operands_vector, i);
if (!emplace_res.second) {
changed |=
replace_root_user_gte_jth_with_gte_ith(emplace_res.first->second, i);
}
}
return changed;
}
}
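// Replaces a conditional with an inlined call when its branch index is a
// compile-time constant (or there is only one branch), or, when both branches
// are cheap, with calls to both branches combined by a select on the
// predicate.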
absl::StatusOr<bool> ConditionalSimplifier::TryRemoveConditional(
HloInstruction* conditional) {
CHECK_EQ(conditional->opcode(), HloOpcode::kConditional);
if (!conditional->parent()->IsSafelyRemovable(conditional) ||
conditional->HasSideEffect()) {
VLOG(2) << "Not attempting to remove conditional as it is not removable or "
"has side effect: "
<< conditional->ToShortString();
return false;
}
auto computation = conditional->parent();
auto create_call = [&](int64_t branch) {
auto call = computation->AddInstruction(HloInstruction::CreateCall(
conditional->shape(), {conditional->mutable_operand(1 + branch)},
conditional->branch_computation(branch)));
conditional->SetupDerivedInstruction(call);
return call;
};
if (conditional->branch_count() == 1) {
HloInstruction* call_op = create_call(0);
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, call_op));
TF_RETURN_IF_ERROR(CallInliner::Inline(call_op).status());
return true;
}
if (conditional->operand(0)->opcode() == HloOpcode::kConstant) {
int branch_index = 0;
if (conditional->operand(0)->shape().element_type() == PRED) {
branch_index = conditional->operand(0)->literal().Get<bool>({}) ? 0 : 1;
} else {
branch_index = conditional->operand(0)->literal().Get<int32_t>({});
if (branch_index < 0 || branch_index >= conditional->branch_count()) {
branch_index = conditional->branch_count() - 1;
}
}
HloInstruction* call_op = create_call(branch_index);
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(conditional, call_op));
TF_RETURN_IF_ERROR(CallInliner::Inline(call_op).status());
return true;
}
auto instruction_is_expensive = [](const HloInstruction* hlo) {
switch (hlo->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kGetTupleElement:
case HloOpcode::kReduce:
case HloOpcode::kReshape:
case HloOpcode::kPad:
case HloOpcode::kParameter:
case HloOpcode::kSlice:
case HloOpcode::kTuple:
return false;
default:
return !hlo->IsElementwise();
}
};
if (conditional->branch_count() != 2 ||
conditional->operand(0)->shape().element_type() != PRED ||
absl::c_any_of(conditional->branch_computation(0)->instructions(),
instruction_is_expensive) ||
absl::c_any_of(conditional->branch_computation(1)->instructions(),
instruction_is_expensive)) {
VLOG(2)
<< "Not attempting to remove conditional as its branch_index is not a "
"compile-time constant or contains expensive instructions: "
<< conditional->ToShortString();
return false;
}
bool branch_empty =
ComputationIsEmptyWithArrayRoot(conditional->branch_computation(0)) ||
ComputationIsEmptyWithArrayRoot(conditional->branch_computation(1));
if (branch_empty) {
return false;
}
HloInstruction* true_call_op = create_call(0);
HloInstruction* false_call_op = create_call(1);
auto condition_broadcast = [&](const Shape& shape) {
if (ShapeUtil::IsScalar(shape)) {
return conditional->mutable_operand(0);
}
Shape new_shape = ShapeUtil::ChangeElementType(shape, PRED);
UpdateLayout(&new_shape);
return computation->AddInstruction(HloInstruction::CreateBroadcast(
new_shape, conditional->mutable_operand(0), {}));
};
auto gte = [&](HloInstruction* hlo, int64_t i) {
return computation->AddInstruction(HloInstruction::CreateGetTupleElement(
hlo->shape().tuple_shapes(i), hlo, i));
};
std::function<HloInstruction*(HloInstruction*, HloInstruction*)> select =
[&](HloInstruction* t, HloInstruction* f) {
if (f->shape().IsToken()) {
return computation->AddInstruction(
HloInstruction::CreateAfterAll({t, f}));
}
if (f->shape().IsArray()) {
return computation->AddInstruction(HloInstruction::CreateTernary(
f->shape(), HloOpcode::kSelect, condition_broadcast(f->shape()),
t, f));
}
std::vector<HloInstruction*> selects;
const int64_t tuple_element_count =
ShapeUtil::TupleElementCount(f->shape());
selects.reserve(tuple_element_count);
for (int64_t i = 0; i < tuple_element_count; ++i) {
selects.push_back(select(gte(t, i), gte(f, i)));
}
return computation->AddInstruction(
HloInstruction::CreateTuple(selects));
};
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(
conditional, select(true_call_op, false_call_op)));
TF_RETURN_IF_ERROR(CallInliner::Inline(false_call_op).status());
TF_RETURN_IF_ERROR(CallInliner::Inline(true_call_op).status());
return true;
}
static bool ComputationCallsChannelInstructions(
const HloComputation& computation) {
std::vector<const HloComputation*> worklist = {&computation};
while (!worklist.empty()) {
const HloComputation* work = worklist.back();
worklist.pop_back();
for (const HloInstruction* instruction : work->instructions()) {
if (DynCast<HloChannelInstruction>(instruction) != nullptr) {
return true;
}
worklist.insert(worklist.end(),
instruction->called_computations().begin(),
instruction->called_computations().end());
}
}
return false;
}
static bool InstructionCallsChannelInstructions(
const HloInstruction& instruction) {
for (const HloComputation* called_computation :
instruction.called_computations()) {
if (ComputationCallsChannelInstructions(*called_computation)) {
return true;
}
}
return false;
}
absl::StatusOr<bool> ConditionalSimplifier::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
3, "ConditionalSimplifier::Run(), before:\n" + module->ToString());
bool changed = false;
std::vector<HloInstruction*> conditional_ops;
for (auto* comp : module->computations(execution_threads)) {
for (auto* instr : comp->MakeInstructionPostOrder()) {
if (instr->opcode() == HloOpcode::kConditional) {
if (InstructionCallsChannelInstructions(*instr)) {
continue;
}
if (instr->has_sharding()) {
continue;
}
conditional_ops.push_back(instr);
}
}
}
absl::flat_hash_set<HloInstruction*> removed_conditionals;
for (HloInstruction* conditional_op : conditional_ops) {
changed |= MergeDuplicateTupleElements(conditional_op);
changed |= RemoveUnusedTupleElements(conditional_op);
changed |= ReplaceRootWithEmptyTupleIfNoUsers(conditional_op);
TF_ASSIGN_OR_RETURN(bool result, TryRemoveConditional(conditional_op));
if (result) {
removed_conditionals.insert(conditional_op);
changed = true;
}
}
absl::flat_hash_map<HloComputation*, absl::flat_hash_set<HloInstruction*>>
calling_conditionals;
std::vector<HloComputation*> calling_computationals_vector;
for (HloInstruction* conditional : conditional_ops) {
if (removed_conditionals.contains(conditional)) {
continue;
}
for (int64_t branch = 0; branch < conditional->branch_count(); ++branch) {
auto* branch_comp = conditional->branch_computation(branch);
if (!calling_conditionals.contains(branch_comp)) {
calling_computationals_vector.push_back(branch_comp);
}
calling_conditionals[branch_comp].insert(conditional);
}
}
for (auto* comp : calling_computationals_vector) {
auto entry = calling_conditionals.find(comp);
CHECK(entry != calling_conditionals.end());
TF_ASSIGN_OR_RETURN(bool result, TryRemoveUnusedConditionalOperands(
entry->first, entry->second));
changed |= result;
}
XLA_VLOG_LINES(3,
"ConditionalSimplifier::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/conditional_simplifier.h"
#include <string>
#include <utility>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal_util.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
namespace op = xla::testing::opcode_matchers;
class ConditionalSimplifierTest : public HloTestBase {
public:
HloComputation* MakeConditional(HloModule* module, bool is_constant = true);
};
HloComputation* ConditionalSimplifierTest::MakeConditional(HloModule* module,
bool is_constant) {
HloComputation::Builder builder(TestName());
HloComputation* true_computation;
{
HloComputation::Builder true_computation_builder(TestName() +
".true_computation");
auto param =
true_computation_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "param"));
auto one = true_computation_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
true_computation_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, one));
true_computation =
module->AddEmbeddedComputation(true_computation_builder.Build());
}
HloComputation* false_computation;
{
HloComputation::Builder false_computation_builder(TestName() +
".false_computation");
auto param = false_computation_builder.AddInstruction(
HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(S32, {}),
"param"));
auto forty_two = false_computation_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(42)));
false_computation_builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(S32, {}), HloOpcode::kAdd, param, forty_two));
false_computation =
module->AddEmbeddedComputation(false_computation_builder.Build());
}
auto false_instrn = builder.AddInstruction(
is_constant
? HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false))
: HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(PRED, {}),
"cond"));
auto false_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "false_param"));
auto one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
builder.AddInstruction(HloInstruction::CreateConditional(
ShapeUtil::MakeShape(S32, {}), false_instrn, one, true_computation,
false_param, false_computation));
return module->AddEntryComputation(builder.Build());
}
TEST_F(ConditionalSimplifierTest, ConditionalGetsInlined) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
ASSERT_TRUE(ConditionalSimplifier().Run(m.get()).value());
EXPECT_THAT(computation->root_instruction(),
op::Add(op::Parameter(), op::Constant()));
}
TEST_F(ConditionalSimplifierTest, BranchGetsInlined) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get(), false);
ASSERT_TRUE(ConditionalSimplifier().Run(m.get()).value());
EXPECT_THAT(
computation->root_instruction(),
op::Select(op::Parameter(1), op::Add(op::Constant(), op::Constant()),
op::Add(op::Parameter(0), op::Constant())));
}
TEST_F(ConditionalSimplifierTest, ConditionalWithControlDependency) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
auto* true_op = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
TF_ASSERT_OK(
true_op->AddControlDependencyTo(computation->root_instruction()));
EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsSend) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
auto* conditional = computation->root_instruction();
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* true_computation = conditional->true_computation();
auto* token = true_computation->AddInstruction(HloInstruction::CreateToken());
auto* send = true_computation->AddInstruction(HloInstruction::CreateSend(
true_computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true))),
token, 0));
true_computation->AddInstruction(HloInstruction::CreateSendDone(send));
EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsRecv) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
auto* conditional = computation->root_instruction();
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* true_computation = conditional->true_computation();
auto* token = true_computation->AddInstruction(HloInstruction::CreateToken());
auto* recv = true_computation->AddInstruction(HloInstruction::CreateRecv(
ShapeUtil::MakeShape(F32, {1}), token, 0));
true_computation->AddInstruction(HloInstruction::CreateRecvDone(recv));
EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
TEST_F(ConditionalSimplifierTest, NotRemovedIfContainsNonRemovableInstruction) {
auto m = CreateNewVerifiedModule();
HloComputation* computation = MakeConditional(m.get());
auto* conditional = computation->root_instruction();
ASSERT_EQ(conditional->opcode(), HloOpcode::kConditional);
auto* false_computation = conditional->false_computation();
auto token = false_computation->AddInstruction(HloInstruction::CreateToken());
false_computation->AddInstruction(HloInstruction::CreateInfeed(
ShapeUtil::MakeShape(F32, {1}), token, "config"));
EXPECT_FALSE(ConditionalSimplifier().Run(m.get()).value());
}
TEST_F(ConditionalSimplifierTest, TrivalOperandsRemoved) {
absl::string_view hlo_string =
R"(
HloModule UnusedTupleOperands
on_false {
t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) parameter(0)
lhs = f32[20,40] get-tuple-element(t), index=0
rhs = f32[40,40] get-tuple-element(t), index=1
dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT result = (f32[20,40]) tuple(dot)
}
on_true {
t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) parameter(0)
lhs = f32[20,40] get-tuple-element(t), index=2
rhs = f32[40,40] get-tuple-element(t), index=3
dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT result = (f32[20,40]) tuple(dot)
}
ENTRY main {
c0_0 = f32[20,40] parameter(0)
c0_1 = f32[40,40] parameter(1)
c1_0 = f32[20,40] parameter(2)
c1_1 = f32[40,40] parameter(3)
p = pred[] parameter(4)
t = (f32[20,40], f32[40,40], f32[20,40], f32[40,40]) tuple(c0_0, c0_1, c1_0, c1_1)
call = (f32[20,40]) call(t), to_apply=on_true
ROOT result = (f32[20,40]) conditional(p,t,t), false_computation=on_false, true_computation=on_true
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
std::unique_ptr<HloModule> module = std::move(status).value();
HloVerifier v(false, false);
TF_ASSERT_OK(v.Run(module.get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
TF_ASSERT_OK(v.Run(module.get()).status());
HloInstruction* conditional = module->entry_computation()->root_instruction();
EXPECT_TRUE(conditional != nullptr);
EXPECT_EQ(conditional->operand(1)->shape().tuple_shapes().size(), 2);
EXPECT_EQ(conditional->operand(2)->shape().tuple_shapes().size(), 2);
HloInstruction* call = FindInstruction(module.get(), "call");
EXPECT_EQ(
call->to_apply()->parameter_instruction(0)->shape().tuple_shapes().size(),
4);
}
TEST_F(ConditionalSimplifierTest,
TwoConditionalsCreatedInReversedLexicalOrder) {
absl::string_view hlo_string = R"(
HloModule DeadConditional
computation.1 {
param.1 = s64[] parameter(0)
constant.1 = s64[] constant(1)
ROOT add.1 = s64[] add(param.1, constant.1)
}
computation.2 {
param.2 = s64[] parameter(0)
constant.2 = s64[] constant(2)
ROOT add.2 = s64[] add(param.2, constant.2)
}
computation.3 {
param.3 = s64[] parameter(0)
constant.3 = s64[] constant(3)
ROOT add.3 = s64[] add(param.3, constant.3)
}
computation.4 {
param.4 = s64[] parameter(0)
constant.4 = s64[] constant(4)
ROOT add.4 = s64[] add(param.4, constant.4)
}
ENTRY KernelEntry {
param.1 = s64[] parameter(0)
param.2 = s64[] parameter(1)
param.3 = s64[] parameter(2)
param.4 = pred[] parameter(3)
conditional_1 = s64[] conditional(param.4, param.3, param.2),
true_computation=computation.3, false_computation=computation.4
constant.1 = pred[] constant(false)
ROOT conditional_2 = s64[] conditional(constant.1, conditional_1,
param.1), true_computation=computation.1,
false_computation=computation.2
})";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
std::unique_ptr<HloModule> module = std::move(status).value();
HloVerifier v(false, false);
TF_ASSERT_OK(v.Run(module.get()).status());
HloInstruction* conditional_1 =
FindInstruction(module.get(), "conditional_1");
HloInstruction* conditional_1_clone =
conditional_1->parent()->AddInstruction(conditional_1->Clone());
TF_ASSERT_OK(conditional_1->ReplaceAllUsesWith(conditional_1_clone));
TF_ASSERT_OK(conditional_1->parent()->RemoveInstruction(conditional_1));
EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
}
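// The conditional's result is unused, so the simplifier strips the dead root
// elements and the conditional ends up returning a zero-element tuple.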
TEST_F(ConditionalSimplifierTest, RemoveDeadRoots) {
absl::string_view hlo_string =
R"(
HloModule RemoveDeadRoots
on_false {
t = (f32[20,40], f32[40,40]) parameter(0)
lhs = f32[20,40] get-tuple-element(t), index=0
rhs = f32[40,40] get-tuple-element(t), index=1
dot = f32[20,40] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}
after-all = token[] after-all()
outfeed = token[] outfeed(dot, after-all)
ROOT result = (f32[20,40]) tuple(dot)
}
on_true {
t = (f32[20,40], f32[40,40]) parameter(0)
lhs = f32[20,40] get-tuple-element(t), index=0
add = f32[20,40] add(lhs, lhs)
ROOT result = (f32[20,40]) tuple(add)
}
ENTRY main {
c0_0 = f32[20,40] parameter(0)
c0_1 = f32[40,40] parameter(1)
p = pred[] parameter(2)
t = (f32[20,40], f32[40,40]) tuple(c0_0, c0_1)
conditional = (f32[20, 40]) conditional(p,t,t), false_computation=on_false, true_computation=on_true
ROOT result = () tuple()
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
HloVerifier v(false, false);
TF_ASSERT_OK(v.Run(status.value().get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
TF_ASSERT_OK(v.Run(status.value().get()).status());
HloInstruction* conditional =
FindInstruction(status.value().get(), "conditional");
EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 0);
}
TEST_F(ConditionalSimplifierTest, SecondTupleElementUnusedAndRemoved) {
absl::string_view hlo_string =
R"(
HloModule SecondTupleElementUnusedAndRemoved
on_true {
arg_tuple.7 = (f32[10,10]{1,0}) parameter(0)
get-tuple-element.9 = f32[10,10]{1,0} get-tuple-element(arg_tuple.7), index=0
copy = f32[10,10]{1,0} copy(get-tuple-element.9)
ROOT tuple.6 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(copy, get-tuple-element.9)
}
on_false {
constant.17 = f32[] constant(0)
constant.18 = f32[] constant(1)
rng.19 = f32[10,10]{1,0} rng(constant.17, constant.18), distribution=rng_uniform
arg_tuple.14 = (f32[10,10]{1,0}) parameter(0)
get-tuple-element.16 = f32[10,10]{1,0} get-tuple-element(arg_tuple.14), index=0
ROOT tuple.7 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(rng.19, get-tuple-element.16)
}
ENTRY main {
constant.38 = pred[] constant(true)
arg_tuple.30 = (s32[], f32[10,10]{1,0}) parameter(0)
get-tuple-element.21 = f32[10,10]{1,0} get-tuple-element(arg_tuple.30), index=1
tuple.1 = (f32[10,10]{1,0}) tuple(get-tuple-element.21)
conditional = (f32[10,10]{1,0}, f32[10,10]{1,0}) conditional(constant.38, tuple.1, tuple.1), true_computation=on_true, false_computation=on_false
get-first-index = f32[10,10]{1,0} get-tuple-element(conditional), index=0
ROOT result = (f32[10,10]{1,0}) tuple(get-first-index)
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
HloVerifier v(false, false);
TF_ASSERT_OK(v.Run(status.value().get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
TF_ASSERT_OK(v.Run(status.value().get()).status());
const HloInstruction* conditional =
FindInstruction(status.value().get(), "conditional");
EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
}
TEST_F(ConditionalSimplifierTest, FirstTupleElementUnusedAndRemoved) {
absl::string_view hlo_string =
R"(
HloModule FirstTupleElementUnusedAndRemoved
on_true {
arg_tuple.7 = (f32[10,10]{1,0}) parameter(0)
get-tuple-element.9 = f32[10,10]{1,0} get-tuple-element(arg_tuple.7), index=0
copy = f32[10,10]{1,0} copy(get-tuple-element.9)
ROOT tuple.6 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(copy, get-tuple-element.9)
}
on_false {
constant.17 = f32[] constant(0)
constant.18 = f32[] constant(1)
rng.19 = f32[10,10]{1,0} rng(constant.17, constant.18), distribution=rng_uniform
arg_tuple.14 = (f32[10,10]{1,0}) parameter(0)
get-tuple-element.16 = f32[10,10]{1,0} get-tuple-element(arg_tuple.14), index=0
ROOT tuple.7 = (f32[10,10]{1,0}, f32[10,10]{1,0}) tuple(rng.19, get-tuple-element.16)
}
ENTRY main {
constant.38 = pred[] constant(true)
arg_tuple.30 = (s32[], f32[10,10]{1,0}) parameter(0)
get-tuple-element.21 = f32[10,10]{1,0} get-tuple-element(arg_tuple.30), index=1
tuple.1 = (f32[10,10]{1,0}) tuple(get-tuple-element.21)
conditional = (f32[10,10]{1,0}, f32[10,10]{1,0}) conditional(constant.38, tuple.1, tuple.1), true_computation=on_true, false_computation=on_false
get-second-index = f32[10,10]{1,0} get-tuple-element(conditional), index=1
ROOT result = (f32[10,10]{1,0}) tuple(get-second-index)
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
HloVerifier v(false, false);
TF_ASSERT_OK(v.Run(status.value().get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
TF_ASSERT_OK(v.Run(status.value().get()).status());
const HloInstruction* conditional =
FindInstruction(status.value().get(), "conditional");
EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
}
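// Both branches return the same value in both tuple positions, so the result
// tuple is collapsed to a single element and both get-tuple-elements are
// rewired to index 0.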
TEST_F(ConditionalSimplifierTest, MergeDuplicateTupleElements) {
absl::string_view hlo_string =
R"(
HloModule MergeDuplicateTupleElements
on_true {
param-true = (f32[]) parameter(0)
gte-true = f32[] get-tuple-element(param-true), index=0
ROOT tuple-true = (f32[], f32[]) tuple(gte-true, gte-true)
}
on_false {
param-false = (f32[]) parameter(0)
constant.0 = f32[] constant(0)
constant.1 = f32[] constant(1)
rng = f32[] rng(constant.0, constant.1), distribution=rng_uniform
ROOT tuple-false = (f32[], f32[]) tuple(rng, rng)
}
ENTRY main {
comp = pred[] parameter(0)
arg = (f32[]) parameter(1)
conditional = (f32[], f32[]) conditional(comp, arg, arg), true_computation=on_true, false_computation=on_false
gte.0 = f32[] get-tuple-element(conditional), index=0
gte.1 = f32[] get-tuple-element(conditional), index=1
ROOT add = f32[] add(gte.0, gte.1)
}
)";
auto status = ParseAndReturnVerifiedModule(hlo_string);
TF_ASSERT_OK(status.status());
HloVerifier v(false, false);
TF_ASSERT_OK(v.Run(status.value().get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(status.value().get()).value());
TF_ASSERT_OK(v.Run(status.value().get()).status());
const HloInstruction* conditional =
FindInstruction(status.value().get(), "conditional");
EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);
const HloInstruction* gte_0 = FindInstruction(status.value().get(), "gte.0");
const HloInstruction* gte_1 = FindInstruction(status.value().get(), "gte.1");
EXPECT_EQ(gte_0->tuple_index(), 0);
EXPECT_EQ(gte_1->tuple_index(), 0);
}
TEST_F(ConditionalSimplifierTest, SimplifyConditionalWithTokens) {
absl::string_view hlo_string =
R"(
HloModule SimplifyConditionalWithTokens
true_comp {
ROOT parameter.13 = (token[]) parameter(0)
}
false_comp {
ROOT parameter.21 = (token[]) parameter(0)
}
ENTRY entry {
parameter.29 = pred[] parameter(0)
token.1 = token[] after-all()
token.2 = token[] after-all()
tuple.3 = (token[]) tuple(token.1)
tuple.4 = (token[]) tuple(token.2)
ROOT conditional.5 = (token[]) conditional(parameter.29, tuple.3, tuple.4), true_computation=true_comp, false_computation=false_comp
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloVerifier v(false, false);
TF_ASSERT_OK(v.Run(module.get()).status());
EXPECT_TRUE(ConditionalSimplifier().Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Tuple(op::AfterAll(
op::GetTupleElement(op::Tuple(op::AfterAll()), 0),
op::GetTupleElement(op::Tuple(op::AfterAll()), 0))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_simplifier.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/conditional_simplifier_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1511abd5-098d-46f7-9426-d0179c2675d1 | cpp | tensorflow/tensorflow | scatter_nd_op | tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc | tensorflow/core/kernels/scatter_nd_op_test.cc | #include <functional>
#include "absl/status/status.h"
#include "tensorflow/compiler/tf2xla/lib/scatter.h"
#include "tensorflow/compiler/tf2xla/xla_helpers.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/xla_data.pb.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
namespace tensorflow {
namespace {
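// Checks the scatter-nd shape contract: updates.shape must equal
// indices.shape[:batch_dim] + buffer_shape[num_index_dims:], where
// num_index_dims is the size of the last dimension of `indices`. A scalar
// update is also accepted when `broadcast_scalar_update` is true.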
Status ValidateUpdateShape(const TensorShape& buffer_shape,
const TensorShape& indices_shape,
const TensorShape& updates_shape,
bool broadcast_scalar_update) {
if (indices_shape.dims() < 1) {
return errors::InvalidArgument(
"indices shape must have >= 1 dimension; got ",
indices_shape.DebugString());
}
const int64_t num_index_dims =
indices_shape.dim_size(indices_shape.dims() - 1);
const int64_t batch_dim = indices_shape.dims() - 1;
auto shape_err = [&]() {
return errors::InvalidArgument(
"Must have updates.shape = indices.shape[:batch_dim] + ",
"buffer_shape[num_index_dims:], got updates.shape: ",
updates_shape.DebugString(),
", indices.shape: ", indices_shape.DebugString(),
", buffer_shape: ", buffer_shape.DebugString(),
", num_index_dims: ", num_index_dims, ", and batch_dim: ", batch_dim);
};
if (updates_shape.dims() == 0 && broadcast_scalar_update) {
return absl::OkStatus();
}
if (updates_shape.dims() < batch_dim) return shape_err();
if (buffer_shape.dims() <
num_index_dims + (updates_shape.dims() - batch_dim)) {
return shape_err();
}
if (updates_shape.dims() !=
batch_dim + buffer_shape.dims() - num_index_dims) {
return shape_err();
}
for (int d = 0; d < batch_dim; ++d) {
if (updates_shape.dim_size(d) != indices_shape.dim_size(d)) {
return shape_err();
}
}
for (int d = 0; d < updates_shape.dims() - batch_dim; ++d) {
if (updates_shape.dim_size(d + batch_dim) !=
buffer_shape.dim_size(d + num_index_dims)) {
return shape_err();
}
}
return absl::OkStatus();
}
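// XLA kernel for ScatterNd: scatters `updates` at `indices` into a
// zero-initialized buffer of the constant `shape` input. Duplicate indices
// are combined with addition (logical-or for boolean values).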
class ScatterNdOp : public XlaOpKernel {
public:
explicit ScatterNdOp(OpKernelConstruction* context) : XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
DataType dtype = context->input_type(1);
TensorShape indices_shape = context->InputShape(0);
TensorShape updates_shape = context->InputShape(1);
TensorShape buffer_shape;
OP_REQUIRES_OK(context, context->ConstantInputAsShape(2, &buffer_shape));
OP_REQUIRES(
context, TensorShapeUtils::IsVectorOrHigher(buffer_shape),
errors::InvalidArgument("Output must be at least 1-D, ",
"got shape: ", buffer_shape.DebugString()));
OP_REQUIRES(
context,
buffer_shape.num_elements() > 0 || (indices_shape.num_elements() == 0 &&
updates_shape.num_elements() == 0),
errors::InvalidArgument(
"Indices and updates specified for empty output. indices shape: ",
indices_shape.DebugString()));
OP_REQUIRES_OK(
context, ValidateUpdateShape(buffer_shape, indices_shape, updates_shape,
false));
xla::XlaBuilder* builder = context->builder();
auto buffer = xla::Broadcast(XlaHelpers::Zero(builder, dtype),
buffer_shape.dim_sizes());
auto indices = context->Input(0);
auto updates = context->Input(1);
auto combine =
context->input_xla_type(1) == xla::PRED ? CombineBool : CombineNum;
auto result = XlaScatter(buffer, updates, indices,
true,
false,
combine, builder);
OP_REQUIRES_OK(context, result.status());
context->SetOutput(0, result.value());
}
private:
static xla::XlaOp CombineNum(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
(void)builder;
return xla::Add(x, y);
}
static xla::XlaOp CombineBool(const xla::XlaOp x, const xla::XlaOp y,
xla::XlaBuilder* builder) {
(void)builder;
return xla::Or(x, y);
}
};
REGISTER_XLA_OP(Name("ScatterNd").CompileTimeConstantInput("shape"),
ScatterNdOp);
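// Shared lowering for the TensorScatter* ops: validates the operand shapes and
// emits an XlaScatter that applies `combiner` to the current buffer value and
// the update at each index.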
void CompileTensorScatter(
XlaOpKernelContext* context,
const std::function<xla::XlaOp(xla::XlaOp, xla::XlaOp, xla::XlaBuilder*)>&
combiner,
bool broadcast_scalar_update) {
TensorShape buffer_shape = context->InputShape(0);
TensorShape indices_shape = context->InputShape(1);
TensorShape updates_shape = context->InputShape(2);
OP_REQUIRES(
context, TensorShapeUtils::IsVectorOrHigher(buffer_shape),
errors::InvalidArgument("Output must be at least 1-D, ",
"got shape: ", buffer_shape.DebugString()));
OP_REQUIRES(
context,
buffer_shape.num_elements() > 0 || (indices_shape.num_elements() == 0 &&
updates_shape.num_elements() == 0),
errors::InvalidArgument(
"Indices and updates specified for empty output. indices shape: ",
indices_shape.DebugString()));
OP_REQUIRES_OK(context,
ValidateUpdateShape(buffer_shape, indices_shape, updates_shape,
broadcast_scalar_update));
xla::XlaBuilder* builder = context->builder();
auto buffer = context->Input(0);
auto indices = context->Input(1);
auto updates = context->Input(2);
auto result = XlaScatter(buffer, updates, indices,
true,
false, combiner, builder);
OP_REQUIRES_OK(context, result.status());
context->SetOutput(0, result.value());
}
class TensorScatterAddOp : public XlaOpKernel {
public:
explicit TensorScatterAddOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
CompileTensorScatter(
context,
[](xla::XlaOp x, xla::XlaOp y, xla::XlaBuilder*) {
return xla::Add(x, y);
},
true);
}
};
class TensorScatterMaxOp : public XlaOpKernel {
public:
explicit TensorScatterMaxOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
CompileTensorScatter(
context,
[](xla::XlaOp x, xla::XlaOp y, xla::XlaBuilder*) {
return xla::Max(x, y);
},
false);
}
};
class TensorScatterMinOp : public XlaOpKernel {
public:
explicit TensorScatterMinOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
CompileTensorScatter(
context,
[](xla::XlaOp x, xla::XlaOp y, xla::XlaBuilder*) {
return xla::Min(x, y);
},
false);
}
};
class TensorScatterSubOp : public XlaOpKernel {
public:
explicit TensorScatterSubOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
CompileTensorScatter(
context,
[](xla::XlaOp x, xla::XlaOp y, xla::XlaBuilder*) {
return xla::Sub(x, y);
},
false);
}
};
class TensorScatterUpdateOp : public XlaOpKernel {
public:
explicit TensorScatterUpdateOp(OpKernelConstruction* context)
: XlaOpKernel(context) {}
void Compile(XlaOpKernelContext* context) override {
CompileTensorScatter(
context, [](xla::XlaOp, xla::XlaOp y, xla::XlaBuilder*) { return y; },
true);
}
};
REGISTER_XLA_OP(Name("TensorScatterAdd"), TensorScatterAddOp);
REGISTER_XLA_OP(Name("TensorScatterMax"), TensorScatterMaxOp);
REGISTER_XLA_OP(Name("TensorScatterMin"), TensorScatterMinOp);
REGISTER_XLA_OP(Name("TensorScatterSub"), TensorScatterSubOp);
REGISTER_XLA_OP(Name("TensorScatterUpdate"), TensorScatterUpdateOp);
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
class TensorScatterUpdateOpTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "TensorScatterUpdate")
.Input(FakeInput(variable_type))
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(TensorScatterUpdateOpTest, Simple_TwoD32) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
10002, 0, 0, 0, 777, 778, 779});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(TensorScatterUpdateOpTest, Simple_TwoD64) {
MakeOp(DT_FLOAT, DT_INT64);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int64_t>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
10002, 0, 0, 0, 777, 778, 779});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(TensorScatterUpdateOpTest, Simple_ZeroD) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 1}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({1, 1}), {3});
AddInputFromArray<float>(TensorShape({1, 1}), {101});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1}));
test::FillValues<float>(&expected, {0, 0, 0, 101, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(TensorScatterUpdateOpTest, Simple_OneD) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 1}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 101});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(TensorScatterUpdateOpTest, HigherRank) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({2, 3, 1}), {0, 4, 2, 1, 3, 6});
AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({8}));
test::FillValues<float>(&expected, {10, 40, 30, 50, 20, 0, 60, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(TensorScatterUpdateOpTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [99] does not index into shape [5,3]"))
<< s;
}
class TensorScatterUpdateOpErrorOnBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "TensorScatterUpdate")
.Input(FakeInput(variable_type))
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Attr("bad_indices_policy", "ERROR")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(TensorScatterUpdateOpErrorOnBadIndicesTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [99] does not index into shape [5,3]"))
<< s;
}
class TensorScatterUpdateOpIgnoreBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "TensorScatterUpdate")
.Input(FakeInput(variable_type))
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Attr("bad_indices_policy", "IGNORE")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(TensorScatterUpdateOpIgnoreBadIndicesTest, DropOutOfRangeIndices) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 1}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class ScatterNdUpdateOpTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_ref_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNdUpdate")
.Input(FakeInput(variable_ref_type))
.Input(FakeInput(index_type))
.Input(FakeInput(RemoveRefType(variable_ref_type)))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdUpdateOpTest, Simple_TwoD32) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
10002, 0, 0, 0, 777, 778, 779});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, Simple_TwoD64) {
MakeOp(DT_FLOAT_REF, DT_INT64);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int64_t>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 3}));
test::FillValues<float>(&expected, {100, 101, 102, 0, 0, 0, 10000, 10001,
10002, 0, 0, 0, 777, 778, 779});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, Simple_ZeroD) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({1}), {3});
AddInputFromArray<float>(TensorShape({1}), {101});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {0, 0, 0, 101, 0});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, Simple_OneD) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3}), {100, 101, 102});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({5}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 101});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, HigherRank) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({8}), {0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({2, 3, 1}), {0, 4, 2, 1, 3, 6});
AddInputFromArray<float>(TensorShape({2, 3}), {10, 20, 30, 40, 50, 60});
TF_ASSERT_OK(RunOpKernel());
Tensor params_tensor = *mutable_input(0).tensor;
Tensor expected(allocator(), DT_FLOAT, TensorShape({8}));
test::FillValues<float>(&expected, {10, 40, 30, 50, 20, 0, 60, 0});
test::ExpectTensorEqual<float>(expected, params_tensor);
}
TEST_F(ScatterNdUpdateOpTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [99] does not index into shape [5,3]"))
<< s;
}
TEST_F(ScatterNdUpdateOpTest, Error_WrongDimsIndices) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({2, 3}), {0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({1, 3, 1}), {0, 4, 99});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"Dimensions [0,1) of indices[shape=[1,3,1]] = 1 must match dimensions "
"[0,1) of updates[shape=[3,3]] = 3"))
<< s;
}
TEST_F(ScatterNdUpdateOpTest, Error_MismatchedParamsAndUpdateDimensions) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(
TensorShape({3, 4}),
{100, 101, 102, 103, 777, 778, 779, 780, 10000, 10001, 10002, 10004});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"Dimensions [1,2) of input[shape=[5,3]] must match dimensions [1,2) of "
"updates[shape=[3,4]]"))
<< s;
}
TEST_F(ScatterNdUpdateOpTest, Error_MismatchedIndicesAndUpdateDimensions) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({2, 3}),
{100, 101, 102, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(),
"Dimensions [0,1) of indices[shape=[3,1]] = 3 must match dimensions [0,1)"
" of updates[shape=[2,3]] = 2"))
<< s;
}
class ScatterNdUpdateOpErrorOnBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_ref_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNdUpdate")
.Input(FakeInput(variable_ref_type))
.Input(FakeInput(index_type))
.Input(FakeInput(RemoveRefType(variable_ref_type)))
.Attr("bad_indices_policy", "ERROR")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdUpdateOpErrorOnBadIndicesTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 3}),
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 99, 4});
AddInputFromArray<float>(TensorShape({3, 3}),
{100, 101, 102, 777, 778, 779, 10000, 10001, 10002});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [99] does not index into shape [5,3]"))
<< s;
}
class ScatterNdUpdateOpIgnoreBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_ref_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNdUpdate")
.Input(FakeInput(variable_ref_type))
.Input(FakeInput(index_type))
.Input(FakeInput(RemoveRefType(variable_ref_type)))
.Attr("bad_indices_policy", "IGNORE")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdUpdateOpIgnoreBadIndicesTest, DropOutOfRangeIndices) {
MakeOp(DT_FLOAT_REF, DT_INT32);
AddInputFromArray<float>(TensorShape({5, 1}), {0, 0, 0, 0, 0});
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 0});
test::ExpectTensorEqual<float>(expected, *mutable_input(0).tensor);
}
class ScatterNdUpdateOpConstructionTest : public OpsTestBase {};
TEST_F(ScatterNdUpdateOpConstructionTest, Error_BadIndicesPolicyInvalid) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY")
.Finalize(node_def()));
EXPECT_NE(InitOp(), absl::OkStatus());
}
class ScatterNdOpTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Input(FakeInput(DT_INT32))
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdOpTest, Simple_OneD) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 4, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
AddInputFromArray<int32>(TensorShape({2}), {5, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 101});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
TEST_F(ScatterNdOpTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
AddInputFromArray<int32>(TensorShape({2}), {5, 1});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [5] does not index into shape [5,1]"))
<< s;
}
class ScatterNdOpErrorOnBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "ERROR")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdOpErrorOnBadIndicesTest, Error_IndexOutOfRange) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
AddInputFromArray<int32>(TensorShape({2}), {5, 1});
Status s = RunOpKernel();
EXPECT_TRUE(absl::StrContains(
s.ToString(), "indices[1] = [5] does not index into shape [5,1]"))
<< s;
}
class ScatterNdOpIgnoreBadIndicesTest : public OpsTestBase {
protected:
void MakeOp(DataType variable_type, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(index_type))
.Input(FakeInput(variable_type))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "IGNORE")
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(ScatterNdOpIgnoreBadIndicesTest, DropOutOfRangeIndices) {
MakeOp(DT_FLOAT, DT_INT32);
AddInputFromArray<int32>(TensorShape({3, 1}), {0, 5, 2});
AddInputFromArray<float>(TensorShape({3, 1}), {100, 101, 102});
AddInputFromArray<int32>(TensorShape({2}), {5, 1});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_FLOAT, TensorShape({5, 1}));
test::FillValues<float>(&expected, {100, 0, 102, 0, 0});
test::ExpectTensorEqual<float>(expected, *GetOutput(0));
}
class ScatterNdOpConstructionTest : public OpsTestBase {};
TEST_F(ScatterNdOpConstructionTest, Error_BadIndicesPolicyInvalid) {
TF_ASSERT_OK(NodeDefBuilder("myop", "ScatterNd")
.Input(FakeInput(DT_INT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_INT32))
.Attr("bad_indices_policy", "AN_UNRECOGNIZED_POLICY")
.Finalize(node_def()));
EXPECT_NE(InitOp(), absl::OkStatus());
}
class ScatterNdUpdateBM : public ScatterNdUpdateOpTest {
public:
void TestBody() override {}
void MakeBenchmarkOp(const char* op, DataType index_type) {
TF_ASSERT_OK(NodeDefBuilder("myop", op)
.Input(FakeInput(DT_FLOAT_REF))
.Input(FakeInput(index_type))
.Input(FakeInput(DT_FLOAT))
.Finalize(node_def()));
TF_CHECK_OK(InitOp());
}
};
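// Benchmark helper: fills a [kRows, embedding_size] parameter (about 10M
// floats in total), draws kNumUpdates random row indices with matching update
// rows, and times repeated runs of the given scatter op.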
template <typename Index>
void BM_ScatterNdHelper(::testing::benchmark::State& state, int embedding_size,
const char* op) {
const int kRows = 10000000 / embedding_size;
std::vector<float> values;
  values.reserve(kRows * embedding_size);
for (int i = 0; i < kRows * embedding_size; i++) {
values.push_back(i);
}
const int kNumUpdates = 1000;
random::PhiloxRandom philox(301, 17);
random::SimplePhilox rnd(&philox);
std::vector<Index> indices;
std::vector<float> updates;
for (int i = 0; i < kNumUpdates; i++) {
indices.push_back(rnd.Uniform(kRows));
for (int j = 0; j < embedding_size; j++) {
updates.push_back(i * 10 + j);
}
}
ScatterNdUpdateBM bm;
bm.MakeBenchmarkOp(op, DataTypeToEnum<Index>::v());
bm.AddInputFromArray<float>(TensorShape({kRows, embedding_size}), values);
bm.AddInputFromArray<Index>(TensorShape({kNumUpdates}), indices);
bm.AddInputFromArray<float>(TensorShape({kNumUpdates, embedding_size}),
updates);
for (auto i : state) {
Status s = bm.RunOpKernel();
}
state.SetItemsProcessed((static_cast<int64_t>(kNumUpdates) * embedding_size) *
state.iterations());
}
void BM_ScatterNdUpdateInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterNdHelper<int32>(state, embedding_size, "ScatterNdUpdate");
}
void BM_ScatterNdUpdateInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterNdHelper<int64_t>(state, embedding_size, "ScatterNdUpdate");
}
void BM_ScatterNdAddInt32(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterNdHelper<int32>(state, embedding_size, "ScatterNdAdd");
}
void BM_ScatterNdAddInt64(::testing::benchmark::State& state) {
const int embedding_size = state.range(0);
BM_ScatterNdHelper<int64_t>(state, embedding_size, "ScatterNdAdd");
}
BENCHMARK(BM_ScatterNdUpdateInt32)
->Arg(1)
->Arg(10)
->Arg(64)
->Arg(256)
->Arg(1024);
BENCHMARK(BM_ScatterNdUpdateInt64)
->Arg(1)
->Arg(10)
->Arg(64)
->Arg(256)
->Arg(1024);
BENCHMARK(BM_ScatterNdAddInt32)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
BENCHMARK(BM_ScatterNdAddInt64)->Arg(1)->Arg(10)->Arg(64)->Arg(256)->Arg(1024);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2xla/kernels/scatter_nd_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/scatter_nd_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9a9fecee-89c4-40d9-9f46-bb6d41b7c402 | cpp | google/arolla | executable_builder | arolla/expr/eval/executable_builder.cc | arolla/expr/eval/executable_builder_test.cc | #include "arolla/expr/eval/executable_builder.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/expr/eval/dynamic_compiled_expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_stack_trace.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
namespace {
std::string FormatSlots(absl::Span<const TypedSlot> slots) {
return absl::StrJoin(slots, ", ", [](std::string* out, TypedSlot s) {
absl::StrAppend(out, FormatSlot(s));
});
}
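// DynamicBoundExpr backed by flat vectors of bound operators: init_ops_ are
// replayed by InitializeLiterals() and eval_ops_ by Execute(). On failure the
// display name of the failing operator (and its stack trace, if collected) is
// appended to the status.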
class DynamicBoundExprImpl : public DynamicBoundExpr {
public:
DynamicBoundExprImpl(
absl::flat_hash_map<std::string, TypedSlot> input_slots,
TypedSlot output_slot,
std::vector<std::unique_ptr<BoundOperator>> init_ops,
std::vector<std::unique_ptr<BoundOperator>> eval_ops,
absl::flat_hash_map<std::string, TypedSlot> named_output_slots,
std::vector<std::string> init_op_descriptions,
std::vector<std::string> eval_op_descriptions,
DenseArray<Text> op_display_names, DenseArray<Text> op_stack_traces)
: DynamicBoundExpr(std::move(input_slots), output_slot,
std::move(named_output_slots)),
init_ops_(std::move(init_ops)),
eval_ops_(std::move(eval_ops)),
init_op_descriptions_(std::move(init_op_descriptions)),
eval_op_descriptions_(std::move(eval_op_descriptions)),
op_display_names_(std::move(op_display_names)),
op_stack_traces_(std::move(op_stack_traces)) {}
void InitializeLiterals(EvaluationContext* ctx, FramePtr frame) const final {
RunBoundOperators(init_ops_, ctx, frame);
}
void Execute(EvaluationContext* ctx, FramePtr frame) const final {
int64_t last_ip = RunBoundOperators(eval_ops_, ctx, frame);
if (!ctx->status().ok()) {
      RETURN_IF_ERROR(std::move(*ctx).status()).With([&](auto status_builder) {
        DCHECK_LT(last_ip, op_display_names_.size());
        status_builder << "during evaluation of operator "
                       << op_display_names_[last_ip].AsOptional().value_or("");
        if (!op_stack_traces_.empty()) {
          status_builder << "\n"
                         << op_stack_traces_[last_ip].AsOptional().value_or("");
}
ctx->set_status(absl::Status(status_builder));
});
}
}
absl::Span<const std::string> init_op_descriptions() const final {
return init_op_descriptions_;
}
absl::Span<const std::string> eval_op_descriptions() const final {
return eval_op_descriptions_;
}
private:
std::vector<std::unique_ptr<BoundOperator>> init_ops_;
std::vector<std::unique_ptr<BoundOperator>> eval_ops_;
std::vector<std::string> init_op_descriptions_;
std::vector<std::string> eval_op_descriptions_;
DenseArray<Text> op_display_names_;
DenseArray<Text> op_stack_traces_;
};
absl::Status VerifyNoNulls(
absl::Span<const std::unique_ptr<BoundOperator>> ops) {
for (size_t i = 0; i < ops.size(); ++i) {
if (ops[i] == nullptr) {
return absl::InternalError(
absl::StrFormat("missing operator at position %d", i));
}
}
return absl::OkStatus();
}
}
std::string FormatSlot(TypedSlot slot) {
return absl::StrFormat("%s [0x%02X]", slot.GetType()->name(),
slot.byte_offset());
}
std::string FormatOperatorCall(absl::string_view op_name,
absl::Span<const TypedSlot> input_slots,
absl::Span<const TypedSlot> output_slots) {
if (output_slots.empty()) {
return absl::StrFormat("%s(%s)", op_name, FormatSlots(input_slots));
} else {
return absl::StrFormat("%s = %s(%s)", FormatSlots(output_slots), op_name,
FormatSlots(input_slots));
}
}
ExecutableBuilder::ExecutableBuilder(
FrameLayout::Builder* layout_builder, bool collect_op_descriptions,
std::shared_ptr<const ExprStackTrace> stack_trace)
: layout_builder_(layout_builder),
collect_op_descriptions_(collect_op_descriptions) {
if (stack_trace != nullptr) {
stack_trace_builder_ = BoundExprStackTraceBuilder(stack_trace);
}
}
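// Queues a literal to be copied into `output_slot`; the actual copy happens in
// the single initialization operator emitted by Build().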
absl::Status ExecutableBuilder::AddLiteralInitialization(
const TypedValue& literal_value, TypedSlot output_slot) {
if (literal_value.GetType() != output_slot.GetType()) {
return absl::InternalError(absl::StrFormat(
"incompatible types for literal and its slot: %s vs %s",
literal_value.GetType()->name(), output_slot.GetType()->name()));
}
if (collect_op_descriptions_) {
absl::StrAppendFormat(&init_literals_description_, "%s = %s\n",
FormatSlots({output_slot}), literal_value.Repr());
}
literal_values_and_slots_.push_back({literal_value, output_slot});
return absl::OkStatus();
}
absl::StatusOr<int64_t> ExecutableBuilder::BindEvalOp(
const QExprOperator& op, absl::Span<const TypedSlot> input_slots,
TypedSlot output_slot, absl::string_view display_name) {
ASSIGN_OR_RETURN(auto bound_op, op.Bind(input_slots, output_slot),
_ << "while binding operator " << display_name);
std::string description;
if (collect_op_descriptions_) {
description = FormatOperatorCall(display_name, input_slots, {output_slot});
}
return AddEvalOp(std::move(bound_op), std::move(description),
std::string(display_name));
}
int64_t ExecutableBuilder::AddInitOp(std::unique_ptr<BoundOperator> op,
std::string description) {
if (collect_op_descriptions_) {
init_op_descriptions_.push_back(std::move(description));
}
init_ops_.push_back(std::move(op));
return init_ops_.size() - 1;
}
int64_t ExecutableBuilder::AddEvalOp(std::unique_ptr<BoundOperator> op,
std::string description,
std::string display_name) {
if (collect_op_descriptions_) {
eval_op_descriptions_.push_back(std::move(description));
}
eval_ops_.push_back(std::move(op));
op_display_names_.push_back(std::move(display_name));
return eval_ops_.size() - 1;
}
int64_t ExecutableBuilder::SkipEvalOp() { return AddEvalOp(nullptr, "", ""); }
absl::Status ExecutableBuilder::SetEvalOp(int64_t offset,
std::unique_ptr<BoundOperator> op,
std::string description,
std::string display_name) {
if (offset < 0 || offset >= eval_ops_.size()) {
return absl::InternalError(absl::StrFormat(
"illegal operator offset: must be in range [0, %d), got %d",
eval_ops_.size(), offset));
}
if (eval_ops_[offset] != nullptr) {
return absl::InternalError(absl::StrFormat(
"attempt to override existing operator at position %d", offset));
}
if (collect_op_descriptions_) {
DCHECK_EQ(eval_ops_.size(), eval_op_descriptions_.size());
eval_op_descriptions_[offset] = std::move(description);
}
eval_ops_[offset] = std::move(op);
op_display_names_[offset] = std::move(display_name);
return absl::OkStatus();
}
absl::Status ExecutableBuilder::AddNamedOutput(absl::string_view name,
TypedSlot slot) {
if (!named_outputs_.emplace(name, slot).second) {
return absl::FailedPreconditionError(
absl::StrCat("duplicated output slot name: ", name));
}
return absl::OkStatus();
}
void ExecutableBuilder::RegisterStacktrace(int64_t ip,
const ExprNodePtr& node) {
if (stack_trace_builder_.has_value()) {
stack_trace_builder_->RegisterIp(ip, node);
}
}
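// Assembles the final DynamicBoundExpr: if any literals were queued, emits one
// init operator that copies them all into their slots, then moves the
// collected operators, descriptions and stack traces into the bound
// expression.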
std::unique_ptr<BoundExpr> ExecutableBuilder::Build(
const absl::flat_hash_map<std::string, TypedSlot>& input_slots,
TypedSlot output_slot) && {
if (!literal_values_and_slots_.empty()) {
if (!init_literals_description_.empty()) {
init_literals_description_.pop_back();
}
AddInitOp(MakeBoundOperator(
[values_and_slots = std::move(literal_values_and_slots_)](
EvaluationContext* ctx, FramePtr frame) {
for (const auto& [value, slot] : values_and_slots) {
auto ref = value.AsRef();
ref.GetType()->UnsafeCopy(
ref.GetRawPointer(),
frame.GetRawPointer(slot.byte_offset()));
}
}),
std::move(init_literals_description_));
}
DCHECK_OK(VerifyNoNulls(init_ops_));
DCHECK_OK(VerifyNoNulls(eval_ops_));
DenseArray<Text> stack_trace;
if (stack_trace_builder_.has_value()) {
stack_trace = stack_trace_builder_->Build(eval_ops_.size());
}
return std::make_unique<DynamicBoundExprImpl>(
input_slots, output_slot, std::move(init_ops_), std::move(eval_ops_),
std::move(named_outputs_), std::move(init_op_descriptions_),
std::move(eval_op_descriptions_),
CreateFullDenseArray<Text>(op_display_names_.begin(),
op_display_names_.end()),
std::move(stack_trace));
}
} | #include "arolla/expr/eval/executable_builder.h"
#include <cstdint>
#include <memory>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/expr/eval/test_utils.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/bound_operators.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
namespace arolla::expr::eval_internal {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::testing::Eq;
using ::testing::HasSubstr;
std::unique_ptr<BoundOperator> Noop() {
return MakeBoundOperator([](EvaluationContext* ctx, FramePtr frame) {});
}
TEST(ExecutableBuilderTest, SetEvalOp) {
FrameLayout::Builder layout_builder;
auto output_slot = layout_builder.AddSlot<float>();
ExecutableBuilder builder(&layout_builder, true);
EXPECT_THAT(builder.SetEvalOp(0, Noop(), "noop", "noop"),
StatusIs(absl::StatusCode::kInternal,
HasSubstr("illegal operator offset")));
builder.SkipEvalOp();
ASSERT_THAT(builder.SetEvalOp(0, Noop(), "noop", "noop"), IsOk());
EXPECT_THAT(
builder.SetEvalOp(0, Noop(), "noop", "noop"),
StatusIs(
absl::StatusCode::kInternal,
HasSubstr("attempt to override existing operator at position 0")));
EXPECT_THAT(std::move(builder).Build({}, TypedSlot::FromSlot(output_slot)),
AllOf(InitOperationsAre(), EvalOperationsAre("noop")));
}
TEST(ExecutableBuilderTest, BindInitializeLiteralOp) {
FrameLayout::Builder layout_builder;
auto float_slot = layout_builder.AddSlot<float>();
auto optional_int_slot = layout_builder.AddSlot<OptionalValue<int32_t>>();
ExecutableBuilder builder(&layout_builder, true);
EXPECT_THAT(
builder.AddLiteralInitialization(TypedValue::FromValue(float{57.}),
TypedSlot::FromSlot(float_slot)),
IsOk());
EXPECT_THAT(
builder.AddLiteralInitialization(TypedValue::FromValue(int32_t{57}),
TypedSlot::FromSlot(optional_int_slot)),
StatusIs(absl::StatusCode::kInternal,
"incompatible types for literal and its slot: INT32 vs "
"OPTIONAL_INT32"));
EXPECT_THAT(builder.AddLiteralInitialization(
TypedValue::FromValue(OptionalValue<int32_t>(57)),
TypedSlot::FromSlot(optional_int_slot)),
IsOk());
auto bound_expr =
std::move(builder).Build({}, TypedSlot::FromSlot(float_slot));
EXPECT_THAT(
bound_expr,
AllOf(InitOperationsAre("FLOAT32 [0x00] = 57.\n"
"OPTIONAL_INT32 [0x04] = optional_int32{57}"),
EvalOperationsAre()));
auto layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&layout);
EvaluationContext ctx;
bound_expr->InitializeLiterals(&ctx, alloc.frame());
EXPECT_THAT(alloc.frame().Get(float_slot), Eq(57.));
EXPECT_THAT(alloc.frame().Get(optional_int_slot), Eq(57));
}
TEST(ExecutableBuilderTest, ExecuteOk) {
FrameLayout::Builder layout_builder;
FrameLayout::Slot<int32_t> x_slot = layout_builder.AddSlot<int32_t>();
auto make_increment_operator = [x_slot](int32_t increment) {
return MakeBoundOperator(
[x_slot, increment](EvaluationContext* ctx, FramePtr frame) {
frame.Set(x_slot, frame.Get(x_slot) + increment);
});
};
ExecutableBuilder builder(&layout_builder, true);
builder.AddEvalOp(make_increment_operator(1), "inc(1)", "inc(1)");
builder.AddEvalOp(make_increment_operator(10), "inc(10)", "inc(10)");
builder.AddEvalOp(make_increment_operator(100), "inc(100)", "inc(100)");
builder.AddEvalOp(make_increment_operator(1000), "inc(1000)", "inc(1000)");
auto dynamic_bound_expr =
std::move(builder).Build({}, TypedSlot::FromSlot(x_slot));
FrameLayout layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&layout);
EvaluationContext ctx;
dynamic_bound_expr->Execute(&ctx, alloc.frame());
EXPECT_OK(ctx.status());
EXPECT_THAT(alloc.frame().Get(x_slot), Eq(1111));
}
TEST(ExecutableBuilderTest, ExecuteWithError) {
FrameLayout::Builder layout_builder;
FrameLayout::Slot<int32_t> x_slot = layout_builder.AddSlot<int32_t>();
auto make_increment_operator = [x_slot](int32_t increment) {
return MakeBoundOperator(
[x_slot, increment](EvaluationContext* ctx, FramePtr frame) {
frame.Set(x_slot, frame.Get(x_slot) + increment);
});
};
ExecutableBuilder builder(&layout_builder, true);
builder.AddEvalOp(make_increment_operator(1), "inc(1)", "inc(1)");
builder.AddEvalOp(make_increment_operator(10), "inc(10)", "inc(10)");
builder.AddEvalOp(make_increment_operator(100), "inc(100)", "inc(100)");
builder.AddEvalOp(
MakeBoundOperator([](EvaluationContext* ctx, FramePtr frame) {
ctx->set_status(absl::InvalidArgumentError("foo"));
}),
"error_operator", "error_operator");
builder.AddEvalOp(make_increment_operator(1000), "inc(1000)", "inc(1000)");
auto dynamic_bound_expr =
std::move(builder).Build({}, TypedSlot::FromSlot(x_slot));
FrameLayout layout = std::move(layout_builder).Build();
MemoryAllocation alloc(&layout);
EvaluationContext ctx;
dynamic_bound_expr->Execute(&ctx, alloc.frame());
EXPECT_THAT(
ctx.status(),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("foo; during evaluation of operator error_operator")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/executable_builder.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/executable_builder_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
ef42979d-a408-4ce0-8289-0539d58d3676 | cpp | tensorflow/tensorflow | tflite_op_wrapper | tensorflow/lite/kernels/shim/tflite_op_wrapper.h | tensorflow/lite/kernels/shim/tflite_op_wrapper_test.cc | #ifndef TENSORFLOW_LITE_KERNELS_SHIM_TFLITE_OP_WRAPPER_H_
#define TENSORFLOW_LITE_KERNELS_SHIM_TFLITE_OP_WRAPPER_H_
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/shim/op_kernel.h"
#include "tensorflow/lite/kernels/shim/status_macros.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
namespace tflite {
namespace shim {
namespace op_wrapper {
using ::tflite::shim::OpKernelShim;
using ::tflite::shim::Runtime;
template <typename N, typename... T>
struct Attr {
const char* Name() const { return N::Name(); }
};
template <char const* str>
struct AttrName {
static const char* Name() { return str; }
};
template <typename T>
struct AttrType {
using type = T;
};
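// Declaration-only helpers used purely for type computation inside decltype:
// prependType prepends T to every tuple inside a tuple of tuples.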
template <typename T, typename... Us>
static constexpr std::tuple<T, Us...> prependTypeInner(T, std::tuple<Us...>);
template <typename T, typename... Us>
static constexpr auto prependType(T, std::tuple<Us...>)
-> std::tuple<decltype(prependTypeInner(std::declval<T>(),
std::declval<Us>()))...>;
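// getCombinations expands a pack of Attr declarations into a tuple that
// contains one type tuple per element of the cartesian product of the
// attributes' allowed types.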
template <typename Name, typename... Ts>
static constexpr std::tuple<std::tuple<Ts>...> getCombinations(
Attr<Name, Ts...>);
template <typename Name, typename Head, typename... Attrs>
static constexpr auto getCombinations(Attr<Name, Head>, Attrs...)
-> decltype(prependType(std::declval<Head>(),
getCombinations(std::declval<Attrs>()...)));
template <typename Name, typename Head, typename... Tail, typename... Attrs>
static constexpr auto getCombinations(Attr<Name, Head, Tail...>, Attrs...)
-> decltype(std::tuple_cat(
prependType(std::declval<Head>(),
getCombinations(std::declval<Attrs>()...)),
getCombinations(std::declval<Attr<Name, Tail...>>(),
std::declval<Attrs>()...)));
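// convertTuplesToOps maps each combination tuple<Ts...> onto Op<Rt, Ts...>;
// convertTupleToVariant then folds the resulting tuple of ops into a
// std::variant.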
template <Runtime Rt, template <Runtime, typename...> typename Op,
typename... Ts>
static constexpr Op<Rt, Ts...> convertTuplesToOpsInner(std::tuple<Ts...>);
template <Runtime Rt, template <Runtime, typename...> typename Op,
typename... Ts>
static constexpr auto convertTuplesToOps(std::tuple<Ts...>) -> std::tuple<
decltype(convertTuplesToOpsInner<Rt, Op>(std::declval<Ts>()))...>;
template <typename... Ts>
static constexpr std::variant<Ts...> convertTupleToVariant(std::tuple<Ts...>);
template <Runtime Rt, template <Runtime, typename...> typename Op,
typename FirstAttr, typename... OtherAttrs>
struct VariantOp {
using type =
decltype(convertTupleToVariant(convertTuplesToOps<Rt, Op>(getCombinations(
std::declval<FirstAttr>(), std::declval<OtherAttrs>()...))));
};
template <Runtime Rt>
class OpWrapperExtension : public OpKernelShim<OpWrapperExtension, Rt> {};
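// OpWrapper wraps a templated op whose implementation depends on attribute
// dtypes. At Init() time it reads the attribute values from the init context,
// constructs the matching Op<Rt, Ts...> alternative of the variant, and
// forwards Init()/Invoke() to it. Static metadata (OpName, Doc, Attrs, Inputs,
// Outputs, ShapeInference) is delegated to the first variant alternative.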
template <Runtime Rt, template <Runtime, typename...> typename Op,
typename... As>
class OpWrapper : public OpWrapperExtension<Rt> {
public:
using TmplOpType = typename VariantOp<Rt, Op, As...>::type;
using TmplOpType0 = typename std::variant_alternative<0, TmplOpType>::type;
using typename OpKernelShim<OpWrapperExtension, Rt>::InitContext;
using typename OpKernelShim<OpWrapperExtension, Rt>::InvokeContext;
using typename OpKernelShim<OpWrapperExtension, Rt>::ShapeInferenceContext;
OpWrapper() = default;
static const char* OpName() { return TmplOpType0::OpName(); }
static const char* Doc() { return TmplOpType0::Doc(); }
static std::vector<std::string> Attrs() { return TmplOpType0::Attrs(); }
static std::vector<std::string> Inputs() { return TmplOpType0::Inputs(); }
static std::vector<std::string> Outputs() { return TmplOpType0::Outputs(); }
static absl::Status ShapeInference(ShapeInferenceContext* context) {
return TmplOpType0::ShapeInference(context);
}
absl::Status Init(InitContext* context) {
SH_RETURN_IF_ERROR(SetVariantOp<As...>(context));
return std::visit(
[context](auto&& op) -> absl::Status { return op.Init(context); },
*op_);
}
absl::Status Invoke(InvokeContext* context) {
return std::visit(
[context](auto&& op) -> absl::Status { return op.Invoke(context); },
*op_);
}
private:
template <typename FirstAttr, typename... Attrs>
absl::Status SetVariantOp(InitContext* c) {
return CombineAttributeTypes(this, c, FirstAttr{}, Attrs{}...);
}
template <typename F, typename Name, typename T>
struct Forwarder {
public:
explicit Forwarder(F* f) : inner(f) {}
template <typename... Args>
absl::Status SetOpCombination(Args... args) {
return inner->SetOpCombination(Name::Name(), AttrType<T>{}, args...);
}
private:
F* inner;
};
template <typename F, typename Name, typename Head, typename... Tail,
typename... Attrs>
absl::Status CombineAttributeTypes(F* obj, InitContext* c,
Attr<Name, Head, Tail...>, Attrs... rest) {
SH_RETURN_IF_ERROR(
ApplyAttrType(obj, c, Name{}, AttrType<Head>{}, rest...));
return CombineAttributeTypes(obj, c, Attr<Name, Tail...>{}, rest...);
}
template <typename F, typename Name, typename... Attrs>
absl::Status CombineAttributeTypes(F*, InitContext*, Attr<Name>, Attrs...) {
return absl::OkStatus();
}
template <typename F, typename Name, typename T, typename Attr,
typename... Attrs>
absl::Status ApplyAttrType(F* obj, InitContext* c, Name, AttrType<T>, Attr a,
Attrs... rest) {
Forwarder<F, Name, T> forwarder(obj);
return CombineAttributeTypes(&forwarder, c, a, rest...);
}
template <typename F, typename Name, typename T>
absl::Status ApplyAttrType(F* obj, InitContext* c, Name, AttrType<T> t) {
return obj->SetOpCombination(Name::Name(), t, c);
}
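  // SetOpCombination overloads, one per supported attribute count (1 to 5):
  // each reads the attribute values as TfLiteType codes and, if they all match
  // this type combination, instantiates the corresponding Op specialization.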
template <typename T>
absl::Status SetOpCombination(std::string Name1, AttrType<T>,
InitContext* context) {
int64_t datatype_1;
SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
if (datatype_1 == typeToTfLiteType<T>()) {
this->op_ = std::make_unique<TmplOpType>(Op<Rt, T>());
}
return absl::OkStatus();
}
template <typename T, typename U>
absl::Status SetOpCombination(std::string Name1, AttrType<T>,
std::string Name2, AttrType<U>,
InitContext* context) {
int64_t datatype_1, datatype_2;
SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
SH_RETURN_IF_ERROR(context->GetAttr(Name2, &datatype_2));
if (datatype_1 == typeToTfLiteType<T>() &&
datatype_2 == typeToTfLiteType<U>()) {
this->op_ = std::make_unique<TmplOpType>(Op<Rt, T, U>());
}
return absl::OkStatus();
}
template <typename T, typename U, typename V>
absl::Status SetOpCombination(std::string Name1, AttrType<T>,
std::string Name2, AttrType<U>,
std::string Name3, AttrType<V>,
InitContext* context) {
int64_t datatype_1, datatype_2, datatype_3;
SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
SH_RETURN_IF_ERROR(context->GetAttr(Name2, &datatype_2));
SH_RETURN_IF_ERROR(context->GetAttr(Name3, &datatype_3));
if (datatype_1 == typeToTfLiteType<T>() &&
datatype_2 == typeToTfLiteType<U>() &&
datatype_3 == typeToTfLiteType<V>()) {
this->op_ = std::make_unique<TmplOpType>(Op<Rt, T, U, V>());
}
return absl::OkStatus();
}
template <typename T, typename U, typename V, typename W>
absl::Status SetOpCombination(std::string Name1, AttrType<T>,
std::string Name2, AttrType<U>,
std::string Name3, AttrType<V>,
std::string Name4, AttrType<W>,
InitContext* context) {
int64_t datatype_1, datatype_2, datatype_3, datatype_4;
SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
SH_RETURN_IF_ERROR(context->GetAttr(Name2, &datatype_2));
SH_RETURN_IF_ERROR(context->GetAttr(Name3, &datatype_3));
SH_RETURN_IF_ERROR(context->GetAttr(Name4, &datatype_4));
if (datatype_1 == typeToTfLiteType<T>() &&
datatype_2 == typeToTfLiteType<U>() &&
datatype_3 == typeToTfLiteType<V>() &&
datatype_4 == typeToTfLiteType<W>()) {
this->op_ = std::make_unique<TmplOpType>(Op<Rt, T, U, V, W>());
}
return absl::OkStatus();
}
template <typename T, typename U, typename V, typename W, typename X>
absl::Status SetOpCombination(std::string Name1, AttrType<T>,
std::string Name2, AttrType<U>,
std::string Name3, AttrType<V>,
std::string Name4, AttrType<W>,
std::string Name5, AttrType<X>,
InitContext* context) {
int64_t datatype_1, datatype_2, datatype_3, datatype_4, datatype_5;
SH_RETURN_IF_ERROR(context->GetAttr(Name1, &datatype_1));
SH_RETURN_IF_ERROR(context->GetAttr(Name2, &datatype_2));
SH_RETURN_IF_ERROR(context->GetAttr(Name3, &datatype_3));
SH_RETURN_IF_ERROR(context->GetAttr(Name4, &datatype_4));
SH_RETURN_IF_ERROR(context->GetAttr(Name5, &datatype_5));
if (datatype_1 == typeToTfLiteType<T>() &&
datatype_2 == typeToTfLiteType<U>() &&
datatype_3 == typeToTfLiteType<V>() &&
datatype_4 == typeToTfLiteType<W>() &&
datatype_5 == typeToTfLiteType<X>()) {
this->op_ = std::make_unique<TmplOpType>(Op<Rt, T, U, V, W, X>());
}
return absl::OkStatus();
}
protected:
std::unique_ptr<TmplOpType> op_;
};
}
}
}
#endif | #include "tensorflow/lite/kernels/shim/tflite_op_wrapper.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/lite/kernels/shim/op_kernel.h"
#include "tensorflow/lite/kernels/shim/tflite_op_shim.h"
namespace tflite {
namespace shim {
namespace op_wrapper {
namespace {
#ifndef EXPECT_OK
#define EXPECT_OK(x) EXPECT_TRUE(x.ok());
#endif
class VariantOpTest : public ::testing::Test {
public:
template <shim::Runtime Rt, typename... Ts>
class TmplOp {};
template <typename T, typename VARIANT_T>
struct isVariantMember;
template <typename T, typename... ALL_T>
struct isVariantMember<T, std::variant<ALL_T...>>
: public std::disjunction<std::is_same<T, ALL_T>...> {};
static constexpr char kAttrName[] = "AttrName";
};
TEST_F(VariantOpTest, TestVariantOpCreation_1) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 1);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, bool>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 2);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, bool>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_1x1) {
using VOp =
VariantOp<Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName>, int64_t>,
Attr<AttrName<kAttrName>, bool>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 1);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_1x1x1) {
using VOp =
VariantOp<Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName>, int64_t>,
Attr<AttrName<kAttrName>, bool>,
Attr<AttrName<kAttrName>, bool>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 1);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, bool>,
VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2x1) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, float>,
Attr<AttrName<kAttrName>, bool>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 2);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, float, bool>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_1x2) {
using VOp =
VariantOp<Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName>, int64_t>,
Attr<AttrName<kAttrName>, bool, float>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 2);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2x2) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t>,
Attr<AttrName<kAttrName>, bool, float>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 4);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, float>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_3x3) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t, int8_t>,
Attr<AttrName<kAttrName>, bool, float, char>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 9);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, char>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, float>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, char>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int8_t, bool>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int8_t, float>, VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int8_t, char>, VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2x2x2) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t>,
Attr<AttrName<kAttrName>, bool, float>,
Attr<AttrName<kAttrName>, char, int8_t>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 8);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, char>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, int8_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float, char>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, float, int8_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, char>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, int8_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, float, char>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, float, int8_t>,
VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_2x1x3x1) {
using VOp = VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t>,
Attr<AttrName<kAttrName>, bool>,
Attr<AttrName<kAttrName>, char, int8_t, float>,
Attr<AttrName<kAttrName>, uint16_t>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 6);
bool b;
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, char, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, int8_t, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int64_t, bool, float, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, char, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, int8_t, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
b = isVariantMember<TmplOp<Runtime::kTfLite, int32_t, bool, float, uint16_t>,
VOp>::value;
EXPECT_TRUE(b);
}
TEST_F(VariantOpTest, TestVariantOpCreation_4x4x6) {
using VOp =
VariantOp<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName>, int64_t, int32_t, int16_t, int8_t>,
Attr<AttrName<kAttrName>, int64_t, int32_t, int16_t, int8_t>,
Attr<AttrName<kAttrName>, int64_t, int32_t, int16_t, int8_t,
bool, float>>::type;
EXPECT_EQ(std::variant_size_v<VOp>, 96);
}
class SetVariantOpTest : public ::testing::Test {
public:
template <Runtime Rt, template <Runtime, typename...> typename Op,
typename... As>
class OpWrapperFriend : public OpWrapper<Rt, Op, As...> {
public:
using TmplOpType = typename VariantOp<Rt, Op, As...>::type;
TmplOpType* GetOp() { return this->op_.get(); }
};
template <Runtime Rt, typename... Ts>
class TmplOp : public OpKernelShim<TmplOp, Rt, Ts...> {
public:
using typename OpKernelShim<TmplOp, Rt, Ts...>::InitContext;
absl::Status Init(InitContext* ctx) { return absl::OkStatus(); }
};
class FakeInitContext : public TfLiteInitContext {
public:
explicit FakeInitContext(const flexbuffers::Map* m)
: TfLiteInitContext(nullptr, m) {}
};
template <typename T>
flexbuffers::Map CreateAttrMap() {
fbb_ = std::make_unique<flexbuffers::Builder>();
fbb_->Map([&]() {
fbb_->Int(kAttrName1, static_cast<int>(typeToTfLiteType<T>()));
});
fbb_->Finish();
return flexbuffers::GetRoot(fbb_->GetBuffer()).AsMap();
}
template <typename T, typename U>
flexbuffers::Map CreateAttrMap() {
fbb_ = std::make_unique<flexbuffers::Builder>();
fbb_->Map([&]() {
fbb_->Int(kAttrName1, static_cast<int>(typeToTfLiteType<T>()));
fbb_->Int(kAttrName2, static_cast<int>(typeToTfLiteType<U>()));
});
fbb_->Finish();
return flexbuffers::GetRoot(fbb_->GetBuffer()).AsMap();
}
template <typename T, typename U, typename V>
flexbuffers::Map CreateAttrMap() {
fbb_ = std::make_unique<flexbuffers::Builder>();
fbb_->Map([&]() {
fbb_->Int(kAttrName1, static_cast<int>(typeToTfLiteType<T>()));
fbb_->Int(kAttrName2, static_cast<int>(typeToTfLiteType<U>()));
fbb_->Int(kAttrName3, static_cast<int>(typeToTfLiteType<V>()));
});
fbb_->Finish();
return flexbuffers::GetRoot(fbb_->GetBuffer()).AsMap();
}
static constexpr char kAttrName1[] = "AttrName1";
static constexpr char kAttrName2[] = "AttrName2";
static constexpr char kAttrName3[] = "AttrName3";
private:
std::unique_ptr<flexbuffers::Builder> fbb_;
};
TEST_F(SetVariantOpTest, TestSetVariantOp_1) {
auto op_wrapper = OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool>>();
auto map = CreateAttrMap<bool>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_1x1) {
auto op_wrapper = OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool>,
Attr<AttrName<kAttrName2>, int32_t>>();
auto map = CreateAttrMap<bool, int32_t>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_1x1x1) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName1>, bool>,
Attr<AttrName<kAttrName2>, int32_t>, Attr<AttrName<kAttrName3>, float>>();
auto map = CreateAttrMap<bool, int32_t, float>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b =
std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2) {
auto op_wrapper =
OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int32_t>>();
auto map = CreateAttrMap<bool>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2x1) {
auto op_wrapper = OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int32_t>,
Attr<AttrName<kAttrName2>, float>>();
auto map = CreateAttrMap<int32_t, float>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int32_t, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_1x2) {
auto op_wrapper =
OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool>,
Attr<AttrName<kAttrName2>, float, int32_t>>();
auto map = CreateAttrMap<bool, float>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2x2) {
auto op_wrapper =
OpWrapperFriend<Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int64_t>,
Attr<AttrName<kAttrName2>, float, int32_t>>();
auto map = CreateAttrMap<bool, float>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
map = CreateAttrMap<bool, int32_t>();
context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
map = CreateAttrMap<int64_t, float>();
context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, float>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
map = CreateAttrMap<int64_t, int32_t>();
context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, bool, int32_t>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, float>>(
*op_wrapper.GetOp());
EXPECT_FALSE(b);
b = std::holds_alternative<TmplOp<Runtime::kTfLite, int64_t, int32_t>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_3x3) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int64_t, ::tensorflow::tstring>,
Attr<AttrName<kAttrName2>, float, int32_t, uint32_t>>();
auto map = CreateAttrMap<::tensorflow::tstring, int32_t>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b;
b = std::holds_alternative<
TmplOp<Runtime::kTfLite, ::tensorflow::tstring, int32_t>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2x2x2) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName1>, bool, int32_t>,
Attr<AttrName<kAttrName2>, float, uint32_t>,
Attr<AttrName<kAttrName3>, ::tensorflow::tstring, int64_t>>();
auto map = CreateAttrMap<int32_t, uint32_t, ::tensorflow::tstring>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<
TmplOp<Runtime::kTfLite, int32_t, uint32_t, ::tensorflow::tstring>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_2x1x3) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp, Attr<AttrName<kAttrName1>, bool, int32_t>,
Attr<AttrName<kAttrName2>, float>,
Attr<AttrName<kAttrName3>, ::tensorflow::tstring, int64_t, uint32_t>>();
auto map = CreateAttrMap<int32_t, float, ::tensorflow::tstring>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<
TmplOp<Runtime::kTfLite, int32_t, float, ::tensorflow::tstring>>(
*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
TEST_F(SetVariantOpTest, TestSetVariantOp_4x4x6) {
auto op_wrapper = OpWrapperFriend<
Runtime::kTfLite, TmplOp,
Attr<AttrName<kAttrName1>, bool, int32_t, uint32_t, int8_t>,
Attr<AttrName<kAttrName2>, float, int16_t, int32_t, uint32_t>,
Attr<AttrName<kAttrName3>, int8_t, uint8_t, int64_t, uint64_t, int32_t,
uint32_t>>();
auto map = CreateAttrMap<int32_t, float, uint32_t>();
auto context = FakeInitContext(&map);
EXPECT_OK(op_wrapper.Init(&context));
bool b = std::holds_alternative<
TmplOp<Runtime::kTfLite, int32_t, float, uint32_t>>(*op_wrapper.GetOp());
EXPECT_TRUE(b);
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tflite_op_wrapper.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/tflite_op_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
510f194a-8b26-442d-b84f-a1e12abb3685 | cpp | tensorflow/tensorflow | map_parallelization | tensorflow/core/grappler/optimizers/data/map_parallelization.cc | tensorflow/core/grappler/optimizers/data/map_parallelization_test.cc | #include "tensorflow/core/grappler/optimizers/data/map_parallelization.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/function_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kMapDataset[] = "MapDataset";
constexpr char kParallelMapDataset[] = "ParallelMapDatasetV2";
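// Clones the MapDataset node `name` into a ParallelMapDatasetV2 with a fresh
// unique name, num_parallel_calls set to autotune, the force_synchronous attr
// removed, and deterministic execution requested.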
NodeDef MakeParallelMap(const string& name, MutableGraphView* graph) {
int index = graph_utils::FindGraphNodeWithName(name, *graph->graph());
DCHECK_NE(index, -1) << "Failed to find node " << name
<< " in the optimized graph.";
NodeDef parallel_map = graph->graph()->node(index);
graph_utils::SetUniqueGraphNodeName(kParallelMapDataset, graph->graph(),
¶llel_map);
parallel_map.set_op(kParallelMapDataset);
auto* num_parallel_calls = graph_utils::AddScalarConstNode(
static_cast<int64_t>(data::model::kAutotune), graph);
parallel_map.add_input(num_parallel_calls->name());
parallel_map.mutable_attr()->erase("force_synchronous");
AddNodeAttr("deterministic", "true", ¶llel_map);
return parallel_map;
}
}
Status MapParallelization::OptimizeAndCollectStats(Cluster* cluster,
const GrapplerItem& item,
GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization map_parallelization is not applied if "
"autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
return absl::OkStatus();
absl::flat_hash_set<string> nodes_to_delete;
FunctionLibraryDefinition function_library(OpRegistry::Global(),
item.graph.library());
auto get_map_node = [](const NodeDef& node) -> const NodeDef* {
if (node.op() == kMapDataset) return &node;
return nullptr;
};
for (const NodeDef& node : item.graph.node()) {
const NodeDef* map_node = get_map_node(node);
if (!map_node) continue;
auto* function =
function_library.Find(map_node->attr().at("f").func().name());
if (function_utils::IsFunctionStateful(function_library, *function, true) ||
(map_node->attr().contains("force_synchronous") &&
map_node->attr().at("force_synchronous").b())) {
continue;
}
auto* parallel_map =
graph.AddNode(MakeParallelMap(map_node->name(), &graph));
TF_RETURN_IF_ERROR(
graph.UpdateFanouts(map_node->name(), parallel_map->name()));
nodes_to_delete.insert(map_node->name());
stats->num_changes++;
}
TF_RETURN_IF_ERROR(graph.DeleteNodes(nodes_to_delete));
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(MapParallelization, "map_parallelization");
}
} | #include "tensorflow/core/grappler/optimizers/data/map_parallelization.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
Status OptimizeWithMapParallelization(const GrapplerItem& item,
GraphDef* output, bool autotune) {
MapParallelization optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
using graph_tests_utils::MakeMapNode;
const char stateless_fun_name[] = "XTimesTwo";
const char stateful_fun_name[] = "RandomUniformFn";
class AutotuneSetting : public ::testing::TestWithParam<bool> {};
TEST_P(AutotuneSetting, MapParallelizationTest) {
const bool autotune = GetParam();
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range", stateless_fun_name),
NDef("Sink", "Identity", {"map"}, {})},
{
test::function::XTimesTwo(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, autotune));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output),
autotune);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("map", output), !autotune);
}
INSTANTIATE_TEST_SUITE_P(Test, AutotuneSetting, ::testing::Values(false, true));
class FromFunctionDef : public ::testing::TestWithParam<string> {};
TEST_P(FromFunctionDef, MapParallelizationTest) {
const string op = GetParam();
bool from_function_def = (op == "_Retval");
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map", "range", stateless_fun_name),
NDef("Sink", op, {"map"}, {})},
{
test::function::XTimesTwo(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, true));
EXPECT_EQ(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output),
!from_function_def);
EXPECT_EQ(graph_utils::ContainsGraphNodeWithName("map", output),
from_function_def);
}
INSTANTIATE_TEST_SUITE_P(Test, FromFunctionDef,
::testing::Values("Identity", "_Retval"));
TEST(ParallelizeAssert, MapParallelizationTest) {
using test::function::NDef;
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("filename", "Const", {}, {{"value", ""}, {"dtype", DT_STRING}}),
NDef("range", "RangeDataset", {"start", "stop", "step"}, {}),
MakeMapNode("map1", "range", stateful_fun_name),
MakeMapNode("map2", "map1", stateless_fun_name),
NDef("cache", "CacheDataset", {"map2", "filename"}, {}),
NDef("Sink", "Identity", {"cache"}, {})},
{
test::function::XTimesTwo(),
test::function::RandomUniform(),
});
item.fetch.push_back("Sink");
GraphDef output;
TF_ASSERT_OK(OptimizeWithMapParallelization(item, &output, true));
EXPECT_TRUE(graph_utils::ContainsNodeWithOp("ParallelMapDatasetV2", output));
EXPECT_TRUE(graph_utils::ContainsGraphNodeWithName("map1", output));
EXPECT_FALSE(graph_utils::ContainsGraphNodeWithName("map2", output));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_parallelization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/map_parallelization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3ebbc352-a229-49cf-9041-6f9176427a0c | cpp | google/cel-cpp | bool_type | common/types/bool_type.h | common/types/bool_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_BOOL_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_BOOL_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
class BoolType final {
public:
static constexpr TypeKind kKind = TypeKind::kBool;
static constexpr absl::string_view kName = "bool";
BoolType() = default;
BoolType(const BoolType&) = default;
BoolType(BoolType&&) = default;
BoolType& operator=(const BoolType&) = default;
BoolType& operator=(BoolType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(BoolType&) noexcept {}
};
inline constexpr void swap(BoolType& lhs, BoolType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(BoolType, BoolType) { return true; }
inline constexpr bool operator!=(BoolType lhs, BoolType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, BoolType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const BoolType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(BoolType, Kind) {
EXPECT_EQ(BoolType().kind(), BoolType::kKind);
EXPECT_EQ(Type(BoolType()).kind(), BoolType::kKind);
}
TEST(BoolType, Name) {
EXPECT_EQ(BoolType().name(), BoolType::kName);
EXPECT_EQ(Type(BoolType()).name(), BoolType::kName);
}
TEST(BoolType, DebugString) {
{
std::ostringstream out;
out << BoolType();
EXPECT_EQ(out.str(), BoolType::kName);
}
{
std::ostringstream out;
out << Type(BoolType());
EXPECT_EQ(out.str(), BoolType::kName);
}
}
TEST(BoolType, Hash) {
EXPECT_EQ(absl::HashOf(BoolType()), absl::HashOf(BoolType()));
}
TEST(BoolType, Equal) {
EXPECT_EQ(BoolType(), BoolType());
EXPECT_EQ(Type(BoolType()), BoolType());
EXPECT_EQ(BoolType(), Type(BoolType()));
EXPECT_EQ(Type(BoolType()), Type(BoolType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/bool_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/bool_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
069a3ccd-321a-4f16-867b-1bd9d0c811d6 | cpp | tensorflow/tensorflow | rename_op | tensorflow/tools/graph_transforms/rename_op.cc | tensorflow/tools/graph_transforms/rename_op_test.cc | #include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/graph/subgraph.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/fold_constants_lib.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status RenameOp(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
if (!context.params.count("old_op_name") ||
(context.params.at("old_op_name").size() != 1) ||
!context.params.count("new_op_name") ||
(context.params.at("new_op_name").size() != 1)) {
return errors::InvalidArgument(
"rename_op expects exactly one 'old_op_name' and 'new_op_name' "
"argument, e.g. rename_op(old_op_name=Mul, new_op_name=Multiply)");
}
const string old_op_name = context.params.at("old_op_name")[0];
const string new_op_name = context.params.at("new_op_name")[0];
output_graph_def->Clear();
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
*new_node = node;
if (node.op() == old_op_name) {
new_node->set_op(new_op_name);
}
}
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("rename_op", RenameOp);
}
} | #include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/tools/graph_transforms/transform_utils.h"
namespace tensorflow {
namespace graph_transforms {
Status RenameOp(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def);
class RenameOpTest : public ::testing::Test {
protected:
void TestRenameOp() {
GraphDef graph_def;
NodeDef* mul_node1 = graph_def.add_node();
mul_node1->set_name("mul_node1");
mul_node1->set_op("Mul");
mul_node1->add_input("add_node2");
mul_node1->add_input("add_node3");
NodeDef* add_node2 = graph_def.add_node();
add_node2->set_name("add_node2");
add_node2->set_op("Add");
add_node2->add_input("const_node1");
add_node2->add_input("const_node2");
NodeDef* add_node3 = graph_def.add_node();
add_node3->set_name("add_node3");
add_node3->set_op("Add");
add_node3->add_input("const_node1");
add_node3->add_input("const_node3");
NodeDef* const_node1 = graph_def.add_node();
const_node1->set_name("const_node1");
const_node1->set_op("Const");
NodeDef* const_node2 = graph_def.add_node();
const_node2->set_name("const_node2");
const_node2->set_op("Const");
NodeDef* const_node3 = graph_def.add_node();
const_node3->set_name("const_node3");
const_node3->set_op("Const");
NodeDef* add_node4 = graph_def.add_node();
add_node4->set_name("add_node4");
add_node4->set_op("Add");
add_node4->add_input("add_node2");
add_node4->add_input("add_node3");
GraphDef result;
TransformFuncContext context;
context.input_names = {};
context.output_names = {"mul_node1"};
context.params.insert(std::pair<string, std::vector<string>>(
{"old_op_name", {string("Mul")}}));
context.params.insert(std::pair<string, std::vector<string>>(
{"new_op_name", {string("Multiply")}}));
TF_ASSERT_OK(RenameOp(graph_def, context, &result));
std::map<string, const NodeDef*> node_lookup;
MapNamesToNodes(result, &node_lookup);
EXPECT_EQ(1, node_lookup.count("mul_node1"));
EXPECT_EQ("Multiply", node_lookup.at("mul_node1")->op());
EXPECT_EQ(1, node_lookup.count("add_node2"));
EXPECT_EQ("Add", node_lookup.at("add_node2")->op());
EXPECT_EQ(1, node_lookup.count("add_node3"));
EXPECT_EQ("Add", node_lookup.at("add_node3")->op());
EXPECT_EQ(1, node_lookup.count("add_node4"));
EXPECT_EQ("Add", node_lookup.at("add_node4")->op());
EXPECT_EQ(1, node_lookup.count("const_node1"));
EXPECT_EQ("Const", node_lookup.at("const_node1")->op());
EXPECT_EQ(1, node_lookup.count("const_node2"));
EXPECT_EQ("Const", node_lookup.at("const_node2")->op());
EXPECT_EQ(1, node_lookup.count("const_node3"));
EXPECT_EQ("Const", node_lookup.at("const_node3")->op());
}
};
TEST_F(RenameOpTest, TestRenameOp) { TestRenameOp(); }
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/tools/graph_transforms/rename_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
887c6150-e4d7-406b-8f4d-849ff0d7cb86 | cpp | tensorflow/tensorflow | nnapi_delegate | tensorflow/lite/delegates/nnapi/nnapi_delegate.cc | tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc | #include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include <algorithm>
#include <cinttypes>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <iostream>
#include <iterator>
#include <limits>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "Eigen/Core"
#include "tensorflow/compiler/mlir/lite/allocation.h"
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_plugin.h"
#include "tensorflow/lite/delegates/serialization.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/sl/public/NeuralNetworksSupportLibraryImpl.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif
#if defined __ANDROID__ || defined __unix__
#define TFLITE_NNAPI_ALLOW_MMAP_SHARING
#include <sys/mman.h>
#include <unistd.h>
#endif
#include "fp16.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/delegates/nnapi/quant_lstm_sup.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/utils/sparsity_format_converter.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include "tensorflow/lite/nnapi/nnapi_util.h"
#include "tensorflow/lite/util.h"
#ifdef NNAPI_VERBOSE_VALIDATION
#include "tensorflow/lite/schema/schema_generated.h"
#endif
namespace tflite {
namespace {
static const char kNnapiId[] = "nnapi_";
constexpr uint64_t kNoMemoryTimestamp = 0;
std::string NnApiBackendId(
const StatefulNnApiDelegate::Options& delegate_options) {
std::string delegate_id = kNnapiId;
if (delegate_options.accelerator_name) {
delegate_id += delegate_options.accelerator_name;
}
return delegate_id;
}
std::string NnApiErrorDescription(int error_code) {
switch (error_code) {
case ANEURALNETWORKS_NO_ERROR:
return "ANEURALNETWORKS_NO_ERROR";
case ANEURALNETWORKS_OUT_OF_MEMORY:
return "ANEURALNETWORKS_OUT_OF_MEMORY";
case ANEURALNETWORKS_INCOMPLETE:
return "ANEURALNETWORKS_INCOMPLETE";
case ANEURALNETWORKS_UNEXPECTED_NULL:
return "ANEURALNETWORKS_UNEXPECTED_NULL";
case ANEURALNETWORKS_BAD_DATA:
return "ANEURALNETWORKS_BAD_DATA";
case ANEURALNETWORKS_OP_FAILED:
return "ANEURALNETWORKS_OP_FAILED";
case ANEURALNETWORKS_BAD_STATE:
return "ANEURALNETWORKS_BAD_STATE";
case ANEURALNETWORKS_UNMAPPABLE:
return "ANEURALNETWORKS_UNMAPPABLE";
case ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE:
return "ANEURALNETWORKS_OUTPUT_INSUFFICIENT_SIZE";
case ANEURALNETWORKS_UNAVAILABLE_DEVICE:
return "ANEURALNETWORKS_UNAVAILABLE_DEVICE";
case ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT:
return "ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT";
case ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT:
return "ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT";
case ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT:
return "ANEURALNETWORKS_RESOURCE_EXHAUSTED_TRANSIENT";
case ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT:
return "ANEURALNETWORKS_RESOURCE_EXHAUSTED_PERSISTENT";
case ANEURALNETWORKS_DEAD_OBJECT:
return "ANEURALNETWORKS_DEAD_OBJECT";
default:
return "Unknown NNAPI error code: " + std::to_string(error_code);
}
}
#define RETURN_TFLITE_ERROR_IF_NN_ERROR(context, code, call_desc, p_errno) \
do { \
const auto _code = (code); \
const auto _call_desc = (call_desc); \
if (_code != ANEURALNETWORKS_NO_ERROR) { \
const auto error_desc = NnApiErrorDescription(_code); \
TF_LITE_KERNEL_LOG(context, \
"NN API returned error %s at line %d while %s.\n", \
error_desc.c_str(), __LINE__, _call_desc); \
*p_errno = _code; \
return kTfLiteError; \
} \
} while (0)
#define RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(context, code, call_desc, \
p_tensor, p_errno) \
do { \
const auto _code = (code); \
const auto _call_desc = (call_desc); \
if (_code != ANEURALNETWORKS_NO_ERROR) { \
const auto error_desc = NnApiErrorDescription(_code); \
TF_LITE_KERNEL_LOG(context, \
"NN API returned error %s at line %d while %s " \
"for tensor '%s'.\n", \
error_desc.c_str(), __LINE__, _call_desc, \
(p_tensor)->name ? (p_tensor)->name : "no-name"); \
*p_errno = _code; \
return kTfLiteError; \
} \
} while (0)
bool IsFloat(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
return true;
default:
return false;
}
}
bool IsFloatOrUInt8(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
return true;
default:
return false;
}
}
bool IsQuantized(TfLiteType type) {
switch (type) {
case kTfLiteUInt8:
case kTfLiteInt8:
return true;
default:
return false;
}
}
bool IsInt32(TfLiteType type) {
switch (type) {
case kTfLiteInt32:
return true;
default:
return false;
}
}
bool IsFloatOrQuantized(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
return true;
default:
return false;
}
}
bool IsFloatOrInt32(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteInt32:
return true;
default:
return false;
}
}
bool IsFloatQuantizedOrInt32(TfLiteType type) {
switch (type) {
case kTfLiteFloat32:
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteInt32:
return true;
default:
return false;
}
}
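// Builtin ops that may legitimately take a zero-dimensional (scalar) input
// tensor.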
bool IsScalarInputSupported(int builtin_code) {
switch (builtin_code) {
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinMul:
case kTfLiteBuiltinSub:
case kTfLiteBuiltinDiv:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinNotEqual:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinPow:
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinPrelu:
case kTfLiteBuiltinLeakyRelu:
return true;
default:
return false;
}
}
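// Returns true when the op's signed int8 tensors must be re-encoded into
// NNAPI's unsigned QUANT8_ASYMM representation (zero point shifted by 128).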
bool NeedInt8Conversion(const TfLiteContext* context, int builtin_code,
const TfLiteNode* node) {
const int input_id = node->inputs->data[0];
const TfLiteType input_type = context->tensors[input_id].type;
switch (builtin_code) {
case kTfLiteBuiltinConv2d:
case kTfLiteBuiltinDepthwiseConv2d:
case kTfLiteBuiltinFullyConnected: {
if (input_type == kTfLiteInt8) {
const int weights_id = node->inputs->data[1];
const auto& weights_tensor = context->tensors[weights_id];
if ((weights_tensor.type == kTfLiteInt8 ||
weights_tensor.type == kTfLiteUInt8) &&
weights_tensor.quantization.type == kTfLiteAffineQuantization) {
return true;
}
}
return false;
}
case kTfLiteBuiltinTransposeConv: {
const int input_id = 2;
const TfLiteType input_type = context->tensors[input_id].type;
if (input_type == kTfLiteInt8) {
return true;
}
return false;
}
case kTfLiteBuiltinSelect: {
const auto value_type = context->tensors[node->inputs->data[1]].type;
return value_type == kTfLiteInt8;
}
case kTfLiteBuiltinAdd:
case kTfLiteBuiltinArgMax:
case kTfLiteBuiltinArgMin:
case kTfLiteBuiltinAveragePool2d:
case kTfLiteBuiltinBatchToSpaceNd:
case kTfLiteBuiltinConcatenation:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinExpandDims:
case kTfLiteBuiltinGather:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinHardSwish:
case kTfLiteBuiltinL2Normalization:
case kTfLiteBuiltinLeakyRelu:
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinLogistic:
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMaxPool2d:
case kTfLiteBuiltinMean:
case kTfLiteBuiltinMinimum:
case kTfLiteBuiltinMul:
case kTfLiteBuiltinNotEqual:
case kTfLiteBuiltinPad:
case kTfLiteBuiltinPadv2:
case kTfLiteBuiltinPrelu:
case kTfLiteBuiltinReduceMax:
case kTfLiteBuiltinReduceMin:
case kTfLiteBuiltinRelu:
case kTfLiteBuiltinReluN1To1:
case kTfLiteBuiltinRelu6:
case kTfLiteBuiltinResizeBilinear:
case kTfLiteBuiltinResizeNearestNeighbor:
case kTfLiteBuiltinReshape:
case kTfLiteBuiltinSlice:
case kTfLiteBuiltinSoftmax:
case kTfLiteBuiltinSpaceToBatchNd:
case kTfLiteBuiltinSpaceToDepth:
case kTfLiteBuiltinDepthToSpace:
case kTfLiteBuiltinStridedSlice:
case kTfLiteBuiltinSub:
case kTfLiteBuiltinTanh:
case kTfLiteBuiltinTile:
case kTfLiteBuiltinTopkV2:
case kTfLiteBuiltinTranspose: {
return input_type == kTfLiteInt8;
}
default:
return false;
}
}
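// TFLite's LSTM op comes in two variants distinguished only by input count:
// the 5-input "basic" kernel and the full 20/24-input kernel.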
constexpr int kLstmFullKernelInputSize = 24;
constexpr int kLstmFullKernelNoOptionalParamsInputSize = 20;
constexpr int kLstmBasicKernelInputSize = 5;
inline bool isLstmBasicKernel(const TfLiteNode* node) {
return node->inputs->size == kLstmBasicKernelInputSize;
}
inline bool isLstmFullKernel(const TfLiteNode* node) {
return node->inputs->size == kLstmFullKernelInputSize ||
node->inputs->size == kLstmFullKernelNoOptionalParamsInputSize;
}
bool IsMeanWithDifferentInputOutputQuantization(const TfLiteContext* context,
const TfLiteNode* node) {
const auto& input = context->tensors[node->inputs->data[0]];
const auto& output = context->tensors[node->outputs->data[0]];
return input.params.scale != output.params.scale ||
input.params.zero_point != output.params.zero_point;
}
bool IsBroadcastBatchMatMul(const TfLiteContext* context,
const TfLiteNode* node) {
const auto& input0 = context->tensors[node->inputs->data[0]];
const auto& input1 = context->tensors[node->inputs->data[1]];
if (input0.dims->size != input1.dims->size) {
return true;
}
for (int i = 0; i < input0.dims->size - 2; i++) {
if (input0.dims->data[i] != input1.dims->data[i]) {
return true;
}
}
return false;
}
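// Returns true for ops that combine float activations with quantized weights
// (hybrid quantization).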
bool IsHybridOperator(const TfLiteContext* context, int builtin_code,
const TfLiteNode* node) {
switch (builtin_code) {
case kTfLiteBuiltinConv2d:
case kTfLiteBuiltinFullyConnected: {
const int input_id = node->inputs->data[0];
const int filter_id = node->inputs->data[1];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType filter_type = context->tensors[filter_id].type;
return IsFloat(input_type) && IsQuantized(filter_type);
}
case kTfLiteBuiltinLstm: {
const int input_id = node->inputs->data[0];
const int weights_id = node->inputs->data[2];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType weights_type = context->tensors[weights_id].type;
return isLstmFullKernel(node) && IsFloat(input_type) &&
IsQuantized(weights_type);
}
case kTfLiteBuiltinUnidirectionalSequenceLstm: {
const int input_id = node->inputs->data[0];
const int weights_id = node->inputs->data[2];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType weights_type = context->tensors[weights_id].type;
return IsFloat(input_type) && IsQuantized(weights_type);
}
case kTfLiteBuiltinBidirectionalSequenceLstm: {
const int input_id = node->inputs->data[0];
const int weights_id = node->inputs->data[2];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType weights_type = context->tensors[weights_id].type;
return IsFloat(input_type) && IsQuantized(weights_type);
}
case kTfLiteBuiltinUnidirectionalSequenceRnn: {
const int input_id = node->inputs->data[0];
const int weights_id = node->inputs->data[1];
const TfLiteType input_type = context->tensors[input_id].type;
const TfLiteType weights_type = context->tensors[weights_id].type;
return IsFloat(input_type) && IsQuantized(weights_type);
}
default:
return false;
}
}
bool IsDequantizeConstFloat16(TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration) {
return registration->builtin_code == kTfLiteBuiltinDequantize &&
context->tensors[node->inputs->data[0]].type ==
TfLiteType::kTfLiteFloat16 &&
IsConstantTensor(&context->tensors[node->inputs->data[0]]);
}
bool IsDequantizeNonConstFloat16(TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration) {
return registration->builtin_code == kTfLiteBuiltinDequantize &&
context->tensors[node->inputs->data[0]].type ==
TfLiteType::kTfLiteFloat16 &&
!IsConstantTensor(&context->tensors[node->inputs->data[0]]);
}
bool IsDensifyConstTensor(TfLiteContext* context, const TfLiteNode* node,
const TfLiteRegistration* registration) {
return registration->builtin_code == kTfLiteBuiltinDensify &&
IsConstantTensor(&context->tensors[node->inputs->data[0]]);
}
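// Builds an ANeuralNetworksOperandType describing the given TFLite tensor,
// applying the requested reinterpretation (e.g. int8 -> uint8 or int32) and
// promoting scalars to rank-1 tensors.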
ANeuralNetworksOperandType ConvertTensorTypeToNNType(
const TfLiteTensor* tensor, TfLiteType ann_type_equivalent,
bool use_int8_asymm_signed) {
int32_t nn_type = 0;
float scale = 0.0f;
int32_t zero_point = 0;
switch (tensor->type) {
case kTfLiteFloat32:
nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
break;
case kTfLiteUInt8:
nn_type = ann_type_equivalent == kTfLiteInt32
? ANEURALNETWORKS_TENSOR_INT32
: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
scale = tensor->params.scale;
zero_point = tensor->params.zero_point;
if (scale == 0) {
scale = 1;
}
break;
case kTfLiteInt8:
scale = tensor->params.scale;
zero_point = tensor->params.zero_point;
if (use_int8_asymm_signed) {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
} else if (ann_type_equivalent == kTfLiteUInt8) {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
zero_point += 128;
} else if (ann_type_equivalent == kTfLiteInt32) {
nn_type = ANEURALNETWORKS_TENSOR_INT32;
zero_point += 128;
} else {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM;
}
if (scale == 0) {
scale = 1;
}
break;
case kTfLiteInt32:
nn_type = ANEURALNETWORKS_TENSOR_INT32;
scale = tensor->params.scale;
zero_point = tensor->params.zero_point;
break;
case kTfLiteBool:
nn_type = ANEURALNETWORKS_TENSOR_BOOL8;
break;
case kTfLiteInt16:
nn_type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM;
scale = tensor->params.scale;
zero_point = tensor->params.zero_point;
break;
default:
break;
}
uint32_t tensor_rank = static_cast<uint32_t>(tensor->dims->size);
uint32_t* tensor_dims = reinterpret_cast<uint32_t*>(tensor->dims->data);
static uint32_t scalar_rank = 1;
if (tensor_rank == 0) {
tensor_rank = scalar_rank;
tensor_dims = &scalar_rank;
}
ANeuralNetworksOperandType nn_operand_type{
.type = nn_type,
.dimensionCount = tensor_rank,
.dimensions = tensor_dims,
.scale = scale,
.zeroPoint = zero_point,
};
return nn_operand_type;
}
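// Tensor buffers shared with NNAPI are padded so that each one starts on a
// 64-byte boundary.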
constexpr size_t kDefaultByteAlignmentForNNAPI = 64;
static size_t GetNumPaddingBytes(size_t byte_size) {
size_t num_padding_bytes = 0;
if (byte_size % kDefaultByteAlignmentForNNAPI) {
num_padding_bytes = kDefaultByteAlignmentForNNAPI -
(byte_size % kDefaultByteAlignmentForNNAPI);
}
return num_padding_bytes;
}
static size_t GetNNTensorSize(size_t tensor_size, bool allow_padding) {
size_t padding_bytes = GetNumPaddingBytes(tensor_size);
size_t nn_tensor_size = tensor_size;
if (allow_padding) {
nn_tensor_size += padding_bytes;
}
return nn_tensor_size;
}
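// Looks up an NNAPI device handle by name; on failure, logs the list of
// available device names and returns an error.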
TfLiteStatus GetDeviceHandle(const NnApi* nnapi, TfLiteContext* context,
const char* device_name_ptr,
ANeuralNetworksDevice** result, int* nnapi_errno) {
if (!device_name_ptr) return kTfLiteError;
*result = nullptr;
std::string device_name(device_name_ptr);
uint32_t num_devices = 0;
nnapi->ANeuralNetworks_getDeviceCount(&num_devices);
for (uint32_t i = 0; i < num_devices; i++) {
ANeuralNetworksDevice* device = nullptr;
const char* buffer = nullptr;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworks_getDevice(i, &device),
"Searching for target device", nnapi_errno);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworksDevice_getName(device, &buffer),
"Searching for target device", nnapi_errno);
if (device_name == buffer) {
*result = device;
return kTfLiteOk;
}
}
TF_LITE_KERNEL_LOG(context,
"Could not find the specified NNAPI accelerator: %s. "
"Must be one of: {%s}.",
device_name_ptr,
nnapi::GetStringDeviceNamesList(nnapi).c_str());
return kTfLiteError;
}
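// Folds the elements of an int array into a 64-bit hash (hash_combine style),
// optionally chaining onto a previous hash value.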
uint64_t GetHash(const TfLiteIntArray* int_array, uint64_t combine_with = 0) {
constexpr auto kHashConst = 0x9e3779b97f4a7800ULL;
uint64_t result = combine_with;
for (auto i : TfLiteIntArrayView(int_array)) {
result = result ^ (i + kHashConst + (result << 10) + (result >> 4));
}
return result;
}
bool HasZeroes(TfLiteIntArrayView array) {
for (auto value : array) {
if (value == 0) {
return true;
}
}
return false;
}
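// For SPLIT_V, returns the concrete size of the single -1 ("infer") split,
// computed as the axis size minus the sum of the known splits, or -1 if every
// split size is already known.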
int ComputeSplitVUnknownSplitSize(const TfLiteContext* context,
const TfLiteNode* node) {
const auto& input = context->tensors[node->inputs->data[0]];
const auto& size_splits_tensor = context->tensors[node->inputs->data[1]];
const auto& axis_tensor = context->tensors[node->inputs->data[2]];
const auto* size_splits = size_splits_tensor.data.i32;
int num_splits = size_splits_tensor.dims->data[0];
bool has_unknown_split_size = false;
int sum_of_known_split_sizes = 0;
for (int i = 0; i < num_splits; i++) {
if (size_splits[i] == -1) {
has_unknown_split_size = true;
} else {
sum_of_known_split_sizes += size_splits[i];
}
}
int axis = axis_tensor.data.i32[0];
axis = axis < 0 ? axis + input.dims->size : axis;
int total_size = input.dims->data[axis];
return has_unknown_split_size ? total_size - sum_of_known_split_sizes : -1;
}
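// Bit flags that control how a TFLite tensor is added as an NNAPI operand:
// scalar promotion, int8 -> uint8 re-encoding, signed asymmetric int8, forced
// per-channel quantization, and fp16 -> fp32 conversion.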
enum {
NN_TENSOR_FLAG_SCALAR_AS_TENSOR = 1U << 0,
NN_TENSOR_FLAG_INT8_CONVERSION = 1U << 1,
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED = 1U << 2,
NN_TENSOR_FLAG_FORCE_PER_CHANNEL = 1U << 3,
NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION = 1U << 4,
};
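// Picks the NNAPI feature level to target: the runtime's level, capped at the
// highest feature level reported by the selected devices.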
TfLiteStatus GetTargetFeatureLevel(
TfLiteContext* context, const NnApi* nnapi,
const std::vector<ANeuralNetworksDevice*>& device_handles,
int* target_feature_level, int* nnapi_errno) {
*target_feature_level = nnapi->nnapi_runtime_feature_level;
int64_t devices_feature_level = -1;
for (const auto* device_handle : device_handles) {
int64_t curr_device_feature_level;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi->ANeuralNetworksDevice_getFeatureLevel(
device_handle, &curr_device_feature_level),
"Searching for target device", nnapi_errno);
devices_feature_level =
std::max(curr_device_feature_level, devices_feature_level);
}
if ((devices_feature_level > 0) &&
(devices_feature_level < nnapi->nnapi_runtime_feature_level)) {
TFLITE_LOG(TFLITE_LOG_INFO,
"Changing NNAPI Feature Level %lld to "
"supported by target devices: %lld",
nnapi->android_sdk_version, devices_feature_level);
*target_feature_level = devices_feature_level;
}
return kTfLiteOk;
}
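// Returns true if execution should be restricted to an explicit device set,
// i.e. an accelerator name was specified or CPU fallback is disallowed on
// NNAPI 1.2+.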
bool ShouldUseTargetDevices(StatefulNnApiDelegate::Options delegate_options,
const NnApi* nnapi,
bool exclude_nnapi_reference = false) {
const char* device_name_ptr = delegate_options.accelerator_name;
std::string nnapi_cpu("nnapi-reference");
bool has_selected_accelerator = device_name_ptr != nullptr;
if (exclude_nnapi_reference && has_selected_accelerator) {
if (nnapi_cpu == device_name_ptr) return false;
}
return (delegate_options.disallow_nnapi_cpu &&
nnapi->android_sdk_version >=
delegate::nnapi::kMinSdkVersionForNNAPI12) ||
has_selected_accelerator;
}
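// Fills `result` with the device handles to compile for: the named
// accelerator if one was given, otherwise every device except the
// "nnapi-reference" CPU implementation when CPU fallback is disallowed.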
TfLiteStatus GetTargetDevices(TfLiteContext* context, TfLiteDelegate* delegate,
const NnApi* nnapi, int* nnapi_errno,
std::vector<ANeuralNetworksDevice*>* result) {
if (nnapi->android_sdk_version < delegate::nnapi::kMinSdkVersionForNNAPI12) {
return kTfLiteError;
}
const auto delegate_options = StatefulNnApiDelegate::GetOptions(delegate);
const char* device_name_ptr = delegate_options.accelerator_name;
if (device_name_ptr != nullptr) {
ANeuralNetworksDevice* nnapi_device = nullptr;
TF_LITE_ENSURE_STATUS(GetDeviceHandle(nnapi, context, device_name_ptr,
&nnapi_device, nnapi_errno));
result->push_back(nnapi_device);
} else if (delegate_options.disallow_nnapi_cpu) {
std::string nnapi_cpu("nnapi-reference");
uint32_t num_devices = 0;
nnapi->ANeuralNetworks_getDeviceCount(&num_devices);
for (uint32_t i = 0; i < num_devices; i++) {
ANeuralNetworksDevice* device = nullptr;
const char* buffer = nullptr;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworks_getDevice(i, &device),
"Getting list of available devices", nnapi_errno);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworksDevice_getName(device, &buffer),
"Getting list of available devices", nnapi_errno);
if (nnapi_cpu != buffer) {
result->push_back(device);
}
}
}
return kTfLiteOk;
}
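// Bookkeeping for the TFLite -> NNAPI translation: operand indices assigned to
// TFLite tensors, per-tensor type-conversion info, and the TFLite node that
// produced each NNAPI operation.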
class NnapiMappingContext {
public:
int next_ann_tensor_index_ = 0;
std::vector<int> lite_tensor_to_ann_tensor_;
std::vector<int> index_to_type_conversion_;
std::vector<int> nnapi_to_tflite_op_mapping_;
};
}
namespace delegate {
namespace nnapi {
#ifdef TFLITE_NNAPI_ALLOW_MMAP_SHARING
NNMemory::NNMemory(const NnApi* nnapi, const char* name, size_t size) {
if (name && size > 0) {
nnapi_ = nnapi;
byte_size_ = size;
#ifdef __ANDROID__
fd_ = nnapi_->ASharedMemory_create(name, size);
#else
char shm_name_buffer[L_tmpnam];
if (tmpnam(shm_name_buffer) == nullptr) {
shm_name_buffer[0] = '\0';
}
shm_region_name_ = std::string(name) + std::string(shm_name_buffer);
std::replace(shm_region_name_.begin(), shm_region_name_.end(), '/', '-');
fd_ = nnapi_->ASharedMemory_create(shm_region_name_.c_str(), size);
#endif
data_ptr_ = reinterpret_cast<uint8_t*>(
mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd_, 0));
nnapi_->ANeuralNetworksMemory_createFromFd(size, PROT_READ | PROT_WRITE,
fd_, 0, &nn_memory_handle_);
}
}
#else
NNMemory::NNMemory(const NnApi* /*nnapi*/, const char* /*name*/,
                   size_t /*size*/)
: nnapi_(nullptr) {}
#endif
NNMemory::~NNMemory() {
#ifdef TFLITE_NNAPI_ALLOW_MMAP_SHARING
if (data_ptr_) {
munmap(data_ptr_, byte_size_);
}
if (nn_memory_handle_) {
nnapi_->ANeuralNetworksMemory_free(nn_memory_handle_);
}
#ifdef __ANDROID__
if (fd_ >= 0) close(fd_);
#else
if (!shm_region_name_.empty()) shm_unlink(shm_region_name_.c_str());
#endif
#endif
}
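// Remembers which NNAPI operand holds the dequantized value of a given
// operand/type pair so the dequantization is only added once.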
class DequantizeMapping {
public:
int DequantizedAnnIndex(int ann_index, TfLiteType type) const {
for (const auto& element : mapping_) {
if (ann_index == std::get<0>(element) && type == std::get<1>(element)) {
return std::get<2>(element);
}
}
return -1;
}
void Add(int ann_index, TfLiteType type, int dequantized_ann_index) {
mapping_.emplace_back(ann_index, type, dequantized_ann_index);
}
private:
std::vector<std::tuple<int, TfLiteType, int>> mapping_;
};
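// Helper that adds operands and operations to an ANeuralNetworksModel from
// TFLite tensors and parameters, keeping track of the TFLite -> NNAPI index
// mapping as it goes.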
class NNAPIOpBuilder {
public:
NNAPIOpBuilder(const NnApi* nnapi, TfLiteContext* context,
NnapiMappingUtilCInterface* mapping_util,
DequantizeMapping* dequantize_mapping,
std::map<const MMAPAllocation*, ANeuralNetworksMemory*>*
allocation_mapping,
ANeuralNetworksModel* nn_model, int* nnapi_errno,
bool allow_dynamic_dimensions)
: nnapi_(nnapi),
context_(context),
mapping_util_(mapping_util),
dequantize_mapping_(dequantize_mapping),
allocation_memory_mapping_(allocation_mapping),
nn_model_(nn_model),
nnapi_errno_(nnapi_errno),
allow_dynamic_dimensions_(allow_dynamic_dimensions) {}
TfLiteStatus AddScalarBoolOperand(bool value) {
return AddScalarOperand<bool>(value, ANEURALNETWORKS_BOOL);
}
TfLiteStatus AddScalarInt32Operand(int32_t value) {
return AddScalarOperand<int32_t>(value, ANEURALNETWORKS_INT32);
}
TfLiteStatus AddScalarFloat32Operand(float value) {
return AddScalarOperand<float>(value, ANEURALNETWORKS_FLOAT32);
}
TfLiteStatus AddVectorInt32Operand(const int32_t* values,
uint32_t num_values) {
return AddVectorOperand<int32_t>(values, num_values,
ANEURALNETWORKS_TENSOR_INT32,
                                     /*scale=*/0.f, /*zero_point=*/0);
}
TfLiteStatus AddVectorInt32Operand(const int32_t* values, uint32_t num_values,
float scale, int32_t zero_point) {
return AddVectorOperand<int32_t>(
values, num_values, ANEURALNETWORKS_TENSOR_INT32, scale, zero_point);
}
TfLiteStatus AddVectorInt16Operand(const int16_t* values,
uint32_t num_values) {
return AddVectorOperand<int16_t>(values, num_values,
ANEURALNETWORKS_TENSOR_QUANT16_SYMM,
                                     /*scale=*/1.f, /*zero_point=*/0);
}
TfLiteStatus AddVectorInt8Operand(const int8_t* values, uint32_t num_values) {
return AddVectorOperand<int8_t>(values, num_values,
ANEURALNETWORKS_TENSOR_QUANT8_SYMM,
                                    /*scale=*/1.f, /*zero_point=*/0);
}
TfLiteStatus AddVectorFloat32Operand(const float* values,
uint32_t num_values) {
return AddVectorOperand<float>(values, num_values,
ANEURALNETWORKS_TENSOR_FLOAT32);
}
TfLiteStatus AddPoolingParams(void* data) {
auto builtin = reinterpret_cast<TfLitePoolParams*>(data);
AddScalarInt32Operand(builtin->padding);
AddScalarInt32Operand(builtin->stride_width);
AddScalarInt32Operand(builtin->stride_height);
AddScalarInt32Operand(builtin->filter_width);
AddScalarInt32Operand(builtin->filter_height);
AddScalarInt32Operand(builtin->activation);
return kTfLiteOk;
}
TfLiteStatus AddTensorInput(int tensor_index, bool hybrid_op,
int tensor_flags = 0) {
return AddTensor(tensor_index, hybrid_op, &augmented_inputs_, tensor_flags);
}
TfLiteStatus AddTensorOutput(int tensor_index, int tensor_flags = 0) {
    return AddTensor(tensor_index, /*hybrid_op=*/false, &augmented_outputs_,
                     tensor_flags);
}
TfLiteStatus AddAdditionalFloat32OutputTensor(uint32_t dimension_count) {
std::vector<uint32_t> dims(dimension_count, 0);
return AddFloat32OutputTensor(dimension_count, dims.data(), nullptr);
}
TfLiteStatus AddStateFloat32Tensor(int tensor_index,
int* ann_tensor_index_out) {
TfLiteTensor* tensor = &context_->tensors[tensor_index];
return AddFloat32OutputTensor(
tensor->dims->size, reinterpret_cast<uint32_t*>(tensor->dims->data),
ann_tensor_index_out);
}
TfLiteStatus AddStateInt16Tensor(int tensor_index,
int* ann_tensor_index_out) {
TfLiteTensor* tensor = &context_->tensors[tensor_index];
return AddAdditionalOutputTensor(
tensor->dims->size, reinterpret_cast<uint32_t*>(tensor->dims->data),
ANEURALNETWORKS_TENSOR_QUANT16_SYMM, tensor->params.scale,
tensor->params.zero_point, ann_tensor_index_out);
}
TfLiteStatus AddStateInt8AsymTensor(int tensor_index,
int* ann_tensor_index_out) {
TfLiteTensor* tensor = &context_->tensors[tensor_index];
return AddAdditionalOutputTensor(
tensor->dims->size, reinterpret_cast<uint32_t*>(tensor->dims->data),
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, tensor->params.scale,
tensor->params.zero_point, ann_tensor_index_out);
}
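  // Adds a constant tensor holding a single value; quantized tensors are
  // stored as a single uint8 value with a scale chosen to reproduce `value`.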
TfLiteStatus AddSingleValueConstantTensor(float value, bool is_quantized) {
if (!is_quantized) {
return AddVectorFloat32Operand(&value, 1);
} else {
const uint8_t quant8_value = 64;
return AddVectorOperand<uint8_t>(&quant8_value, 1,
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
value / quant8_value, 0);
}
}
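  // Computes uint8 asymmetric quantization parameters (scale and zero point)
  // covering the range [min, max].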
TfLiteStatus CalculateQuantizationParams(float min, float max, float* scale,
int* zero_point) {
if (max < min) return kTfLiteError;
*scale = (max - min) / 255.f;
if (min > 0.f) {
*zero_point = 0;
} else if (max < 0.f) {
*zero_point = 255;
} else {
*zero_point = (0.f - min) / (*scale);
}
return kTfLiteOk;
}
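  // Lowers HARD_SWISH into NNAPI MUL/ADD operations using
  //   hard_swish(x) = x * relu6(x + 3) / 6
  //                 = 0.5x * relu_n1_to_1(x / 3) + 0.5x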
TfLiteStatus TransformHardSwishIntoSupportedOps(int lite_input_index,
int lite_output_index,
bool need_int8_conversion,
int lite_node_index) {
const TfLiteTensor& tensor = context_->tensors[lite_input_index];
float input_scale = tensor.params.scale;
int input_zero_point = tensor.params.zero_point;
float input_min = 0.f;
float input_max = 0.f;
int tensor_flags = 0;
if (need_int8_conversion) {
tensor_flags = tensor_flags | NN_TENSOR_FLAG_INT8_CONVERSION;
input_zero_point += 128;
}
bool is_quantized = false;
int nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
if (tensor.type == kTfLiteInt8 || tensor.type == kTfLiteUInt8) {
is_quantized = true;
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
input_min = (0 - input_zero_point) * input_scale;
input_max = (255 - input_zero_point) * input_scale;
}
float s1_output_min = 0.f;
float s1_output_max = 0.f;
int s1_out_ann_index = 0;
{
float s1_output_scale = 0.f;
int s1_output_zero_point = 0;
if (is_quantized) {
s1_output_min = input_min / 3.f < -1.f ? -1.f : input_min / 3.f;
s1_output_max = input_max / 3.f > 1.f ? 1.f : input_max / 3.f;
CalculateQuantizationParams(s1_output_min, s1_output_max,
&s1_output_scale, &s1_output_zero_point);
}
TF_LITE_ENSURE_OK(context_,
AddTensorInput(lite_input_index, false, tensor_flags));
const float value3f = 1.f / 3.f;
TF_LITE_ENSURE_OK(context_,
AddSingleValueConstantTensor(value3f, is_quantized));
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_RELU1));
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
tensor.dims->size, reinterpret_cast<uint32_t*>(tensor.dims->data),
nn_type, s1_output_scale, s1_output_zero_point,
&s1_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_MUL, lite_node_index));
}
float s2_output_min = input_min / 2.f;
float s2_output_max = input_max / 2.f;
int s2_out_ann_index = 0;
{
float s2_output_scale = input_scale / 2.0f;
int s2_output_zero_point = input_zero_point;
TF_LITE_ENSURE_OK(context_,
AddTensorInput(lite_input_index, false, tensor_flags));
const float value2f = 0.5f;
TF_LITE_ENSURE_OK(context_,
AddSingleValueConstantTensor(value2f, is_quantized));
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
tensor.dims->size, reinterpret_cast<uint32_t*>(tensor.dims->data),
nn_type, s2_output_scale, s2_output_zero_point,
&s2_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_MUL, lite_node_index));
}
int s3_out_ann_index = 0;
{
augmented_inputs_.push_back(s1_out_ann_index);
augmented_inputs_.push_back(s2_out_ann_index);
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
float s3_output_scale = 0.f;
int s3_output_zero_point = 0;
if (is_quantized) {
float s3_output_min = 0.f;
float s3_output_max =
s1_output_max * s2_output_max > s1_output_min * s2_output_min
? s1_output_max * s2_output_max
: s1_output_min * s2_output_min;
CalculateQuantizationParams(s3_output_min, s3_output_max,
&s3_output_scale, &s3_output_zero_point);
}
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
tensor.dims->size, reinterpret_cast<uint32_t*>(tensor.dims->data),
nn_type, s3_output_scale, s3_output_zero_point,
&s3_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_MUL, lite_node_index));
}
{
augmented_inputs_.push_back(s2_out_ann_index);
augmented_inputs_.push_back(s3_out_ann_index);
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(context_,
AddTensorOutput(lite_output_index, tensor_flags));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_ADD, lite_node_index));
}
return kTfLiteOk;
}
TfLiteStatus AddOperationToModel(ANeuralNetworksOperationType type,
uint32_t input_count, const uint32_t* inputs,
uint32_t output_count,
const uint32_t* outputs,
int lite_node_index) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperation(
nn_model_, type, input_count, inputs, output_count, outputs),
"adding operation", nnapi_errno_);
mapping_util_->AddNnapiToTfliteOpMapping(mapping_util_, lite_node_index);
return kTfLiteOk;
}
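  // Adds a DEQUANTIZE operation (unless one already exists for this operand
  // and type) and rewires the given input slot to the dequantized operand.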
TfLiteStatus AddDequantize(int nn_input_index, int lite_tensor_index,
TfLiteType dequantized_type, int lite_node_index) {
const int ann_index =
mapping_util_->TfLiteIndexToNnIndex(mapping_util_, lite_tensor_index);
int dequantized_ann_index =
dequantize_mapping_->DequantizedAnnIndex(ann_index, dequantized_type);
if (dequantized_ann_index == -1) {
const TfLiteTensor& tensor = context_->tensors[lite_tensor_index];
ANeuralNetworksOperandType operand_type{
ANEURALNETWORKS_TENSOR_FLOAT32,
static_cast<uint32_t>(tensor.dims->size),
reinterpret_cast<uint32_t*>(tensor.dims->data), 0.f, 0};
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
dequantized_ann_index =
mapping_util_->AddNewNonTensorOperand(mapping_util_);
const uint32_t dequantize_input[1] = {static_cast<uint32_t>(ann_index)};
const uint32_t dequantize_output[1] = {
static_cast<uint32_t>(dequantized_ann_index)};
TF_LITE_ENSURE_OK(
context_, AddOperationToModel(ANEURALNETWORKS_DEQUANTIZE,
                                        /*input_count=*/1, dequantize_input,
                                        /*output_count=*/1, dequantize_output,
lite_node_index));
dequantize_mapping_->Add(ann_index, dequantized_type,
dequantized_ann_index);
}
augmented_inputs_[nn_input_index] = dequantized_ann_index;
return kTfLiteOk;
}
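  // Appends a RESHAPE that reshapes the given NNAPI operand to the dimensions
  // of the TFLite output tensor.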
TfLiteStatus AppendReshape(int nn_input_index, int lite_out_tensor_index,
int lite_node_index) {
augmented_inputs_.push_back(nn_input_index);
auto& output_tensor = context_->tensors[lite_out_tensor_index];
TF_LITE_ENSURE_STATUS(
AddVectorInt32Operand(output_tensor.dims->data,
static_cast<uint32_t>(output_tensor.dims->size)));
TF_LITE_ENSURE_OK(context_,
AddTensorOutput(lite_out_tensor_index,
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_RESHAPE, lite_node_index));
return kTfLiteOk;
}
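  // Appends a requantization step implemented as an ADD with a zero constant,
  // producing an output with the TFLite tensor's quantization parameters.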
TfLiteStatus AppendRequantize(int nn_input_index, int lite_out_tensor_index,
int lite_node_index, int tensor_flags = 0) {
augmented_inputs_.push_back(nn_input_index);
auto& output_tensor = context_->tensors[lite_out_tensor_index];
TF_LITE_ENSURE(context_, IsQuantized(output_tensor.type));
bool need_int8_conversion = tensor_flags & NN_TENSOR_FLAG_INT8_CONVERSION;
int nn_type = (output_tensor.type == kTfLiteUInt8 || need_int8_conversion)
? ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
int8_t zero = 0;
    TF_LITE_ENSURE_STATUS(AddVectorOperand(&zero, /*num_values=*/1, nn_type,
                                           /*scale=*/1.0f, /*zero_point=*/0));
TF_LITE_ENSURE_STATUS(AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_STATUS(AddTensorOutput(lite_out_tensor_index, tensor_flags));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_ADD, lite_node_index));
return kTfLiteOk;
}
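  // Lowers PACK into a CONCATENATION along the packing axis followed by a
  // RESHAPE to the expected output shape.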
TfLiteStatus TransformPackIntoSupportedOps(int lite_node_index,
TfLiteNode* node,
TfLiteRegistration* reg) {
int concat_output_ann_index = -1;
TfLitePackParams* builtin =
reinterpret_cast<TfLitePackParams*>(node->builtin_data);
auto& input_tensor = context_->tensors[node->inputs->data[0]];
int axis = builtin->axis < 0 ? input_tensor.dims->size + builtin->axis + 1
: builtin->axis;
TF_LITE_ENSURE(context_, axis < input_tensor.dims->size);
uint32_t concat_dim_size = 0;
for (int input_pos = 0; input_pos < node->inputs->size; ++input_pos) {
const auto input_index = node->inputs->data[input_pos];
concat_dim_size +=
context_->tensors[node->inputs->data[input_pos]].dims->data[axis];
TF_LITE_ENSURE_STATUS(
AddTensorInput(input_index, false,
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
}
TF_LITE_ENSURE_STATUS(AddScalarInt32Operand(axis));
std::vector<uint32_t> concat_output_shape(input_tensor.dims->size, 0);
for (int i = 0; i < concat_output_shape.size(); i++) {
if (i == axis) {
concat_output_shape[i] = concat_dim_size;
} else {
concat_output_shape[i] = input_tensor.dims->data[i];
}
}
TF_LITE_ENSURE_STATUS(AddIntermediateOutputTensor(
input_tensor.type, concat_output_shape.size(),
concat_output_shape.data(), input_tensor.params.scale,
input_tensor.params.zero_point, &concat_output_ann_index));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_CONCATENATION, lite_node_index));
TF_LITE_ENSURE_STATUS(AppendReshape(
concat_output_ann_index, node->outputs->data[0], lite_node_index));
return kTfLiteOk;
}
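  // Lowers UNPACK into a RESHAPE (merging the unpacked axis into its
  // neighbor) followed by a SPLIT.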
TfLiteStatus TransformUnpackIntoSupportedOps(int lite_node_index,
TfLiteNode* node,
TfLiteRegistration* reg) {
auto& input_tensor = context_->tensors[node->inputs->data[0]];
auto* builtin = reinterpret_cast<TfLiteUnpackParams*>(node->builtin_data);
int axis = builtin->axis < 0 ? builtin->axis + input_tensor.dims->size
: builtin->axis;
TF_LITE_ENSURE(context_, axis >= 0);
TF_LITE_ENSURE(context_, axis < (input_tensor.dims->size - 1));
int num_splits = builtin->num;
TF_LITE_ENSURE(context_, num_splits == input_tensor.dims->data[axis]);
TF_LITE_ENSURE(context_, num_splits == node->outputs->size);
std::vector<int32_t> intermediate_shape(input_tensor.dims->size - 1);
std::copy(input_tensor.dims->data, input_tensor.dims->data + axis,
intermediate_shape.begin());
intermediate_shape[axis] =
input_tensor.dims->data[axis] * input_tensor.dims->data[axis + 1];
std::copy(input_tensor.dims->data + axis + 2,
input_tensor.dims->data + input_tensor.dims->size,
intermediate_shape.begin() + axis + 1);
TF_LITE_ENSURE_STATUS(AddTensorInput(node->inputs->data[0],
                                         /*hybrid_op=*/false,
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_STATUS(AddVectorInt32Operand(intermediate_shape.data(),
intermediate_shape.size()));
int reshape_output_ann_index = -1;
float scale = input_tensor.params.scale;
if (IsQuantized(input_tensor.type) && scale == 0.0f) {
scale = 1.0f;
}
TF_LITE_ENSURE_STATUS(AddIntermediateOutputTensor(
input_tensor.type, intermediate_shape.size(),
reinterpret_cast<uint32_t*>(intermediate_shape.data()), scale,
input_tensor.params.zero_point, &reshape_output_ann_index));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_RESHAPE, lite_node_index));
augmented_inputs_.push_back(reshape_output_ann_index);
TF_LITE_ENSURE_STATUS(AddScalarInt32Operand(axis));
TF_LITE_ENSURE_STATUS(AddScalarInt32Operand(num_splits));
for (int i = 0; i < num_splits; i++) {
int lite_output_index = node->outputs->data[i];
TF_LITE_ENSURE_STATUS(AddTensorOutput(
lite_output_index, NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
}
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_SPLIT, lite_node_index));
return kTfLiteOk;
}
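  // Lowers SPLIT_V into one SLICE operation per output.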
TfLiteStatus TransformSplitVIntoSupportedOps(int lite_node_index,
TfLiteNode* node,
TfLiteRegistration* reg) {
auto& input = context_->tensors[node->inputs->data[0]];
int input_rank = input.dims->size;
const auto& size_splits_tensor = context_->tensors[node->inputs->data[1]];
const auto* size_splits = size_splits_tensor.data.i32;
int num_splits = size_splits_tensor.dims->data[0];
int axis = context_->tensors[node->inputs->data[2]].data.i32[0];
axis = axis < 0 ? axis + input_rank : axis;
TF_LITE_ENSURE(context_, axis >= 0);
TF_LITE_ENSURE(context_, axis < input_rank);
int unknown_split_size = ComputeSplitVUnknownSplitSize(context_, node);
int slice_begin_index = 0;
for (int split_index = 0; split_index < num_splits; split_index++) {
int split_size = size_splits[split_index] == -1
? unknown_split_size
: size_splits[split_index];
TF_LITE_ENSURE(context_, split_size > 0);
std::vector<int> begin_indices(input_rank);
std::vector<int> slice_sizes(input_rank);
for (int i = 0; i < input_rank; i++) {
if (i == axis) {
begin_indices[i] = slice_begin_index;
slice_sizes[i] = split_size;
} else {
begin_indices[i] = 0;
slice_sizes[i] = input.dims->data[i];
}
}
slice_begin_index += split_size;
TF_LITE_ENSURE_STATUS(AddTensorInput(
node->inputs->data[0],
          /*hybrid_op=*/false, NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_STATUS(
AddVectorInt32Operand(begin_indices.data(), begin_indices.size()));
TF_LITE_ENSURE_STATUS(
AddVectorInt32Operand(slice_sizes.data(), slice_sizes.size()));
int lite_output_index = node->outputs->data[split_index];
TF_LITE_ENSURE_STATUS(AddTensorOutput(
lite_output_index, NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_STATUS(
FinalizeAddOperation(ANEURALNETWORKS_SLICE, lite_node_index));
}
return kTfLiteOk;
}
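  // Lowers SQUARED_DIFFERENCE into a SUB followed by a MUL that squares the
  // difference.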
TfLiteStatus TransformSquaredDifferenceIntoSupportedOps(
int lite_node_index, TfLiteNode* node, TfLiteRegistration* reg) {
const TfLiteTensor& lhs = context_->tensors[node->inputs->data[0]];
const TfLiteTensor& output = context_->tensors[node->outputs->data[0]];
int diff_out_ann_index = 0;
{
float max_output = 0.f;
int diff_output_zero_point = 0;
int diff_output_nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
switch (lhs.type) {
case kTfLiteFloat32:
diff_output_nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
break;
case kTfLiteInt32:
diff_output_nn_type = ANEURALNETWORKS_TENSOR_INT32;
break;
case kTfLiteUInt8:
max_output = (255 - output.params.zero_point) * output.params.scale;
diff_output_zero_point = 128;
diff_output_nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
break;
case kTfLiteInt8:
max_output = (127 - output.params.zero_point) * output.params.scale;
diff_output_zero_point = 0;
diff_output_nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
break;
default:
return kTfLiteError;
}
float diff_output_scale = 2.0f * std::sqrt(max_output) / 254.0f;
TF_LITE_ENSURE_OK(
context_, AddTensorInput(node->inputs->data[0], false,
NN_TENSOR_FLAG_SCALAR_AS_TENSOR |
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_OK(
context_, AddTensorInput(node->inputs->data[1], false,
NN_TENSOR_FLAG_SCALAR_AS_TENSOR |
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
output.dims->size, reinterpret_cast<uint32_t*>(output.dims->data),
diff_output_nn_type, diff_output_scale, diff_output_zero_point,
&diff_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_SUB, lite_node_index));
}
{
augmented_inputs_.push_back(diff_out_ann_index);
augmented_inputs_.push_back(diff_out_ann_index);
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(context_,
AddTensorOutput(node->outputs->data[0],
NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_MUL, lite_node_index));
}
return kTfLiteOk;
}
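  // Lowers COS into SIN using cos(x) = sin(pi/2 - x).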
TfLiteStatus TransformCosIntoSupportedOps(int lite_node_index,
TfLiteNode* node,
TfLiteRegistration* reg) {
const TfLiteTensor& input = context_->tensors[node->inputs->data[0]];
const TfLiteTensor& output = context_->tensors[node->outputs->data[0]];
int diff_out_ann_index;
{
auto tensor_size = input.bytes / sizeof(float);
int tensor_index;
TF_LITE_ENSURE_OK(context_,
AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32,
input.dims, std::vector<float>(tensor_size, M_PI_2),
input.params, &tensor_index));
TF_LITE_ENSURE_OK(
context_, AddTensorInput(node->inputs->data[0], false));
TF_LITE_ENSURE_OK(context_,
AddScalarInt32Operand(ANEURALNETWORKS_FUSED_NONE));
TF_LITE_ENSURE_OK(
context_,
AddAdditionalOutputTensor(
output.dims->size, reinterpret_cast<uint32_t*>(output.dims->data),
ANEURALNETWORKS_TENSOR_FLOAT32, 0, 0, &diff_out_ann_index));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_SUB, lite_node_index));
}
{
augmented_inputs_.push_back(diff_out_ann_index);
TF_LITE_ENSURE_OK(context_, AddTensorOutput(node->outputs->data[0]));
TF_LITE_ENSURE_OK(
context_, FinalizeAddOperation(ANEURALNETWORKS_SIN, lite_node_index));
}
return kTfLiteOk;
}
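  // Emits the pending operation of the given type using the accumulated
  // augmented inputs and outputs, then clears both lists.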
TfLiteStatus FinalizeAddOperation(ANeuralNetworksOperationType type,
int lite_node_index) {
TF_LITE_ENSURE_OK(context_,
AddOperationToModel(
type, static_cast<uint32_t>(augmented_inputs_.size()),
augmented_inputs_.data(),
static_cast<uint32_t>(augmented_outputs_.size()),
augmented_outputs_.data(), lite_node_index));
augmented_inputs_.clear();
augmented_outputs_.clear();
return kTfLiteOk;
}
TfLiteStatus AddSingleValueTensorAsScalarOperand(int tensor_index,
int nn_type) {
const TfLiteTensor* tensor = &context_->tensors[tensor_index];
TF_LITE_ENSURE_EQ(context_, NumElements(tensor), 1);
ANeuralNetworksOperandType operand_type{.type = nn_type};
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", tensor, nnapi_errno_);
int ann_tensor_index =
mapping_util_->TfLiteIndexToNnIndex(mapping_util_, tensor_index);
if (ann_tensor_index != -1) {
augmented_inputs_.push_back(ann_tensor_index);
return kTfLiteOk;
}
ann_tensor_index =
mapping_util_->AddNewNnTensorIndex(mapping_util_, tensor_index);
augmented_inputs_.push_back(ann_tensor_index);
const TfLiteType tensor_type = tensor->type;
TfLiteType nn_type_equivalent;
TF_LITE_ENSURE_OK(context_, GetEquivalentToANNType(context_, nn_type,
&nn_type_equivalent));
if (tensor_type != nn_type_equivalent) {
mapping_util_->AddTypeConversion(mapping_util_, tensor_index,
nn_type_equivalent);
}
return kTfLiteOk;
}
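  // Creates a new TFLite tensor filled with `tensor_value`, registers it as a
  // constant NNAPI operand and appends it to the augmented inputs.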
template <typename T>
TfLiteStatus AddNewInputConstantTensor(
int32_t nn_type, TfLiteType type, const TfLiteIntArray* dims,
const std::vector<T>& tensor_value,
const TfLiteQuantizationParams& quant_params, int* tensor_index) {
TF_LITE_ENSURE_OK(context_,
context_->AddTensors(context_, 1, tensor_index));
TfLiteTensor* new_tensor = &context_->tensors[*tensor_index];
new_tensor->type = type;
new_tensor->allocation_type = kTfLiteDynamic;
new_tensor->params = quant_params;
TF_LITE_ENSURE_OK(
context_,
context_->ResizeTensor(
context_, new_tensor,
TfLiteIntArrayCopy(dims)));
memcpy(new_tensor->data.raw,
reinterpret_cast<const char*>(tensor_value.data()),
tensor_value.size() * sizeof(T));
const uint32_t tensor_rank = static_cast<uint32_t>(dims->size);
const uint32_t* tensor_dims = reinterpret_cast<const uint32_t*>(dims->data);
ANeuralNetworksOperandType operand_type{nn_type, tensor_rank, tensor_dims,
quant_params.scale,
quant_params.zero_point};
const int ann_tensor_index =
mapping_util_->AddDelegateGeneratedInputAnnTensorOperand(mapping_util_);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
augmented_inputs_.push_back(ann_tensor_index);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_tensor_index, new_tensor->data.raw,
new_tensor->bytes),
"setting new operand value", nnapi_errno_);
return kTfLiteOk;
}
template <typename T>
TfLiteStatus AddNewInputConstantTensor(
int32_t nn_type, TfLiteType type, std::initializer_list<int> dims,
const std::vector<T>& tensor_value,
const TfLiteQuantizationParams& quant_params, int* tensor_index) {
TfLiteIntArray* dim_array = TfLiteIntArrayCreate(dims.size());
dim_array->size = dims.size();
std::copy(dims.begin(), dims.end(), dim_array->data);
const auto result = AddNewInputConstantTensor(
nn_type, type, dim_array, tensor_value, quant_params, tensor_index);
TfLiteIntArrayFree(dim_array);
return result;
}
TfLiteStatus AddIntermediateOutputTensor(TfLiteType tfl_type,
uint32_t dimension_count,
const uint32_t* dimension_data,
float scale, int32_t zero_point,
int* ann_index_out,
bool need_int8_conversion = false) {
int32_t nn_type;
switch (tfl_type) {
case kTfLiteFloat32:
nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
break;
case kTfLiteInt8:
nn_type = need_int8_conversion
? ANEURALNETWORKS_TENSOR_QUANT8_ASYMM
: ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
break;
case kTfLiteUInt8:
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
break;
default:
return kTfLiteError;
}
if (need_int8_conversion) {
zero_point += 128;
}
TF_LITE_ENSURE_STATUS(
AddAdditionalOutputTensor(dimension_count, dimension_data, nn_type,
scale, zero_point, ann_index_out));
return kTfLiteOk;
}
void ClearInputOuputLists() {
augmented_inputs_.clear();
augmented_outputs_.clear();
}
private:
TfLiteStatus GetEquivalentToANNType(TfLiteContext* context, int nn_type,
TfLiteType* type) {
switch (nn_type) {
case ANEURALNETWORKS_INT32:
*type = kTfLiteInt32;
return kTfLiteOk;
case ANEURALNETWORKS_FLOAT32:
*type = kTfLiteFloat32;
return kTfLiteOk;
default:
TF_LITE_KERNEL_LOG(context,
"NN API Delegate: Can't get an equivalent TF Lite "
"type for provided NN API type: %d.\n",
nn_type);
return kTfLiteError;
}
}
template <typename T>
TfLiteStatus AddScalarOperand(T value, int32_t nn_type) {
ANeuralNetworksOperandType operand_type{.type = nn_type};
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
const int ann_index = mapping_util_->AddNewNonTensorOperand(mapping_util_);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(nn_model_, ann_index,
&value, sizeof(T)),
"setting new operand value", nnapi_errno_);
augmented_inputs_.push_back(ann_index);
return kTfLiteOk;
}
template <typename T>
TfLiteStatus AddVectorOperand(const T* values, uint32_t num_values,
int32_t nn_type, float scale,
int32_t zero_point) {
ANeuralNetworksOperandType operand_type{.type = nn_type,
.dimensionCount = 1,
.dimensions = &num_values,
.scale = scale,
.zeroPoint = zero_point};
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
const int ann_index = mapping_util_->AddNewNonTensorOperand(mapping_util_);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_index, values, sizeof(T) * num_values),
"settings new operand value", nnapi_errno_);
augmented_inputs_.push_back(ann_index);
return kTfLiteOk;
}
template <typename T>
TfLiteStatus AddVectorOperand(const T* values, uint32_t num_values,
int32_t nn_type) {
    return AddVectorOperand(values, num_values, nn_type, /*scale=*/0.f,
                            /*zero_point=*/0);
}
TfLiteStatus AddFloat32OutputTensor(uint32_t dimension_count,
const uint32_t* dimension_data,
int* ann_index_out) {
return AddAdditionalOutputTensor(
        dimension_count, dimension_data, ANEURALNETWORKS_TENSOR_FLOAT32,
        /*scale=*/0.f, /*zero_point=*/0, ann_index_out);
}
TfLiteStatus AddAdditionalOutputTensor(uint32_t dimension_count,
const uint32_t* dimension_data,
int32_t nn_type, float scale,
int32_t zero_point,
int* ann_index_out) {
ANeuralNetworksOperandType operand_type{
.type = nn_type,
.dimensionCount = dimension_count,
.dimensions = dimension_data,
.scale = scale,
.zeroPoint = zero_point,
};
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", nnapi_errno_);
const int ann_index = mapping_util_->AddNewNonTensorOperand(mapping_util_);
augmented_outputs_.push_back(ann_index);
if (ann_index_out) *ann_index_out = ann_index;
return kTfLiteOk;
}
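  // Adds an NNAPI operand shadowing the TFLite tensor `tensor_index`, reusing
  // an existing mapping when present, and handles type conversion, per-channel
  // quantization and constant data for kTfLiteMmapRo tensors.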
TfLiteStatus AddTensor(int tensor_index, bool hybrid_op,
std::vector<uint32_t>* indices, int tensor_flags = 0) {
const bool scalar_as_tensor =
tensor_flags & NN_TENSOR_FLAG_SCALAR_AS_TENSOR;
const bool need_int8_conversion =
tensor_flags & NN_TENSOR_FLAG_INT8_CONVERSION;
const bool use_int8_asymm_signed =
tensor_flags & NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED;
const bool force_per_channel =
tensor_flags & NN_TENSOR_FLAG_FORCE_PER_CHANNEL;
const bool need_half2float_conversion =
tensor_flags & NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION;
int ann_tensor_index =
mapping_util_->TfLiteIndexToNnIndex(mapping_util_, tensor_index);
if (ann_tensor_index != -1) {
indices->push_back(ann_tensor_index);
return kTfLiteOk;
}
ann_tensor_index =
mapping_util_->AddNewNnTensorIndex(mapping_util_, tensor_index);
int32_t nn_type = 0;
float scale = 0.0f;
int32_t zeroPoint = 0;
ANeuralNetworksSymmPerChannelQuantParams ann_perchannel_params;
TfLiteTensor* tensor = &context_->tensors[tensor_index];
TfLiteType tensor_type = tensor->type;
if (hybrid_op && (tensor_type == kTfLiteUInt8)) {
tensor_type = kTfLiteInt8;
}
switch (tensor_type) {
case kTfLiteNoType:
indices->push_back(-1);
return kTfLiteOk;
case kTfLiteFloat32:
nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
break;
case kTfLiteFloat16:
nn_type = ANEURALNETWORKS_TENSOR_FLOAT16;
if (need_half2float_conversion) {
nn_type = ANEURALNETWORKS_TENSOR_FLOAT32;
mapping_util_->AddTypeConversion(mapping_util_, tensor_index,
kTfLiteFloat32);
}
break;
case kTfLiteUInt8:
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
scale = tensor->params.scale;
zeroPoint = tensor->params.zero_point;
if (scale == 0) {
scale = 1;
}
break;
case kTfLiteInt8:
if (use_int8_asymm_signed) {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED;
} else if (need_int8_conversion) {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM;
} else {
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM;
}
scale = tensor->params.scale;
zeroPoint = tensor->params.zero_point;
if (tensor->quantization.type == kTfLiteAffineQuantization) {
TfLiteAffineQuantization* quantization_params =
static_cast<TfLiteAffineQuantization*>(
tensor->quantization.params);
if (quantization_params->scale->size > 1 || force_per_channel) {
ann_perchannel_params = {
.channelDim = static_cast<uint32_t>(
quantization_params->quantized_dimension),
.scaleCount =
static_cast<uint32_t>(quantization_params->scale->size),
.scales = quantization_params->scale->data,
};
nn_type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL;
scale = 0.0f;
zeroPoint = 0;
} else if (quantization_params->scale->size == 1) {
scale = quantization_params->scale->data[0];
zeroPoint = quantization_params->zero_point->data[0];
}
}
if (nn_type != ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
if (need_int8_conversion) {
zeroPoint += 128;
mapping_util_->AddTypeConversion(mapping_util_, tensor_index,
kTfLiteUInt8);
}
if (scale == 0) {
scale = 1;
}
}
break;
case kTfLiteInt32:
nn_type = ANEURALNETWORKS_TENSOR_INT32;
scale = tensor->params.scale;
zeroPoint = tensor->params.zero_point;
break;
case kTfLiteBool:
nn_type = ANEURALNETWORKS_TENSOR_BOOL8;
break;
case kTfLiteInt16:
nn_type = ANEURALNETWORKS_TENSOR_QUANT16_SYMM;
scale = tensor->params.scale;
zeroPoint = tensor->params.zero_point;
break;
default:
context_->ReportError(
context_, "Failed to add NN API tensor: type %s is not supported.",
TfLiteTypeGetName(tensor_type));
return kTfLiteError;
}
bool has_unspecified_dimensions = ::tflite::HasUnspecifiedDimension(tensor);
uint32_t tensor_rank = static_cast<uint32_t>(tensor->dims->size);
std::vector<uint32_t> dims_unspecified(tensor_rank, 0);
if (has_unspecified_dimensions) {
for (int i = 0; i < tensor->dims_signature->size; i++) {
dims_unspecified[i] = tensor->dims_signature->data[i] == -1
? 0
: tensor->dims_signature->data[i];
}
}
uint32_t* tensor_dims =
has_unspecified_dimensions && allow_dynamic_dimensions_
? dims_unspecified.data()
: reinterpret_cast<uint32_t*>(tensor->dims->data);
if (scalar_as_tensor && tensor_rank == 0) {
tensor_rank = 1;
tensor_dims = &tensor_rank;
}
if (tensor_rank == 0) {
tensor_dims = nullptr;
}
ANeuralNetworksOperandType operand_type{nn_type, tensor_rank, tensor_dims,
scale, zeroPoint};
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_addOperand(nn_model_, &operand_type),
"adding operand", tensor, nnapi_errno_);
if (nn_type == ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(
nn_model_, ann_tensor_index, &ann_perchannel_params),
"setting new operand per channel quantization params", tensor,
nnapi_errno_);
}
if (tensor->allocation_type == kTfLiteMmapRo) {
if (IsQuantized(tensor_type) && need_int8_conversion &&
nn_type != ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL) {
int new_tensor_index = -1;
TF_LITE_ENSURE_OK(context_,
context_->AddTensors(context_, 1, &new_tensor_index));
TfLiteTensor* new_tensor = &context_->tensors[new_tensor_index];
new_tensor->type = kTfLiteUInt8;
new_tensor->allocation_type = kTfLiteDynamic;
new_tensor->params.scale = scale;
new_tensor->params.zero_point = zeroPoint;
TF_LITE_ENSURE_OK(
context_, context_->ResizeTensor(context_, new_tensor,
TfLiteIntArrayCopy(tensor->dims)));
const auto num_elements = NumElements(tensor);
for (int i = 0; i < num_elements; ++i) {
new_tensor->data.uint8[i] = static_cast<const uint8_t>(
static_cast<int32_t>(tensor->data.int8[i]) + 128);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_tensor_index, new_tensor->data.raw,
new_tensor->bytes),
"setting new operand value", tensor, nnapi_errno_);
} else if (tensor_type == kTfLiteFloat16 && need_half2float_conversion) {
int new_tensor_index = -1;
TF_LITE_ENSURE_OK(context_,
context_->AddTensors(context_, 1, &new_tensor_index));
TfLiteTensor* new_tensor = &context_->tensors[new_tensor_index];
new_tensor->type = kTfLiteFloat32;
new_tensor->allocation_type = kTfLiteDynamic;
TF_LITE_ENSURE_OK(
context_, context_->ResizeTensor(context_, new_tensor,
TfLiteIntArrayCopy(tensor->dims)));
const auto num_elements = NumElements(tensor);
for (int i = 0; i < num_elements; ++i) {
new_tensor->data.f[i] = fp16_ieee_to_fp32_value(
reinterpret_cast<uint16_t*>(tensor->data.data)[i]);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_tensor_index, new_tensor->data.data,
new_tensor->bytes),
"setting new operand value", tensor, nnapi_errno_);
#ifdef TFLITE_NNAPI_ALLOW_MMAP_SHARING
} else if (tensor->allocation &&
static_cast<const Allocation*>(tensor->allocation)->type() ==
Allocation::Type::kMMap) {
const MMAPAllocation* mmap_alloc =
static_cast<const MMAPAllocation*>(tensor->allocation);
if (allocation_memory_mapping_->count(mmap_alloc) == 0) {
ANeuralNetworksMemory* ann_memory_handle = nullptr;
nnapi_->ANeuralNetworksMemory_createFromFd(
mmap_alloc->mmapped_buffer_size(), PROT_READ, mmap_alloc->fd(),
mmap_alloc->mmapped_buffer_offset_in_file(), &ann_memory_handle);
allocation_memory_mapping_->insert(
std::make_pair(mmap_alloc, ann_memory_handle));
}
ANeuralNetworksMemory* ann_memory_handle =
allocation_memory_mapping_->at(mmap_alloc);
auto offset =
reinterpret_cast<const uint8_t*>(tensor->data.raw) -
reinterpret_cast<const uint8_t*>(mmap_alloc->mmapped_buffer());
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValueFromMemory(
nn_model_, ann_tensor_index, ann_memory_handle, offset,
tensor->bytes),
"setting new operand value from memory", tensor, nnapi_errno_);
#endif
} else {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context_,
nnapi_->ANeuralNetworksModel_setOperandValue(
nn_model_, ann_tensor_index, tensor->data.data, tensor->bytes),
"setting new operand value", tensor, nnapi_errno_);
}
}
indices->push_back(ann_tensor_index);
return kTfLiteOk;
}
const NnApi* const nnapi_;
TfLiteContext* const context_;
NnapiMappingUtilCInterface* const mapping_util_;
DequantizeMapping* const dequantize_mapping_;
std::map<const MMAPAllocation*, ANeuralNetworksMemory*>* const
allocation_memory_mapping_;
ANeuralNetworksModel* const nn_model_;
std::vector<uint32_t> augmented_inputs_;
std::vector<uint32_t> augmented_outputs_;
int* nnapi_errno_;
bool allow_dynamic_dimensions_;
};
namespace {
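// Collects the outcome of validating an op together with the reasons for any
// failures (failure details are recorded only with verbose validation).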
struct OpValidationContext {
bool is_valid;
std::vector<NNAPIValidationFailure>* validation_failures;
};
#define EXPECT_INPUT_TYPE_IN(actual_type, ...) \
ExpectTypeIn(actual_type, {__VA_ARGS__}, \
NNAPIValidationFailureType::kUnsupportedInputType, \
"Input type not in expected list " #__VA_ARGS__, &val_ctx)
inline void AddValidationFailure(NNAPIValidationFailureType failure_type,
const char* message,
OpValidationContext* val_ctx) {
val_ctx->is_valid = false;
#ifdef NNAPI_VERBOSE_VALIDATION
if (val_ctx->validation_failures) {
val_ctx->validation_failures->push_back({failure_type, message});
}
#endif
}
template <typename... Args>
inline void AddValidationFailureFmt(OpValidationContext* val_ctx,
NNAPIValidationFailureType failure_type,
const char* message_fmt, Args... args) {
val_ctx->is_valid = false;
#ifdef NNAPI_VERBOSE_VALIDATION
if (val_ctx->validation_failures) {
size_t req_buf_size = snprintf(nullptr, 0, message_fmt, args...) + 1;
std::unique_ptr<char[]> tmp_buf(new char[req_buf_size]);
snprintf(tmp_buf.get(), req_buf_size, message_fmt, args...);
val_ctx->validation_failures->push_back({failure_type, tmp_buf.get()});
}
#endif
}
inline bool Expect(bool condition, NNAPIValidationFailureType failure_type,
const char* message, OpValidationContext* val_ctx) {
if (!condition) {
AddValidationFailure(failure_type, message, val_ctx);
return false;
}
return true;
}
template <typename... Args>
inline bool ExpectFmt(bool condition, OpValidationContext* val_ctx,
NNAPIValidationFailureType failure_type,
const char* message_fmt, Args... args) {
if (!condition) {
AddValidationFailureFmt(val_ctx, failure_type, message_fmt, args...);
return false;
}
return true;
}
inline bool ExpectTypeIn(TfLiteType actual_type,
std::initializer_list<TfLiteType> allowed_types,
NNAPIValidationFailureType failure_type,
const char* msg, OpValidationContext* val_ctx) {
return Expect(std::find(allowed_types.begin(), allowed_types.end(),
actual_type) != allowed_types.end(),
failure_type, msg, val_ctx);
}
inline bool ExpectMinAndroidSdkVersion(int curr_version, int min_version,
OpValidationContext* val_ctx) {
return ExpectFmt(curr_version >= min_version, val_ctx,
NNAPIValidationFailureType::kUnsupportedAndroidVersion,
"Android sdk version less than %d", min_version);
}
inline bool ExpectMaxOpVersion(int curr_version, int max_version,
OpValidationContext* val_ctx) {
return ExpectFmt(curr_version <= max_version, val_ctx,
NNAPIValidationFailureType::kUnsupportedOperatorVersion,
"OP Version higher than %d", max_version);
}
inline bool ExpectOpVersion(int curr_version, int max_version,
OpValidationContext* val_ctx) {
return ExpectFmt(curr_version <= max_version, val_ctx,
NNAPIValidationFailureType::kUnsupportedOperatorVersion,
"OP Version different from %d", max_version);
}
inline bool ExpectIsFloatOperator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloat(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float", val_ctx);
}
bool ExpectIsFloatOrUint8Operator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloatOrUInt8(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float or UINT8", val_ctx);
}
bool ExpectIsFloatOrQuant8Operator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloatOrQuantized(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float or Quant8", val_ctx);
}
bool ExpectIsFloatOrInt32Operator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloatOrInt32(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float or Int32", val_ctx);
}
bool ExpectIsFloatQuant8OrInt32Operator(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const auto input_type = context->tensors[node->inputs->data[0]].type;
return Expect(IsFloatQuantizedOrInt32(input_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float, Quant8, or Int32", val_ctx);
}
bool ExpectIsRestrictedScalesCompliant(const TfLiteContext* context,
const TfLiteNode* node,
OpValidationContext* val_ctx) {
const int input_id = node->inputs->data[0];
const int filter_id = node->inputs->data[1];
const int output_id = node->outputs->data[0];
const float input_scale = context->tensors[input_id].params.scale;
const float filter_scale = context->tensors[filter_id].params.scale;
const float output_scale = context->tensors[output_id].params.scale;
return Expect(input_scale * filter_scale < output_scale,
NNAPIValidationFailureType::kNotRestrictedScaleCompliant,
"When using NN API version 1.0 or 1.1, input_scale * "
"filter_scale < output_scale.",
val_ctx);
}
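// Collects the current sizes of every dimension that is dynamic (-1) in the
// dims_signature of the listed tensors.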
void AppendDynamicDimensions(const TfLiteContext* context,
const TfLiteIntArray* tensor_indices,
std::vector<int>& dynamic_dimensions) {
for (int i : TfLiteIntArrayView(tensor_indices)) {
if (i == kTfLiteOptionalTensor) continue;
const auto& tensor = context->tensors[i];
if (tensor.dims_signature) {
for (int i = 0; i < tensor.dims_signature->size; i++) {
if (tensor.dims_signature->data[i] == -1) {
dynamic_dimensions.push_back(tensor.dims->data[i]);
}
}
}
}
}
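// Builds an execution-cache signature from the buffer-handle timestamps of all
// tensors plus, when dynamic dimensions are allowed, their current values.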
NNAPIExecutionCache::Signature CreateExecutionCacheSignature(
const TfLiteContext* context, const TfLiteNode* node,
const StatefulNnApiDelegate::Options& delegate_options,
const std::vector<StatefulNnApiDelegate::MemoryRegistration>&
tensor_memory_map) {
std::vector<uint64_t> tensor_handle_timestamps(context->tensors_size);
for (int i = 0; i < tensor_handle_timestamps.size(); i++) {
auto handle = context->tensors[i].buffer_handle;
if (handle < 0 || handle >= tensor_memory_map.size()) {
tensor_handle_timestamps[i] = kNoMemoryTimestamp;
} else {
tensor_handle_timestamps[i] = tensor_memory_map[handle].timestamp;
}
}
std::vector<int> dynamic_dimensions;
if (delegate_options.allow_dynamic_dimensions) {
AppendDynamicDimensions(context, node->inputs, dynamic_dimensions);
if (delegate_options.vendor_plugin == nullptr) {
AppendDynamicDimensions(context, node->outputs, dynamic_dimensions);
}
}
return NNAPIExecutionCache::Signature{std::move(tensor_handle_timestamps),
std::move(dynamic_dimensions)};
}
template <typename T>
std::size_t HashVector(const std::vector<T>& vec) {
std::size_t seed = vec.size();
auto hasher = std::hash<T>{};
for (const auto& i : vec) {
seed = CombineHashes({seed, hasher(i)});
}
return seed;
}
}  // namespace
bool NNAPIExecutionCache::Signature::operator==(const Signature& other) const {
return tensor_handle_timestamps == other.tensor_handle_timestamps &&
dynamic_dimensions == other.dynamic_dimensions;
}
std::size_t NNAPIExecutionCache::Signature::Hasher::operator()(
const Signature& signature) const {
return CombineHashes({HashVector(signature.tensor_handle_timestamps),
HashVector(signature.dynamic_dimensions)});
}
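// Returns the cached execution for `signature` and marks it as most recently
// used, or nullptr on a cache miss.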
ANeuralNetworksExecution* NNAPIExecutionCache::Get(const Signature& signature) {
auto it = lookup_.find(signature);
if (it == lookup_.end()) {
return nullptr;
}
auto& list_it = it->second.first;
order_.erase(list_it);
order_.push_front(signature);
list_it = order_.begin();
auto& execution = it->second.second;
return execution.get();
}
void NNAPIExecutionCache::Put(const Signature& signature,
UniqueExecution execution) {
if (order_.size() >= max_cache_size_) {
ReleaseLRU();
}
order_.push_front(signature);
lookup_.emplace(signature,
std::make_pair(order_.begin(), std::move(execution)));
}
void NNAPIExecutionCache::Clear() {
order_.clear();
lookup_.clear();
}
void NNAPIExecutionCache::SetMaxCacheSize(uint32_t max_cache_size) {
max_cache_size_ = max_cache_size;
while (order_.size() > max_cache_size_) {
ReleaseLRU();
}
}
void NNAPIExecutionCache::ReleaseLRU() {
lookup_.erase(order_.back());
order_.pop_back();
}
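// Returns true if the node can be delegated to NNAPI for the given Android SDK
// version; validation failures are recorded in `map_failures`.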
bool NNAPIDelegateKernel::Validate(
const TfLiteContext* context, const TfLiteRegistration* registration,
int android_sdk_version, const TfLiteNode* node,
bool is_accelerator_specified, NnapiDelegateVendorPlugin* vendor_plugin,
std::vector<NNAPIValidationFailure>* map_failures) {
OpValidationContext val_ctx{true, map_failures};
if (vendor_plugin) {
if (vendor_plugin->ValidateNode(context, registration, node)) {
return true;
}
}
auto builtin_code = registration->builtin_code;
auto version = registration->version;
switch (builtin_code) {
case kTfLiteBuiltinAdd: {
ExpectMaxOpVersion(version, 2, &val_ctx);
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
if (IsInt32(context->tensors[node->inputs->data[0]].type)) {
Expect(reinterpret_cast<TfLiteAddParams*>(node->builtin_data)
->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"No activation function supported", &val_ctx);
}
} else {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
}
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank must be <= 4", &val_ctx);
} break;
case kTfLiteBuiltinArgMax:
case kTfLiteBuiltinArgMin: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[(0)]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat16, kTfLiteFloat32,
kTfLiteInt32, kTfLiteUInt8, kTfLiteInt8);
const auto& axis_tensor = context->tensors[node->inputs->data[1]];
if (axis_tensor.type == kTfLiteInt64) {
Expect(
axis_tensor.allocation_type == kTfLiteMmapRo &&
*axis_tensor.data.i64 <= std::numeric_limits<int32_t>::max() &&
*axis_tensor.data.i64 >= std::numeric_limits<int32_t>::min(),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports axis as int32. If the axis type is int64 and "
"constant we can convert it to int32 if the value isn't too "
"large.",
&val_ctx);
} else {
Expect(axis_tensor.type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Axis should be Int32", &val_ctx);
}
if (builtin_code == kTfLiteBuiltinArgMax) {
auto builtin =
reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data);
Expect(builtin->output_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI only supports int32 output.", &val_ctx);
} else {
auto builtin =
reinterpret_cast<TfLiteArgMinParams*>(node->builtin_data);
Expect(builtin->output_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI only supports int32 output.", &val_ctx);
}
} break;
case kTfLiteBuiltinMul: {
if (is_accelerator_specified) {
ExpectMaxOpVersion(version, 3, &val_ctx);
} else {
ExpectMaxOpVersion(version, 2, &val_ctx);
}
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
if (IsInt32(context->tensors[node->inputs->data[0]].type)) {
Expect(reinterpret_cast<TfLiteMulParams*>(node->builtin_data)
->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"No activation function supported", &val_ctx);
}
} else {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
}
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank must be <= 4", &val_ctx);
} break;
case kTfLiteBuiltinAveragePool2d: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
auto builtin = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
if (IsQuantized(context->tensors[node->inputs->data[0]].type)) {
Expect(is_accelerator_specified ||
(builtin->filter_width * builtin->filter_height <= 256),
NNAPIValidationFailureType::kUnsupportedOperandSize,
"Large filter window would overflow on the reference CPU path",
&val_ctx);
}
} break;
case kTfLiteBuiltinMaxPool2d: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinL2Pool2d: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectIsFloatOperator(context, node, &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
auto builtin = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
Expect(builtin->activation == kTfLiteActNone,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Before NNAPI 1.2 fused activation for l2_pool may not be "
"supported.",
&val_ctx);
}
} break;
case kTfLiteBuiltinConv2d: {
ExpectMaxOpVersion(version, 5, &val_ctx);
const auto& input_tensor = context->tensors[node->inputs->data[0]];
const auto& filter_tensor = context->tensors[node->inputs->data[1]];
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid operators not supported before NNAPI 1.2", &val_ctx);
ExpectIsFloatOrUint8Operator(context, node, &val_ctx);
if (filter_tensor.quantization.type == kTfLiteAffineQuantization) {
TfLiteAffineQuantization* quantization_params =
static_cast<TfLiteAffineQuantization*>(
filter_tensor.quantization.params);
Expect(quantization_params->scale->size <= 1,
NNAPIValidationFailureType::kUnsupportedQuantizationType,
"Per-channel quantized convolution not supported before NNAPI "
"1.2.",
&val_ctx);
}
}
const auto input_type = input_tensor.type;
if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
input_type == kTfLiteUInt8) {
ExpectIsRestrictedScalesCompliant(context, node, &val_ctx);
}
auto builtin = reinterpret_cast<TfLiteConvParams*>(node->builtin_data);
Expect(node->inputs->size == 3,
NNAPIValidationFailureType::kMissingRequiredOperand,
"Conv2D with omitted bias not supported", &val_ctx);
if (builtin->dilation_width_factor != 1 ||
builtin->dilation_height_factor != 1) {
Expect(android_sdk_version >= kMinSdkVersionForNNAPI12,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI supports dilated Conv2D since NNAPI 1.2.", &val_ctx);
}
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(input_tensor.dims->data[3] == filter_tensor.dims->data[3],
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Grouped convolution not supported before NNAPI < 1.2",
&val_ctx);
}
} break;
case kTfLiteBuiltinDepthwiseConv2d: {
ExpectMaxOpVersion(version, 3, &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
ExpectIsFloatOrUint8Operator(context, node, &val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
if (input_type == kTfLiteUInt8) {
ExpectIsRestrictedScalesCompliant(context, node, &val_ctx);
}
auto builtin =
reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
Expect(builtin->dilation_width_factor == 1 &&
builtin->dilation_height_factor == 1,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"dilation_width_factor and dilation_height_factor expected to "
"be equal to 1",
&val_ctx);
}
} break;
case kTfLiteBuiltinFullyConnected: {
ExpectMaxOpVersion(version, 5, &val_ctx);
const auto output_type = context->tensors[node->outputs->data[0]].type;
Expect(output_type != kTfLiteInt16,
NNAPIValidationFailureType::kUnsupportedOutputType,
"Unsupported output of type kTfLiteInt16", &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid operators not supported before NNAPI 1.2", &val_ctx);
ExpectIsFloatOrUint8Operator(context, node, &val_ctx);
}
const auto input_type = context->tensors[node->inputs->data[0]].type;
if (android_sdk_version < kMinSdkVersionForNNAPI12 &&
input_type == kTfLiteUInt8) {
ExpectIsRestrictedScalesCompliant(context, node, &val_ctx);
}
auto builtin =
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
if (builtin->keep_num_dims) {
ExpectMinAndroidSdkVersion(android_sdk_version,
kMinSdkVersionForNNAPI13, &val_ctx);
}
} break;
case kTfLiteBuiltinHardSwish: {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinSoftmax: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
const auto& output = context->tensors[node->outputs->data[0]];
ExpectTypeIn(output.type, {kTfLiteFloat32, kTfLiteUInt8, kTfLiteInt8},
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output type should be one of kTfLiteFloat32, kTfLiteUInt8, "
"kTfLiteInt8.",
&val_ctx);
const auto& input = context->tensors[node->inputs->data[0]];
const int input_rank = input.dims->size;
Expect(input_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank should be <= 4", &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(
input_rank == 2 || input_rank == 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Before API level 29 only 2D and 4D input tensors were supported.",
&val_ctx);
}
} break;
case kTfLiteBuiltinReshape: {
ExpectOpVersion(version, 1, &val_ctx);
if (android_sdk_version < kNNAPIRuntimeFeatureLevel6) {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
} else {
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
}
const auto& input = context->tensors[node->inputs->data[0]];
Expect(input.dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank should be <= 4", &val_ctx);
const auto& output = context->tensors[node->outputs->data[0]];
Expect(output.dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Output rank should be <= 4", &val_ctx);
if (node->inputs->size >= 2) {
Expect(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo,
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"The shape input tensor must be constant.", &val_ctx);
}
if (node->inputs->size == 1) {
auto* params =
reinterpret_cast<TfLiteReshapeParams*>(node->builtin_data);
int num_dimensions = params->num_dimensions;
if (num_dimensions == 1 && params->shape[0] == 0) {
num_dimensions = 0;
}
Expect(num_dimensions > 0,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"New shape rank should be > 0", &val_ctx);
}
} break;
case kTfLiteBuiltinResizeBilinear: {
ExpectMaxOpVersion(version, 3, &val_ctx);
const auto& input = context->tensors[node->inputs->data[0]];
const auto output_dims = context->tensors[node->outputs->data[0]].dims;
Expect(input.dims->size == 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input should have rank 4", &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
Expect(node->inputs->size >= 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expected at least 2 inputs", &val_ctx);
if (node->inputs->size >= 2) {
Expect(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo,
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"The size input tensor must be constant.", &val_ctx);
}
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(output_dims->data[1] == output_dims->data[2],
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Require width == height due to driver differences in NNAPI "
"< 1.2",
&val_ctx);
}
auto builtin =
reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
if (android_sdk_version <= kMinSdkVersionForNNAPI12) {
Expect(!builtin->align_corners,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support align_corners == true.", &val_ctx);
Expect(!builtin->half_pixel_centers,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support half_pixel_centers == true.", &val_ctx);
}
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(input.type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI 1.0 & 1.1 only supports float input.", &val_ctx);
}
} break;
case kTfLiteBuiltinResizeNearestNeighbor: {
ExpectMaxOpVersion(version, 3, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
Expect(node->inputs->size >= 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expected at least 2 inputs", &val_ctx);
if (node->inputs->size >= 2) {
Expect(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo,
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"The size input tensor must be constant.", &val_ctx);
}
auto builtin = reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
node->builtin_data);
if (android_sdk_version <= kMinSdkVersionForNNAPI12) {
Expect(!builtin->align_corners,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support align_corners == true.", &val_ctx);
Expect(!builtin->half_pixel_centers,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support half_pixel_centers == true.", &val_ctx);
}
} break;
case kTfLiteBuiltinSqueeze: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
auto builtin = reinterpret_cast<TfLiteSqueezeParams*>(node->builtin_data);
if (android_sdk_version == kMinSdkVersionForNNAPI11) {
Expect(builtin->num_squeeze_dims != 0,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI 1.1 does not support null squeeze_dims properly.",
&val_ctx);
}
} break;
case kTfLiteBuiltinUnidirectionalSequenceLstm: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid version of this op is not supported by NN API.", &val_ctx);
Expect(node->inputs->size == 20 || node->inputs->size == 24,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Supporting only operation with 20 or 24 inputs", &val_ctx);
} break;
case kTfLiteBuiltinL2Normalization: {
ExpectMaxOpVersion(version, 2, &val_ctx);
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
ExpectIsFloatOperator(context, node, &val_ctx);
const auto& input = context->tensors[node->inputs->data[0]];
Expect(input.dims->size == 4,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expected 4 inputs", &val_ctx);
}
auto builtin = reinterpret_cast<TfLiteL2NormParams*>(node->builtin_data);
Expect(builtin->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"Expected no activation", &val_ctx);
} break;
case kTfLiteBuiltinLocalResponseNormalization: {
ExpectOpVersion(version, 1, &val_ctx);
} break;
case kTfLiteBuiltinLshProjection: {
ExpectOpVersion(version, 1, &val_ctx);
if (reinterpret_cast<TfLiteLSHProjectionParams*>(node->builtin_data)
->type == kTfLiteLshProjectionSparse) {
Expect(android_sdk_version >= kMinSdkVersionForNNAPI12,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI does not support sparse projection correctly pre-Q",
&val_ctx);
Expect(node->inputs->size == 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
" NNAPI does not support weights for sparse projects.",
&val_ctx);
}
} break;
case kTfLiteBuiltinConcatenation: {
ExpectMaxOpVersion(version, 2, &val_ctx);
Expect(reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data)
->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"No activation function supported", &val_ctx);
Expect(context->tensors[node->inputs->data[0]].dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank should be less than 4", &val_ctx);
const auto& input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat16, kTfLiteFloat32,
kTfLiteUInt8, kTfLiteInt8);
if (input_type == kTfLiteUInt8 &&
android_sdk_version < kMinSdkVersionForNNAPI12) {
auto first_param = context->tensors[node->inputs->data[0]].params;
for (int i = 1; i < node->inputs->size; i++) {
auto curr_param = context->tensors[node->inputs->data[i]].params;
if (!Expect(curr_param.scale == first_param.scale &&
curr_param.zero_point == first_param.zero_point,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI 1.0-1 only supported concatenating quantized "
"tensor of the same scale and offset.",
&val_ctx)) {
break;
}
}
}
} break;
case kTfLiteBuiltinDequantize: {
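      // From NNAPI 1.3, dequantizing a non-constant float16 tensor is always
      // supported; otherwise the input must be uint8, or int8 from NNAPI 1.2
      // (symmetric quantization only on 1.2 itself).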
if (android_sdk_version >= kMinSdkVersionForNNAPI13 &&
context->tensors[node->inputs->data[0]].type == kTfLiteFloat16 &&
context->tensors[node->inputs->data[0]].allocation_type !=
kTfLiteMmapRo) {
return true;
}
Expect(version == 1 || version == 2,
NNAPIValidationFailureType::kUnsupportedOperatorVersion,
"Supported op versions are 1 and 2 only", &val_ctx);
const auto& input = context->tensors[node->inputs->data[0]];
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteUInt8);
} else {
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteUInt8, kTfLiteInt8);
if (android_sdk_version == kMinSdkVersionForNNAPI12 &&
input.type == kTfLiteInt8) {
const auto zero_point = input.params.zero_point;
Expect(zero_point == 0,
NNAPIValidationFailureType::kUnsupportedInputType,
"NN API supports int8 type since version 1.2 but only for "
"symmetric quantization.",
&val_ctx);
}
}
} break;
case kTfLiteBuiltinDensify: {
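      // Densify is only delegated on NNAPI 1.3+ and only when its input is a
      // constant (memory-mapped) tensor.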
if (android_sdk_version >= kMinSdkVersionForNNAPI13 &&
context->tensors[node->inputs->data[0]].allocation_type ==
kTfLiteMmapRo) {
return true;
}
return false;
} break;
case kTfLiteBuiltinFloor: {
ExpectOpVersion(version, 1, &val_ctx);
} break;
case kTfLiteBuiltinRelu:
case kTfLiteBuiltinReluN1To1:
case kTfLiteBuiltinRelu6:
case kTfLiteBuiltinLogistic: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinTanh: {
ExpectMaxOpVersion(version, 2, &val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
Expect(IsFloat(input_type) ||
(IsQuantized(input_type) &&
android_sdk_version >= kMinSdkVersionForNNAPI12),
NNAPIValidationFailureType::kUnsupportedInputType,
" NNAPI only support float tanh.", &val_ctx);
} break;
case kTfLiteBuiltinSub: {
ExpectMaxOpVersion(version, 3, &val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
Expect((android_sdk_version >= kMinSdkVersionForNNAPI11 &&
IsFloat(input_type)) ||
(android_sdk_version >= kMinSdkVersionForNNAPI12 &&
IsQuantized(input_type)) ||
(android_sdk_version >= kMinSdkVersionForNNAPI13 &&
IsInt32(input_type)),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only support float sub.", &val_ctx);
if (IsInt32(input_type)) {
Expect(reinterpret_cast<TfLiteSubParams*>(node->builtin_data)
->activation == kTfLiteActNone,
NNAPIValidationFailureType::kNoActivationExpected,
"No activation function supported", &val_ctx);
}
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank must be <= 4", &val_ctx);
} break;
case kTfLiteBuiltinDiv: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
Expect(context->tensors[node->inputs->data[0]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only support float div.", &val_ctx);
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Input rank must be <= 4", &val_ctx);
} break;
case kTfLiteBuiltinPad:
case kTfLiteBuiltinPadv2: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
const TfLiteIntArrayView input_shape(
context->tensors[node->inputs->data[0]].dims);
Expect(!HasZeroes(input_shape),
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NN API pad ops do not support input tensors with no elements",
&val_ctx);
Expect(node->inputs->size >= 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expecting at least 2 inputs", &val_ctx);
if (node->inputs->size == 3) {
Expect(
android_sdk_version >= kMinSdkVersionForNNAPI12,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Specification of the padding value is supported from NNAPI 1.2.",
&val_ctx);
} else {
if (android_sdk_version < kMinSdkVersionForNNAPI12) {
Expect(context->tensors[node->inputs->data[0]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Only Float32 inputs are supported before NNAPI 1.2",
&val_ctx);
}
}
} break;
case kTfLiteBuiltinUnidirectionalSequenceRnn: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid version of this op is not supported by NN API.", &val_ctx);
} break;
case kTfLiteBuiltinSpaceToBatchNd: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
} break;
case kTfLiteBuiltinBatchToSpaceNd: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
auto crops = context->tensors[node->inputs->data[2]];
auto crops_data = crops.data.i32;
Expect(crops_data && crops.bytes == 16 && crops_data[0] == 0 &&
crops_data[1] == 0 && crops_data[2] == 0 && crops_data[3] == 0,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"All crops should be 0.", &val_ctx);
} break;
case kTfLiteBuiltinStridedSlice: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
} break;
case kTfLiteBuiltinTranspose: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
Expect((node->inputs->size > 1) &&
(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo),
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"Dynamically-sized tensors not supported.", &val_ctx);
} break;
case kTfLiteBuiltinAbs:
case kTfLiteBuiltinExp:
case kTfLiteBuiltinLog:
case kTfLiteBuiltinPow: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
ExpectIsFloatOperator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinRsqrt: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
if (android_sdk_version < kNNAPIRuntimeFeatureLevel7) {
ExpectIsFloatOperator(context, node, &val_ctx);
} else {
ExpectIsFloatOrQuant8Operator(context, node, &val_ctx);
}
} break;
case kTfLiteBuiltinSlice: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
const auto begin_type = context->tensors[node->inputs->data[1]].type;
const auto size_type = context->tensors[node->inputs->data[2]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8, kTfLiteInt8);
Expect(begin_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Begin type should be Int32", &val_ctx);
Expect(size_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Size type should be Int32", &val_ctx);
} break;
case kTfLiteBuiltinCos:
case kTfLiteBuiltinSin: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
ExpectIsFloatOperator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinTransposeConv: {
ExpectMaxOpVersion(version, 4, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect((node->inputs->size > 1) &&
(context->tensors[node->inputs->data[0]].allocation_type ==
kTfLiteMmapRo) &&
(context->tensors[node->inputs->data[1]].allocation_type ==
kTfLiteMmapRo),
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"Dynamically-sized tensors not supported.", &val_ctx);
} break;
case kTfLiteBuiltinSqrt: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
ExpectIsFloatOperator(context, node, &val_ctx);
} break;
case kTfLiteBuiltinRnn: {
ExpectOpVersion(version, 1, &val_ctx);
Expect(node->inputs->size == 5,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expected 5 input", &val_ctx);
if (node->inputs->size >= 2) {
Expect(
            context->tensors[node->inputs->data[1]].type == kTfLiteFloat32,
            NNAPIValidationFailureType::kUnsupportedInputType,
            "NNAPI only supports float32 weights.", &val_ctx);
}
} break;
case kTfLiteBuiltinSpaceToDepth: {
ExpectMaxOpVersion(version, 2, &val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
} break;
case kTfLiteBuiltinSvdf: {
ExpectOpVersion(version, 1, &val_ctx);
Expect(node->inputs->size == 5,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"Expected input of rank 5", &val_ctx);
if (node->inputs->size >= 2) {
Expect(
            context->tensors[node->inputs->data[1]].type == kTfLiteFloat32,
            NNAPIValidationFailureType::kUnsupportedInputType,
            "NNAPI only supports float32 weights.", &val_ctx);
}
Expect(android_sdk_version >= kMinSdkVersionForNNAPI11,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"SVDF does not support rank > 1 on NNAPI 1.0.", &val_ctx);
      Expect(context->tensors[node->inputs->data[1]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Weights should be Float32", &val_ctx);
} break;
case kTfLiteBuiltinLstm: {
ExpectMaxOpVersion(version, 3, &val_ctx);
Expect(
android_sdk_version >= kMinSdkVersionForNNAPI11,
NNAPIValidationFailureType::kUnsupportedAndroidVersion,
"NNAPI 1.0 has a bug for optional tensors which would affect LSTM.",
&val_ctx);
Expect(android_sdk_version >= kMinSdkVersionForNNAPI12 ||
!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid operators not supported before NNAPI 1.2.", &val_ctx);
const auto weight_input_index =
          isLstmBasicKernel(node) ? 2 /*basic kernel*/ : 4 /*full kernel*/;
const TfLiteType weight_type =
context->tensors[node->inputs->data[weight_input_index]].type;
if (isLstmBasicKernel(node)) {
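        // The "basic" (quantized 16-bit) LSTM kernel is only accepted with
        // uint8 weights, fixed input/output quantization (scale 1/128,
        // zero point 128), the expected cell-state quantization, and constant
        // weight and bias tensors.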
Expect(weight_type == kTfLiteUInt8,
NNAPIValidationFailureType::kUnsupportedInputType,
"Basic LSTM Kernels support only UINT8 weights", &val_ctx);
const auto input_quantization_params =
context->tensors[node->inputs->data[0]].params;
Expect(input_quantization_params.scale == 1. / 128. &&
input_quantization_params.zero_point == 128,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Invalid input quantization", &val_ctx);
const auto output_quantization_params =
context->tensors[node->outputs->data[0]].params;
Expect(output_quantization_params.scale == 1. / 128. &&
output_quantization_params.zero_point == 128,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Invalid output quantization", &val_ctx);
const auto cell_state_quantization_params =
context->tensors[node->outputs->data[1]].params;
Expect(cell_state_quantization_params.scale == 16. / 32768. ||
cell_state_quantization_params.zero_point == 0,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Invalid cell state quantization", &val_ctx);
auto is_const_tensor = [&node, &context](int tensor_idx) {
return context->tensors[node->inputs->data[tensor_idx]]
.allocation_type == kTfLiteMmapRo;
};
      Expect(is_const_tensor(2),
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"Weights tensor should be constant", &val_ctx);
      Expect(is_const_tensor(3),
NNAPIValidationFailureType::kInputTensorShouldHaveConstantShape,
"Biases tensor should be constant", &val_ctx);
return val_ctx.is_valid;
} else {
if (node->inputs->size == 24) {
ExpectMinAndroidSdkVersion(android_sdk_version,
kMinSdkVersionForNNAPI12, &val_ctx);
}
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
Expect(weight_type == kTfLiteFloat32 || weight_type == kTfLiteUInt8 ||
weight_type == kTfLiteInt8,
NNAPIValidationFailureType::kUnsupportedInputType,
"Weight has to be Float32 or UINT8 or INT8", &val_ctx);
} else {
Expect(weight_type == kTfLiteFloat32 || weight_type == kTfLiteUInt8,
NNAPIValidationFailureType::kUnsupportedInputType,
"Weight has to be Float32 or UINT8", &val_ctx);
}
}
} break;
case kTfLiteBuiltinMean: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
if (android_sdk_version >= kMinSdkVersionForNNAPI12) {
Expect(context->tensors[node->inputs->data[0]].type == kTfLiteFloat32 ||
IsQuantized(context->tensors[node->inputs->data[0]].type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Expected Float32 or Quantized input", &val_ctx);
} else {
Expect(context->tensors[node->inputs->data[0]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Expected Float32 input", &val_ctx);
}
Expect(context->tensors[node->outputs->data[0]].dims->size > 0,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI does not support generating a scalar as output for MEAN.",
&val_ctx);
Expect(context->tensors[node->inputs->data[0]].dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support mean of a tensor with rank > 4", &val_ctx);
} break;
case kTfLiteBuiltinEmbeddingLookup: {
ExpectOpVersion(version, 1, &val_ctx);
Expect(context->tensors[node->inputs->data[1]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only support float32 values.", &val_ctx);
} break;
case kTfLiteBuiltinHashtableLookup: {
ExpectOpVersion(version, 1, &val_ctx);
Expect(context->tensors[node->outputs->data[0]].type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI only support float32 output.", &val_ctx);
} break;
case kTfLiteBuiltinMaximum:
case kTfLiteBuiltinMinimum: {
ExpectMaxOpVersion(version, 3, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteInt32);
const TfLiteTensor& operand0 = context->tensors[node->inputs->data[0]];
if (operand0.dims->size == 0) {
Expect(operand0.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"Scalar operand should be constant", &val_ctx);
}
const TfLiteTensor& operand1 = context->tensors[node->inputs->data[1]];
if (operand1.dims->size == 0) {
Expect(operand1.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"Scalar operand should be constant", &val_ctx);
}
} break;
case kTfLiteBuiltinCast: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
const TfLiteType output_type =
context->tensors[node->outputs->data[0]].type;
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8, kTfLiteInt8);
ExpectTypeIn(
output_type,
{kTfLiteFloat32, kTfLiteInt32, kTfLiteUInt8, kTfLiteInt8},
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output type should be one of kTfLiteFloat32, kTfLiteInt32, "
"kTfLiteUInt8, kTfLiteInt8.",
&val_ctx);
} else {
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8);
ExpectTypeIn(
output_type, {kTfLiteFloat32, kTfLiteInt32, kTfLiteUInt8},
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output type should be one of kTfLiteFloat32, kTfLiteInt32, "
"kTfLiteUInt8.",
&val_ctx);
}
} break;
case kTfLiteBuiltinLeakyRelu:
case kTfLiteBuiltinPrelu: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
} break;
case kTfLiteBuiltinTile: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt8,
kTfLiteUInt8, kTfLiteInt32);
const auto multipliers_type =
context->tensors[node->inputs->data[1]].type;
Expect(multipliers_type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Multipliers should be Int32", &val_ctx);
} break;
case kTfLiteBuiltinLogicalOr:
case kTfLiteBuiltinLogicalAnd:
case kTfLiteBuiltinLogicalNot: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
Expect(input_type == kTfLiteBool,
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be bool", &val_ctx);
} break;
case kTfLiteBuiltinLess:
case kTfLiteBuiltinLessEqual:
case kTfLiteBuiltinGreater:
case kTfLiteBuiltinGreaterEqual:
case kTfLiteBuiltinEqual:
case kTfLiteBuiltinNotEqual: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteBool, kTfLiteInt32);
} break;
case kTfLiteBuiltinNeg: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32);
} break;
case kTfLiteBuiltinTopkV2: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto& input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8, kTfLiteInt8);
const auto& k_param = context->tensors[node->inputs->data[1]];
Expect(k_param.type == kTfLiteInt32 &&
k_param.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"K param should be a constant of type Int32", &val_ctx);
} break;
case kTfLiteBuiltinSelect: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto value_type = context->tensors[node->inputs->data[1]].type;
EXPECT_INPUT_TYPE_IN(value_type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteUInt8, kTfLiteInt8);
TfLiteIntArray* condition_shape =
context->tensors[node->inputs->data[0]].dims;
TfLiteIntArray* input_shape =
context->tensors[node->inputs->data[1]].dims;
Expect(TfLiteIntArrayEqual(condition_shape, input_shape),
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Condition and inputs tensors should have the same shape",
&val_ctx);
} break;
case kTfLiteBuiltinGather: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
const auto& positions = context->tensors[node->inputs->data[1]];
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteFloat16,
kTfLiteInt32, kTfLiteUInt8, kTfLiteInt8);
Expect(positions.type == kTfLiteInt32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Positions type should be one of kTfLiteInt32", &val_ctx);
Expect(positions.dims->size != 0,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"0-dimension args are not supported by NNAPI.", &val_ctx);
} break;
case kTfLiteBuiltinBidirectionalSequenceLstm: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(!IsHybridOperator(context, builtin_code, node),
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"Hybrid version of this op is not supported by NN API.", &val_ctx);
} break;
case kTfLiteBuiltinExpandDims: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteFloat16,
kTfLiteInt32, kTfLiteUInt8, kTfLiteInt8);
const auto axis = context->tensors[node->inputs->data[1]];
Expect(axis.type == kTfLiteInt32 && axis.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports constant int32 axis tensor.", &val_ctx);
} break;
case kTfLiteBuiltinSplit: {
ExpectOpVersion(version, 3, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const TfLiteTensor& input = context->tensors[node->inputs->data[1]];
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteInt32);
} else {
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt32);
}
const TfLiteTensor& axis = context->tensors[node->inputs->data[0]];
Expect(axis.type == kTfLiteInt32 && axis.allocation_type == kTfLiteMmapRo,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports constant int32 axis tensor.", &val_ctx);
} break;
case kTfLiteBuiltinSplitV: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const TfLiteTensor& input = context->tensors[node->inputs->data[0]];
const TfLiteTensor& size_splits = context->tensors[node->inputs->data[1]];
const TfLiteTensor& axis = context->tensors[node->inputs->data[2]];
EXPECT_INPUT_TYPE_IN(input.type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteInt32);
bool size_splits_is_int32_const_vector =
size_splits.type == kTfLiteInt32 && size_splits.dims->size == 1 &&
size_splits.allocation_type == kTfLiteMmapRo;
bool axis_is_int32_const =
axis.type == kTfLiteInt32 && axis.allocation_type == kTfLiteMmapRo;
Expect(size_splits_is_int32_const_vector,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports constant int32 size_splits vector.",
&val_ctx);
Expect(axis_is_int32_const,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports constant int32 axis tensor.", &val_ctx);
if (size_splits_is_int32_const_vector && axis_is_int32_const) {
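        // All explicit split sizes must be non-zero, and the remaining split
        // size computed by ComputeSplitVUnknownSplitSize must be non-zero too.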
Expect(std::all_of(size_splits.data.i32,
size_splits.data.i32 + size_splits.dims->data[0],
[](auto size) { return size != 0; }),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports non-zero split sizes.", &val_ctx);
Expect(ComputeSplitVUnknownSplitSize(context, node) != 0,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports non-zero split sizes.", &val_ctx);
}
} break;
case kTfLiteBuiltinLogSoftmax: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
Expect(input_type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"Input should be Float32.", &val_ctx);
} break;
case kTfLiteBuiltinQuantize: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto value_type = context->tensors[node->inputs->data[0]].type;
Expect(value_type == kTfLiteFloat32 || IsQuantized(value_type),
NNAPIValidationFailureType::kUnsupportedInputType,
"Value should be quantized or Float32.", &val_ctx);
if (IsQuantized(value_type)) {
const auto quantization_params =
context->tensors[node->inputs->data[0]].params;
Expect(quantization_params.scale > 0.f,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Quantization scale should be > 0.", &val_ctx);
}
const auto output_type = context->tensors[node->outputs->data[0]].type;
if (android_sdk_version < kMinSdkVersionForNNAPI13) {
Expect(output_type == kTfLiteUInt8,
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output should be kTfLiteUInt8.", &val_ctx);
} else {
ExpectTypeIn(output_type, {kTfLiteUInt8, kTfLiteInt8},
NNAPIValidationFailureType::kUnsupportedOutputType,
"Output should be kTfLiteUInt8.", &val_ctx);
}
const auto quantization_params =
context->tensors[node->outputs->data[0]].params;
Expect(quantization_params.scale > 0.f,
NNAPIValidationFailureType::kUnsupportedQuantizationParameters,
"Quantization scale should be > 0.", &val_ctx);
} break;
case kTfLiteBuiltinReduceAny: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(context->tensors[node->outputs->data[0]].dims->size != 0,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI does not support generating a scalar as output.", &val_ctx);
} break;
case kTfLiteBuiltinReduceMin:
case kTfLiteBuiltinReduceMax: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
const auto input_tensor = context->tensors[node->inputs->data[0]];
const auto input_type = input_tensor.type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
Expect(input_tensor.dims->size != 0,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI does not support generating a scalar as output.", &val_ctx);
} break;
case kTfLiteBuiltinDepthToSpace: {
const TfLiteType input_type =
context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
} break;
case kTfLiteBuiltinReduceProd:
case kTfLiteBuiltinSum: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI12,
&val_ctx);
Expect(context->tensors[node->outputs->data[0]].dims->size != 0,
NNAPIValidationFailureType::kUnsupportedOutputType,
"NNAPI does not support generating a scalar as output", &val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
Expect(input_type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports floating point input.", &val_ctx);
} break;
case kTfLiteBuiltinElu: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
Expect(input_type == kTfLiteFloat32,
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports floating point input.", &val_ctx);
} break;
case kTfLiteBuiltinFill: {
ExpectOpVersion(version, 1, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const auto& dims_tensor = context->tensors[node->inputs->data[0]];
Expect(IsConstantTensor(&dims_tensor),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI doesn't support dynamic dimensions tensor.", &val_ctx);
EXPECT_INPUT_TYPE_IN(dims_tensor.type, kTfLiteInt32, kTfLiteInt64);
if (IsConstantTensor(&dims_tensor)) {
Expect(dims_tensor.dims->data[0] != 0,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI doesn't support generating scalars from FILL", &val_ctx);
if (dims_tensor.type == kTfLiteInt64) {
bool fit_in_int32 =
std::all_of(dims_tensor.data.i64,
dims_tensor.data.i64 + dims_tensor.dims->data[0],
[](int64_t dim) {
return std::numeric_limits<int32_t>::min() <= dim &&
dim <= std::numeric_limits<int32_t>::max();
});
Expect(fit_in_int32,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI only supports int32 dimensions tensor. If the "
"dimensions type is int64 and they are constant we can "
"convert them to int32 if the value isn't too large.",
&val_ctx);
}
}
const auto& value_tensor = context->tensors[node->inputs->data[1]];
EXPECT_INPUT_TYPE_IN(value_tensor.type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteInt64);
if (value_tensor.type == kTfLiteInt64 &&
IsConstantTensor(&value_tensor)) {
Expect(
*value_tensor.data.i64 <= std::numeric_limits<int32_t>::max() &&
*value_tensor.data.i64 >= std::numeric_limits<int32_t>::min(),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI only supports int32 input. If the input type is int64 and "
"constant we can convert it to int32 if the value isn't too "
"large.",
&val_ctx);
}
} break;
case kTfLiteBuiltinPack: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
if (android_sdk_version >= kNNAPIRuntimeFeatureLevel6) {
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteInt32, kTfLiteFloat32,
kTfLiteInt8, kTfLiteUInt8);
} else {
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteInt8);
auto builtin = reinterpret_cast<TfLitePackParams*>(node->builtin_data);
Expect(builtin->axis != -1 &&
builtin->axis !=
context->tensors[node->inputs->data[0]].dims->size,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support axis being the last dimension",
&val_ctx);
}
} break;
case kTfLiteBuiltinUnpack: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI13,
&val_ctx);
const auto input_type = context->tensors[node->inputs->data[0]].type;
EXPECT_INPUT_TYPE_IN(input_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8);
Expect(context->tensors[node->inputs->data[0]].dims->size > 1,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support unpacking a rank-1 tensor", &val_ctx);
Expect(context->tensors[node->inputs->data[0]].dims->size <= 4,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support unpacking a tensor with rank > 4",
&val_ctx);
const auto* builtin =
reinterpret_cast<const TfLiteUnpackParams*>(node->builtin_data);
Expect(builtin->axis != -1 &&
builtin->axis !=
context->tensors[node->inputs->data[0]].dims->size - 1,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NNAPI does not support axis being the last dimension", &val_ctx);
} break;
case kTfLiteBuiltinSquaredDifference: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version, kMinSdkVersionForNNAPI11,
&val_ctx);
const auto input0_type = context->tensors[node->inputs->data[0]].type;
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
EXPECT_INPUT_TYPE_IN(input0_type, kTfLiteFloat32, kTfLiteUInt8,
kTfLiteInt8, kTfLiteInt32);
} else if (android_sdk_version >= kMinSdkVersionForNNAPI12) {
EXPECT_INPUT_TYPE_IN(input0_type, kTfLiteFloat32, kTfLiteUInt8);
} else {
EXPECT_INPUT_TYPE_IN(input0_type, kTfLiteFloat32);
}
const int input0_rank =
context->tensors[node->inputs->data[0]].dims->size;
const int input1_rank =
context->tensors[node->inputs->data[1]].dims->size;
Expect(input0_rank <= 4 && input1_rank <= 4,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"NNAPI does not support input rank greater than 4", &val_ctx);
} break;
case kTfLiteBuiltinBatchMatmul: {
ExpectOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version,
kNNAPIRuntimeFeatureLevel6, &val_ctx);
const auto& input0 = context->tensors[node->inputs->data[0]];
const auto& input1 = context->tensors[node->inputs->data[1]];
EXPECT_INPUT_TYPE_IN(input0.type, kTfLiteFloat32, kTfLiteInt32,
kTfLiteInt8);
Expect(input0.type == input1.type,
NNAPIValidationFailureType::kUnsupportedHybridOperator,
"NNAPI does not support hybrid batch matmul", &val_ctx);
Expect(input0.dims->size <= 4 && input0.dims->size >= 2,
NNAPIValidationFailureType::kUnsupportedOperandRank,
"NNAPI does not support input rank greater than 4 or less than 2",
&val_ctx);
Expect(!IsBroadcastBatchMatMul(context, node),
NNAPIValidationFailureType::kUnsupportedInputType,
"NNAPI does not support broadcast batch matmul", &val_ctx);
} break;
case kTfLiteBuiltinMirrorPad: {
ExpectMaxOpVersion(version, 2, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version,
kNNAPIRuntimeFeatureLevel7, &val_ctx);
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
Expect(reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data)
->mode != kTfLiteMirrorPaddingUnknown,
NNAPIValidationFailureType::kUnsupportedOperandValue,
"Unknown padding mode", &val_ctx);
const TfLiteIntArrayView input_shape(
context->tensors[node->inputs->data[0]].dims);
Expect(!HasZeroes(input_shape),
NNAPIValidationFailureType::kUnsupportedOperandValue,
"NN API pad ops do not support input tensors with no elements",
&val_ctx);
Expect(node->inputs->size == 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expecting 2 inputs", &val_ctx);
} break;
case kTfLiteBuiltinReverseV2: {
ExpectMaxOpVersion(version, 3, &val_ctx);
ExpectMinAndroidSdkVersion(android_sdk_version,
kNNAPIRuntimeFeatureLevel7, &val_ctx);
ExpectIsFloatQuant8OrInt32Operator(context, node, &val_ctx);
Expect(node->inputs->size == 2,
NNAPIValidationFailureType::kUnsupportedOperatorVariant,
"Expecting 2 inputs", &val_ctx);
} break;
default:
AddValidationFailure(NNAPIValidationFailureType::kUnsupportedOperator,
"Unsupported operation type.", &val_ctx);
}
return val_ctx.is_valid;
}
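// Maps a TFLite builtin operator to the NNAPI operation type, appending any
// extra scalar or constant operands that NNAPI expects beyond the node's own
// tensor inputs.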
TfLiteStatus NNAPIDelegateKernel::Map(
TfLiteContext* context, int builtin_code, int version,
int android_sdk_version, const NNAPIOpMappingArgs& mapping_args,
ANeuralNetworksOperationType* nn_op_type,
NnapiDelegateVendorPlugin* vendor_plugin) {
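  // Helper that synthesizes an all-zero bias tensor when a node has none:
  // NNAPI requires an explicit bias operand (float32 for float inputs, int32
  // with scale = input_scale * filter_scale for quantized inputs).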
auto add_zero_bias = [mapping_args](int input_id, int filter_id,
int num_elements) -> void {
int bias_index = -1;
mapping_args.context->AddTensors(mapping_args.context, 1, &bias_index);
TfLiteTensor* bias_tensor = &mapping_args.context->tensors[bias_index];
const auto input_type = mapping_args.context->tensors[input_id].type;
if (input_type == kTfLiteFloat32) {
bias_tensor->type = kTfLiteFloat32;
} else {
bias_tensor->type = kTfLiteInt32;
}
TfLiteIntArray* bias_shape = TfLiteIntArrayCreate(1);
bias_shape->data[0] = num_elements;
bias_tensor->allocation_type = kTfLiteDynamic;
mapping_args.context->ResizeTensor(mapping_args.context, bias_tensor,
bias_shape);
if (input_type == kTfLiteFloat32) {
memset(bias_tensor->data.f, 0, num_elements * sizeof(float));
mapping_args.builder->AddVectorFloat32Operand(bias_tensor->data.f,
num_elements);
} else {
memset(bias_tensor->data.i32, 0, num_elements * sizeof(int));
const TfLiteTensor& input_tensor =
mapping_args.context->tensors[input_id];
const TfLiteTensor& filter_tensor =
mapping_args.context->tensors[filter_id];
bias_tensor->params.scale =
input_tensor.params.scale * filter_tensor.params.scale;
mapping_args.builder->AddVectorInt32Operand(
bias_tensor->data.i32, num_elements, bias_tensor->params.scale,
0);
}
};
switch (builtin_code) {
case kTfLiteBuiltinAdd: {
auto builtin =
reinterpret_cast<TfLiteAddParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_ADD;
} break;
case kTfLiteBuiltinArgMax: {
*nn_op_type = ANEURALNETWORKS_ARGMAX;
} break;
case kTfLiteBuiltinArgMin: {
*nn_op_type = ANEURALNETWORKS_ARGMIN;
} break;
case kTfLiteBuiltinMul: {
auto builtin =
reinterpret_cast<TfLiteMulParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_MUL;
} break;
case kTfLiteBuiltinAveragePool2d: {
mapping_args.builder->AddPoolingParams(mapping_args.node->builtin_data);
*nn_op_type = ANEURALNETWORKS_AVERAGE_POOL_2D;
} break;
case kTfLiteBuiltinMaxPool2d: {
mapping_args.builder->AddPoolingParams(mapping_args.node->builtin_data);
*nn_op_type = ANEURALNETWORKS_MAX_POOL_2D;
} break;
case kTfLiteBuiltinL2Pool2d: {
mapping_args.builder->AddPoolingParams(mapping_args.node->builtin_data);
*nn_op_type = ANEURALNETWORKS_L2_POOL_2D;
} break;
case kTfLiteBuiltinConv2d: {
auto builtin =
reinterpret_cast<TfLiteConvParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->padding);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
      const int input_id = mapping_args.node->inputs->data[0];
      const int filter_id = mapping_args.node->inputs->data[1];
const auto& input_tensor = context->tensors[input_id];
const auto& filter_tensor = context->tensors[filter_id];
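      // If the filter's input-channel count differs from the input tensor's
      // channel count this is a grouped convolution, and NNAPI needs the group
      // count (input channels / filter channels) as an additional operand.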
auto is_grouped_conv = false;
if (input_tensor.dims->size != 0 && filter_tensor.dims->size != 0) {
is_grouped_conv =
input_tensor.dims->data[3] != filter_tensor.dims->data[3];
}
if (is_grouped_conv) {
mapping_args.builder->AddScalarInt32Operand(
input_tensor.dims->data[3] / filter_tensor.dims->data[3]);
}
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
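      // Dilated convolutions use the longer NNAPI signature: a data layout
      // flag (false selects NHWC) followed by the two dilation factors.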
if (builtin->dilation_width_factor != 1 ||
builtin->dilation_height_factor != 1) {
mapping_args.builder->AddScalarBoolOperand(false);
mapping_args.builder->AddScalarInt32Operand(
builtin->dilation_width_factor);
mapping_args.builder->AddScalarInt32Operand(
builtin->dilation_height_factor);
}
if (is_grouped_conv) {
*nn_op_type = ANEURALNETWORKS_GROUPED_CONV_2D;
} else {
*nn_op_type = ANEURALNETWORKS_CONV_2D;
}
} break;
case kTfLiteBuiltinDepthwiseConv2d: {
auto builtin = reinterpret_cast<TfLiteDepthwiseConvParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->padding);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
mapping_args.builder->AddScalarInt32Operand(builtin->depth_multiplier);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
if (builtin->dilation_width_factor != 1 ||
builtin->dilation_height_factor != 1) {
mapping_args.builder->AddScalarBoolOperand(false);
mapping_args.builder->AddScalarInt32Operand(
builtin->dilation_width_factor);
mapping_args.builder->AddScalarInt32Operand(
builtin->dilation_height_factor);
}
*nn_op_type = ANEURALNETWORKS_DEPTHWISE_CONV_2D;
} break;
case kTfLiteBuiltinFullyConnected: {
const bool is_bias_present =
mapping_args.node->inputs->size == 3 &&
mapping_args.node->inputs->data[2] != kTfLiteOptionalTensor;
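      // NNAPI FULLY_CONNECTED always takes a bias operand, so synthesize a
      // zero bias when the TFLite node does not provide one.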
if (!is_bias_present) {
        const int input_tensor_id = mapping_args.node->inputs->data[0];
        const int filter_tensor_id = mapping_args.node->inputs->data[1];
const int num_units =
mapping_args.context->tensors[filter_tensor_id].dims->data[0];
add_zero_bias(input_tensor_id, filter_tensor_id, num_units);
}
auto builtin = reinterpret_cast<TfLiteFullyConnectedParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_FULLY_CONNECTED;
} break;
case kTfLiteBuiltinHardSwish: {
*nn_op_type = ANEURALNETWORKS_HARD_SWISH;
} break;
case kTfLiteBuiltinSoftmax: {
auto builtin = reinterpret_cast<TfLiteSoftmaxParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarFloat32Operand(builtin->beta);
*nn_op_type = ANEURALNETWORKS_SOFTMAX;
} break;
case kTfLiteBuiltinReshape: {
if (mapping_args.node->inputs->size == 1) {
auto* params = reinterpret_cast<TfLiteReshapeParams*>(
mapping_args.node->builtin_data);
int num_dimensions = params->num_dimensions;
std::vector<int32_t> output_shape(num_dimensions);
for (int i = 0; i < num_dimensions; ++i) {
output_shape[i] = params->shape[i];
}
mapping_args.builder->AddVectorInt32Operand(
output_shape.data(), static_cast<uint32_t>(num_dimensions));
}
*nn_op_type = ANEURALNETWORKS_RESHAPE;
} break;
case kTfLiteBuiltinResizeBilinear: {
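      // NNAPI RESIZE_BILINEAR takes the target width and height as scalars,
      // read here from the output tensor's shape; align_corners and
      // half_pixel_centers are only appended (after a layout flag) when one
      // of them is set.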
const int output_id = mapping_args.node->outputs->data[0];
auto& output = mapping_args.context->tensors[output_id];
const int output_height = output.dims->data[1];
const int output_width = output.dims->data[2];
mapping_args.builder->AddScalarInt32Operand(output_width);
mapping_args.builder->AddScalarInt32Operand(output_height);
auto builtin = reinterpret_cast<TfLiteResizeBilinearParams*>(
mapping_args.node->builtin_data);
if (builtin->align_corners == true ||
builtin->half_pixel_centers == true) {
mapping_args.builder->AddScalarBoolOperand(false);
mapping_args.builder->AddScalarBoolOperand(builtin->align_corners);
mapping_args.builder->AddScalarBoolOperand(builtin->half_pixel_centers);
}
*nn_op_type = ANEURALNETWORKS_RESIZE_BILINEAR;
} break;
case kTfLiteBuiltinResizeNearestNeighbor: {
const TfLiteTensor& new_shape =
mapping_args.context->tensors[mapping_args.node->inputs->data[1]];
mapping_args.builder->AddScalarInt32Operand(new_shape.data.i32[1]);
mapping_args.builder->AddScalarInt32Operand(new_shape.data.i32[0]);
mapping_args.builder->AddScalarBoolOperand(false);
auto builtin = reinterpret_cast<TfLiteResizeNearestNeighborParams*>(
mapping_args.node->builtin_data);
if (builtin->align_corners == true ||
builtin->half_pixel_centers == true) {
mapping_args.builder->AddScalarBoolOperand(builtin->align_corners);
mapping_args.builder->AddScalarBoolOperand(builtin->half_pixel_centers);
}
*nn_op_type = ANEURALNETWORKS_RESIZE_NEAREST_NEIGHBOR;
} break;
case kTfLiteBuiltinSqueeze: {
auto builtin = reinterpret_cast<TfLiteSqueezeParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddVectorInt32Operand(
builtin->num_squeeze_dims ? builtin->squeeze_dims : nullptr,
static_cast<uint32_t>(builtin->num_squeeze_dims));
*nn_op_type = ANEURALNETWORKS_SQUEEZE;
} break;
case kTfLiteBuiltinUnidirectionalSequenceLstm: {
auto builtin = reinterpret_cast<TfLiteUnidirectionalSequenceLSTMParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);
mapping_args.builder->AddScalarBoolOperand(builtin->time_major);
const bool hybrid_op = IsHybridOperator(
mapping_args.context, kTfLiteBuiltinUnidirectionalSequenceLstm,
mapping_args.node);
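      // With 24 inputs the model carries the four layer-normalization
      // coefficient tensors (inputs 20-23); pass them through, or add empty
      // optional operands so the NNAPI signature stays complete.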
if (mapping_args.node->inputs->size == 24) {
for (int i = 20; i < 24; ++i) {
const int input_index = mapping_args.node->inputs->data[i];
if (input_index != kTfLiteOptionalTensor) {
mapping_args.builder->AddTensorInput(input_index, hybrid_op);
} else {
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
}
} else {
for (int i = 0; i < 4; ++i) {
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
}
*nn_op_type = ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_LSTM;
} break;
case kTfLiteBuiltinL2Normalization: {
*nn_op_type = ANEURALNETWORKS_L2_NORMALIZATION;
} break;
case kTfLiteBuiltinLocalResponseNormalization: {
auto builtin = reinterpret_cast<TfLiteLocalResponseNormParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->radius);
mapping_args.builder->AddScalarFloat32Operand(builtin->bias);
mapping_args.builder->AddScalarFloat32Operand(builtin->alpha);
mapping_args.builder->AddScalarFloat32Operand(builtin->beta);
*nn_op_type = ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION;
} break;
case kTfLiteBuiltinLshProjection: {
auto builtin = reinterpret_cast<TfLiteLSHProjectionParams*>(
mapping_args.node->builtin_data);
int type = builtin->type;
const int kNNAPILshProjectionSparse = 3;
if (builtin->type == kTfLiteLshProjectionSparse) {
type = kNNAPILshProjectionSparse;
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
mapping_args.builder->AddScalarInt32Operand(type);
*nn_op_type = ANEURALNETWORKS_LSH_PROJECTION;
} break;
case kTfLiteBuiltinConcatenation: {
auto builtin = reinterpret_cast<TfLiteConcatenationParams*>(
mapping_args.node->builtin_data);
int axis = builtin->axis < 0
? mapping_args.context
->tensors[mapping_args.node->inputs->data[0]]
.dims->size +
builtin->axis
: builtin->axis;
mapping_args.builder->AddScalarInt32Operand(axis);
*nn_op_type = ANEURALNETWORKS_CONCATENATION;
} break;
case kTfLiteBuiltinDequantize: {
*nn_op_type = ANEURALNETWORKS_DEQUANTIZE;
} break;
case kTfLiteBuiltinFloor: {
*nn_op_type = ANEURALNETWORKS_FLOOR;
} break;
case kTfLiteBuiltinRelu: {
*nn_op_type = ANEURALNETWORKS_RELU;
} break;
case kTfLiteBuiltinReluN1To1: {
*nn_op_type = ANEURALNETWORKS_RELU1;
} break;
case kTfLiteBuiltinRelu6: {
*nn_op_type = ANEURALNETWORKS_RELU6;
} break;
case kTfLiteBuiltinLogistic: {
*nn_op_type = ANEURALNETWORKS_LOGISTIC;
} break;
case kTfLiteBuiltinTanh: {
*nn_op_type = ANEURALNETWORKS_TANH;
} break;
case kTfLiteBuiltinSub: {
auto builtin =
reinterpret_cast<TfLiteSubParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_SUB;
} break;
case kTfLiteBuiltinDiv: {
auto builtin =
reinterpret_cast<TfLiteDivParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_DIV;
} break;
case kTfLiteBuiltinPad:
case kTfLiteBuiltinPadv2: {
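      // Use PAD when only paddings are given (or the constant value input is
      // the optional-tensor placeholder); use PAD_V2 when an explicit padding
      // value tensor is present.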
if (mapping_args.node->inputs->size == 2) {
*nn_op_type = ANEURALNETWORKS_PAD;
} else {
const int constant_value_id = mapping_args.node->inputs->data[2];
if (constant_value_id == kTfLiteOptionalTensor) {
*nn_op_type = ANEURALNETWORKS_PAD;
} else {
*nn_op_type = ANEURALNETWORKS_PAD_V2;
}
}
} break;
case kTfLiteBuiltinUnidirectionalSequenceRnn: {
auto builtin = reinterpret_cast<TfLiteSequenceRNNParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarInt32Operand(builtin->time_major);
*nn_op_type = ANEURALNETWORKS_UNIDIRECTIONAL_SEQUENCE_RNN;
} break;
case kTfLiteBuiltinSpaceToBatchNd: {
*nn_op_type = ANEURALNETWORKS_SPACE_TO_BATCH_ND;
} break;
case kTfLiteBuiltinBatchToSpaceNd: {
*nn_op_type = ANEURALNETWORKS_BATCH_TO_SPACE_ND;
} break;
case kTfLiteBuiltinStridedSlice: {
auto builtin = reinterpret_cast<TfLiteStridedSliceParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->begin_mask);
mapping_args.builder->AddScalarInt32Operand(builtin->end_mask);
mapping_args.builder->AddScalarInt32Operand(builtin->shrink_axis_mask);
*nn_op_type = ANEURALNETWORKS_STRIDED_SLICE;
} break;
case kTfLiteBuiltinTranspose: {
*nn_op_type = ANEURALNETWORKS_TRANSPOSE;
} break;
case kTfLiteBuiltinAbs: {
*nn_op_type = ANEURALNETWORKS_ABS;
} break;
case kTfLiteBuiltinExp: {
*nn_op_type = ANEURALNETWORKS_EXP;
} break;
case kTfLiteBuiltinLog: {
*nn_op_type = ANEURALNETWORKS_LOG;
} break;
case kTfLiteBuiltinRsqrt: {
*nn_op_type = ANEURALNETWORKS_RSQRT;
} break;
case kTfLiteBuiltinPow: {
*nn_op_type = ANEURALNETWORKS_POW;
} break;
case kTfLiteBuiltinSlice: {
*nn_op_type = ANEURALNETWORKS_SLICE;
} break;
case kTfLiteBuiltinCos: {
*nn_op_type = ANEURALNETWORKS_SIN;
} break;
case kTfLiteBuiltinSin: {
*nn_op_type = ANEURALNETWORKS_SIN;
} break;
case kTfLiteBuiltinTransposeConv: {
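      // TFLite TRANSPOSE_CONV orders its inputs (output_shape, weights, input,
      // bias) while NNAPI expects (input, weights, bias, output_shape, ...),
      // so re-add the tensors in NNAPI order, forcing per-channel weights and
      // synthesizing a zero bias when none is provided.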
int input_tensor_flags = 0;
      const int input_tensor_id = mapping_args.node->inputs->data[2];
      const int weight_tensor_id = mapping_args.node->inputs->data[1];
const bool hybrid_op = false;
if (android_sdk_version >= kMinSdkVersionForNNAPI13) {
mapping_args.builder->AddTensorInput(
input_tensor_id, hybrid_op,
input_tensor_flags | NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED);
} else {
mapping_args.builder->AddTensorInput(
input_tensor_id, hybrid_op,
input_tensor_flags | NN_TENSOR_FLAG_INT8_CONVERSION);
}
mapping_args.builder->AddTensorInput(
weight_tensor_id, hybrid_op,
input_tensor_flags | NN_TENSOR_FLAG_FORCE_PER_CHANNEL);
const bool is_bias_present =
mapping_args.node->inputs->size == 4 &&
          mapping_args.node->inputs->data[3] != kTfLiteOptionalTensor;
if (is_bias_present) {
mapping_args.builder->AddTensorInput(
            mapping_args.node->inputs->data[3], hybrid_op);
} else {
const TfLiteTensor& output_shape =
            mapping_args.context->tensors[mapping_args.node->inputs->data[0]];
const int output_depth = output_shape.data.i32[3];
add_zero_bias(input_tensor_id, weight_tensor_id, output_depth);
}
mapping_args.builder->AddTensorInput(
          mapping_args.node->inputs->data[0], hybrid_op);
auto builtin = reinterpret_cast<TfLiteTransposeConvParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->padding);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_width);
mapping_args.builder->AddScalarInt32Operand(builtin->stride_height);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarBoolOperand(false);
*nn_op_type = ANEURALNETWORKS_TRANSPOSE_CONV;
} break;
case kTfLiteBuiltinSqrt: {
*nn_op_type = ANEURALNETWORKS_SQRT;
} break;
case kTfLiteBuiltinRnn: {
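      // Input 4 is the RNN hidden state; register it as a model state output
      // so the delegate feeds it back on the next invocation.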
int ann_index;
mapping_args.builder->AddStateFloat32Tensor(
          mapping_args.node->inputs->data[4], &ann_index);
      mapping_args.model_state_outputs->push_back(ann_index);
      mapping_args.model_state_tfl_inputs->push_back(
          mapping_args.node->inputs->data[4]);
auto builtin =
reinterpret_cast<TfLiteRNNParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_RNN;
} break;
case kTfLiteBuiltinSpaceToDepth: {
auto builtin = reinterpret_cast<TfLiteSpaceToDepthParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->block_size);
*nn_op_type = ANEURALNETWORKS_SPACE_TO_DEPTH;
} break;
case kTfLiteBuiltinSvdf: {
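      // Input 4 holds the SVDF activation state; track it as model state so
      // it persists across invocations.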
int ann_index;
mapping_args.builder->AddStateFloat32Tensor(
          mapping_args.node->inputs->data[4], &ann_index);
      mapping_args.model_state_outputs->push_back(ann_index);
      mapping_args.model_state_tfl_inputs->push_back(
          mapping_args.node->inputs->data[4]);
auto builtin =
reinterpret_cast<TfLiteSVDFParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->rank);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
*nn_op_type = ANEURALNETWORKS_SVDF;
} break;
case kTfLiteBuiltinLstm: {
if (isLstmBasicKernel(mapping_args.node)) {
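        // The "basic" quantized LSTM kernel packs all gate weights into one
        // uint8 tensor and all gate biases into one int32 tensor. Split them
        // into the per-gate weight matrices and bias vectors expected by
        // ANEURALNETWORKS_QUANTIZED_16BIT_LSTM, and wire the outputs back to
        // the previous-activation and previous-state inputs as feedback loops.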
const auto output_dims =
mapping_args.context->tensors[mapping_args.node->outputs->data[1]]
.dims;
mapping_args.builder->AddTensorInput(
            mapping_args.node->inputs->data[0], false, false);
const auto weight_tensor =
            mapping_args.context->tensors[mapping_args.node->inputs->data[2]];
std::vector<uint8_t> recurrent_to_input;
std::vector<uint8_t> input_to_input;
std::vector<uint8_t> recurrent_to_cell;
std::vector<uint8_t> input_to_cell;
std::vector<uint8_t> recurrent_to_forget;
std::vector<uint8_t> input_to_forget;
std::vector<uint8_t> recurrent_to_output;
std::vector<uint8_t> input_to_output;
tflite::delegate::nnapi::DecomposeQuantLstmWeightsTensor(
weight_tensor.data.uint8, weight_tensor.dims, &recurrent_to_input,
&input_to_input, &recurrent_to_cell, &input_to_cell,
&recurrent_to_forget, &input_to_forget, &recurrent_to_output,
&input_to_output);
TfLiteIntArray* recurrent_weight_dims = TfLiteIntArrayCreate(2);
TfLiteIntArray* input_weight_dims = TfLiteIntArrayCreate(2);
tflite::delegate::nnapi::SetWeightSubmatrixDims(
weight_tensor.dims, recurrent_weight_dims, input_weight_dims);
int new_tensor_index = -1;
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
input_weight_dims, input_to_input, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
input_weight_dims, input_to_forget, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
input_weight_dims, input_to_cell, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
input_weight_dims, input_to_output, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
recurrent_weight_dims, recurrent_to_input, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
recurrent_weight_dims, recurrent_to_forget, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
recurrent_weight_dims, recurrent_to_cell, weight_tensor.params,
&new_tensor_index);
mapping_args.builder->AddNewInputConstantTensor<uint8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
recurrent_weight_dims, recurrent_to_output, weight_tensor.params,
&new_tensor_index);
TfLiteIntArrayFree(input_weight_dims);
TfLiteIntArrayFree(recurrent_weight_dims);
const auto bias_size = output_dims->data[1];
const TfLiteTensor& biases_tensor =
            mapping_args.context->tensors[mapping_args.node->inputs->data[3]];
std::vector<int32_t> input_bias;
std::vector<int32_t> cell_bias;
std::vector<int32_t> forget_bias;
std::vector<int32_t> output_bias;
delegate::nnapi::DecomposeBiasTensor(biases_tensor.data.i32, bias_size,
&input_bias, &cell_bias,
&forget_bias, &output_bias);
int input_bias_tensor = -1;
mapping_args.builder->AddNewInputConstantTensor<int32_t>(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size}, input_bias,
biases_tensor.params, &input_bias_tensor);
int forget_bias_tensor = -1;
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size},
forget_bias, biases_tensor.params, &forget_bias_tensor);
int cell_gate_bias_tensor = -1;
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size}, cell_bias,
biases_tensor.params, &cell_gate_bias_tensor);
int output_gate_bias_tensor = -1;
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, {bias_size},
output_bias, biases_tensor.params, &output_gate_bias_tensor);
mapping_args.builder->AddTensorInput(
            mapping_args.node->inputs->data[4], false, false);
        mapping_args.builder->AddTensorInput(
            mapping_args.node->inputs->data[1], false, false);
mapping_args.feedback_loops->push_back(std::make_tuple(
            mapping_args.node->outputs->data[0],
            mapping_args.node->inputs->data[1]));
        mapping_args.feedback_loops->push_back(std::make_tuple(
            mapping_args.node->outputs->data[1],
            mapping_args.node->inputs->data[4]));
        mapping_args.builder->AddTensorOutput(
            mapping_args.node->outputs->data[1], 0);
        mapping_args.builder->AddTensorOutput(
            mapping_args.node->outputs->data[0], 0);
*nn_op_type = ANEURALNETWORKS_QUANTIZED_16BIT_LSTM;
} else {
auto builtin = reinterpret_cast<TfLiteLSTMParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);
mapping_args.builder->AddAdditionalFloat32OutputTensor(2);
int ann_index;
        // The activation state (input 18) and cell state (input 19) are
        // variable tensors; register them as extra model outputs so their
        // updated values can be copied back after each invocation.
        mapping_args.builder->AddStateFloat32Tensor(
            mapping_args.node->inputs->data[18], &ann_index);
        mapping_args.model_state_outputs->push_back(ann_index);
        mapping_args.model_state_tfl_inputs->push_back(
            mapping_args.node->inputs->data[18]);
        mapping_args.builder->AddStateFloat32Tensor(
            mapping_args.node->inputs->data[19], &ann_index);
        mapping_args.model_state_outputs->push_back(ann_index);
        mapping_args.model_state_tfl_inputs->push_back(
            mapping_args.node->inputs->data[19]);
const bool hybrid_op = IsHybridOperator(
mapping_args.context, kTfLiteBuiltinLstm, mapping_args.node);
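        // Inputs 20..23 hold the optional layer-normalization weights.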
if (mapping_args.node->inputs->size == 24) {
for (int i = 20; i < 24; ++i) {
const auto input_index = mapping_args.node->inputs->data[i];
if (input_index != kTfLiteOptionalTensor) {
mapping_args.builder->AddTensorInput(input_index, hybrid_op);
} else {
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
}
}
*nn_op_type = ANEURALNETWORKS_LSTM;
}
} break;
case kTfLiteBuiltinMean: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
int32_t keep_dims = 0;
if (builtin->keep_dims) keep_dims = 1;
mapping_args.builder->AddScalarInt32Operand(keep_dims);
*nn_op_type = ANEURALNETWORKS_MEAN;
} break;
case kTfLiteBuiltinEmbeddingLookup: {
*nn_op_type = ANEURALNETWORKS_EMBEDDING_LOOKUP;
} break;
case kTfLiteBuiltinHashtableLookup: {
*nn_op_type = ANEURALNETWORKS_HASHTABLE_LOOKUP;
} break;
case kTfLiteBuiltinMaximum: {
*nn_op_type = ANEURALNETWORKS_MAXIMUM;
} break;
case kTfLiteBuiltinMinimum: {
*nn_op_type = ANEURALNETWORKS_MINIMUM;
} break;
case kTfLiteBuiltinCast: {
*nn_op_type = ANEURALNETWORKS_CAST;
} break;
case kTfLiteBuiltinLeakyRelu: {
const auto input_type =
mapping_args.context->tensors[mapping_args.node->inputs->data[0]]
.type;
auto builtin = reinterpret_cast<TfLiteLeakyReluParams*>(
mapping_args.node->builtin_data);
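      // Lower LEAKY_RELU onto PRELU by materializing a one-element alpha
      // tensor; for quantized inputs the alpha value is carried in the scale.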
TfLiteTensor alpha_tensor;
alpha_tensor.type = input_type;
alpha_tensor.allocation_type = kTfLiteDynamic;
alpha_tensor.dims = TfLiteIntArrayCreate(1);
alpha_tensor.dims->data[0] = 1;
alpha_tensor.params.zero_point = 0;
int new_tensor_index = -1;
if (input_type == kTfLiteFloat32) {
alpha_tensor.params.scale = 0;
std::vector<float> alpha_value = {builtin->alpha};
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32, alpha_tensor.dims,
alpha_value, alpha_tensor.params, &new_tensor_index);
} else if (input_type == kTfLiteInt8 &&
android_sdk_version >= kMinSdkVersionForNNAPI13) {
alpha_tensor.params.scale = builtin->alpha;
std::vector<int8_t> alpha_value = {1};
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, kTfLiteInt8,
alpha_tensor.dims, alpha_value, alpha_tensor.params,
&new_tensor_index);
} else {
alpha_tensor.params.scale = builtin->alpha;
std::vector<uint8_t> alpha_value = {1};
mapping_args.builder->AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, kTfLiteUInt8,
alpha_tensor.dims, alpha_value, alpha_tensor.params,
&new_tensor_index);
}
*nn_op_type = ANEURALNETWORKS_PRELU;
} break;
case kTfLiteBuiltinPrelu: {
*nn_op_type = ANEURALNETWORKS_PRELU;
} break;
case kTfLiteBuiltinTile: {
*nn_op_type = ANEURALNETWORKS_TILE;
} break;
case kTfLiteBuiltinLogicalOr: {
*nn_op_type = ANEURALNETWORKS_LOGICAL_OR;
} break;
case kTfLiteBuiltinLogicalAnd: {
*nn_op_type = ANEURALNETWORKS_LOGICAL_AND;
} break;
case kTfLiteBuiltinLogicalNot: {
*nn_op_type = ANEURALNETWORKS_LOGICAL_NOT;
} break;
case kTfLiteBuiltinLess: {
*nn_op_type = ANEURALNETWORKS_LESS;
} break;
case kTfLiteBuiltinLessEqual: {
*nn_op_type = ANEURALNETWORKS_LESS_EQUAL;
} break;
case kTfLiteBuiltinGreater: {
*nn_op_type = ANEURALNETWORKS_GREATER;
} break;
case kTfLiteBuiltinGreaterEqual: {
*nn_op_type = ANEURALNETWORKS_GREATER_EQUAL;
} break;
case kTfLiteBuiltinEqual: {
*nn_op_type = ANEURALNETWORKS_EQUAL;
} break;
case kTfLiteBuiltinNotEqual: {
*nn_op_type = ANEURALNETWORKS_NOT_EQUAL;
} break;
case kTfLiteBuiltinNeg: {
*nn_op_type = ANEURALNETWORKS_NEG;
} break;
case kTfLiteBuiltinTopkV2: {
const TfLiteTensor& k_param =
mapping_args.context->tensors[mapping_args.node->inputs->data[1]];
mapping_args.builder->AddScalarInt32Operand(*k_param.data.i32);
*nn_op_type = ANEURALNETWORKS_TOPK_V2;
} break;
case kTfLiteBuiltinSelect: {
*nn_op_type = ANEURALNETWORKS_SELECT;
} break;
case kTfLiteBuiltinGather: {
auto builtin = reinterpret_cast<TfLiteGatherParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->axis);
      mapping_args.builder->AddTensorInput(mapping_args.node->inputs->data[1],
                                           /*hybrid_op=*/false,
                                           /*tensor_flags=*/0);
*nn_op_type = ANEURALNETWORKS_GATHER;
} break;
case kTfLiteBuiltinBidirectionalSequenceLstm: {
auto builtin = reinterpret_cast<TfLiteBidirectionalSequenceLSTMParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->activation);
mapping_args.builder->AddScalarFloat32Operand(builtin->cell_clip);
mapping_args.builder->AddScalarFloat32Operand(builtin->proj_clip);
mapping_args.builder->AddScalarBoolOperand(builtin->merge_outputs);
mapping_args.builder->AddScalarBoolOperand(builtin->time_major);
for (int i = 0; i < 8; ++i) {
mapping_args.builder->AddVectorFloat32Operand(nullptr, 0);
}
*nn_op_type = ANEURALNETWORKS_BIDIRECTIONAL_SEQUENCE_LSTM;
} break;
case kTfLiteBuiltinExpandDims: {
const TfLiteTensor& axis_param =
mapping_args.context->tensors[mapping_args.node->inputs->data[1]];
mapping_args.builder->AddScalarInt32Operand(*axis_param.data.i32);
*nn_op_type = ANEURALNETWORKS_EXPAND_DIMS;
} break;
case kTfLiteBuiltinSplit: {
const TfLiteTensor& axis =
mapping_args.context->tensors[mapping_args.node->inputs->data[0]];
auto builtin =
reinterpret_cast<TfLiteSplitParams*>(mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(*axis.data.i32);
mapping_args.builder->AddScalarInt32Operand(builtin->num_splits);
*nn_op_type = ANEURALNETWORKS_SPLIT;
} break;
case kTfLiteBuiltinLogSoftmax: {
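      // Scale and axis are fixed to 1 and -1, matching TFLite's LOG_SOFTMAX.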
mapping_args.builder->AddScalarFloat32Operand(1);
mapping_args.builder->AddScalarInt32Operand(-1);
*nn_op_type = ANEURALNETWORKS_LOG_SOFTMAX;
} break;
case kTfLiteBuiltinQuantize: {
auto input_index = mapping_args.node->inputs->data[0];
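      // NNAPI QUANTIZE only accepts float input; for requantization, insert a
      // Dequantize first so the op sees a float tensor.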
if (IsQuantized(mapping_args.context->tensors[input_index].type)) {
mapping_args.builder->AddDequantize(0, input_index, kTfLiteFloat32,
mapping_args.node_index);
}
*nn_op_type = ANEURALNETWORKS_QUANTIZE;
} break;
case kTfLiteBuiltinReduceAny: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_ANY;
} break;
case kTfLiteBuiltinReduceMin: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_MIN;
} break;
case kTfLiteBuiltinReduceMax: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_MAX;
} break;
case kTfLiteBuiltinDepthToSpace: {
auto builtin = reinterpret_cast<TfLiteDepthToSpaceParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarInt32Operand(builtin->block_size);
*nn_op_type = ANEURALNETWORKS_DEPTH_TO_SPACE;
} break;
case kTfLiteBuiltinReduceProd: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_PROD;
} break;
case kTfLiteBuiltinSum: {
auto builtin = reinterpret_cast<TfLiteReducerParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->keep_dims);
*nn_op_type = ANEURALNETWORKS_REDUCE_SUM;
} break;
case kTfLiteBuiltinElu: {
mapping_args.builder->AddScalarFloat32Operand(1.0);
*nn_op_type = ANEURALNETWORKS_ELU;
} break;
case kTfLiteBuiltinFill: {
*nn_op_type = ANEURALNETWORKS_FILL;
} break;
case kTfLiteBuiltinBatchMatmul: {
auto builtin = reinterpret_cast<TfLiteBatchMatMulParams*>(
mapping_args.node->builtin_data);
mapping_args.builder->AddScalarBoolOperand(builtin->adj_x);
mapping_args.builder->AddScalarBoolOperand(builtin->adj_y);
*nn_op_type = ANEURALNETWORKS_BATCH_MATMUL;
} break;
case kTfLiteBuiltinPack: {
*nn_op_type = ANEURALNETWORKS_PACK;
} break;
case kTfLiteBuiltinMirrorPad: {
constexpr int kNnapiModeReflect = 0;
constexpr int kNnapiModeSymmetric = 1;
auto builtin = reinterpret_cast<TfLiteMirrorPaddingParams*>(
mapping_args.node->builtin_data);
int32_t nn_mirror_mode = -1;
if (builtin->mode == kTfLiteMirrorPaddingReflect) {
nn_mirror_mode = kNnapiModeReflect;
} else if (builtin->mode == kTfLiteMirrorPaddingSymmetric) {
nn_mirror_mode = kNnapiModeSymmetric;
}
mapping_args.builder->AddScalarInt32Operand(nn_mirror_mode);
*nn_op_type = ANEURALNETWORKS_MIRROR_PAD;
} break;
case kTfLiteBuiltinReverseV2: {
*nn_op_type = ANEURALNETWORKS_REVERSE;
} break;
default:
return kTfLiteError;
}
return kTfLiteOk;
}
TfLiteStatus NNAPIDelegateKernel::Init(TfLiteContext* context,
const TfLiteDelegateParams* params,
int* nnapi_errno) {
for (auto node_index : TfLiteIntArrayView(params->nodes_to_replace)) {
nodes_.push_back(node_index);
}
densify_output_to_node_mapping_ = std::vector<int>(context->tensors_size, -1);
non_const_dequantize_output_to_node_mapping_ =
std::vector<int>(context->tensors_size, -1);
const auto delegate_options =
StatefulNnApiDelegate::GetOptions(params->delegate);
if (nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI12 &&
ShouldUseTargetDevices(delegate_options, nnapi_)) {
TF_LITE_ENSURE_STATUS(GetTargetDevices(context, params->delegate, nnapi_,
nnapi_errno, &nnapi_devices_));
if (nnapi_devices_.empty()) {
TF_LITE_KERNEL_LOG(
context, "NNAPI delegate requested but no accelerators available.");
return kTfLiteError;
}
if (!delegate_options.disable_debugging_diagnostics_callbacks) {
if (nnapi_->SL_ANeuralNetworksDiagnostic_registerCallbacks != nullptr) {
nnapi_->SL_ANeuralNetworksDiagnostic_registerCallbacks(
[](const void* nnapi,
const ANeuralNetworksDiagnosticCompilationInfo* info) {
return LogCompilationInfoOnce(static_cast<const NnApi*>(nnapi),
info);
},
[](const void* nnapi,
const ANeuralNetworksDiagnosticExecutionInfo* info) {
return LogExecutionInfoOnce(static_cast<const NnApi*>(nnapi),
info);
},
const_cast<NnApi*>(nnapi_));
TFLITE_LOG_PROD(TFLITE_LOG_INFO,
"Registered diagnostics callbacks in NNAPI SL driver"
"SL_ANeuralNetworksDiagnostic_registerCallbacks.");
} else {
TFLITE_LOG_PROD(TFLITE_LOG_WARNING,
"NNAPI SL driver did not implement "
"SL_ANeuralNetworksDiagnostic_registerCallbacks!");
}
}
}
if (nnapi_->android_sdk_version < kMinSdkVersionForNNAPI12 &&
delegate_options.allow_dynamic_dimensions &&
delegate_options.vendor_plugin != nullptr) {
    TF_LITE_KERNEL_LOG(context,
                       "Models with dynamic dimensions and a vendor plugin "
                       "are not supported before NNAPI 1.2 (API level 29).");
return kTfLiteError;
}
tensor_memory_map_ =
&StatefulNnApiDelegate::GetTensorMemoryMap(params->delegate);
tensor_max_size_hints_.resize(context->tensors_size, 0);
for (const auto it : delegate_options.tensor_max_size_hints) {
auto tensor_index = it.first;
if (tensor_index >= context->tensors_size || tensor_index < 0) continue;
if (!HasUnspecifiedDimension(&context->tensors[tensor_index])) continue;
auto max_size_hint = it.second;
tensor_max_size_hints_[tensor_index] = max_size_hint;
}
if (!nn_model_) {
ANeuralNetworksModel* model = nullptr;
RETURN_TFLITE_ERROR_IF_NN_ERROR(context,
nnapi_->ANeuralNetworksModel_create(&model),
"creating NNAPI model", nnapi_errno);
nn_model_.reset(model);
TF_LITE_ENSURE_STATUS(BuildGraph(context, delegate_options,
params->input_tensors,
params->output_tensors, nnapi_errno));
}
auto* cache = StatefulNnApiDelegate::GetCache(params->delegate);
if (cache) {
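    // Build the 32-byte NNAPI compilation caching token from the partition's
    // fingerprint.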
uint64_t token_parts[4];
auto partition_entry = cache->GetEntryForKernel(kNnapiId, context, params);
token_parts[0] = partition_entry.GetFingerprint();
token_parts[1] = partition_entry.GetFingerprint();
token_parts[2] = partition_entry.GetFingerprint();
token_parts[3] = partition_entry.GetFingerprint();
std::vector<uint8_t> nnapi_cache_token(33, 0);
uint8_t* p = reinterpret_cast<uint8_t*>(token_parts);
for (int i = 0; i < 4 * sizeof(uint64_t); i++) {
nnapi_cache_token[i] = p[i];
}
nn_compilation_cache_token_ = nnapi_cache_token;
}
nn_execution_cache_.SetMaxCacheSize(
delegate_options.max_execution_cache_size);
initialised_ = true;
return kTfLiteOk;
}
TfLiteStatus NNAPIDelegateKernel::Prepare(TfLiteContext* context,
TfLiteNode* node, int* nnapi_errno) {
if (!initialised_) {
return kTfLiteError;
}
const auto delegate_options =
StatefulNnApiDelegate::GetOptions(node->delegate);
if (nn_compilation_) {
return kTfLiteOk;
}
ANeuralNetworksCompilation* compilation = nullptr;
if (!nnapi_devices_.empty()) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksCompilation_createForDevices(
nn_model_.get(), nnapi_devices_.data(), nnapi_devices_.size(),
&compilation),
"creating NNAPI model for given devices", nnapi_errno);
} else {
if (nnapi_->ANeuralNetworksCompilation_create != nullptr) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(context,
nnapi_->ANeuralNetworksCompilation_create(
nn_model_.get(), &compilation),
"creating NNAPI compilation",
nnapi_errno);
} else {
TF_LITE_KERNEL_LOG(
context,
"Attempted to call ANeuralNetworksCompilation_create from NNAPI "
"delegate that is constructed from a support library");
return kTfLiteError;
}
}
auto preference = delegate_options.execution_preference;
if (preference !=
StatefulNnApiDelegate::Options::ExecutionPreference::kUndefined) {
const int preference_result =
nnapi_->ANeuralNetworksCompilation_setPreference(compilation,
preference);
if (preference_result != ANEURALNETWORKS_NO_ERROR) {
nnapi_->ANeuralNetworksCompilation_free(compilation);
compilation = nullptr;
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, preference_result,
"setting compilation preferences",
nnapi_errno);
}
if (!nn_compilation_cache_token_.empty()) {
const char* cache_dir = delegate_options.cache_dir;
const int set_caching_result =
nnapi_->ANeuralNetworksCompilation_setCaching(
compilation, cache_dir, nn_compilation_cache_token_.data());
if (set_caching_result != ANEURALNETWORKS_NO_ERROR) {
nnapi_->ANeuralNetworksCompilation_free(compilation);
compilation = nullptr;
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, set_caching_result,
"configuring NNAPI caching", nnapi_errno);
}
if (nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI13) {
if (delegate_options.max_compilation_timeout_duration_ns > 0) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksCompilation_setTimeout(
compilation,
delegate_options.max_compilation_timeout_duration_ns),
"setting compilation timeout", nnapi_errno);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksCompilation_setPriority(
compilation, delegate_options.execution_priority),
"setting compilation priority", nnapi_errno);
}
if (delegate_options.vendor_compilation_hints && vendor_plugin_) {
TF_LITE_ENSURE_STATUS(vendor_plugin_->ConfigureCompilationHints(
delegate_options.vendor_compilation_hints, compilation));
}
const int finish_result =
nnapi_->ANeuralNetworksCompilation_finish(compilation);
if (finish_result != ANEURALNETWORKS_NO_ERROR) {
nnapi_->ANeuralNetworksCompilation_free(compilation);
compilation = nullptr;
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, finish_result,
"completing NNAPI compilation", nnapi_errno);
nn_compilation_.reset(compilation);
bool should_use_burst_mode = delegate_options.use_burst_computation;
if (!nnapi_devices_.empty() &&
target_feature_level_ >= kNNAPIRuntimeFeatureLevel5 &&
target_feature_level_ <= kNNAPIRuntimeFeatureLevel7) {
should_use_burst_mode = true;
}
if (should_use_burst_mode &&
nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI12 &&
nnapi_->ANeuralNetworksBurst_create) {
ANeuralNetworksBurst* burst = nullptr;
const int create_burst_result =
nnapi_->ANeuralNetworksBurst_create(nn_compilation_.get(), &burst);
if (create_burst_result != ANEURALNETWORKS_NO_ERROR) {
nnapi_->ANeuralNetworksBurst_free(burst);
burst = nullptr;
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, create_burst_result,
"creating NNAPI burst", nnapi_errno);
nn_burst_.reset(burst);
}
return kTfLiteOk;
}
TfLiteStatus NNAPIDelegateKernel::GetOperationsSupportedByTargetNnApiDevices(
TfLiteContext* context, std::vector<int>* supported_nodes,
int* nnapi_errno) {
if (!nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices) {
return kTfLiteError;
}
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping_util_->context);
const int nnapi_model_size =
mapping_context->nnapi_to_tflite_op_mapping_.size();
std::unique_ptr<bool[]> nnapi_ops_support_flags(new bool[nnapi_model_size]);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices(
nn_model_.get(), nnapi_devices_.data(), nnapi_devices_.size(),
nnapi_ops_support_flags.get()),
"Checking supported operations for devices", nnapi_errno);
auto tflite_ops_support_status = std::map<int, bool>();
std::for_each(nodes_.begin(), nodes_.end(),
[&tflite_ops_support_status](int tflite_node_index) {
tflite_ops_support_status[tflite_node_index] = true;
});
for (int nnapi_op_index = 0; nnapi_op_index < nnapi_model_size;
nnapi_op_index++) {
const auto tflite_op_index =
mapping_context->nnapi_to_tflite_op_mapping_[nnapi_op_index];
tflite_ops_support_status[tflite_op_index] &=
nnapi_ops_support_flags[nnapi_op_index];
if (!tflite_ops_support_status[tflite_op_index]) {
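      // If an op is unsupported but the model contains Densify or
      // non-constant fp16 Dequantize nodes (sparse weights), stop filtering:
      // such models are only delegated in full.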
if (std::count(non_const_dequantize_output_to_node_mapping_.begin(),
non_const_dequantize_output_to_node_mapping_.end(), -1) <
non_const_dequantize_output_to_node_mapping_.size() ||
std::count(densify_output_to_node_mapping_.begin(),
densify_output_to_node_mapping_.end(),
-1) < densify_output_to_node_mapping_.size()) {
return kTfLiteOk;
}
}
}
supported_nodes->clear();
std::for_each(nodes_.begin(), nodes_.end(),
[&supported_nodes, &tflite_ops_support_status](int node_index) {
if (tflite_ops_support_status[node_index]) {
supported_nodes->push_back(node_index);
}
});
return kTfLiteOk;
}
TfLiteStatus NNAPIDelegateKernel::Invoke(TfLiteContext* context,
TfLiteNode* node, int* nnapi_errno) {
const bool allow_padding =
nnapi_->nnapi_runtime_feature_level > kMinSdkVersionForNNAPI13 &&
nnapi_->ANeuralNetworksExecution_enableInputAndOutputPadding != nullptr;
const auto delegate_options =
StatefulNnApiDelegate::GetOptions(node->delegate);
bool execution_is_reusable =
nnapi_->nnapi_runtime_feature_level > kMinSdkVersionForNNAPI13 &&
delegate_options.max_execution_cache_size > 0;
bool can_infer_output_shape = !delegate_options.allow_dynamic_dimensions ||
delegate_options.vendor_plugin == nullptr;
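  // Try to reuse a cached execution when reusable executions are enabled;
  // otherwise a fresh execution is created below.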
ANeuralNetworksExecution* execution = nullptr;
NNAPIExecutionCache::Signature signature;
if (execution_is_reusable) {
signature = CreateExecutionCacheSignature(context, node, delegate_options,
*tensor_memory_map_);
execution = nn_execution_cache_.Get(signature);
}
bool should_create_new_execution = execution == nullptr;
UniqueExecution unique_execution(nullptr, NNFreeExecution(nnapi_));
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(context,
nnapi_->ANeuralNetworksExecution_create(
nn_compilation_.get(), &execution),
"creating NNAPI execution", nnapi_errno);
unique_execution.reset(execution);
if (nnapi_->nnapi_runtime_feature_level > kMinSdkVersionForNNAPI13) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
          nnapi_->ANeuralNetworksExecution_setReusable(execution,
                                                       /*reusable=*/true),
"making execution reusable", nnapi_errno);
}
if (delegate_options.vendor_execution_hints && vendor_plugin_) {
TF_LITE_ENSURE_STATUS(vendor_plugin_->ConfigureExecutionHints(
delegate_options.vendor_execution_hints, execution));
}
if (allow_padding) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_enableInputAndOutputPadding(
execution, true),
"setting allow padding for execution intputs and outputs",
nnapi_errno);
}
if (nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI13) {
if (delegate_options.max_execution_timeout_duration_ns > 0) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_setTimeout(
execution, delegate_options.max_execution_timeout_duration_ns),
"setting execution timeout", nnapi_errno);
}
if (delegate_options.max_execution_loop_timeout_duration_ns > 0) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_setLoopTimeout(
execution,
delegate_options.max_execution_loop_timeout_duration_ns),
"setting execution loop timeout", nnapi_errno);
}
}
if (delegate_options.allow_dynamic_dimensions) {
size_t total_input_byte_size = 0;
for (int i : TfLiteIntArrayView(node->inputs)) {
if (i != kTfLiteOptionalTensor &&
context->tensors[i].allocation_type != kTfLiteMmapRo &&
mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(), i) != -1) {
if (context->tensors[i].buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
const TfLiteType nn_type_conversion =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(),
i);
int tensor_size = 0;
if (nn_type_conversion == kTfLiteNoType) {
tensor_size = context->tensors[i].bytes;
} else {
size_t type_size;
TF_LITE_ENSURE_OK(
context,
GetSizeOfType(context, nn_type_conversion, &type_size));
tensor_size = NumElements(&context->tensors[i]) * type_size;
}
total_input_byte_size += tensor_size;
total_input_byte_size += GetNumPaddingBytes(tensor_size);
}
}
if (total_input_byte_size > nn_input_memory_->get_byte_size()) {
nn_input_memory_ = std::make_unique<NNMemory>(nnapi_, "input_pool",
total_input_byte_size);
nn_execution_cache_.Clear();
}
size_t total_output_byte_size = 0;
for (int i : TfLiteIntArrayView(node->outputs)) {
const auto& tensor = context->tensors[i];
if (tensor.buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
size_t tensor_size = tensor.bytes;
if (!can_infer_output_shape && HasUnspecifiedDimension(&tensor)) {
if (tensor_max_size_hints_[i] == 0) {
TF_LITE_KERNEL_LOG(context,
"Missing max tensor size for tensor#%d. When a "
"vendor plugin is supplied, max tensor size is "
"required for all dynamic output tensors.",
i);
return kTfLiteError;
}
tensor_size = std::max(tensor_size, tensor_max_size_hints_[i]);
}
total_output_byte_size += tensor_size;
total_output_byte_size += GetNumPaddingBytes(tensor_size);
}
if (total_output_byte_size > nn_output_memory_->get_byte_size()) {
nn_output_memory_ = std::make_unique<NNMemory>(nnapi_, "output_pool",
total_output_byte_size);
nn_execution_cache_.Clear();
}
}
if (execution_is_reusable) {
nn_execution_cache_.Put(signature, std::move(unique_execution));
unique_execution = nullptr;
}
}
int relative_input_index = 0;
const bool use_int8_asymm_signed =
target_feature_level_ >= kMinSdkVersionForNNAPI13;
size_t input_offset = 0;
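  // Bind every TFLite input either to a registered memory object or to the
  // shared input memory pool, converting element types where required.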
for (auto absolute_input_index : TfLiteIntArrayView(node->inputs)) {
if (absolute_input_index == kTfLiteOptionalTensor) {
continue;
}
ANeuralNetworksOperandType input_nn_operand_type;
ANeuralNetworksOperandType* input_nn_operand_type_ptr = nullptr;
TfLiteTensor* tensor = &context->tensors[absolute_input_index];
TfLiteType ann_type_equivalent =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(),
absolute_input_index);
if (delegate_options.allow_dynamic_dimensions &&
::tflite::HasUnspecifiedDimension(tensor)) {
input_nn_operand_type = ConvertTensorTypeToNNType(
tensor, ann_type_equivalent, use_int8_asymm_signed);
input_nn_operand_type_ptr = &input_nn_operand_type;
}
if (tensor->allocation_type != kTfLiteMmapRo) {
if (tensor->buffer_handle != kTfLiteNullBufferHandle &&
tensor->buffer_handle < tensor_memory_map_->size()) {
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setInputFromMemory(
execution, relative_input_index, input_nn_operand_type_ptr,
tensor_memory_map_->at(tensor->buffer_handle).memory, 0,
tensor->bytes),
"associating NNAPI execution input with a memory object", tensor,
nnapi_errno);
}
relative_input_index++;
continue;
}
int tensor_size = 0;
int padding_bytes = 0;
if (ann_type_equivalent != kTfLiteNoType) {
const auto num_elements = NumElements(tensor);
uint8_t* input_ptr = nn_input_memory_->get_data_ptr() + input_offset;
if (tensor->type == kTfLiteUInt8 &&
ann_type_equivalent == kTfLiteInt32) {
for (int i = 0; i < num_elements; ++i) {
reinterpret_cast<int32_t*>(input_ptr)[i] =
static_cast<const int32_t>(tensor->data.uint8[i]);
}
} else if (tensor->type == kTfLiteInt8 &&
ann_type_equivalent == kTfLiteUInt8) {
for (int i = 0; i < num_elements; ++i) {
input_ptr[i] = static_cast<const uint8_t>(
static_cast<int32_t>(tensor->data.int8[i]) + 128);
}
} else if (tensor->type == kTfLiteInt8 &&
ann_type_equivalent == kTfLiteInt32) {
if (use_int8_asymm_signed) {
for (int i = 0; i < num_elements; ++i) {
reinterpret_cast<int32_t*>(input_ptr)[i] =
static_cast<const int32_t>(tensor->data.int8[i]);
}
} else {
for (int i = 0; i < num_elements; ++i) {
reinterpret_cast<int32_t*>(input_ptr)[i] =
static_cast<const int32_t>(tensor->data.int8[i]) + 128;
}
}
} else if (tensor->type == kTfLiteInt64 &&
ann_type_equivalent == kTfLiteInt32) {
          // Narrow int64 values to int32, rejecting anything out of range.
          int32_t* input_ptr_i32 = reinterpret_cast<int32_t*>(input_ptr);
          for (int i = 0; i < num_elements; ++i) {
            if (tensor->data.i64[i] < std::numeric_limits<int32_t>::min() ||
                tensor->data.i64[i] > std::numeric_limits<int32_t>::max()) {
              TF_LITE_KERNEL_LOG(context,
                                 "NN API Delegate: int64 value out of bounds "
                                 "for int32 target NNAPI tensor\n");
              return kTfLiteError;
            }
            input_ptr_i32[i] = static_cast<int32_t>(tensor->data.i64[i]);
          }
} else {
TF_LITE_KERNEL_LOG(
context,
"NN API Delegate: unsupported tensor types conversion: "
"from type code %d to type code %d.\n",
tensor->type, ann_type_equivalent);
return kTfLiteError;
}
size_t type_size;
TF_LITE_ENSURE_OK(
context, GetSizeOfType(context, ann_type_equivalent, &type_size));
tensor_size = NumElements(tensor) * type_size;
padding_bytes = GetNumPaddingBytes(tensor_size);
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setInputFromMemory(
execution, relative_input_index, input_nn_operand_type_ptr,
nn_input_memory_->get_handle(), input_offset,
GetNNTensorSize(tensor_size, allow_padding)),
"associating NNAPI execution input with a memory object", tensor,
nnapi_errno);
}
} else if (mapping_util_->TfLiteIndexToNnIndex(
mapping_util_.get(), absolute_input_index) != -1) {
memcpy(nn_input_memory_->get_data_ptr() + input_offset,
tensor->data.raw, tensor->bytes);
tensor_size = tensor->bytes;
padding_bytes = GetNumPaddingBytes(tensor_size);
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setInputFromMemory(
execution, relative_input_index, input_nn_operand_type_ptr,
nn_input_memory_->get_handle(), input_offset,
GetNNTensorSize(tensor_size, allow_padding)),
"associating NNAPI execution input with a memory object", tensor,
nnapi_errno);
}
}
input_offset += tensor_size + padding_bytes;
relative_input_index++;
}
}
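  // Bind outputs either to registered memory objects (buffer handles) or to
  // the shared output memory pool.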
int relative_output_index = 0;
size_t output_offset = 0;
for (auto output_index : TfLiteIntArrayView(node->outputs)) {
if (mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(),
output_index) == -1) {
continue;
}
ANeuralNetworksOperandType output_nn_operand_type;
ANeuralNetworksOperandType* output_nn_operand_type_ptr = nullptr;
TfLiteTensor* tensor = &context->tensors[output_index];
if (delegate_options.allow_dynamic_dimensions && can_infer_output_shape &&
::tflite::HasUnspecifiedDimension(tensor)) {
TfLiteType ann_type_equivalent =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(),
output_index);
output_nn_operand_type = ConvertTensorTypeToNNType(
tensor, ann_type_equivalent, use_int8_asymm_signed);
output_nn_operand_type_ptr = &output_nn_operand_type;
}
if (tensor->buffer_handle != kTfLiteNullBufferHandle &&
tensor->buffer_handle < tensor_memory_map_->size() &&
should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setOutputFromMemory(
execution, relative_output_index, output_nn_operand_type_ptr,
tensor_memory_map_->at(tensor->buffer_handle).memory, 0,
tensor->bytes),
"associating NNAPI execution output to a memory object", tensor,
nnapi_errno);
} else {
size_t tensor_size = tensor->bytes;
if (!can_infer_output_shape && HasUnspecifiedDimension(tensor)) {
tensor_size =
std::max(tensor->bytes, tensor_max_size_hints_[output_index]);
}
int padding_bytes = GetNumPaddingBytes(tensor_size);
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR_FOR_TENSOR(
context,
nnapi_->ANeuralNetworksExecution_setOutputFromMemory(
execution, relative_output_index, output_nn_operand_type_ptr,
nn_output_memory_->get_handle(), output_offset,
GetNNTensorSize(tensor_size, allow_padding)),
"associating NNAPI execution output to a memory object", tensor,
nnapi_errno);
}
output_offset += tensor_size + padding_bytes;
}
relative_output_index++;
}
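  // State tensors (e.g. LSTM states) are exposed as additional NNAPI outputs;
  // reserve room for them in the output pool.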
for (size_t i = 0; i < model_state_tfl_inputs_.size(); i++) {
int state_tensor_idx = model_state_tfl_inputs_[i];
TfLiteTensor* tensor = &context->tensors[state_tensor_idx];
int padding_bytes = GetNumPaddingBytes(tensor->bytes);
if (should_create_new_execution) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_setOutputFromMemory(
execution, relative_output_index, nullptr,
nn_output_memory_->get_handle(), output_offset,
GetNNTensorSize(tensor->bytes, allow_padding)),
"associating NNAPI execution state output to a memory object",
nnapi_errno);
}
output_offset += tensor->bytes + padding_bytes;
relative_output_index++;
}
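  // Run the execution: asynchronous compute before NNAPI 1.2, burst or
  // synchronous compute otherwise.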
if (nnapi_->android_sdk_version < kMinSdkVersionForNNAPI12) {
ANeuralNetworksEvent* event = nullptr;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_startCompute(execution, &event),
"starting async computation", nnapi_errno);
const int wait_result = nnapi_->ANeuralNetworksEvent_wait(event);
nnapi_->ANeuralNetworksEvent_free(event);
RETURN_TFLITE_ERROR_IF_NN_ERROR(context, wait_result,
"waiting for async computation completion",
nnapi_errno);
} else {
if (nn_burst_) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_burstCompute(execution,
nn_burst_.get()),
"running burst computation", nnapi_errno);
} else {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi_->ANeuralNetworksExecution_compute(execution),
"running computation", nnapi_errno);
}
}
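  // When output shapes could not be inferred ahead of time, query NNAPI for
  // the actual dimensions and resize the TFLite output tensors.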
if (!can_infer_output_shape) {
relative_output_index = 0;
for (auto output_index : TfLiteIntArrayView(node->outputs)) {
TfLiteTensor* tensor = &context->tensors[output_index];
if (HasUnspecifiedDimension(tensor)) {
auto* new_dims = TfLiteIntArrayCreate(tensor->dims->size);
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksExecution_getOutputOperandDimensions(
execution, relative_output_index,
reinterpret_cast<uint32_t*>(new_dims->data)),
"get output operand dimensions", nnapi_errno);
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, tensor, new_dims));
}
relative_output_index++;
}
}
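  // Copy results from the shared output pool back into the TFLite tensors,
  // undoing the int8<->uint8 zero-point shift where a conversion was applied.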
output_offset = 0;
for (auto output_index : TfLiteIntArrayView(node->outputs)) {
TfLiteTensor* tensor = &context->tensors[output_index];
if (tensor->buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
TfLiteType ann_type_equivalent =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(),
output_index);
if (tensor->type == kTfLiteInt8 && ann_type_equivalent == kTfLiteUInt8) {
uint8_t* output_ptr = reinterpret_cast<uint8_t*>(
nn_output_memory_->get_data_ptr() + output_offset);
const auto num_elements = NumElements(tensor);
for (int i = 0; i < num_elements; ++i) {
output_ptr[i] =
static_cast<uint8_t>(static_cast<int32_t>(output_ptr[i]) - 128);
}
}
memcpy(tensor->data.raw, nn_output_memory_->get_data_ptr() + output_offset,
tensor->bytes);
size_t tensor_size = tensor->bytes;
if (!can_infer_output_shape && HasUnspecifiedDimension(tensor)) {
tensor_size =
std::max(tensor->bytes, tensor_max_size_hints_[output_index]);
}
output_offset += tensor_size;
output_offset += GetNumPaddingBytes(tensor_size);
}
for (size_t i = 0; i < model_state_tfl_inputs_.size(); i++) {
int state_tensor_idx = model_state_tfl_inputs_[i];
TfLiteTensor* tensor = &context->tensors[state_tensor_idx];
memcpy(tensor->data.raw, nn_output_memory_->get_data_ptr() + output_offset,
tensor->bytes);
output_offset += tensor->bytes;
output_offset += GetNumPaddingBytes(tensor->bytes);
}
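  // Copy each output back onto the input it feeds (activation/state feedback)
  // so the next invocation sees the updated values.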
for (auto feedback_loop : feedback_loops_) {
int output_tensor_idx;
int input_tensor_idx;
std::tie(output_tensor_idx, input_tensor_idx) = feedback_loop;
TfLiteTensor& src = context->tensors[output_tensor_idx];
TfLiteTensor& dest = context->tensors[input_tensor_idx];
memcpy(dest.data.raw, src.data.raw, src.bytes);
}
return kTfLiteOk;
}
void NNAPIDelegateKernel::AddDequantizeOperatorsWhereNeeded(
const TfLiteContext* context, int builtin_code, const TfLiteNode* node,
int tflite_node_index, NNAPIOpBuilder* builder, int* nnapi_errno) {
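  // When an op consumes float activations but has quantized constant weights,
  // add Dequantize ops so those weights reach NNAPI as float.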
int input_tensor_index = -1;
std::vector<int> inputs_to_potentially_dequantize;
switch (builtin_code) {
case kTfLiteBuiltinConv2d:
case kTfLiteBuiltinFullyConnected: {
input_tensor_index = 0;
inputs_to_potentially_dequantize = {1, 2};
break;
}
case kTfLiteBuiltinLstm: {
input_tensor_index = 0;
inputs_to_potentially_dequantize = {1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 20, 21, 22, 23};
break;
}
default:
return;
}
int tensor_id = node->inputs->data[input_tensor_index];
if (tensor_id < 0) return;
if (!IsFloat(context->tensors[tensor_id].type)) return;
for (int i : inputs_to_potentially_dequantize) {
if (i < 0 || i >= node->inputs->size) continue;
tensor_id = node->inputs->data[i];
if (tensor_id < 0) continue;
const TfLiteType type = context->tensors[tensor_id].type;
if (!IsQuantized(type)) continue;
builder->AddDequantize(i, node->inputs->data[i], type, tflite_node_index);
}
}
TfLiteStatus NNAPIDelegateKernel::DensifyAndDequantizeConstTensor(
TfLiteContext* context, int densify_node_id, bool should_dequantize,
NNAPIOpBuilder& builder) {
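  // Converts a sparse constant weight tensor to dense form (optionally
  // dequantizing fp16 to fp32) and adds it to the NNAPI model as a new
  // constant input.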
TfLiteNode* densify_node;
TfLiteRegistration* reg;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, densify_node_id, &densify_node, ®));
int sparse_weight_tid = densify_node->inputs->data[0];
auto input_tensor = context->tensors[sparse_weight_tid];
auto output_tensor = context->tensors[densify_node->outputs->data[0]];
if (input_tensor.sparsity == nullptr) {
return kTfLiteError;
}
const int dims_count = output_tensor.dims->size;
std::vector<int> vector_shape(dims_count);
for (int i = 0; i < dims_count; i++) {
vector_shape[i] = output_tensor.dims->data[i];
}
size_t dense_size;
int new_tensor_index = -1;
switch (input_tensor.type) {
case kTfLiteFloat32: {
dense_size = output_tensor.bytes / sizeof(float);
std::vector<float> output_data(dense_size);
tflite::internal::sparsity::FormatConverter<float> converter(
vector_shape, *input_tensor.sparsity);
converter.SparseToDense(static_cast<const float*>(input_tensor.data.data),
dense_size, output_data.data(), context);
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor<float>(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32, output_tensor.dims,
output_data, output_tensor.params, &new_tensor_index));
break;
}
case kTfLiteFloat16: {
dense_size = output_tensor.bytes / sizeof(Eigen::half);
std::vector<uint16_t> output_data(dense_size);
Eigen::half* unpacked_fp16_data =
reinterpret_cast<Eigen::half*>(output_data.data());
tflite::internal::sparsity::FormatConverter<Eigen::half> converter(
vector_shape, *input_tensor.sparsity);
converter.SparseToDense(
static_cast<const Eigen::half*>(input_tensor.data.data), dense_size,
unpacked_fp16_data, context);
if (should_dequantize) {
std::vector<float> float_dense_data(dense_size);
for (int i = 0; i < dense_size; ++i) {
float_dense_data[i] = fp16_ieee_to_fp32_value(
reinterpret_cast<uint16_t*>(output_data.data())[i]);
}
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor<float>(
ANEURALNETWORKS_TENSOR_FLOAT32, kTfLiteFloat32, output_tensor.dims,
float_dense_data, output_tensor.params, &new_tensor_index));
} else {
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor<uint16_t>(
ANEURALNETWORKS_TENSOR_FLOAT16, kTfLiteFloat16, output_tensor.dims,
output_data, output_tensor.params, &new_tensor_index));
}
break;
}
case kTfLiteInt8: {
dense_size = output_tensor.bytes / sizeof(int8_t);
std::vector<int8_t> output_data(dense_size);
tflite::internal::sparsity::FormatConverter<int8_t> converter(
vector_shape, *input_tensor.sparsity);
converter.SparseToDense(
static_cast<const int8_t*>(input_tensor.data.data), dense_size,
output_data.data(), context);
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor<int8_t>(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, kTfLiteInt8,
output_tensor.dims, output_data, output_tensor.params,
&new_tensor_index));
break;
}
default: {
return kTfLiteError;
}
}
return kTfLiteOk;
}
TfLiteIntArray* ResizeTfLiteIntArray(TfLiteIntArray* old_array, int new_size,
int init_value) {
TfLiteIntArray* ret = TfLiteIntArrayCreate(new_size);
if (ret) {
int size_to_copy = 0;
if (old_array) {
size_to_copy = new_size > old_array->size ? old_array->size : new_size;
memcpy(ret->data, old_array->data, size_to_copy * sizeof(int));
}
for (int i = size_to_copy; i < ret->size; i++) {
ret->data[i] = init_value;
}
}
TfLiteIntArrayFree(old_array);
return ret;
}
void NNFreeMappingUtil::operator()(NnapiMappingUtilCInterface* mapping_util) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping_util->context);
delete (mapping_context);
mapping_util->context = nullptr;
free(mapping_util);
}
class NnapiMappingUtilCInterfaceImpl {
public:
static int TfLiteIndexToNnIndex(NnapiMappingUtilCInterface* mapping,
int index) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
const size_t max_size = mapping_context->lite_tensor_to_ann_tensor_.size();
if (index >= 0 && index < max_size)
return mapping_context->lite_tensor_to_ann_tensor_[index];
else
return -1;
}
static int AddNewNonTensorOperand(NnapiMappingUtilCInterface* mapping) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
return mapping_context->next_ann_tensor_index_++;
}
static int AddDelegateGeneratedInputAnnTensorOperand(
NnapiMappingUtilCInterface* mapping) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
return mapping_context->next_ann_tensor_index_++;
}
static int AddNewNnTensorIndex(NnapiMappingUtilCInterface* mapping,
int tflite_index) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
const size_t current_size =
mapping_context->lite_tensor_to_ann_tensor_.size();
if (tflite_index >= current_size) {
mapping_context->lite_tensor_to_ann_tensor_.resize(tflite_index + 1, -1);
}
const int new_tensor_index = mapping_context->next_ann_tensor_index_++;
mapping_context->lite_tensor_to_ann_tensor_[tflite_index] =
new_tensor_index;
return new_tensor_index;
}
static TfLiteType TfLiteIndexToNnTypeConversion(
NnapiMappingUtilCInterface* mapping, int index) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
const size_t max_size = mapping_context->index_to_type_conversion_.size();
if (index >= 0 && index < max_size)
return static_cast<TfLiteType>(
mapping_context->index_to_type_conversion_[index]);
else
return kTfLiteNoType;
}
static void AddTypeConversion(NnapiMappingUtilCInterface* mapping,
int tflite_index, TfLiteType tflite_type) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
const size_t current_size =
mapping_context->index_to_type_conversion_.size();
if (tflite_index >= current_size) {
mapping_context->index_to_type_conversion_.resize(tflite_index + 1,
kTfLiteNoType);
}
mapping_context->index_to_type_conversion_[tflite_index] = tflite_type;
}
static void AddNnapiToTfliteOpMapping(NnapiMappingUtilCInterface* mapping,
int tflite_node_index) {
NnapiMappingContext* mapping_context =
reinterpret_cast<NnapiMappingContext*>(mapping->context);
mapping_context->nnapi_to_tflite_op_mapping_.push_back(tflite_node_index);
}
};
NnapiMappingUtilCInterface*
NNAPIDelegateKernel::NnapiMappingUtilCInterfaceCreate() {
NnapiMappingUtilCInterface* mapping =
static_cast<NnapiMappingUtilCInterface*>(
malloc(sizeof(NnapiMappingUtilCInterface)));
mapping->context = new NnapiMappingContext();
mapping->TfLiteIndexToNnIndex =
NnapiMappingUtilCInterfaceImpl::TfLiteIndexToNnIndex;
mapping->AddNewNonTensorOperand =
NnapiMappingUtilCInterfaceImpl::AddNewNonTensorOperand;
mapping->AddDelegateGeneratedInputAnnTensorOperand =
NnapiMappingUtilCInterfaceImpl::AddDelegateGeneratedInputAnnTensorOperand;
mapping->AddNewNnTensorIndex =
NnapiMappingUtilCInterfaceImpl::AddNewNnTensorIndex;
mapping->TfLiteIndexToNnTypeConversion =
NnapiMappingUtilCInterfaceImpl::TfLiteIndexToNnTypeConversion;
mapping->AddTypeConversion =
NnapiMappingUtilCInterfaceImpl::AddTypeConversion;
mapping->AddNnapiToTfliteOpMapping =
NnapiMappingUtilCInterfaceImpl::AddNnapiToTfliteOpMapping;
return mapping;
}
TfLiteStatus NNAPIDelegateKernel::AddOpsAndTensors(
TfLiteContext* context, int* nnapi_errno, bool allow_dynamic_dimensions) {
DequantizeMapping dequantize_mapping;
NNAPIOpBuilder builder(nnapi_, context, mapping_util_.get(),
&dequantize_mapping, &allocation_memory_mapping_,
nn_model_.get(), nnapi_errno,
allow_dynamic_dimensions);
target_feature_level_ = nnapi_->nnapi_runtime_feature_level;
if (!nnapi_devices_.empty()) {
TF_LITE_ENSURE_STATUS(GetTargetFeatureLevel(
context, nnapi_, nnapi_devices_, &target_feature_level_, nnapi_errno));
}
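  // First pass: register constant fp16 inputs that need half->float
  // conversion and record Densify / non-constant fp16 Dequantize nodes for
  // special handling below.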
for (auto node_index : nodes_) {
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, node_index, &node, ®istration));
if (IsDequantizeConstFloat16(context, node, registration)) {
builder.AddTensorInput(node->inputs->data[0], false,
NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION |
NN_TENSOR_FLAG_SCALAR_AS_TENSOR);
}
if (IsDensifyConstTensor(context, node, registration)) {
densify_output_to_node_mapping_[node->outputs->data[0]] = node_index;
}
if (IsDequantizeNonConstFloat16(context, node, registration)) {
non_const_dequantize_output_to_node_mapping_[node->outputs->data[0]] =
node_index;
}
}
builder.ClearInputOuputLists();
for (auto node_index : nodes_) {
TfLiteNode* node;
TfLiteRegistration* reg;
TF_LITE_ENSURE_STATUS(
context->GetNodeAndRegistration(context, node_index, &node, ®));
if (IsDensifyConstTensor(context, node, reg) ||
IsDequantizeNonConstFloat16(context, node, reg)) {
continue;
}
if (vendor_plugin_ && vendor_plugin_->ValidateNode(context, reg, node)) {
TF_LITE_ENSURE_STATUS(vendor_plugin_->MapNode(
context, node, node_index, mapping_util_.get(), nn_model_.get()));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinPack &&
target_feature_level_ < kNNAPIRuntimeFeatureLevel6) {
TF_LITE_ENSURE_STATUS(
builder.TransformPackIntoSupportedOps(node_index, node, reg));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinUnpack) {
TF_LITE_ENSURE_STATUS(
builder.TransformUnpackIntoSupportedOps(node_index, node, reg));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinSplitV) {
TF_LITE_ENSURE_STATUS(
builder.TransformSplitVIntoSupportedOps(node_index, node, reg));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinSquaredDifference) {
TF_LITE_ENSURE_STATUS(builder.TransformSquaredDifferenceIntoSupportedOps(
node_index, node, reg));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinCos) {
TF_LITE_ENSURE_STATUS(
builder.TransformCosIntoSupportedOps(node_index, node, reg));
continue;
}
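    // Fully-quantized (int8) full-kernel LSTM maps to
    // ANEURALNETWORKS_QUANTIZED_LSTM and needs operand-by-operand handling.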
if (target_feature_level_ >= kMinSdkVersionForNNAPI13 &&
reg->builtin_code == kTfLiteBuiltinLstm && isLstmFullKernel(node) &&
context->tensors[node->inputs->data[0]].type == kTfLiteInt8) {
const auto quant8_full_lstm_op_code = ANEURALNETWORKS_QUANTIZED_LSTM;
constexpr int kInputTensor = 0;
constexpr int kInputToInputWeightsTensor = 1;
constexpr int kRecurrentToInputWeightsTensor = 5;
constexpr int kInputGateBiasTensor = 12;
constexpr int kForgetGateBiasTensor = 13;
constexpr int kCellGateBiasTensor = 14;
constexpr int kOutputGateBiasTensor = 15;
constexpr int kProjectionWeightsTensor = 16;
constexpr int kProjectionBiasTensor = 17;
constexpr int kPrevOutputTensor = 18;
for (int input_pos = 0; input_pos < node->inputs->size; ++input_pos) {
const auto input_index = node->inputs->data[input_pos];
if (input_index == kTfLiteOptionalTensor) {
if (input_pos == kInputToInputWeightsTensor ||
input_pos == kRecurrentToInputWeightsTensor ||
input_pos == kProjectionWeightsTensor) {
TF_LITE_ENSURE_STATUS(builder.AddVectorInt8Operand(nullptr, 0));
} else if (input_pos == kInputGateBiasTensor ||
input_pos == kForgetGateBiasTensor ||
input_pos == kCellGateBiasTensor ||
input_pos == kOutputGateBiasTensor ||
input_pos == kProjectionBiasTensor) {
TF_LITE_ENSURE_STATUS(builder.AddVectorInt32Operand(nullptr, 0));
} else {
TF_LITE_ENSURE_STATUS(builder.AddVectorInt16Operand(nullptr, 0));
}
} else {
int flags =
(input_pos == kInputTensor || input_pos == kPrevOutputTensor)
? NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED
: 0;
TF_LITE_ENSURE_STATUS(
builder.AddTensorInput(input_index, false, flags));
}
}
auto builtin = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data);
TF_LITE_ENSURE_STATUS(
builder.AddScalarFloat32Operand(builtin->cell_clip));
TF_LITE_ENSURE_STATUS(
builder.AddScalarFloat32Operand(builtin->proj_clip));
TF_LITE_ENSURE_EQ(context, node->intermediates->size, 5);
for (int intermediate_pos = 0;
intermediate_pos < node->intermediates->size; ++intermediate_pos) {
const auto intermediate_index =
node->intermediates->data[intermediate_pos];
const TfLiteTensor& tensor = context->tensors[intermediate_index];
TfLiteAffineQuantization* quantization_params =
static_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
if (intermediate_pos == 4) {
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
quantization_params->zero_point->data[0]));
}
TF_LITE_ENSURE_STATUS(builder.AddScalarFloat32Operand(
quantization_params->scale->data[0]));
}
int ann_index;
      // Previous output state (input 18) and previous cell state (input 19)
      // are variable tensors exposed as extra model outputs.
      builder.AddStateInt8AsymTensor(node->inputs->data[18], &ann_index);
      model_state_outputs_.push_back(ann_index);
      model_state_tfl_inputs_.push_back(node->inputs->data[18]);
      builder.AddStateInt16Tensor(node->inputs->data[19], &ann_index);
      model_state_outputs_.push_back(ann_index);
      model_state_tfl_inputs_.push_back(node->inputs->data[19]);
for (int output_pos = 0; output_pos < node->outputs->size; ++output_pos) {
const auto output_index = node->outputs->data[output_pos];
TF_LITE_ENSURE_STATUS(builder.AddTensorOutput(
output_index, NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED));
}
builder.FinalizeAddOperation(quant8_full_lstm_op_code, node_index);
continue;
}
const bool hybrid_op = IsHybridOperator(context, reg->builtin_code, node);
const bool scalar_as_tensor = IsScalarInputSupported(reg->builtin_code);
const bool need_int8_conversion =
target_feature_level_ < kMinSdkVersionForNNAPI13 &&
NeedInt8Conversion(context, reg->builtin_code, node);
const bool use_int8_asymm_signed =
target_feature_level_ >= kMinSdkVersionForNNAPI13 && !hybrid_op;
if (IsDequantizeConstFloat16(context, node, reg)) {
continue;
}
int input_tensor_flags = 0;
if (scalar_as_tensor) {
input_tensor_flags |= NN_TENSOR_FLAG_SCALAR_AS_TENSOR;
}
if (use_int8_asymm_signed) {
input_tensor_flags |= NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED;
}
if (reg->builtin_code == kTfLiteBuiltinHardSwish &&
nnapi_->android_sdk_version < kMinSdkVersionForNNAPI13) {
builder.TransformHardSwishIntoSupportedOps(
node->inputs->data[0], node->outputs->data[0], need_int8_conversion,
node_index);
continue;
}
if (reg->builtin_code == kTfLiteBuiltinPack) {
const auto* builtin =
reinterpret_cast<TfLitePackParams*>(node->builtin_data);
auto& input_tensor = context->tensors[node->inputs->data[0]];
int axis = builtin->axis < 0 ? input_tensor.dims->size + builtin->axis + 1
: builtin->axis;
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(axis));
}
for (int input_pos = 0; input_pos < node->inputs->size; ++input_pos) {
if (node->inputs->data[input_pos] != kTfLiteOptionalTensor &&
context->tensors[node->inputs->data[input_pos]].type ==
kTfLiteFloat16 &&
IsConstantTensor(&context->tensors[node->inputs->data[input_pos]])) {
input_tensor_flags |= NN_TENSOR_FLAG_HALF_TO_FLOAT_CONVERSION;
}
if (reg->builtin_code == kTfLiteBuiltinTransposeConv) {
continue;
}
if (reg->builtin_code == kTfLiteBuiltinFullyConnected &&
node->inputs->data[input_pos] == kTfLiteOptionalTensor) {
continue;
}
const auto input_index = node->inputs->data[input_pos];
if (reg->builtin_code == kTfLiteBuiltinConv2d && input_pos == 1) {
int densify_node_id = -1;
bool should_dequantize = false;
int dequantize_node_id =
non_const_dequantize_output_to_node_mapping_[input_index];
if (dequantize_node_id != -1) {
should_dequantize = true;
TfLiteNode* dequant_node;
TfLiteRegistration* reg;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, dequantize_node_id, &dequant_node, ®));
densify_node_id =
densify_output_to_node_mapping_[dequant_node->inputs->data[0]];
} else {
densify_node_id = densify_output_to_node_mapping_[input_index];
}
if (densify_node_id != -1) {
TF_LITE_ENSURE_STATUS(DensifyAndDequantizeConstTensor(
context, densify_node_id, should_dequantize, builder));
continue;
}
}
if (need_int8_conversion &&
(input_pos == 0 ||
reg->builtin_code == kTfLiteBuiltinFullyConnected ||
reg->builtin_code == kTfLiteBuiltinConv2d ||
reg->builtin_code == kTfLiteBuiltinDepthwiseConv2d ||
reg->builtin_code == kTfLiteBuiltinAdd ||
reg->builtin_code == kTfLiteBuiltinMul ||
reg->builtin_code == kTfLiteBuiltinSub ||
reg->builtin_code == kTfLiteBuiltinConcatenation ||
reg->builtin_code == kTfLiteBuiltinMaximum ||
reg->builtin_code == kTfLiteBuiltinMinimum ||
reg->builtin_code == kTfLiteBuiltinLeakyRelu ||
reg->builtin_code == kTfLiteBuiltinLess ||
reg->builtin_code == kTfLiteBuiltinLessEqual ||
reg->builtin_code == kTfLiteBuiltinPrelu ||
reg->builtin_code == kTfLiteBuiltinGreater ||
reg->builtin_code == kTfLiteBuiltinGreaterEqual ||
reg->builtin_code == kTfLiteBuiltinEqual ||
reg->builtin_code == kTfLiteBuiltinNotEqual ||
reg->builtin_code == kTfLiteBuiltinSelect)) {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(
input_index, hybrid_op,
input_tensor_flags | NN_TENSOR_FLAG_INT8_CONVERSION));
continue;
}
if (reg->builtin_code == kTfLiteBuiltinLstm && isLstmFullKernel(node) &&
input_pos >= 20) {
continue;
}
if (reg->builtin_code == kTfLiteBuiltinLstm && isLstmBasicKernel(node)) {
continue;
}
if (reg->builtin_code == kTfLiteBuiltinUnidirectionalSequenceLstm) {
if (input_pos >= 20) {
continue;
}
if (input_index == kTfLiteOptionalTensor) {
TF_LITE_ENSURE_STATUS(builder.AddVectorFloat32Operand(nullptr, 0));
continue;
}
}
if ((reg->builtin_code == kTfLiteBuiltinSplit) &&
(input_index == node->inputs->data[0])) {
continue;
}
if ((reg->builtin_code == kTfLiteBuiltinPadv2 ||
reg->builtin_code == kTfLiteBuiltinPad) &&
node->inputs->size == 3 && input_pos == 2) {
const int constant_value_id = node->inputs->data[2];
if (constant_value_id == kTfLiteOptionalTensor) {
continue;
}
const TfLiteTensor constant_value = context->tensors[constant_value_id];
switch (constant_value.type) {
case kTfLiteFloat16:
if (constant_value.allocation_type == kTfLiteMmapRo) {
builder.AddScalarFloat32Operand(constant_value.data.f16->data);
} else {
builder.AddSingleValueTensorAsScalarOperand(
constant_value_id, ANEURALNETWORKS_TENSOR_FLOAT16);
}
break;
case kTfLiteFloat32:
if (constant_value.allocation_type == kTfLiteMmapRo) {
builder.AddScalarFloat32Operand(*constant_value.data.f);
} else {
builder.AddSingleValueTensorAsScalarOperand(
constant_value_id, ANEURALNETWORKS_FLOAT32);
}
break;
case kTfLiteUInt8:
if (constant_value.allocation_type == kTfLiteMmapRo) {
builder.AddScalarInt32Operand(
static_cast<int32_t>(*constant_value.data.uint8));
} else {
builder.AddSingleValueTensorAsScalarOperand(
constant_value_id, ANEURALNETWORKS_INT32);
}
break;
case kTfLiteInt8:
if (constant_value.allocation_type == kTfLiteMmapRo) {
if (need_int8_conversion) {
builder.AddScalarInt32Operand(
static_cast<int32_t>(*constant_value.data.int8) + 128);
} else {
builder.AddScalarInt32Operand(*constant_value.data.int8);
}
} else {
builder.AddSingleValueTensorAsScalarOperand(
constant_value_id, ANEURALNETWORKS_INT32);
}
break;
default:
TF_LITE_KERNEL_LOG(context,
"Unsupported type of pad value for pad_v2\n");
return kTfLiteError;
}
continue;
}
if (input_index == kTfLiteOptionalTensor &&
(reg->builtin_code == kTfLiteBuiltinLstm ||
reg->builtin_code == kTfLiteBuiltinSvdf ||
reg->builtin_code == kTfLiteBuiltinBidirectionalSequenceLstm)) {
TF_LITE_ENSURE_STATUS(builder.AddVectorFloat32Operand(nullptr, 0));
} else if (reg->builtin_code == kTfLiteBuiltinResizeBilinear ||
reg->builtin_code == kTfLiteBuiltinResizeNearestNeighbor) {
if (input_pos == 0) {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
}
} else if (reg->builtin_code == kTfLiteBuiltinTopkV2 && input_pos > 0) {
continue;
} else if (reg->builtin_code == kTfLiteBuiltinGather) {
if (input_pos == 0) {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
}
continue;
} else if (reg->builtin_code == kTfLiteBuiltinExpandDims &&
input_pos == 1) {
continue;
} else if (reg->builtin_code == kTfLiteBuiltinBatchToSpaceNd &&
input_pos == 2) {
continue;
} else if (reg->builtin_code == kTfLiteBuiltinArgMin ||
reg->builtin_code == kTfLiteBuiltinArgMax) {
if (input_pos == 0) {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
} else {
const int axis_id = node->inputs->data[1];
const TfLiteTensor& axis_tensor = context->tensors[axis_id];
switch (axis_tensor.type) {
case kTfLiteInt32:
if (axis_tensor.allocation_type == kTfLiteMmapRo) {
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
static_cast<int32_t>(*axis_tensor.data.i32)));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddSingleValueTensorAsScalarOperand(
axis_id, ANEURALNETWORKS_INT32));
}
break;
case kTfLiteInt64:
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
static_cast<int32_t>(*axis_tensor.data.i64)));
break;
default:
return kTfLiteError;
}
}
} else if (reg->builtin_code == kTfLiteBuiltinMaximum ||
reg->builtin_code == kTfLiteBuiltinMinimum) {
const TfLiteTensor& operand_tensor =
context->tensors[node->inputs->data[input_pos]];
if (operand_tensor.dims->size == 0) {
int tensor_index;
TF_LITE_ENSURE_EQ(context, operand_tensor.allocation_type,
kTfLiteMmapRo);
switch (operand_tensor.type) {
case kTfLiteFloat32:
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_FLOAT32, operand_tensor.type, {1},
std::vector<float>(1, operand_tensor.data.f[0]),
operand_tensor.params, &tensor_index));
break;
case kTfLiteUInt8:
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, operand_tensor.type, {1},
std::vector<uint8_t>(1, operand_tensor.data.uint8[0]),
operand_tensor.params, &tensor_index));
break;
case kTfLiteInt8: {
auto params = operand_tensor.params;
if (params.scale == 0.0) {
params.scale = 1.0;
}
if (use_int8_asymm_signed) {
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED,
operand_tensor.type, {1},
std::vector<int8_t>(1, operand_tensor.data.int8[0]), params,
&tensor_index));
} else {
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, operand_tensor.type,
{1},
std::vector<int8_t>(1, operand_tensor.data.int8[0] + 128),
params, &tensor_index));
}
} break;
case kTfLiteInt32:
TF_LITE_ENSURE_STATUS(builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, operand_tensor.type, {1},
std::vector<int32_t>(1, operand_tensor.data.i32[0]),
operand_tensor.params, &tensor_index));
break;
default:
return kTfLiteError;
}
} else {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
}
} else if ((reg->builtin_code == kTfLiteBuiltinReduceAny ||
reg->builtin_code == kTfLiteBuiltinReduceMax ||
reg->builtin_code == kTfLiteBuiltinReduceMin ||
reg->builtin_code == kTfLiteBuiltinReduceProd ||
reg->builtin_code == kTfLiteBuiltinSum ||
reg->builtin_code == kTfLiteBuiltinMean) &&
(input_pos == 1)) {
const TfLiteTensor& axis_tensor =
context->tensors[node->inputs->data[input_pos]];
if (axis_tensor.dims->size == 0) {
TF_LITE_ENSURE_STATUS(
builder.AddVectorInt32Operand(axis_tensor.data.i32, 1));
} else {
TF_LITE_ENSURE_STATUS(builder.AddTensorInput(input_index, hybrid_op,
input_tensor_flags));
}
} else if (reg->builtin_code == kTfLiteBuiltinFill) {
if (input_pos == 0) {
const int dims_id = node->inputs->data[0];
const TfLiteTensor& dims_tensor = context->tensors[dims_id];
switch (dims_tensor.type) {
case kTfLiteInt32:
TF_LITE_ENSURE_STATUS(
builder.AddTensorInput(input_index, hybrid_op));
break;
case kTfLiteInt64: {
const int dims_size = dims_tensor.dims->data[0];
std::vector<int32_t> dims_int32(dims_size);
std::copy(dims_tensor.data.i64, dims_tensor.data.i64 + dims_size,
dims_int32.begin());
int new_tensor_index = -1;
builder.AddNewInputConstantTensor(
ANEURALNETWORKS_TENSOR_INT32, kTfLiteInt32, dims_tensor.dims,
dims_int32, dims_tensor.params, &new_tensor_index);
} break;
default:
return kTfLiteError;
}
} else {
const int value_id = node->inputs->data[1];
const TfLiteTensor& value_tensor = context->tensors[value_id];
switch (value_tensor.type) {
case kTfLiteFloat32:
if (value_tensor.allocation_type == kTfLiteMmapRo) {
TF_LITE_ENSURE_STATUS(
builder.AddScalarFloat32Operand(*value_tensor.data.f));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddSingleValueTensorAsScalarOperand(
value_id, ANEURALNETWORKS_FLOAT32));
}
break;
case kTfLiteInt32:
if (value_tensor.allocation_type == kTfLiteMmapRo) {
TF_LITE_ENSURE_STATUS(
builder.AddScalarInt32Operand(*value_tensor.data.i32));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddSingleValueTensorAsScalarOperand(
value_id, ANEURALNETWORKS_INT32));
}
break;
case kTfLiteInt64:
if (value_tensor.allocation_type == kTfLiteMmapRo) {
TF_LITE_ENSURE_STATUS(builder.AddScalarInt32Operand(
static_cast<int32_t>(*value_tensor.data.i64)));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddSingleValueTensorAsScalarOperand(
value_id, ANEURALNETWORKS_INT32));
}
break;
default:
return kTfLiteError;
}
}
} else {
TF_LITE_ENSURE_STATUS(
builder.AddTensorInput(input_index, hybrid_op, input_tensor_flags));
}
}
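    // Map the TFLite builtin operator of this node to the corresponding NNAPI
    // operation type.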
int nn_op_type;
TF_LITE_ENSURE_STATUS(
Map(context, reg->builtin_code, reg->version, target_feature_level_,
{context, &builder, node, node_index, &model_state_outputs_,
&model_state_tfl_inputs_, &feedback_loops_, nnapi_errno},
&nn_op_type));
int output_tensor_flags = 0;
if (need_int8_conversion) {
output_tensor_flags |= NN_TENSOR_FLAG_INT8_CONVERSION;
}
if (use_int8_asymm_signed) {
output_tensor_flags |= NN_TENSOR_FLAG_USE_INT8_ASYMM_SIGNED;
}
int fc_nn_intermediate_output_index = -1;
int mean_nn_intermediate_output_index = -1;
for (int output_pos = 0; output_pos < node->outputs->size; ++output_pos) {
auto output_index = node->outputs->data[output_pos];
if (reg->builtin_code == kTfLiteBuiltinLstm && isLstmBasicKernel(node)) {
continue;
}
if (reg->builtin_code == kTfLiteBuiltinFullyConnected &&
reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data)
->keep_num_dims) {
auto& output_tensor = context->tensors[output_index];
int num_units = output_tensor.dims->data[output_tensor.dims->size - 1];
std::vector<uint32_t> output_dims(2);
output_dims[0] = NumElements(output_tensor.dims) / num_units;
output_dims[1] = num_units;
TF_LITE_ENSURE_STATUS(builder.AddIntermediateOutputTensor(
output_tensor.type, output_dims.size(), output_dims.data(),
output_tensor.params.scale, output_tensor.params.zero_point,
&fc_nn_intermediate_output_index));
} else if (reg->builtin_code == kTfLiteBuiltinMean &&
IsMeanWithDifferentInputOutputQuantization(context, node)) {
auto& input_tensor = context->tensors[node->inputs->data[0]];
auto& output_tensor = context->tensors[output_index];
TF_LITE_ENSURE_STATUS(builder.AddIntermediateOutputTensor(
output_tensor.type, output_tensor.dims->size,
reinterpret_cast<const uint32_t*>(output_tensor.dims->data),
input_tensor.params.scale, input_tensor.params.zero_point,
&mean_nn_intermediate_output_index, need_int8_conversion));
} else {
TF_LITE_ENSURE_STATUS(
builder.AddTensorOutput(output_index, output_tensor_flags));
}
}
AddDequantizeOperatorsWhereNeeded(context, reg->builtin_code, node,
node_index, &builder, nnapi_errno);
TF_LITE_ENSURE_OK(context_,
builder.FinalizeAddOperation(nn_op_type, node_index));
if (fc_nn_intermediate_output_index > -1) {
TF_LITE_ENSURE_STATUS(builder.AppendReshape(
fc_nn_intermediate_output_index, node->outputs->data[0], node_index));
}
if (mean_nn_intermediate_output_index > -1) {
TF_LITE_ENSURE_STATUS(builder.AppendRequantize(
mean_nn_intermediate_output_index, node->outputs->data[0], node_index,
output_tensor_flags));
}
}
return kTfLiteOk;
}
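// Builds the NNAPI model: adds all ops and tensors, identifies the model
// inputs and outputs, computes the byte sizes needed for the shared
// input/output memory pools, and finalizes the ANeuralNetworksModel.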
TfLiteStatus NNAPIDelegateKernel::BuildGraph(
TfLiteContext* context,
const StatefulNnApiDelegate::Options& delegate_options,
const TfLiteIntArray* input_tensors, const TfLiteIntArray* output_tensors,
int* nnapi_errno) {
TF_LITE_ENSURE_STATUS(AddOpsAndTensors(
context, nnapi_errno, delegate_options.allow_dynamic_dimensions));
std::vector<uint32_t> inputs;
inputs.reserve(input_tensors->size);
std::vector<uint32_t> outputs;
outputs.reserve(output_tensors->size);
size_t total_input_byte_size = 0;
for (int i : TfLiteIntArrayView(input_tensors)) {
if (i != kTfLiteOptionalTensor &&
context->tensors[i].allocation_type != kTfLiteMmapRo &&
mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(), i) != -1) {
inputs.push_back(
mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(), i));
if (context->tensors[i].buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
const TfLiteType nn_type_conversion =
mapping_util_->TfLiteIndexToNnTypeConversion(mapping_util_.get(), i);
int tensor_size = 0;
if (nn_type_conversion == kTfLiteNoType) {
tensor_size =
std::max(context->tensors[i].bytes, tensor_max_size_hints_[i]);
} else {
size_t type_size;
TF_LITE_ENSURE_OK(
context, GetSizeOfType(context, nn_type_conversion, &type_size));
tensor_size = NumElements(&context->tensors[i]) * type_size;
}
total_input_byte_size += tensor_size;
total_input_byte_size += GetNumPaddingBytes(tensor_size);
}
}
size_t total_output_byte_size = 0;
for (int i : TfLiteIntArrayView(output_tensors)) {
const int output_tensor_ann_index =
mapping_util_->TfLiteIndexToNnIndex(mapping_util_.get(), i);
if (output_tensor_ann_index != -1) {
outputs.push_back(output_tensor_ann_index);
}
if (context->tensors[i].buffer_handle != kTfLiteNullBufferHandle) {
continue;
}
size_t tensor_size =
std::max(context->tensors[i].bytes, tensor_max_size_hints_[i]);
total_output_byte_size += tensor_size;
total_output_byte_size += GetNumPaddingBytes(tensor_size);
}
for (int i = 0; i < model_state_outputs_.size(); i++) {
outputs.push_back(model_state_outputs_[i]);
auto tfl_state_idx = model_state_tfl_inputs_[i];
total_output_byte_size += context->tensors[tfl_state_idx].bytes;
total_output_byte_size +=
GetNumPaddingBytes(context->tensors[tfl_state_idx].bytes);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksModel_identifyInputsAndOutputs(
nn_model_.get(), inputs.size(), inputs.data(), outputs.size(),
outputs.data()),
"identifying model inputs and outputs", nnapi_errno);
auto allow_fp16 =
context->allow_fp32_relax_to_fp16 | delegate_options.allow_fp16;
if (nnapi_->android_sdk_version >= kMinSdkVersionForNNAPI11) {
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context,
nnapi_->ANeuralNetworksModel_relaxComputationFloat32toFloat16(
nn_model_.get(), allow_fp16),
"set relaxed computation mode for fp32 if possible", nnapi_errno);
}
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi_->ANeuralNetworksModel_finish(nn_model_.get()),
"finalizing the model", nnapi_errno);
nn_input_memory_ =
std::make_unique<NNMemory>(nnapi_, "input_pool", total_input_byte_size);
nn_output_memory_ =
std::make_unique<NNMemory>(nnapi_, "output_pool", total_output_byte_size);
return kTfLiteOk;
}
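// Logs (once) the diagnostic data reported by the NNAPI support library
// compilation callback.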
void NNAPIDelegateKernel::LogCompilationInfoOnce(
const NnApi* nnapi, const ANeuralNetworksDiagnosticCompilationInfo* info) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO,
"NNAPI SL compilation callback called.");
const int32_t session_id =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getSessionId(info);
const int32_t error_code =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getErrorCode(info);
const uint64_t compilation_time_ns =
nnapi
->SL_ANeuralNetworksDiagnosticCompilationInfo_getCompilationTimeNanos(
info);
const int64_t nnapi_version =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getNnApiVersion(info);
const uint8_t model_arch_hash_first_byte =
*nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getModelArchHash(
info);
const std::string device_ids_string = std::string(
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getDeviceIds(info));
const ANeuralNetworksDiagnosticDataClass input_data_class =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getInputDataClass(
info);
const ANeuralNetworksDiagnosticDataClass output_data_class =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_getOutputDataClass(
info);
const bool is_caching_enabled =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_isCachingEnabled(info);
const bool is_control_flow_used =
nnapi->SL_ANeuralNetworksDiagnosticCompilationInfo_isControlFlowUsed(
info);
TFLITE_LOG_PROD_ONCE(
TFLITE_LOG_INFO,
"Compilation info: getSessionId=%d getErrorCode=%d "
"getCompilationTimeNanos=%" PRIu64 " getNnApiVersion=%" PRId64
" getDeviceIds=%s getModelArchHash=%x getInputDataClass=%d "
"getOutputDataClass=%d isCachingEnabled=%s isControlFlowUser=%s",
session_id, error_code, compilation_time_ns, nnapi_version,
device_ids_string.c_str(), unsigned{model_arch_hash_first_byte},
input_data_class, output_data_class, is_caching_enabled ? "Y" : "N",
is_control_flow_used ? "Y" : "N");
}
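// Logs (once) the diagnostic data reported by the NNAPI support library
// execution callback.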
void NNAPIDelegateKernel::LogExecutionInfoOnce(
const NnApi* nnapi, const ANeuralNetworksDiagnosticExecutionInfo* info) {
TFLITE_LOG_PROD_ONCE(TFLITE_LOG_INFO, "NNAPI SL execution callback called.");
const int32_t session_id =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getSessionId(info);
const int32_t error_code =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getErrorCode(info);
const int64_t nnapi_version =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getNnApiVersion(info);
const uint8_t model_arch_hash_first_byte =
*nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getModelArchHash(info);
const std::string device_ids_string = std::string(
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getDeviceIds(info));
const ANeuralNetworksDiagnosticDataClass input_data_class =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getInputDataClass(info);
const ANeuralNetworksDiagnosticDataClass output_data_class =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getOutputDataClass(info);
const bool is_caching_enabled =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_isCachingEnabled(info);
const bool is_control_flow_used =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_isControlFlowUsed(info);
const ANeuralNetworksDiagnosticExecutionMode execution_mode =
nnapi->SL_ANeuralNetworksDiagnosticExecutionInfo_getExecutionMode(info);
const uint64_t runtime_time_ns =
nnapi
->SL_ANeuralNetworksDiagnosticExecutionInfo_getRuntimeExecutionTimeNanos(
info);
const uint64_t driver_time_ns =
nnapi
->SL_ANeuralNetworksDiagnosticExecutionInfo_getDriverExecutionTimeNanos(
info);
const uint64_t hardware_time_ns =
nnapi
->SL_ANeuralNetworksDiagnosticExecutionInfo_getHardwareExecutionTimeNanos(
info);
TFLITE_LOG_PROD_ONCE(
TFLITE_LOG_INFO,
"Execution info: getSessionId=%d getErrorCode=%d "
"getNnApiVersion=%" PRId64
" getModelArchHash=%x getDeviceIds=%s getInputDataClass=%d "
"getOutputDataClass=%d isCachingEnabled=%s isControlFlowUsed=%s "
"getExecutionMode=%d getRuntimeExecutionTimeNanos=%" PRIu64
" getDriverExecutionTimeNanos=%" PRIu64
" getHardwareExecutionTimeNanos=%" PRIu64,
session_id, error_code, nnapi_version,
unsigned{model_arch_hash_first_byte}, device_ids_string.c_str(),
input_data_class, output_data_class, is_caching_enabled ? "Y" : "N",
is_control_flow_used ? "Y" : "N", execution_mode, runtime_time_ns,
driver_time_ns, hardware_time_ns);
}
}  // namespace nnapi
}  // namespace delegate
using ::tflite::delegate::nnapi::kMinSdkVersionForNNAPI;
using ::tflite::delegate::nnapi::kMinSdkVersionForNNAPI11;
using ::tflite::delegate::nnapi::kMinSdkVersionForNNAPI12;
using ::tflite::delegate::nnapi::NNAPIDelegateKernel;
StatefulNnApiDelegate::Data::Data(const NnApi* nnapi) : nnapi(nnapi) {}
StatefulNnApiDelegate::Data::Data(std::unique_ptr<const NnApi> nnapi)
: nnapi(nnapi.get()), owned_nnapi(std::move(nnapi)) {}
StatefulNnApiDelegate::Data::~Data() {
std::for_each(std::begin(delegate_state_cache),
std::end(delegate_state_cache),
[](const std::pair<int, NNAPIDelegateKernel*>& entry) {
delete entry.second;
});
}
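// Caches a delegate kernel keyed by the first node of the partition it was
// built for, so the kernel can be reused when that partition is delegated.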
void StatefulNnApiDelegate::Data::CacheDelegateKernel(
const TfLiteDelegateParams* delegate_params,
NNAPIDelegateKernel* delegate_state) {
const int cache_key = delegate_params->nodes_to_replace->data[0];
delegate_state_cache.emplace(cache_key, delegate_state);
}
NNAPIDelegateKernel* StatefulNnApiDelegate::Data::MaybeGetCachedDelegateKernel(
const TfLiteDelegateParams* delegate_params) {
const int cache_key = delegate_params->nodes_to_replace->data[0];
const auto cached_state = delegate_state_cache.find(cache_key);
if (cached_state != std::end(delegate_state_cache)) {
auto result = cached_state->second;
delegate_state_cache.erase(cached_state);
return result;
} else {
return nullptr;
}
}
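// Copies the user-provided Options into the delegate's internal state and
// wires up the TfLiteDelegate callbacks.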
void StatefulNnApiDelegate::StatefulNnApiDelegateConstructorImpl(
const Options& options) {
if (options.accelerator_name) {
delegate_data_.accelerator_name = options.accelerator_name;
}
if (options.cache_dir) {
delegate_data_.cache_dir = options.cache_dir;
}
if (options.model_token) {
delegate_data_.model_token = options.model_token;
}
delegate_data_.execution_preference = options.execution_preference;
delegate_data_.disallow_nnapi_cpu = options.disallow_nnapi_cpu;
delegate_data_.max_number_delegated_partitions =
options.max_number_delegated_partitions;
delegate_data_.allow_fp16 = options.allow_fp16;
delegate_data_.execution_priority = options.execution_priority;
delegate_data_.max_compilation_timeout_duration_ns =
options.max_compilation_timeout_duration_ns;
delegate_data_.max_execution_timeout_duration_ns =
options.max_execution_timeout_duration_ns;
delegate_data_.max_execution_loop_timeout_duration_ns =
options.max_execution_loop_timeout_duration_ns;
if (delegate_data_.nnapi->android_sdk_version >= kMinSdkVersionForNNAPI11) {
delegate_data_.allow_dynamic_dimensions = options.allow_dynamic_dimensions;
}
delegate_data_.use_burst_computation = options.use_burst_computation;
delegate_data_.vendor_compilation_hints = options.vendor_compilation_hints;
delegate_data_.vendor_execution_hints = options.vendor_execution_hints;
delegate_data_.vendor_plugin = options.vendor_plugin;
delegate_data_.max_execution_cache_size = options.max_execution_cache_size;
delegate_data_.tensor_max_size_hints = options.tensor_max_size_hints;
delegate_data_.disable_debugging_diagnostics_callbacks =
options.disable_debugging_diagnostics_callbacks;
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"Created TensorFlow Lite delegate for NNAPI.");
Prepare = DoPrepare;
CopyFromBufferHandle = DoCopyFromBufferHandle;
CopyToBufferHandle = DoCopyToBufferHandle;
FreeBufferHandle = DoFreeBufferHandle;
data_ = &delegate_data_;
if (delegate_data_.allow_dynamic_dimensions) {
flags |= kTfLiteDelegateFlagsAllowDynamicTensors;
if (!delegate_data_.vendor_plugin) {
flags |= kTfLiteDelegateFlagsRequirePropagatedShapes;
}
}
}
StatefulNnApiDelegate::StatefulNnApiDelegate(const NnApi* nnapi)
: StatefulNnApiDelegate(nnapi, Options()) {}
StatefulNnApiDelegate::StatefulNnApiDelegate(Options options)
: StatefulNnApiDelegate(NnApiImplementation(), options) {}
StatefulNnApiDelegate::StatefulNnApiDelegate(
const NnApiSLDriverImplFL5* nnapi_support_library_driver, Options options)
: TfLiteDelegate(TfLiteDelegateCreate()),
delegate_data_(
CreateNnApiFromSupportLibrary(nnapi_support_library_driver)) {
StatefulNnApiDelegateConstructorImpl(options);
}
StatefulNnApiDelegate::StatefulNnApiDelegate(const NnApi* nnapi,
Options options)
: TfLiteDelegate(TfLiteDelegateCreate()), delegate_data_(nnapi) {
StatefulNnApiDelegateConstructorImpl(options);
}
StatefulNnApiDelegate::StatefulNnApiDelegate()
: StatefulNnApiDelegate(Options()) {}
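// Rebuilds an Options struct from the delegate's stored state; string options
// are returned as nullptr when empty.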
const StatefulNnApiDelegate::Options StatefulNnApiDelegate::GetOptions(
TfLiteDelegate* delegate) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
StatefulNnApiDelegate::Options options;
options.execution_preference = delegate_data->execution_preference;
options.accelerator_name = delegate_data->accelerator_name.empty()
? nullptr
: delegate_data->accelerator_name.c_str();
options.cache_dir = delegate_data->cache_dir.empty()
? nullptr
: delegate_data->cache_dir.c_str();
options.model_token = delegate_data->model_token.empty()
? nullptr
: delegate_data->model_token.c_str();
options.disallow_nnapi_cpu = delegate_data->disallow_nnapi_cpu;
options.max_number_delegated_partitions =
delegate_data->max_number_delegated_partitions;
options.allow_fp16 = delegate_data->allow_fp16;
options.execution_priority = delegate_data->execution_priority;
options.max_compilation_timeout_duration_ns =
delegate_data->max_compilation_timeout_duration_ns;
options.max_execution_timeout_duration_ns =
delegate_data->max_execution_timeout_duration_ns;
options.max_execution_loop_timeout_duration_ns =
delegate_data->max_execution_loop_timeout_duration_ns;
options.allow_dynamic_dimensions = delegate_data->allow_dynamic_dimensions;
options.use_burst_computation = delegate_data->use_burst_computation;
options.vendor_compilation_hints = delegate_data->vendor_compilation_hints;
options.vendor_execution_hints = delegate_data->vendor_execution_hints;
options.vendor_plugin = delegate_data->vendor_plugin;
options.max_execution_cache_size = delegate_data->max_execution_cache_size;
options.tensor_max_size_hints = delegate_data->tensor_max_size_hints;
options.disable_debugging_diagnostics_callbacks =
delegate_data->disable_debugging_diagnostics_callbacks;
return options;
}
const std::vector<StatefulNnApiDelegate::MemoryRegistration>&
StatefulNnApiDelegate::GetTensorMemoryMap(TfLiteDelegate* delegate) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
return delegate_data->tensor_memory_map;
}
delegates::Serialization* StatefulNnApiDelegate::GetCache(
TfLiteDelegate* delegate) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
return delegate_data->cache.get();
}
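// Registers an NNAPI memory object together with a copy-back callback.
// Reuses a freed slot in the tensor memory map if one is available and
// returns the slot index as the buffer handle.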
TfLiteBufferHandle StatefulNnApiDelegate::RegisterNnapiMemory(
ANeuralNetworksMemory* memory, CopyToHostTensorFnPtr callback,
void* callback_context) {
uint64_t timestamp = delegate_data_.next_buffer_handle_timestamp++;
int map_size = delegate_data_.tensor_memory_map.size();
for (int i = 0; i < map_size; i++) {
if (delegate_data_.tensor_memory_map[i].memory == nullptr) {
delegate_data_.tensor_memory_map[i] = {memory, callback, callback_context,
timestamp};
return i;
}
}
delegate_data_.tensor_memory_map.push_back(
{memory, callback, callback_context, timestamp});
return map_size;
}
TfLiteStatus StatefulNnApiDelegate::DoCopyFromBufferHandle(
TfLiteContext* context, TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle, TfLiteTensor* tensor) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
if (buffer_handle < 0 ||
buffer_handle >= delegate_data->tensor_memory_map.size()) {
return kTfLiteError;
}
auto memory = delegate_data->tensor_memory_map[buffer_handle].memory;
auto callback = delegate_data->tensor_memory_map[buffer_handle].callback;
auto callback_context =
delegate_data->tensor_memory_map[buffer_handle].callback_context;
if (!memory || !callback) {
return kTfLiteError;
}
return callback(tensor, memory, 0, tensor->bytes, callback_context);
}
TfLiteStatus StatefulNnApiDelegate::DoCopyToBufferHandle(
TfLiteContext* context, TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle, TfLiteTensor* tensor) {
return kTfLiteError;
}
void StatefulNnApiDelegate::DoFreeBufferHandle(TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle* handle) {
auto delegate_data = reinterpret_cast<Data*>(delegate->data_);
if (*handle >= 0 && *handle < delegate_data->tensor_memory_map.size()) {
delegate_data->tensor_memory_map[*handle] = {nullptr, nullptr, nullptr};
*handle = kTfLiteNullBufferHandle;
}
}
int StatefulNnApiDelegate::GetNnApiErrno() const {
return delegate_data_.nnapi_errno;
}
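// Previews the partitioning of the supported nodes and, for each partition,
// queries the target NNAPI devices for the operations they actually support.
// Kernels for fully supported partitions are cached for later reuse.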
TfLiteStatus StatefulNnApiDelegate::GetNodesSupportedByAccelerator(
TfLiteContext* context, TfLiteDelegate* delegate, const NnApi* nnapi,
const std::vector<int>& supported_nodes,
std::vector<int>* device_supported_nodes, int* num_partitions,
TfLiteDelegateParams** params_array, int* nnapi_errno) {
auto* delegate_data = static_cast<Data*>(delegate->data_);
auto supported_nodes_int_array = BuildTfLiteArray(supported_nodes);
TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
context, supported_nodes_int_array.get(), params_array, num_partitions));
delegate_data->delegate_state_cache.clear();
for (int idx = 0; idx < *num_partitions; idx++) {
const auto& partition_params = (*params_array)[idx];
std::unique_ptr<NNAPIDelegateKernel> kernel_state(
new NNAPIDelegateKernel(nnapi, delegate_data->vendor_plugin));
TfLiteDelegateParams params_with_delegate = partition_params;
params_with_delegate.delegate = delegate;
TF_LITE_ENSURE_STATUS(
kernel_state->Init(context, ¶ms_with_delegate, nnapi_errno));
std::vector<int> supported_partition_nodes;
TF_LITE_ENSURE_STATUS(
kernel_state->GetOperationsSupportedByTargetNnApiDevices(
context, &supported_partition_nodes, nnapi_errno));
device_supported_nodes->insert(device_supported_nodes->end(),
supported_partition_nodes.begin(),
supported_partition_nodes.end());
bool model_fully_supported = (supported_partition_nodes.size() ==
partition_params.nodes_to_replace->size);
if (model_fully_supported) {
delegate_data->CacheDelegateKernel(&partition_params,
kernel_state.release());
}
}
if (device_supported_nodes->size() != supported_nodes.size()) {
auto device_sup_nodes_int_array = BuildTfLiteArray(*device_supported_nodes);
TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
context, device_sup_nodes_int_array.get(), params_array,
num_partitions));
}
return kTfLiteOk;
}
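// When more partitions are delegated than max_partitions allows, keeps only
// the largest ones (by number of nodes).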
TfLiteStatus StatefulNnApiDelegate::LimitDelegatedPartitions(
int max_partitions,
std::vector<TfLiteDelegateParams> partition_params_array,
std::vector<int>* nodes_to_delegate) {
int num_partitions = partition_params_array.size();
if (max_partitions <= 0 || num_partitions <= max_partitions) {
return kTfLiteOk;
}
int number_delegated_partitions = std::count_if(
partition_params_array.begin(), partition_params_array.end(),
[nodes_to_delegate](const TfLiteDelegateParams& partition_params) {
return std::find(nodes_to_delegate->begin(), nodes_to_delegate->end(),
partition_params.nodes_to_replace->data[0]) !=
nodes_to_delegate->end();
});
if (number_delegated_partitions > max_partitions) {
std::sort(partition_params_array.begin(), partition_params_array.end(),
[](const TfLiteDelegateParams& left,
const TfLiteDelegateParams& right) -> bool {
return left.nodes_to_replace->size >
right.nodes_to_replace->size;
});
nodes_to_delegate->clear();
for (int i = 0; i < max_partitions; i++) {
const TfLiteDelegateParams& partition_params = partition_params_array[i];
nodes_to_delegate->insert(nodes_to_delegate->end(),
partition_params.nodes_to_replace->data,
partition_params.nodes_to_replace->data +
partition_params.nodes_to_replace->size);
}
}
return kTfLiteOk;
}
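// Collects the NNAPI-supported nodes using the FP16 graph partition helper,
// so that constant float16 weights consumed through DEQUANTIZE can still be
// delegated.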
static std::vector<int> GetSupportedOpsWithFp16WeightRemapping(
TfLiteContext* context, int target_feature_level,
bool is_accelerator_specified, int max_number_delegated_partitions) {
std::vector<int> supported_nodes;
delegates::IsNodeSupportedFn node_supported_fn =
[=](TfLiteContext* context, TfLiteNode* node,
TfLiteRegistration* registration,
std::string* unsupported_details) -> bool {
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
const auto is_supported = NNAPIDelegateKernel::Validate(
context, registration, target_feature_level, node,
is_accelerator_specified, nullptr, &map_failures);
if (!is_supported) {
if (unsupported_details) {
for (auto& failure : map_failures) {
unsupported_details->append(failure.message.c_str());
}
}
return false;
}
return true;
};
delegates::FP16GraphPartitionHelper partition_helper(context,
node_supported_fn);
std::set<std::string> unsupported_nodes_info;
if (partition_helper.Partition(&unsupported_nodes_info) == kTfLiteOk) {
supported_nodes = partition_helper.GetNodesOfFirstNLargestPartitions();
}
return supported_nodes;
}
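// Delegate entry point: validates each node in the execution plan, optionally
// restores a cached partitioning, limits the number of delegated partitions,
// and replaces the supported node subsets with the NNAPI delegate kernel.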
TfLiteStatus StatefulNnApiDelegate::DoPrepare(TfLiteContext* context,
TfLiteDelegate* delegate) {
auto* delegate_data = static_cast<Data*>(delegate->data_);
int* nnapi_errno = &(delegate_data->nnapi_errno);
const NnApi* nnapi = delegate_data->nnapi;
*nnapi_errno = 0;
if (nnapi->android_sdk_version < kMinSdkVersionForNNAPI ||
!nnapi->nnapi_exists) {
return kTfLiteOk;
}
int target_feature_level = nnapi->android_sdk_version;
const StatefulNnApiDelegate::Options delegate_options =
StatefulNnApiDelegate::GetOptions(delegate);
if (nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12) {
if (ShouldUseTargetDevices(delegate_options, nnapi)) {
std::vector<ANeuralNetworksDevice*> devices;
TF_LITE_ENSURE_STATUS(
GetTargetDevices(context, delegate, nnapi, nnapi_errno, &devices));
if (devices.empty()) {
if (delegate_options.accelerator_name) {
return kTfLiteError;
} else {
return kTfLiteOk;
}
}
TF_LITE_ENSURE_STATUS(GetTargetFeatureLevel(
context, nnapi, devices, &target_feature_level, nnapi_errno));
} else {
uint32_t device_count = 0;
RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, nnapi->ANeuralNetworks_getDeviceCount(&device_count),
"getting number of NNAPI devices", nnapi_errno);
if (device_count <= 1) {
return kTfLiteOk;
}
}
}
std::vector<int> supported_nodes;
TfLiteIntArray* execution_plan;
TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
IntArrayUniquePtr plan(TfLiteIntArrayCopy(execution_plan));
const bool is_accelerator_specified = ShouldUseTargetDevices(
delegate_options, nnapi, true);
std::vector<delegate::nnapi::NNAPIValidationFailure> map_failures;
std::vector<int> fp16_to_fp32(context->tensors_size, -1);
bool should_prune_fp16_dequantize = false;
for (int i = 0; i < plan->size; ++i) {
const int node_id = plan->data[i];
TfLiteNode* node = nullptr;
TfLiteRegistration* registration = nullptr;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, node_id, &node, ®istration));
if (IsDequantizeConstFloat16(context, node, registration)) {
should_prune_fp16_dequantize = true;
fp16_to_fp32[node->inputs->data[0]] = node->outputs->data[0];
}
}
if (should_prune_fp16_dequantize) {
supported_nodes = GetSupportedOpsWithFp16WeightRemapping(
context, target_feature_level, is_accelerator_specified,
delegate_options.max_number_delegated_partitions);
} else {
for (int node_index : TfLiteIntArrayView(plan.get())) {
TfLiteNode* node;
TfLiteRegistration* registration;
TF_LITE_ENSURE_STATUS(context->GetNodeAndRegistration(
context, node_index, &node, ®istration));
if (NNAPIDelegateKernel::Validate(
context, registration, target_feature_level, node,
is_accelerator_specified, delegate_options.vendor_plugin,
&map_failures)) {
supported_nodes.push_back(node_index);
}
#ifdef NNAPI_VERBOSE_VALIDATION
for (auto& failure : map_failures) {
TFLITE_LOG_PROD(
TFLITE_LOG_WARNING,
"Operator %s (v%d) refused by NNAPI delegate: %s",
tflite::EnumNameBuiltinOperator(
static_cast<BuiltinOperator>(registration->builtin_code)),
registration->version, failure.message.c_str());
}
map_failures.clear();
#endif
}
}
if (supported_nodes.empty()) {
return kTfLiteOk;
}
static const TfLiteRegistration nnapi_delegate_kernel = {
.init = [](TfLiteContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteDelegateParams* params =
reinterpret_cast<const TfLiteDelegateParams*>(buffer);
auto* delegate_data = static_cast<Data*>(params->delegate->data_);
int* nnapi_errno = &(delegate_data->nnapi_errno);
NNAPIDelegateKernel* kernel_state =
delegate_data->MaybeGetCachedDelegateKernel(params);
if (!kernel_state) {
kernel_state = new NNAPIDelegateKernel(delegate_data->nnapi,
delegate_data->vendor_plugin);
kernel_state->Init(context, params, nnapi_errno);
}
return kernel_state;
},
.free = [](TfLiteContext* context, void* buffer) -> void {
delete reinterpret_cast<NNAPIDelegateKernel*>(buffer);
},
.prepare = [](TfLiteContext* context, TfLiteNode* node) -> TfLiteStatus {
NNAPIDelegateKernel* state =
reinterpret_cast<NNAPIDelegateKernel*>(node->user_data);
int* nnapi_errno =
&(static_cast<Data*>(node->delegate->data_)->nnapi_errno);
return state->Prepare(context, node, nnapi_errno);
},
.invoke = [](TfLiteContext* context, TfLiteNode* node) -> TfLiteStatus {
NNAPIDelegateKernel* state =
reinterpret_cast<NNAPIDelegateKernel*>(node->user_data);
int* nnapi_errno =
&(static_cast<Data*>(node->delegate->data_)->nnapi_errno);
return state->Invoke(context, node, nnapi_errno);
},
.profiling_string = nullptr,
.builtin_code = kTfLiteBuiltinDelegate,
.custom_name = "TfLiteNnapiDelegate",
.version = 1,
};
const char* cache_dir = delegate_options.cache_dir;
const char* model_token = delegate_options.model_token;
delegates::SerializationParams params = {model_token, cache_dir};
if (nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12 && cache_dir &&
model_token) {
delegate_data->cache = std::make_unique<delegates::Serialization>(params);
}
delegates::Serialization* cache_ptr = delegate_data->cache.get();
if (cache_ptr) {
std::string accelerator_id = NnApiBackendId(delegate_options);
TfLiteIntArray* cached_nodes_to_delegate = nullptr;
if (delegates::GetDelegatedNodes(context, cache_ptr, accelerator_id,
&cached_nodes_to_delegate) == kTfLiteOk) {
if (cached_nodes_to_delegate->size == 0) return kTfLiteOk;
auto status = context->ReplaceNodeSubsetsWithDelegateKernels(
context, nnapi_delegate_kernel, cached_nodes_to_delegate, delegate);
TfLiteIntArrayFree(cached_nodes_to_delegate);
return status;
}
}
std::vector<int> nodes_to_delegate;
int num_partitions;
TfLiteDelegateParams* params_array;
if (is_accelerator_specified &&
nnapi->android_sdk_version >= kMinSdkVersionForNNAPI12) {
TF_LITE_ENSURE_STATUS(GetNodesSupportedByAccelerator(
context, delegate, nnapi, supported_nodes, &nodes_to_delegate,
&num_partitions, ¶ms_array, nnapi_errno));
} else {
nodes_to_delegate = supported_nodes;
auto supported_nodes_int_array = BuildTfLiteArray(supported_nodes);
TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
context, supported_nodes_int_array.get(), ¶ms_array,
&num_partitions));
}
if (should_prune_fp16_dequantize &&
supported_nodes.size() != nodes_to_delegate.size()) {
for (int execution_plan_index = 0; execution_plan_index < plan->size;
++execution_plan_index) {
int node_index = plan->data[execution_plan_index];
TfLiteNode* node = nullptr;
TfLiteRegistration* reg = nullptr;
TF_LITE_ENSURE_STATUS(
context->GetNodeAndRegistration(context, node_index, &node, ®));
if (reg->builtin_code == kTfLiteBuiltinDequantize) continue;
for (int i = 0; i < node->inputs->size; ++i) {
const int original_input_idx = node->inputs->data[i];
if (original_input_idx == kTfLiteOptionalTensor) continue;
if (context->tensors[original_input_idx].type == kTfLiteFloat16 &&
fp16_to_fp32[original_input_idx] != -1) {
node->inputs->data[i] = fp16_to_fp32[original_input_idx];
}
}
}
return kTfLiteOk;
}
TF_LITE_ENSURE_STATUS(
LimitDelegatedPartitions(delegate_options.max_number_delegated_partitions,
std::vector<TfLiteDelegateParams>(
params_array, params_array + num_partitions),
&nodes_to_delegate));
auto nodes_to_delegate_int_array = BuildTfLiteArray(nodes_to_delegate);
if (cache_ptr) {
std::string accelerator_id = NnApiBackendId(delegate_options);
if (delegates::SaveDelegatedNodes(context, cache_ptr, accelerator_id,
nodes_to_delegate_int_array.get()) !=
kTfLiteOk) {
TF_LITE_KERNEL_LOG(context, "Could not save delegated nodes");
}
}
if (nodes_to_delegate_int_array->size == 0) {
return kTfLiteOk;
} else {
return context->ReplaceNodeSubsetsWithDelegateKernels(
context, nnapi_delegate_kernel, nodes_to_delegate_int_array.get(),
delegate);
}
}
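// Returns a process-wide, intentionally leaked singleton delegate instance.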
TfLiteDelegate* NnApiDelegate() {
static StatefulNnApiDelegate* delegate = new StatefulNnApiDelegate();
return delegate;
}
}  // namespace tflite
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include <sys/mman.h>
#include <algorithm>
#include <functional>
#include <initializer_list>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "tensorflow/lite/context_util.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_plugin.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
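// Matcher that accepts pairs of quantized values differing by at most one.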
MATCHER(QuantizedNear, "") {
const int diff = abs(std::get<0>(arg) - std::get<1>(arg));
if (diff > 1) {
*result_listener << "Quantized values can be at most off by one: " << diff;
return false;
}
return true;
}
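// SingleOpModel wrapper that applies the stateful NNAPI delegate when the
// interpreter is built.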
class SingleOpModelWithNNAPI : public SingleOpModel {
public:
SingleOpModelWithNNAPI() { options_.disallow_nnapi_cpu = false; }
~SingleOpModelWithNNAPI() { stateful_delegate_.reset(); }
explicit SingleOpModelWithNNAPI(
const StatefulNnApiDelegate::Options& options) {
options_ = options;
options_.disallow_nnapi_cpu = false;
}
TfLiteStatus ResizeInputTensor(int tensor_index,
const std::vector<int>& dims) {
return interpreter_->ResizeInputTensor(tensor_index, dims);
}
StatefulNnApiDelegate* GetDelegate() { return stateful_delegate_.get(); }
void SetBufferHandle(int index, TfLiteBufferHandle handle) {
interpreter_->SetBufferHandle(index, handle, stateful_delegate_.get());
}
void MarkInputTensorDataStale(int index) {
interpreter_->tensor(index)->data_is_stale = true;
}
TfLiteStatus AllocateTensors() { return interpreter_->AllocateTensors(); }
void SetTensorMaxSize(uint32_t tensor_index, size_t max_size) {
options_.tensor_max_size_hints.emplace(tensor_index, max_size);
}
void ApplyNNAPIDelegate() {
stateful_delegate_ = std::make_unique<StatefulNnApiDelegate>(options_);
SetDelegate(stateful_delegate_.get());
ApplyDelegate();
}
protected:
void SetData(int index, TensorType type, const std::vector<float>& data) {
switch (type) {
case TensorType_FLOAT32:
PopulateTensor(index, data);
break;
case TensorType_INT32:
QuantizeAndPopulate<int32_t>(index, data);
break;
case TensorType_UINT8:
QuantizeAndPopulate<uint8_t>(index, data);
break;
case TensorType_INT8:
QuantizeAndPopulate<int8_t>(index, data);
break;
default:
FAIL() << "Type not supported: " << type;
break;
}
}
void GetData(int index, TensorType type, std::vector<float>* output) {
switch (type) {
case TensorType_FLOAT32:
*output = ExtractVector<float>(index);
break;
case TensorType_UINT8:
*output = Dequantize<uint8_t>(ExtractVector<uint8_t>(index),
GetScale(index), GetZeroPoint(index));
break;
default:
FAIL() << "Type not supported: " << type;
break;
}
}
void BuildInterpreterWithNNAPI(std::vector<std::vector<int>> input_shapes,
bool allow_fp32_relax_to_fp16 = false,
bool apply_delegate = true) {
    BuildInterpreter(input_shapes, /*num_threads=*/-1,
                     allow_fp32_relax_to_fp16, /*apply_delegate=*/false,
                     /*allocate_and_delegate=*/true);
if (apply_delegate) {
ApplyNNAPIDelegate();
}
}
private:
StatefulNnApiDelegate::Options options_;
std::unique_ptr<StatefulNnApiDelegate> stateful_delegate_;
};
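// Float ADD model, optionally constructed with custom delegate Options; used
// by most of the basic delegate tests below.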
class FloatAddOpModel : public SingleOpModelWithNNAPI {
public:
FloatAddOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false) {
Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16);
}
FloatAddOpModel(const StatefulNnApiDelegate::Options& options,
const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false)
: SingleOpModelWithNNAPI(options) {
Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16);
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
private:
void Init(const TensorData& input1, const TensorData& input2,
const TensorData& output, ActivationFunctionType activation_type,
bool allow_fp32_relax_to_fp16 = false) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_ADD, BuiltinOptions_AddOptions,
CreateAddOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)},
allow_fp32_relax_to_fp16);
}
};
TEST(NNAPIDelegate, AddWithNoActivation) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, AddScalarWithNoActivation) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, {TensorType_FLOAT32, {}},
ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.3, 0.8, 0.8}));
}
TEST(NNAPIDelegate, AddWithNoActivationRelaxed) {
FloatAddOpModel m(
{TensorType_FLOAT32, {1, 2, 2, 1}}, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE, true);
m.PopulateTensor<float>(m.input1(), {-2.0, -1.0, 1.0, 2.0});
m.PopulateTensor<float>(m.input2(), {1.0, 2.0, 3.0, 4.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.0, 1.0, 4.0, 6.0}));
}
TEST(NNAPIDelegate, AddWithRelu) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_RELU);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0.0, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, ResizeInputTensorsWorks) {
FloatAddOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3, 1.1, 1.5}));
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 1.3, 1.1, 1.5}));
}
TEST(NNAPIDelegate, ResizeDynamicBatchInputTensorsWorks) {
StatefulNnApiDelegate::Options options;
options.allow_dynamic_dimensions = true;
options.max_execution_cache_size = 1;
  FloatAddOpModel m(
      options,
      {TensorType_FLOAT32, {1, 3, 2, 1}, 0.0f, 0.0f, 0.0f, 0, false, {}, {}, 0,
       {}, {}, {}, {}, /*shape_signature=*/{1, -1, 2, 1}},
      {TensorType_FLOAT32, {1, 3, 2, 1}, 0.0f, 0.0f, 0.0f, 0, false, {}, {}, 0,
       {}, {}, {}, {}, /*shape_signature=*/{1, -1, 2, 1}},
      {TensorType_FLOAT32, {}, 0.0f, 0.0f, 0.0f, 0, false, {}, {}, 0, {}, {},
       {}, {}, /*shape_signature=*/{1, -1, 2, 1}},
      ActivationFunctionType_NONE);
auto RunTestCase1 = [&m]() {
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 3, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-1.9, 0.4, 1.0, 1.3, 1.1, 1.5}));
};
auto RunTestCase2 = [&m]() {
EXPECT_EQ(m.ResizeInputTensor(m.input1(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.ResizeInputTensor(m.input2(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input1(), {0.7, 0.8, 0.9, 0.7});
m.PopulateTensor<float>(m.input2(), {0.3, 0.5, 0.2, 0.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 1.3, 1.1, 1.5}));
};
RunTestCase1();
RunTestCase1();
RunTestCase2();
RunTestCase1();
}
TEST(NNAPIDelegate, StatefulDelegate) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithAcceleratorName) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.accelerator_name = "nnapi-reference";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithInvalidAcceleratorName) {
if (!NnApiImplementation()->ANeuralNetworksDevice_getName) {
GTEST_SKIP();
}
testing::internal::CaptureStderr();
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.accelerator_name = "foo";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
EXPECT_THAT(testing::internal::GetCapturedStderr(),
testing::HasSubstr(
"Could not find the specified NNAPI accelerator: foo"));
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithCompilationCaching) {
StatefulNnApiDelegate::Options options;
options.execution_preference =
StatefulNnApiDelegate::Options::ExecutionPreference::kLowPower;
options.cache_dir = "/data/local/tmp";
options.model_token = "NNAPIDelegate.StatefulDelegateWithCompilationCaching";
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, StatefulDelegateWithQoS) {
StatefulNnApiDelegate::Options options;
options.accelerator_name = "nnapi-reference";
options.execution_priority = ANEURALNETWORKS_PRIORITY_HIGH;
options.max_compilation_timeout_duration_ns = UINT64_MAX;
options.max_execution_timeout_duration_ns = UINT64_MAX;
options.max_execution_loop_timeout_duration_ns = UINT64_MAX;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
}
TEST(NNAPIDelegate, DISABLED_StatefulDelegateWithBufferHandles) {
if (!NnApiImplementation()->ASharedMemory_create ||
!NnApiImplementation()->ANeuralNetworksMemory_createFromFd) {
GTEST_SKIP();
}
StatefulNnApiDelegate::Options options;
options.disallow_nnapi_cpu = false;
options.max_execution_cache_size = 2;
FloatAddOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
auto* delegate = m.GetDelegate();
constexpr auto kInput1ByteSize = 4 * sizeof(float);
ANeuralNetworksMemory* input1_memory = nullptr;
int fd =
NnApiImplementation()->ASharedMemory_create("input1", kInput1ByteSize);
EXPECT_GE(fd, 0);
void* input1_memory_data =
mmap(nullptr, kInput1ByteSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
EXPECT_TRUE(input1_memory_data != nullptr);
float input1_data[] = {-2.0, 0.2, 0.7, 0.8};
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
int result = NnApiImplementation()->ANeuralNetworksMemory_createFromFd(
kInput1ByteSize, PROT_READ, fd, 0, &input1_memory);
EXPECT_EQ(result, ANEURALNETWORKS_NO_ERROR);
ASSERT_NE(input1_memory, nullptr);
struct DummyMemoryContext {
ANeuralNetworksMemory* memory_handle;
void* memory_data;
size_t byte_size;
};
DummyMemoryContext memory_context = {input1_memory, input1_memory_data,
kInput1ByteSize};
static StatefulNnApiDelegate::CopyToHostTensorFnPtr memory_callback =
[](TfLiteTensor* tensor, ANeuralNetworksMemory* memory,
size_t memory_offset, size_t byte_size,
void* callback_context) -> TfLiteStatus {
auto memory_context =
reinterpret_cast<DummyMemoryContext*>(callback_context);
if (memory != memory_context->memory_handle ||
memory_offset + byte_size > memory_context->byte_size) {
return kTfLiteError;
}
memcpy(
tensor->data.raw,
reinterpret_cast<uint8_t*>(memory_context->memory_data) + memory_offset,
byte_size);
return kTfLiteOk;
};
auto input1_handle = delegate->RegisterNnapiMemory(
input1_memory, memory_callback, &memory_context);
m.SetBufferHandle(m.input1(), input1_handle);
m.MarkInputTensorDataStale(m.input1());
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9, 0.4, 1.0, 1.3}));
for (int i = 0; i < 10; i++) {
input1_data[0] = -2.0 + i;
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
m.MarkInputTensorDataStale(m.input1());
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
}
for (int i = 0; i < 10; i++) {
input1_data[0] = -2.0 + i;
memcpy(input1_memory_data, input1_data, kInput1ByteSize);
auto input1_handle = delegate->RegisterNnapiMemory(
input1_memory, memory_callback, &memory_context);
m.SetBufferHandle(m.input1(), input1_handle);
m.MarkInputTensorDataStale(m.input1());
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({-1.9 + i, 0.4, 1.0, 1.3}));
}
}
class FloatMulOpModel : public SingleOpModelWithNNAPI {
public:
FloatMulOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_MUL, BuiltinOptions_MulOptions,
CreateMulOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(NNAPIDelegate, MulWithNoActivation) {
FloatMulOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.2, 0.04, 0.21, 0.4})));
}
class FloatPoolingOpModel : public SingleOpModelWithNNAPI {
public:
FloatPoolingOpModel(BuiltinOperator type, const TensorData& input,
int filter_width, int filter_height,
const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
type, BuiltinOptions_Pool2DOptions,
CreatePool2DOptions(builder_, Padding_VALID, 2, 2, filter_width,
filter_height, ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
};
TEST(NNAPIDelegate, AveragePoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_AVERAGE_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2.75, 5.75}));
}
TEST(NNAPIDelegate, MaxPoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_MAX_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({6, 10}));
}
TEST(NNAPIDelegate, L2PoolWithNoActivation) {
FloatPoolingOpModel m(BuiltinOperator_L2_POOL_2D,
{TensorType_FLOAT32, {1, 2, 4, 1}},
2, 2,
{TensorType_FLOAT32, {}});
m.SetInput({
0, 6, 2, 4,
3, 2, 10, 7,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3.5, 6.5}));
}
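// Conv2D model that derives the bias tensor type and scale from the input and
// filter tensors; covers both float and uint8 quantized variants.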
class ConvolutionOpModel : public SingleOpModelWithNNAPI {
public:
ConvolutionOpModel(
const TensorData& input, const TensorData& filter,
const TensorData& output, int stride_width = 2, int stride_height = 2,
enum Padding padding = Padding_VALID,
enum ActivationFunctionType activation = ActivationFunctionType_NONE,
int dilation_width_factor = 1, int dilation_height_factor = 1)
: input_type_(input.type), filter_type_(filter.type) {
input_ = AddInput(input);
filter_ = AddInput(filter);
int bias_size = GetShape(filter_)[0];
if (input.type == TensorType_FLOAT32) {
bias_ = AddInput({TensorType_FLOAT32, {bias_size}});
} else {
auto bias_scale = GetScale(input_) * GetScale(filter_);
TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_CONV_2D, BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(
builder_, padding, stride_width, stride_height, activation,
dilation_width_factor, dilation_height_factor)
.Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetFilter(std::initializer_list<float> data) {
SetData(filter_, filter_type_, data);
}
void SetBias(std::initializer_list<float> data) {
const auto bias_type =
(input_type_ == TensorType_FLOAT32) ? input_type_ : TensorType_INT32;
SetData(bias_, bias_type, data);
}
std::vector<float> GetOutput() {
if (input_type_ == TensorType_FLOAT32) {
return ExtractVector<float>(output_);
} else {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
}
std::vector<uint8_t> GetQuantizedOutput() {
if (input_type_ == TensorType_FLOAT32) {
return {};
} else {
return ExtractVector<uint8_t>(output_);
}
}
protected:
int input_;
int filter_;
int bias_;
int output_;
const TensorType input_type_;
const TensorType filter_type_;
};
TEST(ConvolutionOpTest, SimpleTestQuantized) {
ConvolutionOpModel m({TensorType_UINT8, {2, 2, 4, 1}, -63.5, 64},
{TensorType_UINT8, {3, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 2, 5,
18, 2, 5,
17, 4, 3,
37, 4, 3,
},
1e-5)));
EXPECT_THAT(m.GetQuantizedOutput(), ElementsAreArray({
145, 129, 132,
145, 129, 132,
144, 131, 130,
164, 131, 130,
}));
}
TEST(ConvolutionOpTest, SimpleTestQuantizedGrouped) {
ConvolutionOpModel m({TensorType_UINT8, {2, 2, 2, 2}, -63.5, 64},
{TensorType_UINT8, {2, 2, 2, 1}, -63.5, 64},
{TensorType_UINT8, {}, -127, 128});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
});
m.SetBias({1, 2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 2,
23, 6
},
1e-5)));
EXPECT_THAT(m.GetQuantizedOutput(), ElementsAreArray({
145, 129,
150, 133,
}));
}
TEST(ConvolutionOpTest, FloatInputQuantizedWeights) {
ConvolutionOpModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_UINT8, {3, 2, 2, 1}, 0, 64},
{TensorType_FLOAT32, {}});
m.SetInput({
1, 1, 1, 2,
2, 2, 2, 1,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
0, 1, 0, 1,
0, 0, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
18, 5, 7,
16, 5, 6,
17, 6, 6,
37, 10, 10,
},
0.2)));
}
TEST(ConvolutionOpTest, NoActivation) {
ConvolutionOpModel m({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}});
m.SetInput({
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
});
m.SetFilter({
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
});
m.SetBias({1, 2, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
18, 2, 5,
18, 2, 5,
17, 4, 3,
37, 4, 3,
}));
}
TEST(ConvolutionOpTest, SimpleTestQuantizedOutputMultiplierGreaterThan1) {
ConvolutionOpModel quant_op({TensorType_UINT8, {2, 2, 4, 1}, -128.5, 128},
{TensorType_UINT8, {3, 2, 2, 1}, -128.5, 128},
{TensorType_UINT8, {}, -127, 128});
ConvolutionOpModel float_op({TensorType_FLOAT32, {2, 2, 4, 1}},
{TensorType_FLOAT32, {3, 2, 2, 1}},
{TensorType_FLOAT32, {}});
std::initializer_list<float> input = {
1, 1, 1, 1,
2, 2, 2, 2,
1, 2, 3, 4,
1, 2, 3, 4,
};
std::initializer_list<float> filter = {
1, 2, 3, 4,
-1, 1, -1, 1,
-1, -1, 1, 1,
};
std::initializer_list<float> bias = {1, 2, 3};
quant_op.SetInput(input);
quant_op.SetFilter(filter);
quant_op.SetBias(bias);
ASSERT_EQ(quant_op.Invoke(), kTfLiteOk);
float_op.SetInput(input);
float_op.SetFilter(filter);
float_op.SetBias(bias);
ASSERT_EQ(float_op.Invoke(), kTfLiteOk);
EXPECT_THAT(quant_op.GetOutput(),
ElementsAreArray(ArrayFloatNear(float_op.GetOutput(), 1)));
}
TEST(ConvolutionOpTest, SimpleTestFloatWithDilation) {
const int depth = 1;
const int image_width = 9;
const int image_height = 9;
const int image_batch_count = 1;
const int filter_size = 3;
const int filter_count = 1;
const int stride_width = 1;
const int stride_height = 1;
const int dilation_width_factor = 3;
const int dilation_height_factor = 3;
const Padding padding = Padding_VALID;
ConvolutionOpModel m(
{TensorType_FLOAT32,
{image_batch_count, image_height, image_width, depth}},
{TensorType_FLOAT32, {depth, filter_size, filter_size, filter_count}},
{TensorType_FLOAT32, {}}, stride_width, stride_height, padding,
ActivationFunctionType_NONE, dilation_width_factor,
dilation_height_factor);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5, 6, 7, 8, 9});
m.SetBias({0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5, 5}));
}
class QuantizedConvolutionOpModel : public ConvolutionOpModel {
public:
using ConvolutionOpModel::ConvolutionOpModel;
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(input_, data);
}
void SetFilter(std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(filter_, data);
}
void SetBias(std::initializer_list<float> data) {
QuantizeAndPopulate<int32_t>(bias_, data);
}
std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }
std::vector<float> GetDequantizedOutput() {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
};
TEST(ConvolutionOpTest, SimpleTestQuantizedWithDilation) {
const int depth = 1;
const int image_width = 9;
const int image_height = 9;
const int image_batch_count = 1;
const int filter_size = 3;
const int filter_count = 1;
const int stride_width = 1;
const int stride_height = 1;
const int dilation_width_factor = 3;
const int dilation_height_factor = 3;
const Padding padding = Padding_VALID;
ConvolutionOpModel m({TensorType_UINT8,
{image_batch_count, image_height, image_width, depth},
0,
127.5},
{TensorType_UINT8,
{depth, filter_size, filter_size, filter_count},
0,
127.5},
{TensorType_UINT8, {}, 0, 255}, stride_width,
stride_height, padding, ActivationFunctionType_NONE,
dilation_width_factor, dilation_height_factor);
m.SetInput({0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0});
m.SetFilter({1, 2, 3, 4, 5, 6, 7, 8, 9});
m.SetBias({0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetQuantizedOutput(),
ElementsAreArray({5, 5, 5, 5, 5, 5, 5, 5, 5}));
}
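// CONV_2D model built through the NNAPI delegate with a constant per-channel
// quantized int8 filter; the int32 bias scales are derived per channel from
// the input scale and the filter's per-channel scales.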
class PerChannelQuantizedConvolutionWithConstantFilterOpModel
: public SingleOpModelWithNNAPI {
public:
PerChannelQuantizedConvolutionWithConstantFilterOpModel(
const TensorData& input, const TensorData& filter,
std::initializer_list<int8_t> filter_data,
std::initializer_list<int32_t> bias_data, const TensorData& output,
int stride_width = 2, int stride_height = 2,
enum Padding padding = Padding_VALID,
enum ActivationFunctionType activation = ActivationFunctionType_NONE,
int dilation_width_factor = 1, int dilation_height_factor = 1)
: input_type_(input.type), filter_type_(filter.type) {
CHECK(filter.per_channel_quantization);
input_ = AddInput(input);
filter_ = AddConstInput(filter, filter_data);
const int bias_size = GetShape(filter_)[0];
const int num_channels = filter.per_channel_quantization_scales.size();
const std::vector<int64_t> bias_offsets(num_channels, 0);
std::vector<float> bias_scales(num_channels);
for (int i = 0; i < num_channels; i++) {
bias_scales[i] = input.scale * filter.per_channel_quantization_scales[i];
}
const TensorData bias{TensorType_INT32,
{bias_size},
                          /*min=*/0,
                          /*max=*/0,
                          /*scale=*/0,
                          /*zero_point=*/0,
                          /*per_channel_quantization=*/true,
                          /*per_channel_quantization_scales=*/bias_scales,
                          /*per_channel_quantization_offsets=*/bias_offsets,
                          /*channel_index=*/0};
bias_ = AddConstInput(bias, bias_data);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_CONV_2D, BuiltinOptions_Conv2DOptions,
CreateConv2DOptions(
builder_, padding, stride_width, stride_height, activation,
dilation_width_factor, dilation_height_factor)
.Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<int8_t>(input_, data);
}
std::vector<int8_t> GetOutput() { return ExtractVector<int8_t>(output_); }
protected:
int input_;
int filter_;
int bias_;
int output_;
const TensorType input_type_;
const TensorType filter_type_;
};
TEST(ConvolutionOpTest, SimplePerChannelTest) {
PerChannelQuantizedConvolutionWithConstantFilterOpModel m(
{TensorType_INT8, {1, 2, 3, 2}, -63.5, 64, 0.5, -1},
{TensorType_INT8,
{2, 2, 2, 2},
       /*min=*/0,
       /*max=*/0,
       /*scale=*/0,
       /*zero_point=*/0,
       /*per_channel_quantization=*/true,
       /*per_channel_quantization_scales=*/{1, 2},
       /*per_channel_quantization_offsets=*/{0, 0},
       /*channel_index=*/0},
{
1, 2,
3, 4,
3, 4,
5, 6,
4, 4,
3, 3,
2, 2,
1, 1,
},
{6, -2}, {TensorType_INT8, {}, -63.5, 64, 0.5, -1},
      /*stride_width=*/1, /*stride_height=*/1);
m.SetInput({
3, 2,
1, -1,
-2, -3,
4, 3,
2, -2,
-3, -4,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
testing::Pointwise(QuantizedNear(), {61, 127, -115, -93}));
}
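// Single DEPTHWISE_CONV_2D op model. Supports float and quantized (uint8)
// tensors; for quantized inputs the bias scale is input_scale * filter_scale
// and GetOutput() dequantizes the result to float.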
class DepthwiseConvolutionOpModel : public SingleOpModelWithNNAPI {
public:
DepthwiseConvolutionOpModel(const TensorData& input, const TensorData& filter,
const TensorData& output)
: input_type_(input.type) {
input_ = AddInput(input);
filter_ = AddInput(filter);
int bias_size = GetShape(filter_)[3];
if (input.type == TensorType_FLOAT32) {
bias_ = AddInput({TensorType_FLOAT32, {bias_size}});
} else {
auto bias_scale = GetScale(input_) * GetScale(filter_);
TensorData bias{TensorType_INT32, {bias_size}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
int input_depth = GetShape(input_)[3];
int output_depth = GetShape(filter_)[3];
int depth_mul = output_depth / input_depth;
SetBuiltinOp(
BuiltinOperator_DEPTHWISE_CONV_2D,
BuiltinOptions_DepthwiseConv2DOptions,
CreateDepthwiseConv2DOptions(builder_, Padding_VALID, 1, 1, depth_mul,
ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(filter_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetFilter(std::initializer_list<float> data) {
SetData(filter_, input_type_, data);
}
void SetBias(std::initializer_list<float> data) {
const auto bias_type =
(input_type_ == TensorType_FLOAT32) ? input_type_ : TensorType_INT32;
SetData(bias_, bias_type, data);
}
std::vector<float> GetOutput() {
if (input_type_ == TensorType_FLOAT32) {
return ExtractVector<float>(output_);
} else {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
}
protected:
int input_;
int filter_;
int bias_;
int output_;
const TensorType input_type_;
};
TEST(NNAPIDelegate, DepthwiseConv2DWithNoActivation) {
DepthwiseConvolutionOpModel m({TensorType_FLOAT32, {1, 3, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4}},
{TensorType_FLOAT32, {}});
m.SetInput({
1, 2, 7, 8,
3, 4, 9, 10,
5, 6, 11, 12,
});
m.SetFilter({
1, 2, 3, 4,
-9, 10, -11, 12,
5, 6, 7, 8,
13, -14, 15, -16,
});
m.SetBias({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
71, -34, 99, -20,
91, -26, 127, -4,
}));
}
TEST(QuantizedDepthwiseConv2DTest, FilterMultiplierGreaterThan1) {
DepthwiseConvolutionOpModel quant_op(
{TensorType_UINT8, {1, 3, 2, 2}, -128.5, 128},
{TensorType_UINT8, {1, 2, 2, 4}, -128.5, 128},
{TensorType_UINT8, {}, -127, 128});
DepthwiseConvolutionOpModel float_op({TensorType_FLOAT32, {1, 3, 2, 2}},
{TensorType_FLOAT32, {1, 2, 2, 4}},
{TensorType_FLOAT32, {}});
std::initializer_list<float> input = {
1, 2, 7, 8,
3, 4, 9, 10,
5, 6, 11, 12,
};
std::initializer_list<float> filter = {
1, 2, 3, 4,
-9, 10, -11, 12,
5, 6, 7, 8,
13, -14, 15, -16,
};
std::initializer_list<float> bias = {1, 2, 3, 4};
quant_op.SetInput(input);
quant_op.SetFilter(filter);
quant_op.SetBias(bias);
ASSERT_EQ(quant_op.Invoke(), kTfLiteOk);
float_op.SetInput(input);
float_op.SetFilter(filter);
float_op.SetBias(bias);
ASSERT_EQ(float_op.Invoke(), kTfLiteOk);
EXPECT_THAT(quant_op.GetOutput(),
ElementsAreArray(ArrayFloatNear(float_op.GetOutput(), 1)));
}
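// Single FULLY_CONNECTED op model; the output is returned as float,
// dequantized when the tensors are quantized.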
class FullyConnectedOpModel : public SingleOpModelWithNNAPI {
public:
FullyConnectedOpModel(
const TensorData& input, const TensorData& weights,
const TensorData& output,
enum ActivationFunctionType activation = ActivationFunctionType_NONE)
: input_type_(input.type), weights_type_(weights.type) {
input_ = AddInput(input);
weights_ = AddInput(weights);
const int units = weights.shape[0];
if (input.type == TensorType_FLOAT32) {
bias_ = AddInput({TensorType_FLOAT32, {units}});
} else {
auto bias_scale = GetScale(input_) * GetScale(weights_);
TensorData bias{TensorType_INT32, {units}, 0, 0, bias_scale};
bias_ = AddInput(bias);
}
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_FULLY_CONNECTED,
BuiltinOptions_FullyConnectedOptions,
CreateFullyConnectedOptions(builder_, activation).Union());
BuildInterpreterWithNNAPI(
{GetShape(input_), GetShape(weights_), GetShape(bias_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetWeights(std::initializer_list<float> data) {
SetData(weights_, weights_type_, data);
}
void SetBias(std::initializer_list<float> data) {
const auto bias_type =
(input_type_ == TensorType_FLOAT32) ? input_type_ : TensorType_INT32;
SetData(bias_, bias_type, data);
}
std::vector<float> GetOutput() {
if (input_type_ == TensorType_FLOAT32) {
return ExtractVector<float>(output_);
} else {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
}
protected:
int input_;
int weights_;
int bias_;
int output_;
const TensorType input_type_;
const TensorType weights_type_;
};
TEST(FullyConnectedOpTest, SimpleTest) {
FullyConnectedOpModel m({TensorType_FLOAT32, {2, 10}},
{TensorType_FLOAT32, {3, 10}},
{TensorType_FLOAT32});
m.SetWeights({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
});
m.SetBias({1, 2, 3});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAre(24, 25, 26, 58, 59, 60));
}
TEST(FullyConnectedOpTest, FloatInputQuantizedWeights) {
FullyConnectedOpModel m({TensorType_FLOAT32, {2, 10}},
{TensorType_UINT8, {3, 10}, 0, 64},
{TensorType_FLOAT32});
m.SetWeights({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
});
m.SetBias({1, 2, 3});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({24, 25, 26, 58, 59, 60}, 1.3)));
}
TEST(FullyConnectedOpTest, QuantizedOutputMultiplierGreaterThan1) {
FullyConnectedOpModel m(
{TensorType_UINT8, {2, 10}, -127, 128},
{TensorType_UINT8, {3, 10}, -127, 128},
{TensorType_UINT8, {}, -63.5, 64});
m.SetWeights({
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
});
m.SetBias({1, 2, 3});
m.SetInput({
1, 2, 3, 4, 5, 6, 7, 8, -9, -10,
1, 2, 3, 4, 5, 6, 7, -8, 9, -10,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
24, 25, 26,
58, 59, 60,
})));
}
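// Single SOFTMAX op model with a configurable beta.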
class SoftmaxOpModel : public SingleOpModelWithNNAPI {
public:
SoftmaxOpModel(const TensorData& input, float beta) {
input_ = AddInput(input);
output_ = AddOutput(input);
SetBuiltinOp(BuiltinOperator_SOFTMAX, BuiltinOptions_SoftmaxOptions,
CreateSoftmaxOptions(builder_, beta).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
TEST(SoftmaxOpTest, SimpleTest) {
SoftmaxOpModel m({TensorType_FLOAT32, {2, 5}}, 1.0);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231},
1e-6)));
}
TEST(SoftmaxOpTest, Beta2) {
SoftmaxOpModel m({TensorType_FLOAT32, {1, 5}}, 2.0);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.000290076, 0.002143387, 0.015837606, 0.117024957, 0.864703974},
1e-6)));
}
TEST(SoftmaxOpTest, 3dInput) {
SoftmaxOpModel m({TensorType_FLOAT32, {2, 2, 5}}, 1.0);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
5.0, 1.0, 2.0, 3.0, 4.0,
-5.0, -1.0, -2.0, -3.0, -4.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231,
0.636408647, 0.011656231, 0.031684921, 0.086128544, 0.234121657,
0.011656231, 0.636408647, 0.234121657, 0.086128544, 0.031684921},
1e-6)));
}
TEST(SoftmaxOpTest, 4dInput) {
SoftmaxOpModel m({TensorType_FLOAT32, {2, 2, 1, 5}}, 1.0);
m.SetInput({
1.0, 2.0, 3.0, 4.0, 5.0,
-1.0, -2.0, -3.0, -4.0, -5.0,
5.0, 1.0, 2.0, 3.0, 4.0,
-5.0, -1.0, -2.0, -3.0, -4.0,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{0.011656231, 0.031684921, 0.086128544, 0.234121657, 0.636408647,
0.636408647, 0.234121657, 0.086128544, 0.031684921, 0.011656231,
0.636408647, 0.011656231, 0.031684921, 0.086128544, 0.234121657,
0.011656231, 0.636408647, 0.234121657, 0.086128544, 0.031684921},
1e-6)));
}
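// Single RESHAPE op model; the new shape is provided both as a constant input
// tensor and in the builtin options.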
class ReshapeOpModel : public SingleOpModelWithNNAPI {
public:
ReshapeOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> new_shape) {
input_ = AddInput(TensorType_FLOAT32);
new_shape_ = AddConstInput<int>(TensorType_INT32, new_shape,
{static_cast<int>(new_shape.size())});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(
BuiltinOperator_RESHAPE, BuiltinOptions_ReshapeOptions,
CreateReshapeOptions(builder_, builder_.CreateVector<int>(new_shape))
.Union());
BuildInterpreterWithNNAPI(
{input_shape, {static_cast<int>(new_shape.size())}});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int new_shape_;
int output_;
};
TEST(NNAPIDelegate, ReshapeSimpleTest) {
ReshapeOpModel m({1, 2, 4, 1}, {2, 2, 2});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 2, 2}));
}
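// Single SQUEEZE op model with an optional list of axes to squeeze.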
class SqueezeOpModel : public SingleOpModelWithNNAPI {
public:
SqueezeOpModel(const TensorData& input, const TensorData& output,
std::initializer_list<int> axis) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(
BuiltinOperator_SQUEEZE, BuiltinOptions_SqueezeOptions,
CreateSqueezeOptions(builder_, builder_.CreateVector<int>(axis))
.Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int output_;
};
TEST(NNAPIDelegate, DISABLED_SqueezeSimpleTest) {
std::initializer_list<float> data = {
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SqueezeOpModel m({TensorType_FLOAT32, {1, 24, 1}}, {TensorType_FLOAT32, {24}},
{});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
}
TEST(NNAPIDelegate, SqueezeWithAxisTest) {
std::initializer_list<float> data = {
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
SqueezeOpModel m({TensorType_FLOAT32, {1, 24, 1}}, {TensorType_FLOAT32, {24}},
{2});
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 24}));
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray({1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0}));
}
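// Single L2_NORMALIZATION op model.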
class L2NormOpModel : public SingleOpModelWithNNAPI {
public:
L2NormOpModel(const TensorData& input, const TensorData& output,
ActivationFunctionType activation_type) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_L2_NORMALIZATION, BuiltinOptions_L2NormOptions,
CreateL2NormOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int output_;
};
TEST(NNAPIDelegate, L2NormSimpleTest) {
std::initializer_list<float> data = {-1.1, 0.6, 0.7, 1.2, -0.7, 0.1};
L2NormOpModel m({TensorType_FLOAT32, {1, 1, 1, 6}},
{TensorType_FLOAT32, {1, 1, 1, 6}},
ActivationFunctionType_NONE);
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 1, 1, 6}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05}));
}
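// Single TRANSPOSE op model with a constant permutation tensor.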
class TransposeSimpleModel : public SingleOpModelWithNNAPI {
public:
TransposeSimpleModel(std::initializer_list<int> input_shape,
std::initializer_list<int> perm_shape,
std::initializer_list<int> perm) {
input_ = AddInput(TensorType_FLOAT32);
perm_ = AddConstInput(TensorType_INT32, perm, perm_shape);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_TRANSPOSE, BuiltinOptions_TransposeOptions,
CreateTransposeOptions(builder_).Union());
BuildInterpreterWithNNAPI({input_shape, perm_shape});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int perm_;
int output_;
};
TEST(NNAPIDelegate, TransposeSimpleTest) {
TransposeSimpleModel m({2, 3, 4}, {3}, {2, 0, 1});
m.SetInput({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 3}));
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 4, 8, 12, 16, 20, 1, 5, 9, 13, 17, 21,
2, 6, 10, 14, 18, 22, 3, 7, 11, 15, 19, 23}));
}
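// Models for single-input element-wise ops (ABS, EXP, LOG, RSQRT, SIN, COS,
// SQRT) on float tensors.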
class ElementwiseOpBaseModel : public SingleOpModelWithNNAPI {
public:
int input() const { return input_; }
int output() const { return output_; }
protected:
int input_;
int output_;
};
class ElementwiseOpFloatModel : public ElementwiseOpBaseModel {
public:
ElementwiseOpFloatModel(BuiltinOperator op,
std::initializer_list<int> input_shape) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(op, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({input_shape});
}
};
TEST(Elementwise, Abs) {
ElementwiseOpFloatModel m(BuiltinOperator_ABS, {1, 2, 4, 1});
m.PopulateTensor<float>(m.input(), {
0.f, -6.2f, 2.f, 4.f,
3.f, -2.f, 10.f, 1.f,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()), ElementsAreArray({
0.f, 6.2f, 2.f, 4.f,
3.f, 2.f, 10.f, 1.f,
}));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 2, 4, 1}));
}
TEST(Elementwise, Exp) {
ElementwiseOpFloatModel m(BuiltinOperator_EXP, {3, 1, 2});
m.PopulateTensor<float>(m.input(), {1.0, 0.0, -1.0, 1.0, 1.0, -1.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear(
{2.71828, 1, 0.367879, 2.71828, 2.71828, 0.367879})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({3, 1, 2}));
}
TEST(Elementwise, Log) {
ElementwiseOpFloatModel m(BuiltinOperator_LOG, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {1, 3.1415926, 1, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({0, 1.14473, 0, 0})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
TEST(Elementwise, Rsqrt) {
ElementwiseOpFloatModel m(BuiltinOperator_RSQRT, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {1, 2, 4, 9});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({1, 0.7071, 0.5, 0.33333})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
TEST(Elementwise, Sin) {
ElementwiseOpFloatModel m(BuiltinOperator_SIN, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {0, 3.1415926, -3.1415926, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({0, 0, 0, 0.84147})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
TEST(Elementwise, Cos) {
ElementwiseOpFloatModel m(BuiltinOperator_COS, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {0, 3.1415926, -3.1415926, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({1.0, -1, -1, 0.54030})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
TEST(Elementwise, Sqrt) {
ElementwiseOpFloatModel m(BuiltinOperator_SQRT, {1, 1, 4, 1});
m.PopulateTensor<float>(m.input(), {0, 1, 2, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.ExtractVector<float>(m.output()),
ElementsAreArray(ArrayFloatNear({0, 1, 1.41421, 2})));
EXPECT_THAT(m.GetTensorShape(m.output()), ElementsAreArray({1, 1, 4, 1}));
}
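// Single SUB op model on float tensors.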
class FloatSubOpModel : public SingleOpModelWithNNAPI {
public:
FloatSubOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_SUB, BuiltinOptions_SubOptions,
                 CreateSubOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(NNAPIDelegate, SubWithNoActivation) {
FloatSubOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.7, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.3, 0.5});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-2.1, 0.0, 0.4, 0.3})));
}
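// Single DIV op model on float tensors.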
class FloatDivOpModel : public SingleOpModelWithNNAPI {
public:
FloatDivOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output,
ActivationFunctionType activation_type) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
    SetBuiltinOp(BuiltinOperator_DIV, BuiltinOptions_DivOptions,
                 CreateDivOptions(builder_, activation_type).Union());
BuildInterpreterWithNNAPI({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(NNAPIDelegate, DivWithNoActivation) {
FloatDivOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}}, ActivationFunctionType_NONE);
m.PopulateTensor<float>(m.input1(), {-2.0, 0.2, 0.8, 0.8});
m.PopulateTensor<float>(m.input2(), {0.1, 0.2, 0.4, 0.2});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({-20, 1, 2, 4})));
}
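// CONCATENATION op models. The quantized variant further below accepts
// per-input quantization parameters and a separately quantized output.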
class BaseConcatenationOpModel : public SingleOpModelWithNNAPI {
public:
BaseConcatenationOpModel() {}
BaseConcatenationOpModel(const TensorData& input_template, int axis,
int num_inputs) {
std::vector<std::vector<int>> all_input_shapes;
for (int i = 0; i < num_inputs; ++i) {
all_input_shapes.push_back(input_template.shape);
AddInput(input_template);
}
output_ = AddOutput({input_template.type, {}, input_template.min,
input_template.max});
SetBuiltinOp(
BuiltinOperator_CONCATENATION, BuiltinOptions_ConcatenationOptions,
CreateConcatenationOptions(builder_, axis, ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI(all_input_shapes);
}
protected:
int output_;
};
class ConcatenationOpModel : public BaseConcatenationOpModel {
public:
using BaseConcatenationOpModel::BaseConcatenationOpModel;
void SetInput(int index, std::initializer_list<float> data) {
PopulateTensor(index, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
TEST(NNAPIDelegate, ConcatenationThreeDimensionalOneInput) {
  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2}}, /*axis=*/1,
                          /*num_inputs=*/1);
m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
EXPECT_THAT(m0.GetOutput(), ElementsAreArray({1, 3, 4, 7}));
}
TEST(NNAPIDelegate, ConcatenationFourInputs) {
  ConcatenationOpModel m0({TensorType_FLOAT32, {2, 1, 2}}, /*axis=*/2,
                          /*num_inputs=*/4);
m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
m0.SetInput(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput(2, {1.2f, 3.2f, 4.2f, 7.2f});
m0.SetInput(3, {1.3f, 3.3f, 4.3f, 7.3f});
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
EXPECT_THAT(m0.GetOutput(),
ElementsAreArray({
1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,
4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,
}));
}
class QuantizedConcatenationOpModel : public BaseConcatenationOpModel {
public:
using BaseConcatenationOpModel::BaseConcatenationOpModel;
QuantizedConcatenationOpModel(const std::vector<TensorData>& input_template,
int axis, int num_inputs,
const TensorData& output_template) {
std::vector<std::vector<int>> all_input_shapes;
CHECK_EQ(input_template.size(), num_inputs);
for (int i = 0; i < num_inputs; ++i) {
all_input_shapes.push_back(input_template[i].shape);
AddInput(input_template[i]);
}
output_ = AddOutput({output_template.type, {},
output_template.min, output_template.max});
SetBuiltinOp(
BuiltinOperator_CONCATENATION, BuiltinOptions_ConcatenationOptions,
CreateConcatenationOptions(builder_, axis, ActivationFunctionType_NONE)
.Union());
BuildInterpreterWithNNAPI(all_input_shapes);
}
void SetInput(int index, std::initializer_list<float> data) {
QuantizeAndPopulate<uint8_t>(index, data);
}
std::vector<uint8_t> GetOutput() { return ExtractVector<uint8_t>(output_); }
std::vector<float> GetDequantizedOutput() {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
};
TEST(NNAPIDelegate, ConcatenationFourInputsQuantized) {
  QuantizedConcatenationOpModel m0({TensorType_UINT8, {2, 1, 2}, -12.7, 12.8},
                                   /*axis=*/2,
                                   /*num_inputs=*/4);
m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
m0.SetInput(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput(2, {1.2f, 3.2f, 4.2f, 7.2f});
m0.SetInput(3, {1.3f, 3.3f, 4.3f, 7.3f});
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
EXPECT_THAT(m0.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({
1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,
4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,
})));
EXPECT_THAT(m0.GetOutput(), ElementsAreArray({
137, 157, 138, 158, 139, 159, 140, 160,
167, 197, 168, 198, 169, 199, 170, 200,
}));
}
TEST(NNAPIDelegate, ConcatenationFourInputsQuantizedMixedRange) {
QuantizedConcatenationOpModel m0({{TensorType_UINT8, {2, 1, 2}, -10.7, 10.8},
{TensorType_UINT8, {2, 1, 2}, 0, 12.8},
{TensorType_UINT8, {2, 1, 2}, -11, 11.8},
{TensorType_UINT8, {2, 1, 2}, 0, 7.4}},
                                   /*axis=*/2, /*num_inputs=*/4,
{TensorType_UINT8, {2, 1, 2}, -12.7, 12.8});
m0.SetInput(0, {1.0f, 3.0f, 4.0f, 7.0f});
m0.SetInput(1, {1.1f, 3.1f, 4.1f, 7.1f});
m0.SetInput(2, {1.2f, 3.2f, 4.2f, 7.2f});
m0.SetInput(3, {1.3f, 3.3f, 4.3f, 7.3f});
ASSERT_EQ(m0.Invoke(), kTfLiteOk);
EXPECT_THAT(m0.GetDequantizedOutput(),
ElementsAreArray(ArrayFloatNear({
1.0f, 3.0f, 1.1f, 3.1f, 1.2f, 3.2f, 1.3f, 3.3f,
4.0f, 7.0f, 4.1f, 7.1f, 4.2f, 7.2f, 4.3f, 7.3f,
})));
EXPECT_THAT(m0.GetOutput(), ElementsAreArray({
137, 157, 138, 158, 139, 159, 140, 160,
167, 197, 168, 198, 169, 199, 170, 200,
}));
}
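// Single DEQUANTIZE op model from uint8/int8 to float32.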
class DequantizeOpModel : public SingleOpModelWithNNAPI {
public:
DequantizeOpModel(TensorType inputType, std::initializer_list<int> shape,
float min, float max) {
input_ = AddInput({inputType, shape, min, max});
output_ = AddOutput({TensorType_FLOAT32, shape});
SetBuiltinOp(BuiltinOperator_DEQUANTIZE, BuiltinOptions_DequantizeOptions,
CreateDequantizeOptions(builder_).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
TEST(NNAPIDelegate, DequantizeFourDimensionalUint8) {
DequantizeOpModel m(TensorType_UINT8, {2, 5}, -63.5, 64);
m.SetInput<uint8_t>({0, 1, 2, 3, 4, 251, 252, 253, 254, 255});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-63.5, -63, -62.5, -62, -61.5, 62, 62.5, 63, 63.5, 64})));
}
TEST(NNAPIDelegate, DequantizeFourDimensionalInt8Symm) {
DequantizeOpModel m(TensorType_INT8, {2, 5}, -64, 63.5);
m.SetInput<int8_t>({-128, -127, -126, -125, -124, 123, 124, 125, 126, 127});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-64, -63.5, -63, -62.5, -62, 61.5, 62, 62.5, 63, 63.5})));
}
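// Single FLOOR op model on float tensors.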
class FloorOpModel : public SingleOpModelWithNNAPI {
public:
FloorOpModel(std::initializer_list<int> input_shape, TensorType input_type) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_FLOOR, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({
input_shape,
});
}
int input() { return input_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int output_;
};
TEST(NNAPIDelegate, FloorSingleDim) {
FloorOpModel model({2}, TensorType_FLOAT32);
model.PopulateTensor<float>(model.input(), {8.5, 0.0});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(), ElementsAreArray({8, 0}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2}));
}
TEST(NNAPIDelegate, FloorMultiDims) {
FloorOpModel model({2, 1, 1, 5}, TensorType_FLOAT32);
model.PopulateTensor<float>(model.input(), {
0.0001,
8.0001,
0.9999,
9.9999,
0.5,
-0.0001,
-8.0001,
-0.9999,
-9.9999,
-0.5,
});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput(),
ElementsAreArray({0, 8, 0, 9, 0, -1, -9, -1, -10, -1}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({2, 1, 1, 5}));
}
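// Single LOCAL_RESPONSE_NORMALIZATION op model with configurable radius, bias,
// alpha and beta.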
class LocalResponseNormOpModel : public SingleOpModelWithNNAPI {
public:
LocalResponseNormOpModel(std::initializer_list<int> input_shape, int radius,
float bias, float alpha, float beta) {
input_ = AddInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION,
BuiltinOptions_LocalResponseNormalizationOptions,
CreateLocalResponseNormalizationOptions(builder_, radius, bias,
alpha, beta)
.Union());
BuildInterpreterWithNNAPI({input_shape});
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
private:
int input_;
int output_;
};
TEST(NNAPIDelegate, LocalResponseNormSameAsL2Norm) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/0.0,
                             /*alpha=*/1.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.55, 0.3, 0.35, 0.6, -0.35, 0.05})));
}
TEST(NNAPIDelegate, LocalResponseNormWithAlpha) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/0.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{-0.275, 0.15, 0.175, 0.3, -0.175, 0.025})));
}
TEST(NNAPIDelegate, LocalResponseNormWithBias) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/20, /*bias=*/9.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear({-0.22, 0.12, 0.14, 0.24, -0.14, 0.02})));
}
TEST(NNAPIDelegate, LocalResponseNormSmallRadius) {
  LocalResponseNormOpModel m({1, 1, 1, 6}, /*radius=*/2, /*bias=*/9.0,
                             /*alpha=*/4.0, /*beta=*/0.5);
m.SetInput({-1.1, 0.6, 0.7, 1.2, -0.7, 0.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(
m.GetOutput(),
ElementsAreArray(ArrayFloatNear(
{-0.264926, 0.125109, 0.140112, 0.267261, -0.161788, 0.0244266})));
}
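// Single LSH_PROJECTION op model. The weight input is optional, and for the
// SPARSE projection type only the first dimension of the hash shape
// contributes to the output size.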
class LSHProjectionOpModel : public SingleOpModelWithNNAPI {
public:
LSHProjectionOpModel(LSHProjectionType type,
std::initializer_list<int> hash_shape,
std::initializer_list<int> input_shape,
std::initializer_list<int> weight_shape) {
hash_ = AddInput(TensorType_FLOAT32);
input_ = AddInput(TensorType_INT32);
if (weight_shape.size() > 0) {
weight_ = AddInput(TensorType_FLOAT32);
}
output_ = AddOutput(TensorType_INT32);
SetBuiltinOp(BuiltinOperator_LSH_PROJECTION,
BuiltinOptions_LSHProjectionOptions,
CreateLSHProjectionOptions(builder_, type).Union());
if (weight_shape.size() > 0) {
BuildInterpreterWithNNAPI({hash_shape, input_shape, weight_shape});
} else {
BuildInterpreterWithNNAPI({hash_shape, input_shape});
}
output_size_ = 1;
for (int i : hash_shape) {
output_size_ *= i;
if (type == LSHProjectionType_SPARSE) {
break;
}
}
}
void SetInput(std::initializer_list<int> data) {
PopulateTensor(input_, data);
}
void SetHash(std::initializer_list<float> data) {
PopulateTensor(hash_, data);
}
void SetWeight(std::initializer_list<float> f) { PopulateTensor(weight_, f); }
std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
private:
int input_;
int hash_;
int weight_;
int output_;
int output_size_;
};
TEST(NNAPIDelegate, LSHProjectionDense1DInputs) {
LSHProjectionOpModel m(LSHProjectionType_DENSE, {3, 2}, {5}, {5});
m.SetInput({12345, 54321, 67890, 9876, -12345678});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
m.SetWeight({1.0, 1.0, 1.0, 1.0, 1.0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 1, 1, 1, 0));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0, 0, 0, 1, 0, 0));
#endif
}
TEST(NNAPIDelegate, LSHProjectionSparse1DInputs) {
LSHProjectionOpModel m(LSHProjectionType_SPARSE, {3, 2}, {5}, {});
m.SetInput({12345, 54321, 67890, 9876, -12345678});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 1, 8 + 0));
#endif
}
TEST(NNAPIDelegate, LSHProjectionSparse3DInputs) {
LSHProjectionOpModel m(LSHProjectionType_SPARSE, {3, 2}, {5, 2, 2}, {5});
m.SetInput({1234, 2345, 3456, 1234, 4567, 5678, 6789, 4567, 7891, 8912,
9123, 7890, -987, -876, -765, -987, -543, -432, -321, -543});
m.SetHash({0.123, 0.456, -0.321, 1.234, 5.678, -4.321});
m.SetWeight({0.12, 0.34, 0.56, 0.67, 0.78});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 0, 4 + 3, 8 + 2));
#else
EXPECT_THAT(m.GetOutput(), ElementsAre(0 + 2, 4 + 1, 8 + 1));
#endif
}
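// Models for single-input activation ops (RELU, RELU_N1_TO_1, RELU6,
// LOGISTIC) in float and quantized variants; quantized outputs use a fixed
// 1/256 scale.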
class BaseActivationsOpModel : public SingleOpModelWithNNAPI {
public:
BaseActivationsOpModel(BuiltinOperator type, const TensorData& input) {
input_ = AddInput(input);
if (input.type == TensorType_UINT8) {
output_ = AddOutput({input.type, {}, 0, 0, 1. / 256});
} else {
output_ = AddOutput({input.type, {}});
}
SetBuiltinOp(type, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({GetShape(input_)});
}
BaseActivationsOpModel(BuiltinOperator type, const TensorData& input,
const TensorData& output) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetBuiltinOp(type, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({GetShape(input_)});
}
protected:
int input_;
int output_;
};
class FloatActivationsOpModel : public BaseActivationsOpModel {
public:
using BaseActivationsOpModel::BaseActivationsOpModel;
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
};
const float kQuantizedTolerance = 2 * (1. / 256);
class QuantizedActivationsOpModel : public BaseActivationsOpModel {
public:
using BaseActivationsOpModel::BaseActivationsOpModel;
template <typename T>
void SetInput(std::initializer_list<float> data) {
QuantizeAndPopulate<T>(input_, data);
}
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
template <typename T>
std::vector<float> GetDequantizedOutput() {
return Dequantize<T>(ExtractVector<T>(output_), GetScale(output_),
GetZeroPoint(output_));
}
};
TEST(NNAPIDelegate, Relu) {
FloatActivationsOpModel m(BuiltinOperator_RELU,
{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0, -6, 2, 4,
3, -2, 10, 1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 2, 4,
3, 0, 10, 1,
}));
}
TEST(NNAPIDelegate, Relu1) {
FloatActivationsOpModel m(BuiltinOperator_RELU_N1_TO_1,
{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0.0, -0.6, 0.2, -0.4,
0.3, -2.0, 1.1, -0.1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0.0, -0.6, 0.2, -0.4,
0.3, -1.0, 1.0, -0.1,
}));
}
TEST(NNAPIDelegate, Relu6) {
FloatActivationsOpModel m(BuiltinOperator_RELU6,
{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0, -6, 2, 4,
3, -2, 10, 1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 2, 4,
3, 0, 6, 1,
}));
}
TEST(NNAPIDelegate, LogisticFloat) {
FloatActivationsOpModel m(BuiltinOperator_LOGISTIC,
{TensorType_FLOAT32, {1, 2, 4, 1}});
m.SetInput({
0, -6, 2, 4,
3, -2, 10, 1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
0.5, 0.002473, 0.880797, 0.982014,
0.952574, 0.119203, 0.999955, 0.731059,
})));
}
TEST(NNAPIDelegate, LogisticQuantized) {
QuantizedActivationsOpModel m(
BuiltinOperator_LOGISTIC,
{TensorType_UINT8, {1, 2, 4, 1}, -10, 10});
m.SetInput<uint8_t>({
0, -6, 2, 4,
3, -2, 10, 1,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetDequantizedOutput<uint8_t>(),
ElementsAreArray(ArrayFloatNear(
{
0.5, 0.002473, 0.880797, 0.982014,
0.952574, 0.119203, 0.999955, 0.731059,
},
kQuantizedTolerance)));
EXPECT_THAT(m.GetOutput<uint8_t>(),
testing::Pointwise(QuantizedNear(),
{128, 1, 227, 251, 244, 32, 255, 188}));
}
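// Single RESIZE_BILINEAR op model; the output size is either a constant input
// tensor or a second runtime input.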
class ResizeBilinearOpModel : public SingleOpModelWithNNAPI {
public:
ResizeBilinearOpModel(const TensorData& input,
std::initializer_list<int> size_data) {
bool const_size = size_data.size() != 0;
input_ = AddInput(input);
if (const_size) {
size_ = AddConstInput(TensorType_INT32, size_data, {2});
} else {
size_ = AddInput({TensorType_INT32, {2}});
}
output_ = AddOutput(input.type);
SetBuiltinOp(BuiltinOperator_RESIZE_BILINEAR,
BuiltinOptions_ResizeBilinearOptions,
CreateResizeBilinearOptions(builder_).Union());
if (const_size) {
BuildInterpreterWithNNAPI({GetShape(input_)});
} else {
BuildInterpreterWithNNAPI({GetShape(input_), GetShape(size_)});
}
}
template <typename T>
void SetInput(std::initializer_list<T> data) {
PopulateTensor(input_, data);
}
void SetSize(std::initializer_list<int> data) { PopulateTensor(size_, data); }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
private:
int input_;
int size_;
int output_;
};
TEST(ResizeBilinear, Horizontal) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 1, 2, 1}}, {});
m.SetInput<float>({3, 6});
m.SetSize({1, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({3, 5, 6})));
}
TEST(ResizeBilinear, HorizontalConstant) {
ResizeBilinearOpModel const_m({TensorType_FLOAT32, {1, 1, 2, 1}}, {1, 3});
const_m.SetInput<float>({3, 6});
ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
EXPECT_THAT(const_m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({3, 5, 6})));
}
TEST(ResizeBilinear, Vertical) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 2, 1, 1}}, {});
m.SetInput<float>({3, 9});
m.SetSize({3, 1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({3, 7, 9})));
}
TEST(ResizeBilinear, VerticalConstant) {
ResizeBilinearOpModel const_m({TensorType_FLOAT32, {1, 2, 1, 1}}, {3, 1});
const_m.SetInput<float>({3, 9});
ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
EXPECT_THAT(const_m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({3, 7, 9})));
}
TEST(ResizeBilinear, TwoDimensional) {
ResizeBilinearOpModel m({TensorType_FLOAT32, {1, 2, 2, 1}}, {});
m.SetInput<float>({
3, 6,
9, 12
});
m.SetSize({3, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({
3, 5, 6,
7, 9, 10,
9, 11, 12,
})));
}
TEST(ResizeBilinear, TwoDimensionalConstant) {
ResizeBilinearOpModel const_m({TensorType_FLOAT32, {1, 2, 2, 1}}, {3, 3});
const_m.SetInput<float>({
3, 6,
9, 12
});
ASSERT_EQ(const_m.Invoke(), kTfLiteOk);
EXPECT_THAT(const_m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({
3, 5, 6,
7, 9, 10,
9, 11, 12,
})));
}
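// Single PAD op model; PadOpConstModel below fixes the paddings as a constant
// input at construction time.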
template <typename T>
class PadOpModel : public SingleOpModelWithNNAPI {
public:
void SetInput(std::initializer_list<T> data) {
PopulateTensor<T>(input_, data);
}
template <typename QuantizedInputOutput>
void SetQuantizedInput(std::initializer_list<float> data) {
QuantizeAndPopulate<QuantizedInputOutput>(input_, data);
}
template <typename QuantizedInputOutput>
void SetQuantizedPadValue(float data) {
QuantizeAndPopulate<QuantizedInputOutput>(constant_values_, {data});
}
void SetPaddings(std::initializer_list<int> paddings) {
PopulateTensor<int>(paddings_, paddings);
}
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
template <typename QuantizedInputOutput>
std::vector<float> GetDequantizedOutput() {
return Dequantize<QuantizedInputOutput>(
ExtractVector<QuantizedInputOutput>(output_), GetScale(output_),
GetZeroPoint(output_));
}
protected:
int input_;
int output_;
int paddings_;
int constant_values_;
};
class PadOpConstModel : public PadOpModel<float> {
public:
PadOpConstModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
std::initializer_list<int> paddings,
const TensorData& output) {
input_ = AddInput(input);
paddings_ = AddConstInput(TensorType_INT32, paddings, paddings_shape);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_PAD, BuiltinOptions_PadOptions,
CreatePadOptions(builder_).Union());
BuildInterpreterWithNNAPI({input.shape});
}
};
TEST(NNAPIDelegate, PadAdvancedConstTest) {
PadOpConstModel m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2},
{0, 0, 0, 2, 1, 3, 0, 0}, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
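// Single SPACE_TO_BATCH_ND op model; the constant variant below fixes the
// block shape and paddings at construction time.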
class SpaceToBatchNDOpModel : public SingleOpModelWithNNAPI {
public:
void SetInput(std::initializer_list<float> data) {
PopulateTensor<float>(input_, data);
}
void SetBlockShape(std::initializer_list<int> data) {
PopulateTensor<int>(block_shape_, data);
}
void SetPaddings(std::initializer_list<int> data) {
PopulateTensor<int>(paddings_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input_;
int block_shape_;
int paddings_;
int output_;
};
class SpaceToBatchNDOpConstModel : public SpaceToBatchNDOpModel {
public:
SpaceToBatchNDOpConstModel(std::initializer_list<int> input_shape,
std::initializer_list<int> block_shape,
std::initializer_list<int> paddings) {
input_ = AddInput(TensorType_FLOAT32);
block_shape_ = AddConstInput(TensorType_INT32, block_shape, {2});
paddings_ = AddConstInput(TensorType_INT32, paddings, {2, 2});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_SPACE_TO_BATCH_ND,
BuiltinOptions_SpaceToBatchNDOptions,
CreateSpaceToBatchNDOptions(builder_).Union());
BuildInterpreterWithNNAPI({input_shape});
}
};
TEST(NNAPIDelegate, SpaceToBatchNDSimpleConstTest) {
SpaceToBatchNDOpConstModel m({1, 4, 4, 1}, {2, 2}, {0, 0, 0, 0});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({4, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(NNAPIDelegate, SpaceToBatchNDMultipleInputBatchesConstTest) {
SpaceToBatchNDOpConstModel m({2, 2, 4, 1}, {2, 2}, {0, 0, 0, 0});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({8, 1, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 9, 11, 2, 4, 10, 12, 5, 7,
13, 15, 6, 8, 14, 16}));
}
TEST(NNAPIDelegate, SpaceToBatchNDSimplePaddingConstTest) {
SpaceToBatchNDOpConstModel m({1, 5, 2, 1}, {3, 2}, {1, 0, 2, 0});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 2, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 5, 0, 0, 0, 6, 0, 1, 0, 7,
0, 2, 0, 8, 0, 3, 0, 9, 0, 4, 0, 10,
}));
}
TEST(NNAPIDelegate, SpaceToBatchNDComplexPaddingConstTest) {
SpaceToBatchNDOpConstModel m({1, 4, 2, 1}, {3, 2}, {1, 1, 2, 4});
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({6, 2, 4, 1}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0,
0, 1, 0, 0, 0, 7, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0,
0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0,
}));
}
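// Single STRIDED_SLICE op model with constant begin/end/strides tensors and
// configurable begin/end/ellipsis/new-axis/shrink-axis masks.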
template <typename input_type = float,
TensorType tensor_input_type = TensorType_FLOAT32>
class StridedSliceOpModel : public SingleOpModelWithNNAPI {
public:
StridedSliceOpModel(std::initializer_list<int> input_shape,
std::initializer_list<int> begin_shape,
std::initializer_list<int> begin_data,
std::initializer_list<int> end_shape,
std::initializer_list<int> end_data,
std::initializer_list<int> strides_shape,
std::initializer_list<int> strides_data, int begin_mask,
int end_mask, int ellipsis_mask, int new_axis_mask,
int shrink_axis_mask) {
input_ = AddInput(tensor_input_type);
begin_ = AddConstInput(TensorType_INT32, begin_data, begin_shape);
end_ = AddConstInput(TensorType_INT32, end_data, end_shape);
strides_ = AddConstInput(TensorType_INT32, strides_data, strides_shape);
output_ = AddOutput(tensor_input_type);
SetBuiltinOp(
BuiltinOperator_STRIDED_SLICE, BuiltinOptions_StridedSliceOptions,
CreateStridedSliceOptions(builder_, begin_mask, end_mask, ellipsis_mask,
new_axis_mask, shrink_axis_mask)
.Union());
BuildInterpreterWithNNAPI(
{input_shape, begin_shape, end_shape, strides_shape});
}
void SetInput(std::initializer_list<input_type> data) {
PopulateTensor<input_type>(input_, data);
}
std::vector<input_type> GetOutput() {
return ExtractVector<input_type>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int begin_;
int end_;
int strides_;
int output_;
};
TEST(StridedSliceOpTest, In1D) {
StridedSliceOpModel<> m({4}, {1}, {1}, {1}, {3}, {1}, {1}, 0, 0, 0, 0, 0);
m.SetInput({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({2, 3}));
}
TEST(StridedSliceOpTest, In1D_BeginMask) {
StridedSliceOpModel<> m({4}, {1}, {1}, {1}, {3}, {1}, {1}, 1, 0, 0, 0, 0);
m.SetInput({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({3}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 2, 3}));
}
TEST(StridedSliceOpTest, In2D_Stride2) {
StridedSliceOpModel<> m({2, 3}, {2}, {0, 0}, {2}, {2, 3}, {2}, {2, 2}, 0, 0,
0, 0, 0);
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 2}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3}));
}
TEST(StridedSliceOpTest, In2D_EndMask) {
StridedSliceOpModel<> m({2, 3}, {2}, {1, 0}, {2}, {2, 2}, {2}, {1, 1}, 0, 2,
0, 0, 0);
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({4, 5, 6}));
}
TEST(StridedSliceOpTest, In3D_IdentityShrinkAxis4) {
StridedSliceOpModel<> m({2, 3, 2}, {3}, {0, 0, 0}, {3}, {2, 3, 1}, {3},
{1, 1, 1}, 0, 0, 0, 0, 4);
m.SetInput({1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 3}));
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1, 3, 5, 7, 9, 11}));
}
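// Input and golden output data for the RNN black-box test below.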
static float rnn_input[] = {
0.23689353, 0.285385, 0.037029743, -0.19858193, -0.27569133,
0.43773448, 0.60379338, 0.35562468, -0.69424844, -0.93421471,
-0.87287879, 0.37144363, -0.62476718, 0.23791671, 0.40060222,
0.1356622, -0.99774903, -0.98858172, -0.38952237, -0.47685933,
0.31073618, 0.71511042, -0.63767755, -0.31729108, 0.33468103,
0.75801885, 0.30660987, -0.37354088, 0.77002847, -0.62747043,
-0.68572164, 0.0069220066, 0.65791464, 0.35130811, 0.80834007,
-0.61777675, -0.21095741, 0.41213346, 0.73784804, 0.094794154,
0.47791874, 0.86496925, -0.53376222, 0.85315156, 0.10288584,
0.86684, -0.011186242, 0.10513687, 0.87825835, 0.59929144,
0.62827742, 0.18899453, 0.31440187, 0.99059987, 0.87170351,
-0.35091716, 0.74861872, 0.17831337, 0.2755419, 0.51864719,
0.55084288, 0.58982027, -0.47443086, 0.20875752, -0.058871567,
-0.66609079, 0.59098077, 0.73017097, 0.74604273, 0.32882881,
-0.17503482, 0.22396147, 0.19379807, 0.29120302, 0.077113032,
-0.70331609, 0.15804303, -0.93407321, 0.40182066, 0.036301374,
0.66521823, 0.0300982, -0.7747041, -0.02038002, 0.020698071,
-0.90300065, 0.62870288, -0.23068321, 0.27531278, -0.095755219,
-0.712036, -0.17384434, -0.50593495, -0.18646687, -0.96508682,
0.43519354, 0.14744234, 0.62589407, 0.1653645, -0.10651493,
-0.045277178, 0.99032974, -0.88255352, -0.85147917, 0.28153265,
0.19455957, -0.55479527, -0.56042433, 0.26048636, 0.84702539,
0.47587705, -0.074295521, -0.12287641, 0.70117295, 0.90532446,
0.89782166, 0.79817224, 0.53402734, -0.33286154, 0.073485017,
-0.56172788, -0.044897556, 0.89964068, -0.067662835, 0.76863563,
0.93455386, -0.6324693, -0.083922029};
static float rnn_golden_output[] = {
0.496726, 0, 0.965996, 0, 0.0584254, 0,
0, 0.12315, 0, 0, 0.612266, 0.456601,
0, 0.52286, 1.16099, 0.0291232,
0, 0, 0.524901, 0, 0, 0,
0, 1.02116, 0, 1.35762, 0, 0.356909,
0.436415, 0.0355727, 0, 0,
0, 0, 0, 0.262335, 0, 0,
0, 1.33992, 0, 2.9739, 0, 0,
1.31914, 2.66147, 0, 0,
0.942568, 0, 0, 0, 0.025507, 0,
0, 0, 0.321429, 0.569141, 1.25274, 1.57719,
0.8158, 1.21805, 0.586239, 0.25427,
1.04436, 0, 0.630725, 0, 0.133801, 0.210693,
0.363026, 0, 0.533426, 0, 1.25926, 0.722707,
0, 1.22031, 1.30117, 0.495867,
0.222187, 0, 0.72725, 0, 0.767003, 0,
0, 0.147835, 0, 0, 0, 0.608758,
0.469394, 0.00720298, 0.927537, 0,
0.856974, 0.424257, 0, 0, 0.937329, 0,
0, 0, 0.476425, 0, 0.566017, 0.418462,
0.141911, 0.996214, 1.13063, 0,
0.967899, 0, 0, 0, 0.0831304, 0,
0, 1.00378, 0, 0, 0, 1.44818,
1.01768, 0.943891, 0.502745, 0,
0.940135, 0, 0, 0, 0, 0,
0, 2.13243, 0, 0.71208, 0.123918, 1.53907,
1.30225, 1.59644, 0.70222, 0,
0.804329, 0, 0.430576, 0, 0.505872, 0.509603,
0.343448, 0, 0.107756, 0.614544, 1.44549, 1.52311,
0.0454298, 0.300267, 0.562784, 0.395095,
0.228154, 0, 0.675323, 0, 1.70536, 0.766217,
0, 0, 0, 0.735363, 0.0759267, 1.91017,
0.941888, 0, 0, 0,
0, 0, 1.5909, 0, 0, 0,
0, 0.5755, 0, 0.184687, 0, 1.56296,
0.625285, 0, 0, 0,
0, 0, 0.0857888, 0, 0, 0,
0, 0.488383, 0.252786, 0, 0, 0,
1.02817, 1.85665, 0, 0,
0.00981836, 0, 1.06371, 0, 0, 0,
0, 0, 0, 0.290445, 0.316406, 0,
0.304161, 1.25079, 0.0707152, 0,
0.986264, 0.309201, 0, 0, 0, 0,
0, 1.64896, 0.346248, 0, 0.918175, 0.78884,
0.524981, 1.92076, 2.07013, 0.333244,
0.415153, 0.210318, 0, 0, 0, 0,
0, 2.02616, 0, 0.728256, 0.84183, 0.0907453,
0.628881, 3.58099, 1.49974, 0};
static std::initializer_list<float> rnn_weights = {
0.461459, 0.153381, 0.529743, -0.00371218, 0.676267, -0.211346,
0.317493, 0.969689, -0.343251, 0.186423, 0.398151, 0.152399,
0.448504, 0.317662, 0.523556, -0.323514, 0.480877, 0.333113,
-0.757714, -0.674487, -0.643585, 0.217766, -0.0251462, 0.79512,
-0.595574, -0.422444, 0.371572, -0.452178, -0.556069, -0.482188,
-0.685456, -0.727851, 0.841829, 0.551535, -0.232336, 0.729158,
-0.00294906, -0.69754, 0.766073, -0.178424, 0.369513, -0.423241,
0.548547, -0.0152023, -0.757482, -0.85491, 0.251331, -0.989183,
0.306261, -0.340716, 0.886103, -0.0726757, -0.723523, -0.784303,
0.0354295, 0.566564, -0.485469, -0.620498, 0.832546, 0.697884,
-0.279115, 0.294415, -0.584313, 0.548772, 0.0648819, 0.968726,
0.723834, -0.0080452, -0.350386, -0.272803, 0.115121, -0.412644,
-0.824713, -0.992843, -0.592904, -0.417893, 0.863791, -0.423461,
-0.147601, -0.770664, -0.479006, 0.654782, 0.587314, -0.639158,
0.816969, -0.337228, 0.659878, 0.73107, 0.754768, -0.337042,
0.0960841, 0.368357, 0.244191, -0.817703, -0.211223, 0.442012,
0.37225, -0.623598, -0.405423, 0.455101, 0.673656, -0.145345,
-0.511346, -0.901675, -0.81252, -0.127006, 0.809865, -0.721884,
0.636255, 0.868989, -0.347973, -0.10179, -0.777449, 0.917274,
0.819286, 0.206218, -0.00785118, 0.167141, 0.45872, 0.972934,
-0.276798, 0.837861, 0.747958, -0.0151566, -0.330057, -0.469077,
0.277308, 0.415818};
static std::initializer_list<float> rnn_recurrent_weights = {
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0.1};
static std::initializer_list<float> rnn_bias = {
0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068, -0.23566568,
-0.389184, 0.47481549, -0.4791103, 0.29931796, 0.10463274, 0.83918178,
0.37197268, 0.61957061, 0.3956964, -0.37609905};
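// Single (basic) RNN op model with a RELU activation and a variable hidden
// state tensor.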
class RNNOpModel : public SingleOpModelWithNNAPI {
public:
RNNOpModel(int batches, int units, int size,
const TensorType weights = TensorType_FLOAT32,
const TensorType recurrent_weights = TensorType_FLOAT32)
: batches_(batches), units_(units), input_size_(size) {
input_ = AddInput(TensorType_FLOAT32);
weights_ = AddInput(weights);
recurrent_weights_ = AddInput(recurrent_weights);
bias_ = AddInput(TensorType_FLOAT32);
hidden_state_ = AddVariableInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(
BuiltinOperator_RNN, BuiltinOptions_RNNOptions,
CreateRNNOptions(builder_, ActivationFunctionType_RELU).Union());
    BuildInterpreterWithNNAPI({
        {batches_, input_size_},  // input tensor
        {units_, input_size_},    // weights tensor
        {units_, units_},         // recurrent weights tensor
        {units_},                 // bias tensor
        {batches_, units_}        // hidden state tensor
    });
}
void SetBias(std::initializer_list<float> f) { PopulateTensor(bias_, f); }
void SetWeights(std::initializer_list<float> f) {
PopulateTensor(weights_, f);
}
void SetRecurrentWeights(std::initializer_list<float> f) {
PopulateTensor(recurrent_weights_, f);
}
void SetInput(std::initializer_list<float> data) {
PopulateTensor(input_, data);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_;
int recurrent_weights_;
int bias_;
int hidden_state_;
int output_;
int batches_;
int units_;
int input_size_;
};
TEST(NNAPIDelegate, RnnBlackBoxTest) {
RNNOpModel rnn(2, 16, 8);
rnn.SetWeights(rnn_weights);
rnn.SetBias(rnn_bias);
rnn.SetRecurrentWeights(rnn_recurrent_weights);
const int input_sequence_size = sizeof(rnn_input) / sizeof(float) /
(rnn.input_size() * rnn.num_batches());
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start = rnn_input + i * rnn.input_size();
float* batch_end = batch_start + rnn.input_size();
rnn.SetInput(0, batch_start, batch_end);
rnn.SetInput(rnn.input_size(), batch_start, batch_end);
ASSERT_EQ(rnn.Invoke(), kTfLiteOk);
float* golden_start = rnn_golden_output + i * rnn.num_units();
float* golden_end = golden_start + rnn.num_units();
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(rnn.GetOutput(), ElementsAreArray(ArrayFloatNear(expected)));
}
}
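// Input and golden output data (rank 1 and rank 2) for the SVDF model below.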
static float svdf_input[] = {
0.12609188, -0.46347019, -0.89598465,
0.35867718, 0.36897406, 0.73463392,
0.14278367, -1.64410412, -0.75222826,
-0.57290924, 0.12729003, 0.7567004,
0.49837467, 0.19278903, 0.26584083,
0.17660543, 0.52949083, -0.77931279,
-0.11186574, 0.13164264, -0.05349274,
-0.72674477, -0.5683046, 0.55900657,
-0.68892461, 0.37783599, 0.18263303,
-0.63690937, 0.44483393, -0.71817774,
-0.81299269, -0.86831826, 1.43940818,
-0.95760226, 1.82078898, 0.71135032,
-1.45006323, -0.82251364, -1.69082689,
-1.65087092, -1.89238167, 1.54172635,
0.03966608, -0.24936394, -0.77526885,
2.06740379, -1.51439476, 1.43768692,
0.11771342, -0.23761693, -0.65898693,
0.31088525, -1.55601168, -0.87661445,
-0.89477462, 1.67204106, -0.53235275,
-0.6230064, 0.29819036, 1.06939757,
};
static float svdf_golden_output_rank_1[] = {
0.014899, -0.0517661, -0.143725, -0.00271883,
-0.03004015, 0.09565311, 0.1587342, 0.00784263,
0.068281, -0.162217, -0.152268, 0.00323521,
0.01582633, 0.03858774, -0.03001583, -0.02671271,
-0.0317821, -0.0333089, 0.0609602, 0.0333759,
-0.01432795, 0.05524484, 0.1101355, -0.02382665,
-0.00623099, -0.077701, -0.391193, -0.0136691,
-0.02333033, 0.02293761, 0.12338032, 0.04326871,
0.201551, -0.164607, -0.179462, -0.0592739,
0.01064911, -0.17503069, 0.07821996, -0.00224009,
0.0886511, -0.0875401, -0.269283, 0.0281379,
-0.02282338, 0.09741908, 0.32973239, 0.12281385,
-0.201174, -0.586145, -0.628624, -0.0330412,
0.24780814, -0.39304617, -0.22473189, 0.02589256,
-0.0839096, -0.299329, 0.108746, 0.109808,
0.10084175, -0.06416984, 0.28936723, 0.0026358,
0.419114, -0.237824, -0.422627, 0.175115,
-0.2314795, -0.18584411, -0.4228974, -0.12928449,
0.36726, -0.522303, -0.456502, -0.175475,
0.17012937, -0.34447709, 0.38505614, -0.28158101,
};
static float svdf_golden_output_rank_2[] = {
-0.09623547, -0.10193135, 0.11083051, -0.0347917,
0.1141196, 0.12965347, -0.12652366, 0.01007236,
-0.16396809, -0.21247184, 0.11259045, -0.04156673,
0.10132131, -0.06143532, -0.00924693, 0.10084561,
0.01257364, 0.0506071, -0.19287863, -0.07162561,
-0.02033747, 0.22673416, 0.15487903, 0.02525555,
-0.1411963, -0.37054959, 0.01774767, 0.05867489,
0.09607603, -0.0141301, -0.08995658, 0.12867066,
-0.27142537, -0.16955489, 0.18521598, -0.12528358,
0.00331409, 0.11167502, 0.02218599, -0.07309391,
0.09593632, -0.28361851, -0.0773851, 0.17199151,
-0.00075242, 0.33691186, -0.1536046, 0.16572715,
-0.27916506, -0.27626723, 0.42615682, 0.3225764,
-0.37472126, -0.55655634, -0.05013514, 0.289112,
-0.24418658, 0.07540751, -0.1940318, -0.08911639,
0.00732617, 0.46737891, 0.26449674, 0.24888524,
-0.17225097, -0.54660404, -0.38795233, 0.08389944,
0.07736043, -0.28260678, 0.15666828, 1.14949894,
-0.57454878, -0.64704704, 0.73235172, -0.34616736,
0.21120001, -0.22927976, 0.02455296, -0.35906726,
};
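// Test model wrapping the builtin SVDF op for the NNAPI delegate. The variable
// activation-state tensor has shape {batches, memory_size * units * rank}.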
class BaseSVDFOpModel : public SingleOpModelWithNNAPI {
public:
BaseSVDFOpModel(int batches, int units, int input_size, int memory_size,
int rank,
TensorType weights_feature_type = TensorType_FLOAT32,
TensorType weights_time_type = TensorType_FLOAT32)
: batches_(batches),
units_(units),
input_size_(input_size),
memory_size_(memory_size),
rank_(rank) {
input_ = AddInput(TensorType_FLOAT32);
weights_feature_ = AddInput(weights_feature_type);
weights_time_ = AddInput(weights_time_type);
bias_ = AddInput(TensorType_FLOAT32);
const int num_filters = units * rank;
activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(
BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
CreateSVDFOptions(builder_, rank, ActivationFunctionType_NONE).Union());
BuildInterpreterWithNNAPI({
{batches_, input_size_},
{units_ * rank, input_size_},
{units_ * rank, memory_size_},
{units_},
{batches, memory_size * num_filters}
});
PopulateTensor(bias_, std::vector<float>(units_));
}
void SetWeightsFeature(std::initializer_list<float> f) {
PopulateTensor(weights_feature_, f);
}
void SetWeightsTime(std::initializer_list<float> f) {
PopulateTensor(weights_time_, f);
}
void SetInput(int offset, float* begin, float* end) {
PopulateTensor(input_, offset, begin, end);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int input_size() { return input_size_; }
int num_units() { return units_; }
int num_batches() { return batches_; }
protected:
int input_;
int weights_feature_;
int weights_time_;
int bias_;
int activation_state_;
int output_;
int batches_;
int units_;
int input_size_;
int memory_size_;
int rank_;
};
class SVDFOpModel : public BaseSVDFOpModel {
public:
using BaseSVDFOpModel::BaseSVDFOpModel;
};
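// Fixture providing VerifyGoldens(): it slices the input sequence into
// per-step batches, invokes the model, and checks each step against the
// matching slice of the golden output within the given tolerance.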
class SVDFOpTest : public ::testing::Test {
protected:
void VerifyGoldens(float golden_input[], float golden_output[],
int golden_size, BaseSVDFOpModel* svdf,
float tolerance = 1e-5) {
const int svdf_num_batches = svdf->num_batches();
const int svdf_input_size = svdf->input_size();
const int svdf_num_units = svdf->num_units();
const int input_sequence_size =
golden_size / sizeof(float) / (svdf_input_size * svdf_num_batches);
for (int i = 0; i < input_sequence_size; i++) {
float* batch_start =
golden_input + i * svdf_input_size * svdf_num_batches;
float* batch_end = batch_start + svdf_input_size * svdf_num_batches;
svdf->SetInput(0, batch_start, batch_end);
ASSERT_EQ(svdf->Invoke(), kTfLiteOk);
const float* golden_start =
golden_output + i * svdf_num_units * svdf_num_batches;
const float* golden_end =
golden_start + svdf_num_units * svdf_num_batches;
std::vector<float> expected;
expected.insert(expected.end(), golden_start, golden_end);
EXPECT_THAT(svdf->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
}
};
TEST_F(SVDFOpTest, BlackBoxTestRank1) {
SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/1);
svdf.SetWeightsFeature({-0.31930989, -0.36118156, 0.0079667, 0.37613347,
0.22197971, 0.12416199, 0.27901134, 0.27557442,
0.3905206, -0.36137494, -0.06634006, -0.10640851});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657});
VerifyGoldens(svdf_input, svdf_golden_output_rank_1, sizeof(svdf_input),
&svdf);
}
TEST_F(SVDFOpTest, BlackBoxTestRank2) {
SVDFOpModel svdf(/*batches=*/2, /*units=*/4, /*input_size=*/3,
/*memory_size=*/10, /*rank=*/2);
svdf.SetWeightsFeature({-0.31930989, 0.0079667, 0.39296314, 0.37613347,
0.12416199, 0.15785322, 0.27901134, 0.3905206,
0.21931258, -0.36137494, -0.10640851, 0.31053296,
-0.36118156, -0.0976817, -0.36916667, 0.22197971,
0.15294972, 0.38031587, 0.27557442, 0.39635518,
-0.21580373, -0.06634006, -0.02702999, 0.27072677});
svdf.SetWeightsTime(
{-0.31930989, 0.37613347, 0.27901134, -0.36137494, -0.36118156,
0.22197971, 0.27557442, -0.06634006, 0.0079667, 0.12416199,
0.3905206, -0.10640851, -0.0976817, 0.15294972, 0.39635518,
-0.02702999, 0.39296314, 0.15785322, 0.21931258, 0.31053296,
-0.36916667, 0.38031587, -0.21580373, 0.27072677, 0.23622236,
0.34936687, 0.18174365, 0.35907319, -0.17493086, 0.324846,
-0.10781813, 0.27201805, 0.14324132, -0.23681851, -0.27115166,
-0.01580888, -0.14943552, 0.15465137, 0.09784451, -0.0337657,
-0.14884081, 0.19931212, -0.36002168, 0.34663299, -0.11405486,
0.12672701, 0.39463779, -0.07886535, -0.06384811, 0.08249187,
-0.26816407, -0.19905911, 0.29211238, 0.31264046, -0.28664589,
0.05698794, 0.11613581, 0.14078894, 0.02187902, -0.21781836,
-0.15567942, 0.08693647, -0.38256618, 0.36580828, -0.22922277,
-0.0226903, 0.12878349, -0.28122205, -0.10850525, -0.11955214,
0.27179423, -0.04710215, 0.31069002, 0.22672787, 0.09580326,
0.08682203, 0.1258215, 0.1851041, 0.29228821, 0.12366763});
VerifyGoldens(svdf_input, svdf_golden_output_rank_2, sizeof(svdf_input),
&svdf);
}
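// Test model wrapping the builtin LSTM op for the NNAPI delegate. Optional
// tensors (CIFG input gate, peephole weights, projection, layer norm) are
// added as null inputs when disabled; weight tensors use the given TensorType.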
class LSTMOpModel : public SingleOpModelWithNNAPI {
public:
LSTMOpModel(int n_batch, int n_input, int n_cell, int n_output, bool use_cifg,
bool use_peephole, bool use_projection_weights,
bool use_projection_bias, float cell_clip, float proj_clip,
const std::vector<std::vector<int>>& input_shapes,
const TensorType weight_type)
: n_batch_(n_batch),
n_input_(n_input),
n_cell_(n_cell),
n_output_(n_output),
weight_type_(weight_type) {
input_ = AddInput(TensorType_FLOAT32);
if (use_cifg) {
input_to_input_weights_ = AddNullInput();
} else {
input_to_input_weights_ = AddInput(weight_type);
}
input_to_forget_weights_ = AddInput(weight_type);
input_to_cell_weights_ = AddInput(weight_type);
input_to_output_weights_ = AddInput(weight_type);
if (use_cifg) {
recurrent_to_input_weights_ = AddNullInput();
} else {
recurrent_to_input_weights_ = AddInput(weight_type);
}
recurrent_to_forget_weights_ = AddInput(weight_type);
recurrent_to_cell_weights_ = AddInput(weight_type);
recurrent_to_output_weights_ = AddInput(weight_type);
if (use_peephole) {
if (use_cifg) {
cell_to_input_weights_ = AddNullInput();
} else {
cell_to_input_weights_ = AddInput(weight_type);
}
cell_to_forget_weights_ = AddInput(weight_type);
cell_to_output_weights_ = AddInput(weight_type);
} else {
cell_to_input_weights_ = AddNullInput();
cell_to_forget_weights_ = AddNullInput();
cell_to_output_weights_ = AddNullInput();
}
if (use_cifg) {
input_gate_bias_ = AddNullInput();
} else {
input_gate_bias_ = AddInput(TensorType_FLOAT32);
}
forget_gate_bias_ = AddInput(TensorType_FLOAT32);
cell_bias_ = AddInput(TensorType_FLOAT32);
output_gate_bias_ = AddInput(TensorType_FLOAT32);
if (use_projection_weights) {
projection_weights_ = AddInput(weight_type);
if (use_projection_bias) {
projection_bias_ = AddInput(TensorType_FLOAT32);
} else {
projection_bias_ = AddNullInput();
}
} else {
projection_weights_ = AddNullInput();
projection_bias_ = AddNullInput();
}
input_activation_state_ = AddVariableInput(TensorType_FLOAT32);
input_cell_state_ = AddVariableInput(TensorType_FLOAT32);
const bool use_layer_norm = input_shapes.size() > 20;
if (use_layer_norm) {
const int kInputLayerNormCoeffsIndex = 20;
const int kForgetLayerNormCoeffsIndex = 21;
const int kCellLayerNormCoeffsIndex = 22;
const int kOutputLayerNormCoeffsIndex = 23;
if (use_cifg) {
input_layer_norm_coefficients_ = AddNullInput();
} else {
input_layer_norm_coefficients_ =
AddLayerNormCoeffsTensor(kInputLayerNormCoeffsIndex, input_shapes);
}
forget_layer_norm_coefficients_ =
AddLayerNormCoeffsTensor(kForgetLayerNormCoeffsIndex, input_shapes);
cell_layer_norm_coefficients_ =
AddLayerNormCoeffsTensor(kCellLayerNormCoeffsIndex, input_shapes);
output_layer_norm_coefficients_ =
AddLayerNormCoeffsTensor(kOutputLayerNormCoeffsIndex, input_shapes);
}
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_LSTM, BuiltinOptions_LSTMOptions,
CreateLSTMOptions(builder_, ActivationFunctionType_TANH,
cell_clip, proj_clip)
.Union());
BuildInterpreterWithNNAPI(input_shapes);
}
void SetInputToInputWeights(const std::vector<float>& f) {
SetData(input_to_input_weights_, weight_type_, f);
}
void SetInputToForgetWeights(const std::vector<float>& f) {
SetData(input_to_forget_weights_, weight_type_, f);
}
void SetInputToCellWeights(const std::vector<float>& f) {
SetData(input_to_cell_weights_, weight_type_, f);
}
void SetInputToOutputWeights(const std::vector<float>& f) {
SetData(input_to_output_weights_, weight_type_, f);
}
void SetRecurrentToInputWeights(const std::vector<float>& f) {
SetData(recurrent_to_input_weights_, weight_type_, f);
}
void SetRecurrentToForgetWeights(const std::vector<float>& f) {
SetData(recurrent_to_forget_weights_, weight_type_, f);
}
void SetRecurrentToCellWeights(const std::vector<float>& f) {
SetData(recurrent_to_cell_weights_, weight_type_, f);
}
void SetRecurrentToOutputWeights(const std::vector<float>& f) {
SetData(recurrent_to_output_weights_, weight_type_, f);
}
void SetCellToInputWeights(const std::vector<float>& f) {
SetData(cell_to_input_weights_, weight_type_, f);
}
void SetCellToForgetWeights(const std::vector<float>& f) {
SetData(cell_to_forget_weights_, weight_type_, f);
}
void SetCellToOutputWeights(const std::vector<float>& f) {
SetData(cell_to_output_weights_, weight_type_, f);
}
void SetInputGateBias(const std::vector<float>& f) {
PopulateTensor(input_gate_bias_, f);
}
void SetForgetGateBias(const std::vector<float>& f) {
PopulateTensor(forget_gate_bias_, f);
}
void SetCellBias(const std::vector<float>& f) {
PopulateTensor(cell_bias_, f);
}
void SetOutputGateBias(const std::vector<float>& f) {
PopulateTensor(output_gate_bias_, f);
}
void SetProjectionWeights(const std::vector<float>& f) {
SetData(projection_weights_, weight_type_, f);
}
void SetProjectionBias(const std::vector<float>& f) {
PopulateTensor(projection_bias_, f);
}
void SetInputLayerNormCoefficients(const std::vector<float>& f) {
PopulateTensor(input_layer_norm_coefficients_, f);
}
void SetForgetLayerNormCoefficients(const std::vector<float>& f) {
PopulateTensor(forget_layer_norm_coefficients_, f);
}
void SetCellLayerNormCoefficients(const std::vector<float>& f) {
PopulateTensor(cell_layer_norm_coefficients_, f);
}
void SetOutputLayerNormCoefficients(const std::vector<float>& f) {
PopulateTensor(output_layer_norm_coefficients_, f);
}
void SetInput(int offset, const float* begin, const float* end) {
PopulateTensor(input_, offset, const_cast<float*>(begin),
const_cast<float*>(end));
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
int num_inputs() { return n_input_; }
int num_outputs() { return n_output_; }
int num_cells() { return n_cell_; }
int num_batches() { return n_batch_; }
protected:
int input_;
int input_to_input_weights_;
int input_to_forget_weights_;
int input_to_cell_weights_;
int input_to_output_weights_;
int recurrent_to_input_weights_;
int recurrent_to_forget_weights_;
int recurrent_to_cell_weights_;
int recurrent_to_output_weights_;
int cell_to_input_weights_;
int cell_to_forget_weights_;
int cell_to_output_weights_;
int input_gate_bias_;
int forget_gate_bias_;
int cell_bias_;
int output_gate_bias_;
int projection_weights_;
int projection_bias_;
int input_activation_state_;
int input_cell_state_;
int input_layer_norm_coefficients_;
int forget_layer_norm_coefficients_;
int cell_layer_norm_coefficients_;
int output_layer_norm_coefficients_;
int output_;
int output_state_;
int cell_state_;
int n_batch_;
int n_input_;
int n_cell_;
int n_output_;
private:
const TensorType weight_type_;
int AddLayerNormCoeffsTensor(
int tensor_index, const std::vector<std::vector<int>>& input_shapes) {
if (input_shapes[tensor_index][0] != 0) {
return AddInput(TensorType_FLOAT32);
} else {
return AddNullInput();
}
}
};
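// Base fixture holding the LSTM weights and biases populated by each test,
// plus VerifyGoldens(), which feeds the input sequence step by step and
// compares the concatenated per-batch outputs against the golden values.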
class BaseLstmTest : public ::testing::Test {
protected:
std::vector<float> input_to_input_weights_;
std::vector<float> input_to_cell_weights_;
std::vector<float> input_to_forget_weights_;
std::vector<float> input_to_output_weights_;
std::vector<float> input_gate_bias_;
std::vector<float> cell_gate_bias_;
std::vector<float> forget_gate_bias_;
std::vector<float> output_gate_bias_;
std::vector<float> recurrent_to_input_weights_;
std::vector<float> recurrent_to_cell_weights_;
std::vector<float> recurrent_to_forget_weights_;
std::vector<float> recurrent_to_output_weights_;
std::vector<float> cell_to_input_weights_;
std::vector<float> cell_to_forget_weights_;
std::vector<float> cell_to_output_weights_;
std::vector<float> projection_weights_;
std::vector<float> input_layer_norm_coefficients_;
std::vector<float> forget_layer_norm_coefficients_;
std::vector<float> cell_layer_norm_coefficients_;
std::vector<float> output_layer_norm_coefficients_;
std::vector<std::vector<float>> lstm_input_;
std::vector<std::vector<float>> lstm_golden_output_;
void VerifyGoldens(const std::vector<std::vector<float>>& input,
const std::vector<std::vector<float>>& output,
LSTMOpModel* lstm, float tolerance = 1e-5) {
const int num_batches = input.size();
EXPECT_GT(num_batches, 0);
const int num_inputs = lstm->num_inputs();
EXPECT_GT(num_inputs, 0);
const int input_sequence_size = input[0].size() / num_inputs;
EXPECT_GT(input_sequence_size, 0);
for (int i = 0; i < input_sequence_size; ++i) {
for (int b = 0; b < num_batches; ++b) {
const float* batch_start = input[b].data() + i * num_inputs;
const float* batch_end = batch_start + num_inputs;
lstm->SetInput(b * lstm->num_inputs(), batch_start, batch_end);
}
ASSERT_EQ(lstm->Invoke(), kTfLiteOk);
const int num_outputs = lstm->num_outputs();
std::vector<float> expected;
for (int b = 0; b < num_batches; ++b) {
const float* golden_start_batch = output[b].data() + i * num_outputs;
const float* golden_end_batch = golden_start_batch + num_outputs;
expected.insert(expected.end(), golden_start_batch, golden_end_batch);
}
EXPECT_THAT(lstm->GetOutput(),
ElementsAreArray(ArrayFloatNear(expected, tolerance)));
}
}
};
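// LSTM without CIFG, peephole, projection, or cell/projection clipping.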
class NoCifgNoPeepholeNoProjectionNoClippingLstmTest : public BaseLstmTest {
void SetUp() override {
input_to_input_weights_ = {-0.45018822, -0.02338299, -0.0870589,
-0.34550029, 0.04266912, -0.15680569,
-0.34856534, 0.43890524};
input_to_cell_weights_ = {-0.50013041, 0.1370284, 0.11810488, 0.2013163,
-0.20583314, 0.44344562, 0.22077113, -0.29909778};
input_to_forget_weights_ = {0.09701663, 0.20334584, -0.50592935,
-0.31343272, -0.40032279, 0.44781327,
0.01387155, -0.35593212};
input_to_output_weights_ = {-0.25065863, -0.28290087, 0.04613829,
0.40525138, 0.44272184, 0.03897077,
-0.1556896, 0.19487578};
input_gate_bias_ = {0., 0., 0., 0.};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_input_weights_ = {
-0.0063535, -0.2042388, 0.31454784, -0.35746509,
0.28902304, 0.08183324, -0.16555229, 0.02286911,
-0.13566875, 0.03034258, 0.48091322, -0.12528998,
0.24077177, -0.51332325, -0.33502164, 0.10629296};
recurrent_to_cell_weights_ = {
-0.3407414, 0.24443203, -0.2078532, 0.26320225,
0.05695659, -0.00123841, -0.4744786, -0.35869038,
-0.06418842, -0.13502428, -0.501764, 0.22830659,
-0.46367589, 0.26016325, -0.03894562, -0.16368064};
recurrent_to_forget_weights_ = {
-0.48684245, -0.06655136, 0.42224967, 0.2112639,
0.27654213, 0.20864892, -0.07646349, 0.45877004,
0.00141793, -0.14609534, 0.36447752, 0.09196436,
0.28053468, 0.01560611, -0.20127171, -0.01140004};
recurrent_to_output_weights_ = {
0.43385774, -0.17194885, 0.2718237, 0.09215671,
0.24107647, -0.39835793, 0.18212086, 0.01301402,
0.48572797, -0.50656658, 0.20047462, -0.20607421,
-0.51818722, -0.15390486, 0.0468148, 0.39922136};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.02973187, 0.1229473, 0.20885126, -0.15358765,
-0.03716109, 0.12507336, 0.41193449, -0.20860538,
-0.15053082, 0.09120187, 0.24278517, -0.12222792}};
}
};
TEST_F(NoCifgNoPeepholeNoProjectionNoClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
LSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
/*use_cifg=*/false, /*use_peephole=*/false,
/*use_projection_weights=*/false,
/*use_projection_bias=*/false,
/*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{n_batch, n_input},   // input
{n_cell, n_input},    // input_to_input_weights
{n_cell, n_input},    // input_to_forget_weights
{n_cell, n_input},    // input_to_cell_weights
{n_cell, n_input},    // input_to_output_weights
{n_cell, n_output},   // recurrent_to_input_weights
{n_cell, n_output},   // recurrent_to_forget_weights
{n_cell, n_output},   // recurrent_to_cell_weights
{n_cell, n_output},   // recurrent_to_output_weights
{0},                  // cell_to_input_weights (no peephole)
{0},                  // cell_to_forget_weights (no peephole)
{0},                  // cell_to_output_weights (no peephole)
{n_cell},             // input_gate_bias
{n_cell},             // forget_gate_bias
{n_cell},             // cell_bias
{n_cell},             // output_gate_bias
{0, 0},               // projection_weights (no projection)
{0},                  // projection_bias
{n_batch, n_output},  // activation_state
{n_batch, n_cell},    // cell_state
},
TensorType_FLOAT32);
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
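// Same weights as above, but the interpreter is built with four extra {0}
// shapes so the optional layer-norm coefficient tensors are explicitly
// omitted (added as null inputs).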
class NoCifgNoPeepholeNoProjectionNoClippingOmittedLayerNormLstmTest
: public NoCifgNoPeepholeNoProjectionNoClippingLstmTest {};
TEST_F(NoCifgNoPeepholeNoProjectionNoClippingOmittedLayerNormLstmTest,
LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
LSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
/*use_cifg=*/false, /*use_peephole=*/false,
/*use_projection_weights=*/false,
/*use_projection_bias=*/false,
/*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{0},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
{0},
{0},
{0},
{0},
},
TensorType_FLOAT32);
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
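// CIFG LSTM: the input-gate weight tensors are omitted (input and forget
// gates are coupled); cell-to-forget and cell-to-output peephole weights are
// provided.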
class CifgNoPeepholeNoProjectionNoClippingLstmTest : public BaseLstmTest {
void SetUp() override {
input_to_cell_weights_ = {-0.49770179, -0.27711356, -0.09624726,
0.05100781, 0.04717243, 0.48944736,
-0.38535351, -0.17212132};
input_to_forget_weights_ = {-0.55291498, -0.42866567, 0.13056988,
-0.3633365, -0.22755712, 0.28253698,
0.24407166, 0.33826375};
input_to_output_weights_ = {0.10725588, -0.02335852, -0.55932593,
-0.09426838, -0.44257352, 0.54939759,
0.01533556, 0.42751634};
cell_gate_bias_ = {0., 0., 0., 0.};
forget_gate_bias_ = {1., 1., 1., 1.};
output_gate_bias_ = {0., 0., 0., 0.};
recurrent_to_cell_weights_ = {
0.54066205, -0.32668582, -0.43562764, -0.56094903,
0.42957711, 0.01841056, -0.32764608, -0.33027974,
-0.10826075, 0.20675004, 0.19069612, -0.03026325,
-0.54532051, 0.33003211, 0.44901288, 0.21193194};
recurrent_to_forget_weights_ = {
-0.13832897, -0.0515101, -0.2359007, -0.16661474,
-0.14340827, 0.36986142, 0.23414481, 0.55899,
0.10798943, -0.41174671, 0.17751795, -0.34484994,
-0.35874045, -0.11352962, 0.27268326, 0.54058349};
recurrent_to_output_weights_ = {
0.41613156, 0.42610586, -0.16495961, -0.5663873,
0.30579174, -0.05115908, -0.33941799, 0.23364776,
0.11178309, 0.09481031, -0.26424935, 0.46261835,
0.50248802, 0.26114327, -0.43736315, 0.33149987};
cell_to_forget_weights_ = {0.47485286, -0.51955009, -0.24458408,
0.31544167};
cell_to_output_weights_ = {-0.17135078, 0.82760304, 0.85573703,
-0.77109635};
lstm_input_ = {{2., 3., 3., 4., 1., 1.}};
lstm_golden_output_ = {{-0.36444446, -0.00352185, 0.12886585, -0.05163646,
-0.42312205, -0.01218222, 0.24201041, -0.08124574,
-0.358325, -0.04621704, 0.21641694, -0.06471302}};
}
};
TEST_F(CifgNoPeepholeNoProjectionNoClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 1;
const int n_input = 2;
const int n_cell = 4;
const int n_output = 4;
LSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
/*use_cifg=*/true, /*use_peephole=*/true,
/*use_projection_weights=*/false,
/*use_projection_bias=*/false,
/*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{n_batch, n_input},
{0, 0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{0, 0},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{n_cell},
{n_cell},
{0},
{n_cell},
{n_cell},
{n_cell},
{0, 0},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_FLOAT32);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
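// LSTM with peephole connections and a projection layer (no CIFG, no
// projection bias), exercising a larger model with a two-batch input
// sequence.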
class NoCifgPeepholeProjectionClippingLstmTest : public BaseLstmTest {
void SetUp() override {
input_to_input_weights_ = {
0.021393683, 0.06124551, 0.046905167, -0.014657677, -0.03149463,
0.09171803, 0.14647801, 0.10797193, -0.0057968358, 0.0019193048,
-0.2726754, 0.10154029, -0.018539885, 0.080349885, -0.10262385,
-0.022599787, -0.09121155, -0.008675967, -0.045206103, -0.0821282,
-0.008045952, 0.015478081, 0.055217247, 0.038719587, 0.044153627,
-0.06453243, 0.05031825, -0.046935108, -0.008164439, 0.014574226,
-0.1671009, -0.15519552, -0.16819797, -0.13971269, -0.11953059,
0.25005487, -0.22790983, 0.009855087, -0.028140958, -0.11200698,
0.11295408, -0.0035217577, 0.054485075, 0.05184695, 0.064711206,
0.10989193, 0.11674786, 0.03490607, 0.07727357, 0.11390585,
-0.1863375, -0.1034451, -0.13945189, -0.049401227, -0.18767063,
0.042483903, 0.14233552, 0.13832581, 0.18350165, 0.14545603,
-0.028545704, 0.024939531, 0.050929718, 0.0076203286, -0.0029723682,
-0.042484224, -0.11827596, -0.09171104, -0.10808628, -0.16327988,
-0.2273378, -0.0993647, -0.017155107, 0.0023917493, 0.049272764,
0.0038534778, 0.054764505, 0.089753784, 0.06947234, 0.08014476,
-0.04544234, -0.0497073, -0.07135631, -0.048929106, -0.004042012,
-0.009284026, 0.018042054, 0.0036860977, -0.07427302, -0.11434604,
-0.018995456, 0.031487543, 0.012834908, 0.019977754, 0.044256654,
-0.39292613, -0.18519334, -0.11651281, -0.06809892, 0.011373677};
input_to_forget_weights_ = {
-0.0018401089, -0.004852237, 0.03698424, 0.014181704,
0.028273236, -0.016726194, -0.05249759, -0.10204261,
0.00861066, -0.040979505, -0.009899187, 0.01923892,
-0.028177269, -0.08535103, -0.14585495, 0.10662567,
-0.01909731, -0.017883534, -0.0047269356, -0.045103323,
0.0030784295, 0.076784775, 0.07463696, 0.094531395,
0.0814421, -0.12257899, -0.033945758, -0.031303465,
0.045630626, 0.06843887, -0.13492945, -0.012480007,
-0.0811829, -0.07224499, -0.09628791, 0.045100946,
0.0012300825, 0.013964662, 0.099372394, 0.02543059,
0.06958324, 0.034257296, 0.0482646, 0.06267997,
0.052625068, 0.12784666, 0.07077897, 0.025725935,
0.04165009, 0.07241905, 0.018668644, -0.037377294,
-0.06277783, -0.08833636, -0.040120605, -0.011405586,
-0.007808335, -0.010301386, -0.005102167, 0.027717464,
0.05483423, 0.11449111, 0.11289652, 0.10939839,
0.13396506, -0.08402166, -0.01901462, -0.044678304,
-0.07720565, 0.014350063, -0.11757958, -0.0652038,
-0.08185733, -0.076754324, -0.092614375, 0.10405491,
0.052960336, 0.035755895, 0.035839386, -0.012540553,
0.036881298, 0.02913376, 0.03420159, 0.05448447,
-0.054523353, 0.02582715, 0.02327355, -0.011857179,
-0.0011980024, -0.034641717, -0.026125094, -0.17582615,
-0.15923657, -0.27486774, -0.0006143371, 0.0001771948,
-8.470171e-05, 0.02651807, 0.045790765, 0.06956496};
input_to_cell_weights_ = {
-0.04580283, -0.09549462, -0.032418985, -0.06454633,
-0.043528453, 0.043018587, -0.049152344, -0.12418144,
-0.078985475, -0.07596889, 0.019484362, -0.11434962,
-0.0074034138, -0.06314844, -0.092981495, 0.0062155537,
-0.025034338, -0.0028890965, 0.048929527, 0.06235075,
0.10665918, -0.032036792, -0.08505916, -0.10843358,
-0.13002433, -0.036816437, -0.02130134, -0.016518239,
0.0047691227, -0.0025825808, 0.066017866, 0.029991534,
-0.10652836, -0.1037554, -0.13056071, -0.03266643,
-0.033702414, -0.006473424, -0.04611692, 0.014419339,
-0.025174323, 0.0396852, 0.081777506, 0.06157468,
0.10210095, -0.009658194, 0.046511717, 0.03603906,
0.0069369148, 0.015960095, -0.06507666, 0.09551598,
0.053568836, 0.06408714, 0.12835667, -0.008714329,
-0.20211966, -0.12093674, 0.029450472, 0.2849013,
-0.029227901, 0.1164364, -0.08560263, 0.09941786,
-0.036999565, -0.028842626, -0.0033637602, -0.017012902,
-0.09720865, -0.11193351, -0.029155117, -0.017936034,
-0.009768936, -0.04223324, -0.036159635, 0.06505112,
-0.021742892, -0.023377212, -0.07221364, -0.06430552,
0.05453865, 0.091149814, 0.06387331, 0.007518393,
0.055960953, 0.069779344, 0.046411168, 0.10509911,
0.07463894, 0.0075130584, 0.012850982, 0.04555431,
0.056955688, 0.06555285, 0.050801456, -0.009862683,
0.00826772, -0.026555609, -0.0073611983, -0.0014897042};
input_to_output_weights_ = {
-0.0998932, -0.07201956, -0.052803773, -0.15629593, -0.15001918,
-0.07650751, 0.02359855, -0.075155355, -0.08037709, -0.15093534,
0.029517552, -0.04751393, 0.010350531, -0.02664851, -0.016839722,
-0.023121163, 0.0077019283, 0.012851257, -0.05040649, -0.0129761,
-0.021737747, -0.038305793, -0.06870586, -0.01481247, -0.001285394,
0.10124236, 0.083122835, 0.053313006, -0.062235646, -0.075637154,
-0.027833903, 0.029774971, 0.1130802, 0.09218906, 0.09506135,
-0.086665764, -0.037162706, -0.038880914, -0.035832845, -0.014481564,
-0.09825003, -0.12048569, -0.097665586, -0.05287633, -0.0964047,
-0.11366429, 0.035777505, 0.13568819, 0.052451383, 0.050649304,
0.05798951, -0.021852335, -0.099848844, 0.014740475, -0.078897946,
0.04974699, 0.014160473, 0.06973932, 0.04964942, 0.033364646,
0.08190124, 0.025535367, 0.050893165, 0.048514254, 0.06945813,
-0.078907564, -0.06707616, -0.11844508, -0.09986688, -0.07509403,
0.06263226, 0.14925587, 0.20188436, 0.12098451, 0.14639415,
0.0015017595, -0.014267382, -0.03417257, 0.012711468, 0.0028300495,
-0.024758482, -0.05098548, -0.0821182, 0.014225672, 0.021544158,
0.08949725, 0.07505268, -0.0020780868, 0.04908258, 0.06476295,
-0.022907063, 0.027562456, 0.040185735, 0.019567577, -0.015598739,
-0.049097303, -0.017121866, -0.083368234, -0.02332002, -0.0840956};
input_gate_bias_ = {0.02234832, 0.14757581, 0.18176508, 0.10380666,
0.053110216, -0.06928846, -0.13942584, -0.11816189,
0.19483899, 0.03652339, -0.10250295, 0.036714908,
-0.18426876, 0.036065217, 0.21810818, 0.02383196,
-0.043370757, 0.08690144, -0.04444982, 0.00030581196};
forget_gate_bias_ = {0.035185695, -0.042891346, -0.03032477, 0.23027696,
0.11098921, 0.15378423, 0.09263801, 0.09790885,
0.09508917, 0.061199076, 0.07665568, -0.015443159,
-0.03499149, 0.046190713, 0.08895977, 0.10899629,
0.40694186, 0.06030037, 0.012413437, -0.06108739};
cell_gate_bias_ = {-0.024379363, 0.0055531194, 0.23377132, 0.033463873,
-0.1483596, -0.10639995, -0.091433935, 0.058573797,
-0.06809782, -0.07889636, -0.043246906, -0.09829136,
-0.4279842, 0.034901652, 0.18797937, 0.0075234566,
0.016178843, 0.1749513, 0.13975595, 0.92058027};
output_gate_bias_ = {0.046159424, -0.0012809046, 0.03563469, 0.12648113,
0.027195795, 0.35373217, -0.018957434, 0.008907322,
-0.0762701, 0.12018895, 0.04216877, 0.0022856654,
0.040952638, 0.3147856, 0.08225149, -0.057416286,
-0.14995944, -0.008040261, 0.13208859, 0.029760877};
recurrent_to_input_weights_ = {
-0.001374326, -0.078856036, 0.10672688, 0.029162422,
-0.11585556, 0.02557986, -0.13446963, -0.035785314,
-0.01244275, 0.025961924, -0.02337298, -0.044228926,
-0.055839065, -0.046598054, -0.010546039, -0.06900766,
0.027239809, 0.022582639, -0.013296484, -0.05459212,
0.08981, -0.045407712, 0.08682226, -0.06867011,
-0.14390695, -0.02916037, 0.000996957, 0.091420636,
0.14283475, -0.07390571, -0.06402044, 0.062524505,
-0.093129106, 0.04860203, -0.08364217, -0.08119002,
0.009352075, 0.22920375, 0.0016303885, 0.11583097,
-0.13732095, 0.012405723, -0.07551853, 0.06343048,
0.12162708, -0.031923793, -0.014335606, 0.01790974,
-0.10650317, -0.0724401, 0.08554849, -0.05727212,
0.06556731, -0.042729504, -0.043227166, 0.011683251,
-0.013082158, -0.029302018, -0.010899579, -0.062036745,
-0.022509435, -0.00964907, -0.01567329, 0.04260106,
-0.07787477, -0.11576462, 0.017356863, 0.048673786,
-0.017577527, -0.05527947, -0.082487635, -0.040137455,
-0.10820036, -0.04666372, 0.022746278, -0.07851417,
0.01068115, 0.032956902, 0.022433773, 0.0026891115,
0.08944216, -0.0685835, 0.010513544, 0.07228705,
0.02032331, -0.059686817, -0.0005566496, -0.086984694,
0.040414046, -0.1380399, 0.094208956, -0.05722982,
0.012092817, -0.04989123, -0.086576, -0.003399834,
-0.04696032, -0.045747425, 0.10091314, 0.048676282,
-0.029037097, 0.031399418, -0.0040285117, 0.047237843,
0.09504992, 0.041799378, -0.049185462, -0.031518843,
-0.10516937, 0.026374253, 0.10058866, -0.0033195973,
-0.041975245, 0.0073591834, 0.0033782164, -0.004325073,
-0.10167381, 0.042500053, -0.01447153, 0.06464186,
-0.017142897, 0.03312627, 0.009205989, 0.024138335,
-0.011337001, 0.035530265, -0.010912711, 0.0706555,
-0.005894094, 0.051841937, -0.1401738, -0.02351249,
0.0365468, 0.07590991, 0.08838724, 0.021681072,
-0.10086113, 0.019608743, -0.06195883, 0.077335775,
0.023646897, -0.095322326, 0.02233014, 0.09756986,
-0.048691444, -0.009579111, 0.07595467, 0.11480546,
-0.09801813, 0.019894179, 0.08502348, 0.004032281,
0.037211012, 0.068537936, -0.048005626, -0.091520436,
-0.028379958, -0.01556313, 0.06554592, -0.045599163,
-0.01672207, -0.020169014, -0.011877351, -0.20212261,
0.010889619, 0.0047078193, 0.038385306, 0.08540671,
-0.017140968, -0.0035865551, 0.016678626, 0.005633034,
0.015963363, 0.00871737, 0.060130805, 0.028611384,
0.10109069, -0.015060172, -0.07894427, 0.06401885,
0.011584063, -0.024466386, 0.0047652307, -0.09041358,
0.030737216, -0.0046374933, 0.14215417, -0.11823516,
0.019899689, 0.006106124, -0.027092824, 0.0786356,
0.05052217, -0.058925, -0.011402121, -0.024987547,
-0.0013661642, -0.06832946, -0.015667673, -0.1083353,
-0.00096863037, -0.06988685, -0.053350925, -0.027275559,
-0.033664223, -0.07978348, -0.025200296, -0.017207067,
-0.058403496, -0.055697463, 0.005798788, 0.12965427,
-0.062582195, 0.0013350133, -0.10482091, 0.0379771,
0.072521195, -0.0029455067, -0.13797039, -0.03628521,
0.013806405, -0.017858358, -0.01008298, -0.07700066,
-0.017081132, 0.019358726, 0.0027079724, 0.004635139,
0.062634714, -0.02338735, -0.039547626, -0.02050681,
0.03385117, -0.083611414, 0.002862572, -0.09421313,
0.058618143, -0.08598433, 0.00972939, 0.023867095,
-0.053934585, -0.023203006, 0.07452513, -0.048767887,
-0.07314807, -0.056307215, -0.10433547, -0.06440842,
0.04328182, 0.04389765, -0.020006588, -0.09076438,
-0.11652589, -0.021705797, 0.03345259, -0.010329105,
-0.025767034, 0.013057034, -0.07316461, -0.10145612,
0.06358255, 0.18531723, 0.07759293, 0.12006465,
0.1305557, 0.058638252, -0.03393652, 0.09622831,
-0.16253184, -2.4580743e-06, 0.079869635, -0.070196845,
-0.005644518, 0.06857898, -0.12598175, -0.035084512,
0.03156317, -0.12794146, -0.031963028, 0.04692781,
0.030070418, 0.0071660685, -0.095516115, -0.004643372,
0.040170413, -0.062104587, -0.0037324072, 0.0554317,
0.08184801, -0.019164372, 0.06791302, 0.034257166,
-0.10307039, 0.021943003, 0.046745934, 0.0790918,
-0.0265588, -0.007824208, 0.042546265, -0.00977924,
-0.0002440307, -0.017384544, -0.017990116, 0.12252321,
-0.014512694, -0.08251313, 0.08861942, 0.13589665,
0.026351685, 0.012641483, 0.07466548, 0.044301085,
-0.045414884, -0.051112458, 0.03444247, -0.08502782,
-0.04106223, -0.028126027, 0.028473156, 0.10467447};
recurrent_to_cell_weights_ = {
-0.037322544, 0.018592842, 0.0056175636, -0.06253426,
0.055647098, -0.05713207, -0.05626563, 0.005559383,
0.03375411, -0.025757805, -0.088049285, 0.06017052,
-0.06570978, 0.007384076, 0.035123326, -0.07920549,
0.053676967, 0.044480428, -0.07663568, 0.0071805613,
0.08089997, 0.05143358, 0.038261272, 0.03339287,
-0.027673481, 0.044746667, 0.028349208, 0.020090483,
-0.019443132, -0.030755889, -0.0040000007, 0.04465846,
-0.021585021, 0.0031670958, 0.0053199246, -0.056117613,
-0.10893326, 0.076739706, -0.08509834, -0.027997585,
0.037871376, 0.01449768, -0.09002357, -0.06111149,
-0.046195522, 0.0422062, -0.005683705, -0.1253618,
-0.012925729, -0.04890792, 0.06985068, 0.037654128,
0.03398274, -0.004781977, 0.007032333, -0.031787455,
0.010868644, -0.031489216, 0.09525667, 0.013939797,
0.0058680447, 0.0167067, 0.02668468, -0.04797466,
-0.048885044, -0.12722108, 0.035304096, 0.06554885,
0.00972396, -0.039238118, -0.05159735, -0.11329045,
0.1613692, -0.03750952, 0.06529313, -0.071974665,
-0.11769596, 0.015524369, -0.0013754242, -0.12446318,
0.02786344, -0.014179351, 0.005264273, 0.14376344,
0.015983658, 0.03406988, -0.06939408, 0.040699873,
0.02111075, 0.09669095, 0.041345075, -0.08316494,
-0.07684199, -0.045768797, 0.032298047, -0.041805092,
0.0119405, 0.0061010392, 0.12652606, 0.0064572375,
-0.024950314, 0.11574242, 0.04508852, -0.04335324,
0.06760663, -0.027437469, 0.07216407, 0.06977076,
-0.05438599, 0.034033038, -0.028602652, 0.05346137,
0.043184172, -0.037189785, 0.10420091, 0.00882477,
-0.054019816, -0.074273005, -0.030617684, -0.0028467078,
0.024302477, -0.0038869337, 0.005332455, 0.0013399826,
0.04361412, -0.007001822, 0.09631092, -0.06702025,
-0.042049985, -0.035070654, -0.04103342, -0.10273396,
0.0544271, 0.037184782, -0.13150354, -0.0058036847,
-0.008264958, 0.042035464, 0.05891794, 0.029673764,
0.0063542654, 0.044788733, 0.054816857, 0.062257513,
-0.00093483756, 0.048938446, -0.004952862, -0.007730018,
-0.04043371, -0.017094059, 0.07229206, -0.023670016,
-0.052195564, -0.025616996, -0.01520939, 0.045104615,
-0.007376126, 0.003533447, 0.006570588, 0.056037236,
0.12436656, 0.051817212, 0.028532185, -0.08686856,
0.11868599, 0.07663395, -0.07323171, 0.03463402,
-0.050708205, -0.04458982, -0.11590894, 0.021273347,
0.1251325, -0.15313013, -0.12224372, 0.17228661,
0.023029093, 0.086124025, 0.006445803, -0.03496501,
0.028332196, 0.04449512, -0.042436164, -0.026587414,
-0.006041347, -0.09292539, -0.05678812, 0.03897832,
0.09465633, 0.008115513, -0.02171956, 0.08304309,
0.071401566, 0.019622514, 0.032163795, -0.004167056,
0.02295182, 0.030739572, 0.056506045, 0.004612461,
0.06524936, 0.059999723, 0.046395954, -0.0045512207,
-0.1335546, -0.030136576, 0.11584653, -0.014678886,
0.0020118146, -0.09688814, -0.0790206, 0.039770417,
-0.0329582, 0.07922767, 0.029322514, 0.026405897,
0.04207835, -0.07073373, 0.063781224, 0.0859677,
-0.10925287, -0.07011058, 0.048005477, 0.03438226,
-0.09606514, -0.006669445, -0.043381985, 0.04240257,
-0.06955775, -0.06769346, 0.043903265, -0.026784198,
-0.017840602, 0.024307009, -0.040079936, -0.019946516,
0.045318738, -0.12233574, 0.026170589, 0.0074471775,
0.15978073, 0.10185836, 0.10298046, -0.015476589,
-0.039390966, -0.072174534, 0.0739445, -0.1211869,
-0.0347889, -0.07943156, 0.014809798, -0.12412325,
-0.0030663363, 0.039695457, 0.0647603, -0.08291318,
-0.018529687, -0.004423833, 0.0037507233, 0.084633216,
-0.01514876, -0.056505352, -0.012800942, -0.06994386,
0.012962922, -0.031234352, 0.07029052, 0.016418684,
0.03618972, 0.055686004, -0.08663945, -0.017404709,
-0.054761406, 0.029065743, 0.052404847, 0.020238016,
0.0048197987, -0.0214882, 0.07078733, 0.013016777,
0.06262858, 0.009184685, 0.020785125, -0.043904778,
-0.0270329, -0.03299152, -0.060088247, -0.015162964,
-0.001828936, 0.12642565, -0.056757294, 0.013586685,
0.09232601, -0.035886683, 0.06000002, 0.05229691,
-0.052580316, -0.082029596, -0.010794592, 0.012947712,
-0.036429964, -0.085508935, -0.13127148, -0.017744139,
0.031502828, 0.036232427, -0.031581745, 0.023051167,
-0.05325106, -0.03421577, 0.028793324, -0.034633752,
-0.009881397, -0.043551125, -0.018609839, 0.0019097115,
-0.008799762, 0.056595087, 0.0022273948, 0.055752404};
recurrent_to_forget_weights_ = {
-0.057784554, -0.026057621, -0.068447545, -0.022581743,
0.14811787, 0.10826372, 0.09471067, 0.03987225,
-0.0039523416, 0.00030638507, 0.053185795, 0.10572994,
0.08414449, -0.022036452, -0.00066928595, -0.09203576,
0.032950465, -0.10985798, -0.023809856, 0.0021431844,
-0.02196096, -0.00326074, 0.00058621005, -0.074678116,
-0.06193199, 0.055729095, 0.03736828, 0.020123724,
0.061878487, -0.04729229, 0.034919553, -0.07585433,
-0.04421272, -0.044019096, 0.085488975, 0.04058006,
-0.06890133, -0.030951202, -0.024628663, -0.07672815,
0.034293607, 0.08556707, -0.05293577, -0.033561368,
-0.04899627, 0.0241671, 0.015736353, -0.095442444,
-0.029564252, 0.016493602, -0.035026584, 0.022337519,
-0.026871363, 0.004780428, 0.0077918363, -0.03601621,
0.016435321, -0.03263031, -0.09543275, -0.047392778,
0.013454138, 0.028934088, 0.01685226, -0.086110644,
-0.046250615, -0.01847454, 0.047608484, 0.07339695,
0.034546845, -0.04881143, 0.009128804, -0.08802852,
0.03761666, 0.008096139, -0.014454086, 0.014361001,
-0.023502491, -0.0011840804, -0.07607001, 0.001856849,
-0.06509276, -0.006021153, -0.08570962, -0.1451793,
0.060212336, 0.055259194, 0.06974018, 0.049454916,
-0.027794661, -0.08077226, -0.016179763, 0.1169753,
0.17213494, -0.0056326236, -0.053934924, -0.0124349,
-0.11520337, 0.05409887, 0.088759385, 0.0019655675,
0.0042065294, 0.03881498, 0.019844765, 0.041858196,
-0.05695512, 0.047233116, 0.038937137, -0.06542224,
0.014429736, -0.09719407, 0.13908425, -0.05379757,
0.012321099, 0.082840554, -0.029899208, 0.044217527,
0.059855383, 0.07711018, -0.045319796, 0.0948846,
-0.011724666, -0.0033288454, -0.033542685, -0.04764985,
-0.13873616, 0.040668588, 0.034832682, -0.015319203,
-0.018715994, 0.046002675, 0.0599172, -0.043107376,
0.0294216, -0.002314414, -0.022424703, 0.0030315618,
0.0014641669, 0.0029166266, -0.11878115, 0.013738511,
0.12375372, -0.0006038222, 0.029104086, 0.087442465,
0.052958444, 0.07558703, 0.04817258, 0.044462286,
-0.015213451, -0.08783778, -0.0561384, -0.003008196,
0.047060397, -0.002058388, 0.03429439, -0.018839769,
0.024734668, 0.024614193, -0.042046934, 0.09597743,
-0.0043254104, 0.04320769, 0.0064070094, -0.0019131786,
-0.02558259, -0.022822596, -0.023273505, -0.02464396,
-0.10991725, -0.006240552, 0.0074488563, 0.024044557,
0.04383914, -0.046476185, 0.028658995, 0.060410924,
0.050786525, 0.009452605, -0.0073054377, -0.024810238,
0.0052906186, 0.0066939713, -0.0020913032, 0.014515517,
0.015898481, 0.021362653, -0.030262267, 0.016587038,
-0.011442813, 0.041154444, -0.007631438, -0.03423484,
-0.010977775, 0.036152758, 0.0066366293, 0.11915515,
0.02318443, -0.041350313, 0.021485701, -0.10906167,
-0.028218046, -0.00954771, 0.020531068, -0.11995105,
-0.03672871, 0.024019798, 0.014255957, -0.05221243,
-0.00661567, -0.04630967, 0.033188973, 0.10107534,
-0.014027541, 0.030796422, -0.10270911, -0.035999842,
0.15443139, 0.07684145, 0.036571592, -0.035900835,
-0.0034699554, 0.06209149, 0.015920248, -0.031122351,
-0.03858649, 0.01849943, 0.13872518, 0.01503974,
0.069941424, -0.06948533, -0.0088794185, 0.061282158,
-0.047401894, 0.03100163, -0.041533746, -0.10430945,
0.044574402, -0.01425562, -0.024290353, 0.034563623,
0.05866852, 0.023947537, -0.09445152, 0.035450947,
0.02247216, -0.0042998926, 0.061146557, -0.10250651,
0.020881841, -0.06747029, 0.10062043, -0.0023941975,
0.03532124, -0.016341697, 0.09685456, -0.016764693,
0.051808182, 0.05875331, -0.04536488, 0.001626336,
-0.028892258, -0.01048663, -0.009793449, -0.017093895,
0.010987891, 0.02357273, -0.00010856845, 0.0099760275,
-0.001845119, -0.03551521, 0.0018358806, 0.05763657,
-0.01769146, 0.040995963, 0.02235177, -0.060430344,
0.11475477, -0.023854522, 0.10071741, 0.0686208,
-0.014250481, 0.034261297, 0.047418304, 0.08562733,
-0.030519066, 0.0060542435, 0.014653856, -0.038836084,
0.04096551, 0.032249358, -0.08355519, -0.026823482,
0.056386515, -0.010401743, -0.028396193, 0.08507674,
0.014410365, 0.020995233, 0.17040324, 0.11511526,
0.02459721, 0.0066619175, 0.025853224, -0.023133837,
-0.081302024, 0.017264642, -0.009585969, 0.09491168,
-0.051313367, 0.054532815, -0.014298593, 0.10657464,
0.007076659, 0.10964551, 0.0409152, 0.008275321,
-0.07283536, 0.07937492, 0.04192024, -0.1075027};
recurrent_to_output_weights_ = {
0.025825322, -0.05813119, 0.09495884, -0.045984812,
-0.01255415, -0.0026479573, -0.08196161, -0.054914974,
-0.0046604523, -0.029587349, -0.044576716, -0.07480124,
-0.082868785, 0.023254942, 0.027502948, -0.0039728214,
-0.08683098, -0.08116779, -0.014675607, -0.037924774,
-0.023314456, -0.007401714, -0.09255757, 0.029460307,
-0.08829125, -0.005139627, -0.08989442, -0.0555066,
0.13596267, -0.025062224, -0.048351806, -0.03850004,
0.07266485, -0.022414139, 0.05940088, 0.075114764,
0.09597592, -0.010211725, -0.0049794707, -0.011523867,
-0.025980417, 0.072999895, 0.11091378, -0.081685916,
0.014416728, 0.043229222, 0.034178585, -0.07530371,
0.035837382, -0.085607, -0.007721233, -0.03287832,
-0.043848954, -0.06404588, -0.06632928, -0.073643476,
0.008214239, -0.045984086, 0.039764922, 0.03474462,
0.060612556, -0.080590084, 0.049127717, 0.04151091,
-0.030063879, 0.008801774, -0.023021035, -0.019558564,
0.05158114, -0.010947698, -0.011825728, 0.0075720972,
0.0699727, -0.0039981045, 0.069350146, 0.08799282,
0.016156472, 0.035502106, 0.11695009, 0.006217345,
0.13392477, -0.037875112, 0.025745004, 0.08940699,
-0.00924166, 0.0046702605, -0.036598757, -0.08811812,
0.10522024, -0.032441203, 0.008176899, -0.04454919,
0.07058152, 0.0067963637, 0.039206743, 0.03259838,
0.03725492, -0.09515802, 0.013326398, -0.052055415,
-0.025676316, 0.03198509, -0.015951829, -0.058556724,
0.036879618, 0.043357447, 0.028362012, -0.05908629,
0.0059240665, -0.04995891, -0.019187413, 0.0276265,
-0.01628143, 0.0025863599, 0.08800015, 0.035250366,
-0.022165963, -0.07328642, -0.009415526, -0.07455109,
0.11690406, 0.0363299, 0.07411125, 0.042103454,
-0.009660886, 0.019076364, 0.018299393, -0.046004917,
0.08891175, 0.0431396, -0.026327137, -0.051502608,
0.08979574, -0.051670972, 0.04940282, -0.07491107,
-0.021240504, 0.022596184, -0.034280192, 0.060163025,
-0.058211457, -0.051837247, -0.01349775, -0.04639988,
-0.035936575, -0.011681591, 0.064818054, 0.0073146066,
-0.021745546, -0.043124277, -0.06471268, -0.07053354,
-0.029321948, -0.05330136, 0.016933719, -0.053782392,
0.13747959, -0.1361751, -0.11569455, 0.0033329215,
0.05693899, -0.053219706, 0.063698, 0.07977434,
-0.07924483, 0.06936997, 0.0034815092, -0.007305279,
-0.037325785, -0.07251102, -0.033633437, -0.08677009,
0.091591336, -0.14165086, 0.021752775, 0.019683983,
0.0011612234, -0.058154266, 0.049996935, 0.0288841,
-0.0024567875, -0.14345716, 0.010955264, -0.10234828,
0.1183656, -0.0010731248, -0.023590032, -0.072285876,
-0.0724771, -0.026382286, -0.0014920527, 0.042667855,
0.0018776858, 0.02986552, 0.009814309, 0.0733756,
0.12289186, 0.018043943, -0.0458958, 0.049412545,
0.033632483, 0.05495232, 0.036686596, -0.013781798,
-0.010036754, 0.02576849, -0.08307328, 0.010112348,
0.042521734, -0.05869831, -0.071689695, 0.03876447,
-0.13275425, -0.0352966, -0.023077697, 0.10285965,
0.084736146, 0.15568255, -0.00040734606, 0.027835453,
-0.10292561, -0.032401145, 0.10053256, -0.026142767,
-0.08271222, -0.0030240538, -0.016368777, 0.1070414,
0.042672627, 0.013456989, -0.0437609, -0.022309763,
0.11576483, 0.04108048, 0.061026827, -0.0190714,
-0.0869359, 0.037901703, 0.0610107, 0.07202949,
0.01675338, 0.086139716, -0.08795751, -0.014898893,
-0.023771819, -0.01965048, 0.007955471, -0.043740474,
0.03346837, -0.10549954, 0.090567775, 0.042013682,
-0.03176985, 0.12569028, -0.02421228, -0.029526481,
0.023851605, 0.031539805, 0.05292009, -0.02344001,
-0.07811758, -0.08834428, 0.10094801, 0.16594367,
-0.06861939, -0.021256343, -0.041093912, -0.06669611,
0.035498552, 0.021757556, -0.09302526, -0.015403468,
-0.06614931, -0.051798206, -0.013874718, 0.03630673,
0.010412845, -0.08077351, 0.046185967, 0.0035662893,
0.03541868, -0.094149634, -0.034814864, 0.003128424,
-0.020674974, -0.03944324, -0.008110165, -0.11113267,
0.08484226, 0.043586485, 0.040582247, 0.0968012,
-0.065249965, -0.028036479, 0.0050708856, 0.0017462453,
0.0326779, 0.041296225, 0.09164146, -0.047743853,
-0.015952192, -0.034451712, 0.084197424, -0.05347844,
-0.11768019, 0.085926116, -0.08251791, -0.045081906,
0.0948852, 0.068401024, 0.024856757, 0.06978981,
-0.057309967, -0.012775832, -0.0032452994, 0.01977615,
-0.041040014, -0.024264973, 0.063464895, 0.05431621,
};
cell_to_input_weights_ = {
0.040369894, 0.030746894, 0.24704495, 0.018586371, -0.037586458,
-0.15312155, -0.11812848, -0.11465643, 0.20259799, 0.11418174,
-0.10116027, -0.011334949, 0.12411352, -0.076769054, -0.052169047,
0.21198851, -0.38871562, -0.09061183, -0.09683246, -0.21929175};
cell_to_forget_weights_ = {
-0.01998659, -0.15568835, -0.24248174, -0.012770197, 0.041331276,
-0.072311886, -0.052123554, -0.0066330447, -0.043891653, 0.036225766,
-0.047248036, 0.021479502, 0.033189066, 0.11952997, -0.020432774,
0.64658105, -0.06650122, -0.03467612, 0.095340036, 0.23647355};
cell_to_output_weights_ = {
0.08286371, -0.08261836, -0.51210177, 0.002913762, 0.17764764,
-0.5495371, -0.08460716, -0.24552552, 0.030037103, 0.04123544,
-0.11940523, 0.007358328, 0.1890978, 0.4833202, -0.34441817,
0.36312827, -0.26375428, 0.1457655, -0.19724406, 0.15548733};
projection_weights_ = {
-0.009802181, 0.09401916, 0.0717386, -0.13895074,
0.09641832, 0.060420845, 0.08539281, 0.054285463,
0.061395317, 0.034448683, -0.042991187, 0.019801661,
-0.16840284, -0.015726732, -0.23041931, -0.024478018,
-0.10959692, -0.013875541, 0.18600968, -0.061274476,
0.0138165, -0.08160894, -0.07661644, 0.032372914,
0.16169067, 0.22465782, -0.03993472, -0.004017731,
0.08633481, -0.28869787, 0.08682067, 0.17240396,
0.014975425, 0.056431185, 0.031037588, 0.16702051,
0.0077946745, 0.15140012, 0.29405436, 0.120285,
-0.188994, -0.027265169, 0.043389652, -0.022061434,
0.014777949, -0.20203483, 0.094781205, 0.19100232,
0.13987629, -0.036132768, -0.06426278, -0.05108664,
0.13221376, 0.009441198, -0.16715929, 0.15859416,
-0.040437475, 0.050779544, -0.022187516, 0.012166504,
0.027685808, -0.07675938, -0.0055694645, -0.09444123,
0.0046453946, 0.050794356, 0.10770313, -0.20790008,
-0.07149004, -0.11425117, 0.008225835, -0.035802525,
0.14374903, 0.15262283, 0.048710253, 0.1847461,
-0.007487823, 0.11000021, -0.09542012, 0.22619456,
-0.029149994, 0.08527916, 0.009043713, 0.0042746216,
0.016261552, 0.022461696, 0.12689082, -0.043589946,
-0.12035478, -0.08361797, -0.050666027, -0.1248618,
-0.1275799, -0.071875185, 0.07377272, 0.09944291,
-0.18897448, -0.1593054, -0.06526116, -0.040107165,
-0.004618631, -0.067624845, -0.007576253, 0.10727444,
0.041546922, -0.20424393, 0.06907816, 0.050412357,
0.00724631, 0.039827548, 0.12449835, 0.10747581,
0.13708383, 0.09134148, -0.12617786, -0.06428341,
0.09956831, 0.1208086, -0.14676677, -0.0727722,
0.1126304, 0.010139365, 0.015571211, -0.038128063,
0.022913318, -0.042050496, 0.16842307, -0.060597885,
0.10531834, -0.06411776, -0.07451711, -0.03410368,
-0.13393489, 0.06534304, 0.003620307, 0.04490757,
0.05970546, 0.05197996, 0.02839995, 0.10434969,
-0.013699693, -0.028353551, -0.07260381, 0.047201227,
-0.024575593, -0.036445823, 0.07155557, 0.009672501,
-0.02328883, 0.009533515, -0.03606021, -0.07421458,
-0.028082801, -0.2678904, -0.13221288, 0.18419984,
-0.13012612, -0.014588381, -0.035059117, -0.04824723,
0.07830115, -0.056184657, 0.03277091, 0.025466874,
0.14494097, -0.12522776, -0.098633975, -0.10766018,
-0.08317623, 0.08594209, 0.07749552, 0.039474737,
0.1776665, -0.07409566, -0.0477268, 0.29323658,
0.10801441, 0.1154011, 0.013952499, 0.10739139,
0.10708251, -0.051456142, 0.0074137426, -0.10430189,
0.10034707, 0.045594677, 0.0635285, -0.0715442,
-0.089667566, -0.10811871, 0.00026344223, 0.08298446,
-0.009525053, 0.006585689, -0.24567553, -0.09450807,
0.09648481, 0.026996298, -0.06419476, -0.04752702,
-0.11063944, -0.23441927, -0.17608605, -0.052156363,
0.067035615, 0.19271925, -0.0032889997, -0.043264326,
0.09663576, -0.057112187, -0.10100678, 0.0628376,
0.04447668, 0.017961001, -0.10094388, -0.10190601,
0.18335468, 0.10494553, -0.052095775, -0.0026118709,
0.10539724, -0.04383912, -0.042349473, 0.08438151,
-0.1947263, 0.02251204, 0.11216432, -0.10307853,
0.17351969, -0.039091777, 0.08066188, -0.00561982,
0.12633002, 0.11335965, -0.0088127935, -0.019777594,
0.06864014, -0.059751723, 0.016233567, -0.06894641,
-0.28651384, -0.004228674, 0.019708522, -0.16305895,
-0.07468996, -0.0855457, 0.099339016, -0.07580735,
-0.13775392, 0.08434318, 0.08330512, -0.12131499,
0.031935584, 0.09180414, -0.08876437, -0.08049874,
0.008753825, 0.03498998, 0.030215185, 0.03907079,
0.089751154, 0.029194152, -0.03337423, -0.019092513,
0.04331237, 0.04299654, -0.036394123, -0.12915532,
0.09793732, 0.07512415, -0.11319543, -0.032502122,
0.15661901, 0.07671967, -0.005491124, -0.19379048,
-0.218606, 0.21448623, 0.017840758, 0.1416943,
-0.07051762, 0.19488361, 0.02664691, -0.18104725,
-0.09334311, 0.15026465, -0.15493552, -0.057762887,
-0.11604192, -0.262013, -0.01391798, 0.012185008,
0.11156489, -0.07483202, 0.06693364, -0.26151478,
0.046425626, 0.036540434, -0.16435726, 0.17338543,
-0.21401681, -0.11385144, -0.08283257, -0.069031075,
0.030635102, 0.010969227, 0.11109743, 0.010919218,
0.027526086, 0.13519906, 0.01891392, -0.046839405,
-0.040167913, 0.017953383, -0.09700955, 0.0061885654,
-0.07000971, 0.026893595, -0.038844477, 0.14543656};
lstm_input_ = {
{
0.787926, 0.151646, 0.071352, 0.118426, 0.458058,
0.596268, 0.998386, 0.568695, 0.864524, 0.571277,
0.073204, 0.296072, 0.743333, 0.069199, 0.045348,
0.867394, 0.291279, 0.013714, 0.482521, 0.626339},
{
0.295743, 0.544053, 0.690064, 0.858138, 0.497181,
0.642421, 0.524260, 0.134799, 0.003639, 0.162482,
0.640394, 0.930399, 0.050782, 0.432485, 0.988078,
0.082922, 0.563329, 0.865614, 0.333232, 0.259916}
};
lstm_golden_output_ = {
{
-0.00396806, 0.029352, -0.00279226, 0.0159977, -0.00835576,
-0.0211779, 0.0283512, -0.0114597, 0.00907307, -0.0244004,
-0.0152191, -0.0259063, 0.00914318, 0.00415118, 0.017147,
0.0134203, -0.0166936, 0.0381209, 0.000889694, 0.0143363,
-0.0328911, -0.0234288, 0.0333051, -0.012229, 0.0110322,
-0.0457725, -0.000832209, -0.0202817, 0.0327257, 0.0121308,
0.0155969, 0.0312091, -0.0213783, 0.0350169, 0.000324794,
0.0276012, -0.0263374, -0.0371449, 0.0446149, -0.0205474,
0.0103729, -0.0576349, -0.0150052, -0.0292043, 0.0376827,
0.0136115, 0.0243435, 0.0354492, -0.0189322, 0.0464512,
-0.00251373, 0.0225745, -0.0308346, -0.0317124, 0.0460407,
-0.0189395, 0.0149363, -0.0530162, -0.0150767, -0.0340193,
0.0286833, 0.00824207, 0.0264887, 0.0305169},
{
-0.013869, 0.0287268, -0.00334693, 0.00733398, -0.0287926,
-0.0186926, 0.0193662, -0.0115437, 0.00422612, -0.0345232,
0.00223253, -0.00957321, 0.0210624, 0.013331, 0.0150954,
0.02168, -0.0141913, 0.0322082, 0.00227024, 0.0260507,
-0.0188721, -0.0296489, 0.0399134, -0.0160509, 0.0116039,
-0.0447318, -0.0150515, -0.0277406, 0.0316596, 0.0118233,
0.0214762, 0.0293641, -0.0204549, 0.0450315, -0.00117378,
0.0167673, -0.0375007, -0.0238314, 0.038784, -0.0174034,
0.0131743, -0.0506589, -0.0048447, -0.0240239, 0.0325789,
0.00790065, 0.0220157, 0.0333314, -0.0264787, 0.0387855,
-0.000764675, 0.0217599, -0.037537, -0.0335206, 0.0431679,
-0.0211424, 0.010203, -0.062785, -0.00832363, -0.025181,
0.0412031, 0.0118723, 0.0239643, 0.0394009}};
}
};
TEST_F(NoCifgPeepholeProjectionClippingLstmTest, LstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 20;
const int n_output = 16;
LSTMOpModel lstm(n_batch, n_input, n_cell, n_output,
/*use_cifg=*/false, /*use_peephole=*/true,
/*use_projection_weights=*/true,
/*use_projection_bias=*/false,
/*cell_clip=*/0.0, /*proj_clip=*/0.0,
{
{n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_batch, n_output},
{n_batch, n_cell},
},
TensorType_FLOAT32);
lstm.SetInputToInputWeights(input_to_input_weights_);
lstm.SetInputToCellWeights(input_to_cell_weights_);
lstm.SetInputToForgetWeights(input_to_forget_weights_);
lstm.SetInputToOutputWeights(input_to_output_weights_);
lstm.SetInputGateBias(input_gate_bias_);
lstm.SetCellBias(cell_gate_bias_);
lstm.SetForgetGateBias(forget_gate_bias_);
lstm.SetOutputGateBias(output_gate_bias_);
lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
lstm.SetCellToInputWeights(cell_to_input_weights_);
lstm.SetCellToForgetWeights(cell_to_forget_weights_);
lstm.SetCellToOutputWeights(cell_to_output_weights_);
lstm.SetProjectionWeights(projection_weights_);
VerifyGoldens(lstm_input_, lstm_golden_output_, &lstm);
}
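// Layer-norm LSTM (no CIFG) with peephole and projection: the four layer-norm
// coefficient vectors are set in SetUp and their {n_cell} shapes are included
// in the interpreter's shape list.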
class NoCifgPeepholeProjectionNoClippingLayerNormLstmTest
: public BaseLstmTest {
void SetUp() override {
input_to_input_weights_ = {0.5, 0.6, 0.7, -0.8, -0.9, 0.1, 0.2,
0.3, -0.4, 0.5, -0.8, 0.7, -0.6, 0.5,
-0.4, -0.5, -0.4, -0.3, -0.2, -0.1};
input_to_forget_weights_ = {-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2,
-0.4, 0.3, -0.8, -0.4, 0.3, -0.5, -0.4,
-0.6, 0.3, -0.4, -0.6, -0.5, -0.5};
input_to_cell_weights_ = {-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2,
-0.3, -0.2, -0.6, 0.6, -0.1, -0.4, -0.3,
-0.7, 0.7, -0.9, -0.5, 0.8, 0.6};
input_to_output_weights_ = {-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3,
-0.3, -0.8, -0.2, 0.6, -0.2, 0.4, -0.7,
-0.3, -0.5, 0.1, 0.5, -0.6, -0.4};
input_gate_bias_ = {0.03, 0.15, 0.22, 0.38};
forget_gate_bias_ = {0.1, -0.3, -0.2, 0.1};
cell_gate_bias_ = {-0.05, 0.72, 0.25, 0.08};
output_gate_bias_ = {0.05, -0.01, 0.2, 0.1};
recurrent_to_input_weights_ = {-0.2, -0.3, 0.4, 0.1, -0.5, 0.9,
-0.2, -0.3, -0.7, 0.05, -0.2, -0.6};
recurrent_to_cell_weights_ = {-0.3, 0.2, 0.1, -0.3, 0.8, -0.08,
-0.2, 0.3, 0.8, -0.6, -0.1, 0.2};
recurrent_to_forget_weights_ = {-0.5, -0.3, -0.5, -0.2, 0.6, 0.4,
0.9, 0.3, -0.1, 0.2, 0.5, 0.2};
recurrent_to_output_weights_ = {0.3, -0.1, 0.1, -0.2, -0.5, -0.7,
-0.2, -0.6, -0.1, -0.4, -0.7, -0.2};
cell_to_input_weights_ = {0.05, 0.1, 0.25, 0.15};
cell_to_forget_weights_ = {-0.02, -0.15, -0.25, -0.03};
cell_to_output_weights_ = {0.1, -0.1, -0.5, 0.05};
input_layer_norm_coefficients_ = {0.1, 0.2, 0.3, 0.5};
forget_layer_norm_coefficients_ = {0.2, 0.2, 0.4, 0.3};
cell_layer_norm_coefficients_ = {0.7, 0.2, 0.3, 0.8};
output_layer_norm_coefficients_ = {0.6, 0.2, 0.2, 0.5};
projection_weights_ = {-0.1, 0.2, 0.01, -0.2, 0.1, 0.5,
0.3, 0.08, 0.07, 0.2, -0.4, 0.2};
lstm_input_ = {
{
0.7, 0.8, 0.1, 0.2, 0.3,
0.8, 0.1, 0.2, 0.4, 0.5,
0.2, 0.7, 0.7, 0.1, 0.7},
{
0.3, 0.2, 0.9, 0.8, 0.1,
0.1, 0.5, 0.2, 0.4, 0.2,
0.6, 0.9, 0.2, 0.5, 0.7},
};
}
};
TEST_F(NoCifgPeepholeProjectionNoClippingLayerNormLstmTest,
LayerNormLstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 4;
const int n_output = 3;
  const float cell_clip = 0.0;
  const float proj_clip = 0.0;
  LSTMOpModel layer_norm_lstm(
      n_batch, n_input, n_cell, n_output,
      /*use_cifg=*/false, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false, cell_clip, proj_clip,
{
{n_batch, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_batch, n_output},
{n_batch, n_cell},
{n_cell},
{n_cell},
{n_cell},
{n_cell},
},
TensorType_FLOAT32);
layer_norm_lstm.SetInputToInputWeights(input_to_input_weights_);
layer_norm_lstm.SetInputToCellWeights(input_to_cell_weights_);
layer_norm_lstm.SetInputToForgetWeights(input_to_forget_weights_);
layer_norm_lstm.SetInputToOutputWeights(input_to_output_weights_);
layer_norm_lstm.SetInputGateBias(input_gate_bias_);
layer_norm_lstm.SetCellBias(cell_gate_bias_);
layer_norm_lstm.SetForgetGateBias(forget_gate_bias_);
layer_norm_lstm.SetOutputGateBias(output_gate_bias_);
layer_norm_lstm.SetRecurrentToInputWeights(recurrent_to_input_weights_);
layer_norm_lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
layer_norm_lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
layer_norm_lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
layer_norm_lstm.SetCellToInputWeights(cell_to_input_weights_);
layer_norm_lstm.SetCellToForgetWeights(cell_to_forget_weights_);
layer_norm_lstm.SetCellToOutputWeights(cell_to_output_weights_);
layer_norm_lstm.SetInputLayerNormCoefficients(input_layer_norm_coefficients_);
layer_norm_lstm.SetForgetLayerNormCoefficients(
forget_layer_norm_coefficients_);
layer_norm_lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients_);
layer_norm_lstm.SetOutputLayerNormCoefficients(
output_layer_norm_coefficients_);
layer_norm_lstm.SetProjectionWeights(projection_weights_);
const std::vector<std::vector<float>> layer_norm_lstm_golden_output = {
{
0.0244077, 0.128027, -0.00170918,
0.0137642, 0.140751, 0.0395835,
-0.00459231, 0.155278, 0.0837377,
},
{
-0.00692428, 0.0848741, 0.063445,
-0.00403912, 0.139963, 0.072681,
0.00752706, 0.161903, 0.0561371,
}};
VerifyGoldens(lstm_input_, layer_norm_lstm_golden_output, &layer_norm_lstm);
}
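// Same layer-norm LSTM setup as above, but with CIFG enabled: the input-gate
// weights, bias, and input layer-norm coefficients are omitted.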
class CifgPeepholeProjectionNoClippingLayerNormLstmTest : public BaseLstmTest {
void SetUp() override {
input_to_forget_weights_ = {-0.6, -0.1, 0.3, 0.2, 0.9, -0.5, -0.2,
-0.4, 0.3, -0.8, -0.4, 0.3, -0.5, -0.4,
-0.6, 0.3, -0.4, -0.6, -0.5, -0.5};
input_to_cell_weights_ = {-0.4, -0.3, -0.2, -0.1, -0.5, 0.5, -0.2,
-0.3, -0.2, -0.6, 0.6, -0.1, -0.4, -0.3,
-0.7, 0.7, -0.9, -0.5, 0.8, 0.6};
input_to_output_weights_ = {-0.8, -0.4, -0.2, -0.9, -0.1, -0.7, 0.3,
-0.3, -0.8, -0.2, 0.6, -0.2, 0.4, -0.7,
-0.3, -0.5, 0.1, 0.5, -0.6, -0.4};
forget_gate_bias_ = {0.1, -0.3, -0.2, 0.1};
cell_gate_bias_ = {-0.05, 0.72, 0.25, 0.08};
output_gate_bias_ = {0.05, -0.01, 0.2, 0.1};
recurrent_to_cell_weights_ = {-0.3, 0.2, 0.1, -0.3, 0.8, -0.08,
-0.2, 0.3, 0.8, -0.6, -0.1, 0.2};
recurrent_to_forget_weights_ = {-0.5, -0.3, -0.5, -0.2, 0.6, 0.4,
0.9, 0.3, -0.1, 0.2, 0.5, 0.2};
recurrent_to_output_weights_ = {0.3, -0.1, 0.1, -0.2, -0.5, -0.7,
-0.2, -0.6, -0.1, -0.4, -0.7, -0.2};
cell_to_forget_weights_ = {-0.02, -0.15, -0.25, -0.03};
cell_to_output_weights_ = {0.1, -0.1, -0.5, 0.05};
forget_layer_norm_coefficients_ = {0.2, 0.2, 0.4, 0.3};
cell_layer_norm_coefficients_ = {0.7, 0.2, 0.3, 0.8};
output_layer_norm_coefficients_ = {0.6, 0.2, 0.2, 0.5};
projection_weights_ = {-0.1, 0.2, 0.01, -0.2, 0.1, 0.5,
0.3, 0.08, 0.07, 0.2, -0.4, 0.2};
lstm_input_ = {
{
0.7, 0.8, 0.1, 0.2, 0.3,
0.8, 0.1, 0.2, 0.4, 0.5,
0.2, 0.7, 0.7, 0.1, 0.7},
{
0.3, 0.2, 0.9, 0.8, 0.1,
0.1, 0.5, 0.2, 0.4, 0.2,
0.6, 0.9, 0.2, 0.5, 0.7},
};
}
};
TEST_F(CifgPeepholeProjectionNoClippingLayerNormLstmTest,
LayerNormLstmBlackBoxTest) {
const int n_batch = 2;
const int n_input = 5;
const int n_cell = 4;
const int n_output = 3;
  const float cell_clip = 0.0;
  const float proj_clip = 0.0;
  LSTMOpModel layer_norm_lstm(
      n_batch, n_input, n_cell, n_output,
      /*use_cifg=*/true, /*use_peephole=*/true,
      /*use_projection_weights=*/true,
      /*use_projection_bias=*/false, cell_clip, proj_clip,
{
{n_batch, n_input},
{0, 0},
{n_cell, n_input},
{n_cell, n_input},
{n_cell, n_input},
{0, 0},
{n_cell, n_output},
{n_cell, n_output},
{n_cell, n_output},
{0},
{n_cell},
{n_cell},
{0},
{n_cell},
{n_cell},
{n_cell},
{n_output, n_cell},
{0},
{n_batch, n_output},
{n_batch, n_cell},
{0},
{n_cell},
{n_cell},
{n_cell},
},
TensorType_FLOAT32);
layer_norm_lstm.SetInputToCellWeights(input_to_cell_weights_);
layer_norm_lstm.SetInputToForgetWeights(input_to_forget_weights_);
layer_norm_lstm.SetInputToOutputWeights(input_to_output_weights_);
layer_norm_lstm.SetCellBias(cell_gate_bias_);
layer_norm_lstm.SetForgetGateBias(forget_gate_bias_);
layer_norm_lstm.SetOutputGateBias(output_gate_bias_);
layer_norm_lstm.SetRecurrentToCellWeights(recurrent_to_cell_weights_);
layer_norm_lstm.SetRecurrentToForgetWeights(recurrent_to_forget_weights_);
layer_norm_lstm.SetRecurrentToOutputWeights(recurrent_to_output_weights_);
layer_norm_lstm.SetCellToForgetWeights(cell_to_forget_weights_);
layer_norm_lstm.SetCellToOutputWeights(cell_to_output_weights_);
layer_norm_lstm.SetForgetLayerNormCoefficients(
forget_layer_norm_coefficients_);
layer_norm_lstm.SetCellLayerNormCoefficients(cell_layer_norm_coefficients_);
layer_norm_lstm.SetOutputLayerNormCoefficients(
output_layer_norm_coefficients_);
layer_norm_lstm.SetProjectionWeights(projection_weights_);
const std::vector<std::vector<float>> layer_norm_lstm_golden_output = {
{
0.02129706, 0.140816242, 0.0112733059,
0.0132302344, 0.152308047, 0.0346313119,
-0.0123688057, 0.165790111, 0.0893077999,
},
{
-0.0226350538, 0.0916948169, 0.0769175813,
-0.0269966982, 0.149707705, 0.094149217,
-0.0103429332, 0.173016444, 0.0720508844,
}};
VerifyGoldens(lstm_input_, layer_norm_lstm_golden_output, &layer_norm_lstm);
}
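// Reduction ops (MEAN) run through the NNAPI delegate.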
class BaseReduceOpModel : public SingleOpModelWithNNAPI {
public:
void SetAxis(const std::vector<int>& data) { PopulateTensor(axis_, data); }
template <class T>
void SetInput(const std::vector<T>& data) {
PopulateTensor(input_, data);
}
template <class T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<float> GetDequantizedOutput() {
return Dequantize<uint8_t>(ExtractVector<uint8_t>(output_),
GetScale(output_), GetZeroPoint(output_));
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
int Input() { return input_; }
protected:
int input_;
int axis_;
int output_;
};
class MeanOpDynamicModel : public BaseReduceOpModel {
public:
MeanOpDynamicModel(const TensorData& input, const TensorData& output,
const TensorData& axis, bool keep_dims) {
input_ = AddInput(input);
axis_ = AddInput(axis);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_MEAN, BuiltinOptions_ReducerOptions,
CreateReducerOptions(builder_, keep_dims).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
};
TEST(DynamicFloatMeanOpTest, NotKeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpDynamicModel m({TensorType_FLOAT32, {4, 3, 2}},
{TensorType_FLOAT32, {2}}, {TensorType_INT32, {4}},
                       /*keep_dims=*/false);
std::vector<int> axis = {1, 0, -3, -3};
m.SetAxis(axis);
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({12, 13})));
}
class MeanOpConstModel : public BaseReduceOpModel {
public:
MeanOpConstModel(const TensorData& input, const TensorData& output,
std::initializer_list<int> axis_shape,
std::initializer_list<int> axis, bool keep_dims) {
input_ = AddInput(input);
axis_ = AddConstInput(TensorType_INT32, axis, axis_shape);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_MEAN, BuiltinOptions_ReducerOptions,
CreateReducerOptions(builder_, keep_dims).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
};
TEST(NNAPIDelegate, MeanFloatNotKeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {2}},
                     {4}, {1, 0, -3, -3}, /*keep_dims=*/false);
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
EXPECT_THAT(m.GetOutput<float>(), ElementsAreArray(ArrayFloatNear({12, 13})));
}
TEST(NNAPIDelegate, MeanFloatKeepDims) {
std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0};
MeanOpConstModel m({TensorType_FLOAT32, {4, 3, 2}}, {TensorType_FLOAT32, {3}},
                     {2}, {0, 2}, /*keep_dims=*/true);
m.SetInput(data);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 3, 1}));
EXPECT_THAT(m.GetOutput<float>(),
ElementsAreArray(ArrayFloatNear({10.5, 12.5, 14.5})));
}
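// Embedding lookup: gathers rows of the weight tensor at the given indices.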
class BaseEmbeddingLookupOpModel : public SingleOpModelWithNNAPI {
public:
BaseEmbeddingLookupOpModel(std::initializer_list<int> index_shape,
std::initializer_list<int> weight_shape,
TensorType weight_type = TensorType_FLOAT32) {
input_ = AddInput(TensorType_INT32);
weight_ = AddInput(weight_type);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_EMBEDDING_LOOKUP, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({index_shape, weight_shape});
}
void SetInput(std::initializer_list<int> data) {
PopulateTensor(input_, data);
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int weight_;
int output_;
};
class EmbeddingLookupOpModel : public BaseEmbeddingLookupOpModel {
public:
using BaseEmbeddingLookupOpModel::BaseEmbeddingLookupOpModel;
void Set3DWeightMatrix(const std::function<float(int, int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(weight_);
int rows = tensor->dims->data[0];
int columns = tensor->dims->data[1];
int features = tensor->dims->data[2];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < columns; j++) {
for (int k = 0; k < features; k++) {
tensor->data.f[(i * columns + j) * features + k] = function(i, j, k);
}
}
}
}
};
TEST(NNAPIDelegate, EmbeddingLookupSimpleTest) {
EmbeddingLookupOpModel m({3}, {3, 2, 4});
m.SetInput({1, 0, 2});
m.Set3DWeightMatrix(
[](int i, int j, int k) { return i + j / 10.0f + k / 100.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray(ArrayFloatNear({
1.00, 1.01, 1.02, 1.03, 1.10, 1.11, 1.12, 1.13,
0.00, 0.01, 0.02, 0.03, 0.10, 0.11, 0.12, 0.13,
2.00, 2.01, 2.02, 2.03, 2.10, 2.11, 2.12, 2.13,
})));
}
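// Hashtable lookup: maps lookup keys to rows of the value tensor and reports
// a per-key hit flag.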
class HashtableLookupOpModel : public SingleOpModelWithNNAPI {
public:
HashtableLookupOpModel(std::initializer_list<int> lookup_shape,
std::initializer_list<int> key_shape,
std::initializer_list<int> value_shape,
TensorType type) {
lookup_ = AddInput(TensorType_INT32);
key_ = AddInput(TensorType_INT32);
value_ = AddInput(type);
output_ = AddOutput(type);
hit_ = AddOutput(TensorType_UINT8);
SetBuiltinOp(BuiltinOperator_HASHTABLE_LOOKUP, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({lookup_shape, key_shape, value_shape});
}
void SetLookup(std::initializer_list<int> data) {
PopulateTensor<int>(lookup_, data);
}
void SetHashtableKey(std::initializer_list<int> data) {
PopulateTensor<int>(key_, data);
}
void SetHashtableValue(const std::vector<string>& content) {
PopulateStringTensor(value_, content);
}
void SetHashtableValue(const std::function<float(int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
for (int i = 0; i < rows; i++) {
tensor->data.f[i] = function(i);
}
}
void SetHashtableValue(const std::function<float(int, int)>& function) {
TfLiteTensor* tensor = interpreter_->tensor(value_);
int rows = tensor->dims->data[0];
int features = tensor->dims->data[1];
for (int i = 0; i < rows; i++) {
for (int j = 0; j < features; j++) {
tensor->data.f[i * features + j] = function(i, j);
}
}
}
std::vector<string> GetStringOutput() {
TfLiteTensor* output = interpreter_->tensor(output_);
int num = GetStringCount(output);
std::vector<string> result(num);
for (int i = 0; i < num; i++) {
auto ref = GetString(output, i);
result[i] = string(ref.str, ref.len);
}
return result;
}
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
std::vector<uint8_t> GetHit() { return ExtractVector<uint8_t>(hit_); }
private:
int lookup_;
int key_;
int value_;
int output_;
int hit_;
};
TEST(NNAPIDelegate, HashtableLookupTest2DInput) {
HashtableLookupOpModel m({4}, {3}, {3, 2}, TensorType_FLOAT32);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue([](int i, int j) { return i + j / 10.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
2.0, 2.1,
0, 0,
0.0, 0.1,
1.0, 1.1,
})));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
TEST(NNAPIDelegate, HashtableLookupTest1DInput) {
HashtableLookupOpModel m({4}, {3}, {3}, TensorType_FLOAT32);
m.SetLookup({1234, -292, -11, 0});
m.SetHashtableKey({-11, 0, 1234});
m.SetHashtableValue([](int i) { return i * i / 10.0f; });
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear({
0.4,
0,
0.0,
0.1,
})));
EXPECT_THAT(m.GetHit(), ElementsAreArray({
1,
0,
1,
1,
}));
}
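// PRelu: output = input when input >= 0, alpha * input otherwise, with alpha
// broadcast across the batch.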
class PReluOpModel : public SingleOpModelWithNNAPI {
public:
PReluOpModel(const TensorData& input, const TensorData& alpha)
: input_type_(input.type) {
input_ = AddInput(input);
alpha_ = AddInput(alpha);
output_ = AddOutput({input.type, input.shape, input.min, input.max});
SetBuiltinOp(BuiltinOperator_PRELU, BuiltinOptions_NONE, 0);
BuildInterpreterWithNNAPI({GetShape(input_), GetShape(alpha_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
void SetAlpha(std::initializer_list<float> data) {
SetData(alpha_, input_type_, data);
}
std::vector<float> GetOutput() {
std::vector<float> output;
GetData(output_, input_type_, &output);
return output;
}
protected:
int input_;
int alpha_;
int output_;
const TensorType input_type_;
};
TEST(NNAPIDelegate, PReluFloat) {
PReluOpModel m({TensorType_FLOAT32, {1, 2, 2, 3}},
{TensorType_FLOAT32, {1, 1, 3}});
m.SetInput({
0.0f, 0.0f, 0.0f,
1.0f, 1.0f, 1.0f,
-1.0f, -1.0f, -1.0f,
-2.0f, -2.0f, -2.0f,
});
m.SetAlpha({0.0f, 1.0f, 2.0f});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0.0f, 0.0f, 0.0f,
1.0f, 1.0f, 1.0f,
0.0f, -1.0f, -2.0f,
0.0f, -2.0f, -4.0f,
}));
}
TEST(NNAPIDelegate, PReluQuantized) {
const float kMin = -1;
const float kMax = 127.f / 128.f;
PReluOpModel m({TensorType_UINT8, {1, 2, 2, 3}, kMin, kMax},
{TensorType_UINT8, {1, 1, 3}, kMin, kMax});
m.SetInput({
0.0f, 0.0f, 0.0f,
0.5f, 0.5f, 0.5f,
-1.0f, -1.0f, -1.0f,
-0.25f, -0.25f, -0.25f,
});
m.SetAlpha({0.0f, 0.5f, -0.5f});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
0.0f, 0.0f, 0.0f,
0.5f, 0.5f, 0.5f,
0.0f, -0.5f, 0.5f,
0.0f, -0.125f, 0.125f,
},
kQuantizedTolerance)));
}
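// PADV2 models: the Const variants bake the paddings (and constant value)
// into the graph, while the Dynamic variants feed the paddings at run time.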
template <typename T1>
class PadV2OpConstModel : public PadOpModel<T1> {
public:
PadV2OpConstModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
std::initializer_list<int> paddings, T1 constant_values,
const TensorData& output) {
this->input_ = this->AddInput(input);
this->paddings_ =
this->AddConstInput(TensorType_INT32, paddings, paddings_shape);
this->constant_values_ =
this->AddConstInput(GetTensorType<T1>(), {constant_values}, {1});
this->output_ = this->AddOutput(output);
this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
CreatePadV2Options(this->builder_).Union());
this->BuildInterpreterWithNNAPI({input.shape});
}
PadV2OpConstModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
std::initializer_list<int> paddings,
const TensorData& constant_values,
const TensorData& output) {
this->input_ = this->AddInput(input);
this->paddings_ =
this->AddConstInput(TensorType_INT32, paddings, paddings_shape);
this->constant_values_ = this->AddInput(constant_values);
this->output_ = this->AddOutput(output);
this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
CreatePadV2Options(this->builder_).Union());
this->BuildInterpreterWithNNAPI({input.shape});
}
};
template <typename RegularInputOutput>
class PadV2OpDynamicModel : public PadOpModel<RegularInputOutput> {
public:
PadV2OpDynamicModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
RegularInputOutput constant_values,
const TensorData& output) {
this->input_ = this->AddInput(input);
this->paddings_ = this->AddInput(TensorType_INT32);
this->constant_values_ = this->AddConstInput(
GetTensorType<RegularInputOutput>(), {constant_values}, {1});
this->output_ = this->AddOutput(output);
this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
CreatePadV2Options(this->builder_).Union());
this->BuildInterpreterWithNNAPI({input.shape, paddings_shape});
}
PadV2OpDynamicModel(const TensorData& input,
std::initializer_list<int> paddings_shape,
const TensorData& constant_values,
const TensorData& output) {
this->input_ = this->AddInput(input);
this->paddings_ = this->AddInput(TensorType_INT32);
this->constant_values_ = this->AddInput(constant_values);
this->output_ = this->AddOutput(output);
this->SetBuiltinOp(BuiltinOperator_PADV2, BuiltinOptions_PadV2Options,
CreatePadV2Options(this->builder_).Union());
this->BuildInterpreterWithNNAPI({input.shape, paddings_shape});
}
};
TEST(PadV2OpTest, SimpleConstTest) {
PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2},
{0, 0, 1, 1, 1, 1, 0, 0}, 0.0,
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadV2OpTest, SimpleConstFloat32ValuedTestUint8) {
PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2},
{0, 0, 1, 1, 1, 1, 0, 0}, 5, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
5, 5, 5, 5, 5}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadV2OpTest, Simple4DConstFloat32ValuedTest) {
PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 1, 2, 1}}, {4, 2},
{0, 1, 0, 0, 0, 0, 0, 1}, 5, {TensorType_FLOAT32});
m.SetInput({3, 3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({3, 5, 3, 5, 5, 5, 5, 5}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2, 1, 2, 2}));
}
TEST(PadV2OpTest, SimpleDynamicTest) {
PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2}, 0.0,
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4});
m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0, 0, 0, 0, 0, 1, 2, 0, 0, 3, 4,
0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadV2OpTest, SimpleDynamicValuedTest) {
PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 2, 1}}, {4, 2}, 5,
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4});
m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({5, 5, 5, 5, 5, 1, 2, 5, 5, 3, 4,
5, 5, 5, 5, 5}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(PadV2OpTest, AdvancedConstTest) {
PadV2OpConstModel<float> m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2},
{0, 0, 0, 2, 1, 3, 0, 0}, 0, {TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(PadV2OpTest, AdvancedDynamicTest) {
PadV2OpDynamicModel<float> m({TensorType_FLOAT32, {1, 2, 3, 1}}, {4, 2}, 0,
{TensorType_FLOAT32});
m.SetInput({1, 2, 3, 4, 5, 6});
m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0, 1, 2, 3, 0, 0, 0, 0, 4, 5, 6, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
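// Matchers that allow one quantization step of error over the [min, max]
// range.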
std::vector<testing::Matcher<float>> DequantizedArrayNear(
const std::vector<float>& values, const float min, const float max) {
const float quantization_tolerance = (max - min) / 255.0;
return ArrayFloatNear(values, quantization_tolerance);
}
template <typename integer_type, TensorType tensor_dtype>
void SimpleConstTestV2() {
PadV2OpConstModel<integer_type> m(
{tensor_dtype, {1, 2, 2, 1}, -1.0, 1.0}, {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0},
{tensor_dtype, {1}, -1.0, 1.0}, {tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7});
m.template SetQuantizedPadValue<integer_type>(0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8SimpleConstTest) {
SimpleConstTestV2<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8SimpleConstTest) {
SimpleConstTestV2<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void SimpleDynamicTestV2() {
PadV2OpDynamicModel<integer_type> m({tensor_dtype, {1, 2, 2, 1}, -1.0, 1.0},
{4, 2}, {tensor_dtype, {1}, -1.0, 1.0},
{tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7});
m.template SetQuantizedPadValue<integer_type>(0);
m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{0, 0, 0, 0, 0, -0.8, 0.2, 0, 0, 0.9, 0.7, 0, 0, 0, 0, 0},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8SimpleDynamicTest) {
SimpleDynamicTestV2<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8SimpleDynamicTest) {
SimpleDynamicTestV2<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void AdvancedConstTestV2() {
PadV2OpConstModel<integer_type> m(
{tensor_dtype, {1, 2, 3, 1}, -1.0, 1.0}, {4, 2}, {0, 0, 0, 2, 1, 3, 0, 0},
{tensor_dtype, {1}, -1.0, 1.0}, {tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
m.template SetQuantizedPadValue<integer_type>(0);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8AdvancedConstTest) {
AdvancedConstTestV2<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8AdvancedConstTest) {
AdvancedConstTestV2<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void AdvancedDynamicTestV2() {
PadV2OpDynamicModel<integer_type> m({tensor_dtype, {1, 2, 3, 1}, -1.0, 1.0},
{4, 2}, {tensor_dtype, {1}, -1.0, 1.0},
{tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
m.template SetQuantizedPadValue<integer_type>(0);
m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{0, -0.8, 0.2, 0.9, 0, 0, 0, 0, 0.7, 0.1, -0.3, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8AdvancedDynamicTest) {
AdvancedDynamicTestV2<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8AdvancedDynamicTest) {
AdvancedDynamicTestV2<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void SimpleConstValuedTest() {
PadV2OpConstModel<integer_type> m(
{tensor_dtype, {1, 2, 2, 1}, -1.0, 1.0}, {4, 2}, {0, 0, 1, 1, 1, 1, 0, 0},
{tensor_dtype, {1}, -1.0, 1.0}, {tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7});
m.template SetQuantizedPadValue<integer_type>(-0.5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{-0.5, -0.5, -0.5, -0.5, -0.5, -0.8, 0.2, -0.5, -0.5, 0.9,
0.7, -0.5, -0.5, -0.5, -0.5, -0.5},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8SimpleConstValuedTest) {
SimpleConstValuedTest<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8SimpleConstValuedTest) {
SimpleConstValuedTest<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void SimpleDynamicValuedTest() {
PadV2OpDynamicModel<integer_type> m({tensor_dtype, {1, 2, 2, 1}, -1.0, 1.0},
{4, 2}, {tensor_dtype, {1}, -1.0, 1.0},
{tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7});
m.template SetQuantizedPadValue<integer_type>(-0.5);
m.SetPaddings({0, 0, 1, 1, 1, 1, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{-0.5, -0.5, -0.5, -0.5, -0.5, -0.8, 0.2, -0.5, -0.5, 0.9,
0.7, -0.5, -0.5, -0.5, -0.5, -0.5},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 4, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8SimpleDynamicValuedTest) {
SimpleDynamicValuedTest<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8SimpleDynamicValuedTest) {
SimpleDynamicValuedTest<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void AdvancedConstValuedTest() {
PadV2OpConstModel<integer_type> m(
{tensor_dtype, {1, 2, 3, 1}, -1.0, 1.0}, {4, 2}, {0, 0, 0, 2, 1, 3, 0, 0},
{tensor_dtype, {1}, -1.0, 1.0}, {tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
m.template SetQuantizedPadValue<integer_type>(-0.5);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{-0.5, -0.8, 0.2, 0.9, -0.5, -0.5, -0.5, -0.5, 0.7, 0.1,
-0.3, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8AdvancedConstValuedTest) {
AdvancedConstValuedTest<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8AdvancedConstValuedTest) {
AdvancedConstValuedTest<int8_t, TensorType_INT8>();
}
template <typename integer_type, TensorType tensor_dtype>
void AdvancedDynamicValuedTest() {
PadV2OpDynamicModel<integer_type> m({tensor_dtype, {1, 2, 3, 1}, -1.0, 1.0},
{4, 2}, {tensor_dtype, {1}, -1.0, 1.0},
{tensor_dtype, {}, -1.0, 1.0});
m.template SetQuantizedInput<integer_type>({-0.8, 0.2, 0.9, 0.7, 0.1, -0.3});
m.template SetQuantizedPadValue<integer_type>(-0.5);
m.SetPaddings({0, 0, 0, 2, 1, 3, 0, 0});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.template GetDequantizedOutput<integer_type>(),
ElementsAreArray(DequantizedArrayNear(
{-0.5, -0.8, 0.2, 0.9, -0.5, -0.5, -0.5, -0.5, 0.7, 0.1,
-0.3, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5,
-0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5, -0.5},
-1.0, 1.0)));
EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({1, 4, 7, 1}));
}
TEST(QuantizedPadV2OpTest, UInt8AdvancedDynamicValuedTest) {
AdvancedDynamicValuedTest<uint8_t, TensorType_UINT8>();
}
TEST(QuantizedPadV2OpTest, Int8AdvancedDynamicValuedTest) {
AdvancedDynamicValuedTest<int8_t, TensorType_INT8>();
}
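// LeakyRelu: output = input when input >= 0, alpha * input otherwise.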
class LeakyReluOpModel : public SingleOpModelWithNNAPI {
public:
LeakyReluOpModel(const TensorData& input, const float alpha)
: input_type_(input.type) {
input_ = AddInput(input);
output_ = AddOutput({input.type, input.shape, input.min, input.max});
SetBuiltinOp(BuiltinOperator_LEAKY_RELU, BuiltinOptions_LeakyReluOptions,
CreateLeakyReluOptions(builder_, alpha).Union());
BuildInterpreterWithNNAPI({GetShape(input_)});
}
void SetInput(std::initializer_list<float> data) {
SetData(input_, input_type_, data);
}
std::vector<float> GetOutput() {
std::vector<float> output;
GetData(output_, input_type_, &output);
return output;
}
protected:
int input_;
int output_;
const TensorType input_type_;
};
TEST(NNAPIDelegate, LeakyReluFloat) {
LeakyReluOpModel m({TensorType_FLOAT32, {2, 3}}, 0.5f);
m.SetInput({
0.0f, 1.0f, 3.0f,
1.0f, -1.0f, -2.0f,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({
0.0f, 1.0f, 3.0f,
1.0f, -0.5f, -1.0f,
}));
}
TEST(NNAPIDelegate, LeakyReluQuantized) {
const float kMin = -1;
const float kMax = 127.f / 128.f;
LeakyReluOpModel m({TensorType_UINT8, {2, 3}, 8 * kMin, 8 * kMax}, 0.5f);
m.SetInput({
0.0f, 1.0f, 3.0f,
1.0f, -1.0f, -2.0f,
});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray(ArrayFloatNear(
{
0.0f, 1.0f, 3.0f,
1.0f, -0.5f, -1.0f,
},
kQuantizedTolerance)));
}
}
namespace ops {
namespace builtin {
TfLiteRegistration* Register_FLOOR();
}
}
namespace {
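// Converts TfLite dims to NNAPI dimensions, mapping dynamic (-1) entries of
// dims_signature to 0 as NNAPI expects.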
std::vector<uint32_t> GetNNAPIDimensions(const TfLiteTensor* tensor) {
std::vector<uint32_t> dimensions;
dimensions.reserve(tensor->dims->size);
if (tensor->dims_signature != nullptr &&
tensor->dims_signature->size == tensor->dims->size) {
for (auto d : TfLiteIntArrayView(tensor->dims_signature)) {
uint32_t nnapi_dim = (d == -1) ? 0 : static_cast<uint32_t>(d);
dimensions.push_back(nnapi_dim);
}
} else {
dimensions.assign(tensor->dims->data,
tensor->dims->data + tensor->dims->size);
}
return dimensions;
}
static const char kTestCustomOp[] = "nnapi-custom-op";
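// Test vendor plugin that accepts the custom op only for single-input,
// single-output float32 nodes and maps it onto ANEURALNETWORKS_FLOOR.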
class NnapiTestVendorPlugin : public NnapiDelegateVendorPlugin {
public:
NnapiTestVendorPlugin() {
ValidateNode = DoValidateNode;
MapNode = DoMapNode;
ConfigureCompilationHints = DoConfigureCompilationHints;
ConfigureExecutionHints = DoConfigureExecutionHints;
}
static bool DoValidateNode(const TfLiteContext* context,
const TfLiteRegistration* registration,
const TfLiteNode* node) {
if (strcmp(kTestCustomOp, registration->custom_name) != 0) {
return false;
}
if (node->inputs->size != 1 || node->outputs->size != 1) {
return false;
}
if (context->tensors[node->inputs->data[(0)]].type != kTfLiteFloat32 ||
context->tensors[node->outputs->data[(0)]].type != kTfLiteFloat32) {
return false;
}
return true;
}
static TfLiteStatus AddFloat32Tensor(const TfLiteContext* context,
int tensor_index,
NnapiMappingUtilCInterface* mapping,
std::vector<uint32_t>* indices,
ANeuralNetworksModel* model) {
int ann_tensor_index = mapping->TfLiteIndexToNnIndex(mapping, tensor_index);
if (ann_tensor_index != -1) {
indices->push_back(ann_tensor_index);
return kTfLiteOk;
}
ann_tensor_index = mapping->AddNewNnTensorIndex(mapping, tensor_index);
TfLiteTensor* tensor = &context->tensors[tensor_index];
auto dimensions = GetNNAPIDimensions(tensor);
ANeuralNetworksOperandType operand_type{
.type = ANEURALNETWORKS_TENSOR_FLOAT32,
.dimensionCount = static_cast<uint32_t>(dimensions.size()),
.dimensions = dimensions.data(),
.scale = 0.0f,
.zeroPoint = 0,
};
EXPECT_EQ(NnApiImplementation()->ANeuralNetworksModel_addOperand(
model, &operand_type),
ANEURALNETWORKS_NO_ERROR);
if (tensor->allocation_type == kTfLiteMmapRo) {
EXPECT_EQ(NnApiImplementation()->ANeuralNetworksModel_setOperandValue(
model, ann_tensor_index, tensor->data.data, tensor->bytes),
ANEURALNETWORKS_NO_ERROR);
}
indices->push_back(ann_tensor_index);
return kTfLiteOk;
}
static TfLiteStatus DoMapNode(TfLiteContext* context, const TfLiteNode* node,
int node_index,
NnapiMappingUtilCInterface* mapping,
ANeuralNetworksModel* model) {
std::vector<uint32_t> input_indices;
std::vector<uint32_t> output_indices;
for (int input_pos = 0; input_pos < node->inputs->size; ++input_pos) {
const auto input_index = node->inputs->data[input_pos];
EXPECT_EQ(AddFloat32Tensor(context, input_index, mapping, &input_indices,
model),
kTfLiteOk);
}
for (int output_pos = 0; output_pos < node->outputs->size; ++output_pos) {
const auto output_index = node->outputs->data[output_pos];
EXPECT_EQ(AddFloat32Tensor(context, output_index, mapping,
&output_indices, model),
kTfLiteOk);
}
EXPECT_EQ(
NnApiImplementation()->ANeuralNetworksModel_addOperation(
model, ANEURALNETWORKS_FLOOR,
static_cast<uint32_t>(input_indices.size()), input_indices.data(),
static_cast<uint32_t>(output_indices.size()),
output_indices.data()),
ANEURALNETWORKS_NO_ERROR);
mapping->AddNnapiToTfliteOpMapping(mapping, node_index);
return kTfLiteOk;
}
static TfLiteStatus DoConfigureCompilationHints(
const char* compilation_hints, ANeuralNetworksCompilation* compilation) {
return kTfLiteOk;
}
static TfLiteStatus DoConfigureExecutionHints(
const char* execution_hints, ANeuralNetworksExecution* execution) {
return kTfLiteOk;
}
};
class CustomFloorOpModel : public SingleOpModelWithNNAPI {
public:
CustomFloorOpModel(const StatefulNnApiDelegate::Options& options,
const TensorData& input, const TensorData& output,
bool allow_fp32_relax_to_fp16 = false,
bool apply_delegate = true)
: SingleOpModelWithNNAPI(options) {
Init(input, output, allow_fp32_relax_to_fp16, apply_delegate);
}
int input() { return input_; }
int output() { return output_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input_;
int output_;
private:
void Init(const TensorData& input, const TensorData& output,
bool allow_fp32_relax_to_fp16 = false, bool apply_delegate = true) {
input_ = AddInput(input);
output_ = AddOutput(output);
SetCustomOp(kTestCustomOp, {}, tflite::ops::builtin::Register_FLOOR);
BuildInterpreterWithNNAPI({GetShape(input_)}, allow_fp32_relax_to_fp16,
apply_delegate);
}
};
TEST(NNAPIDelegate, CustomFloorVendorExtension) {
auto vendor_plugin = std::make_unique<NnapiTestVendorPlugin>();
StatefulNnApiDelegate::Options options;
options.accelerator_name = "nnapi-reference";
options.vendor_plugin = vendor_plugin.get();
CustomFloorOpModel m(options, {TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}});
m.PopulateTensor<float>(m.input(), {0, 0.2, 1.7, 2.8});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({0.0, 0.0, 1.0, 2.0}));
}
TEST(NNAPIDelegate, DISABLED_CustomFloorVendorExtensionDynamic) {
if (NnApiImplementation()->android_sdk_version <
delegate::nnapi::kMinSdkVersionForNNAPI12) {
GTEST_SKIP();
}
auto vendor_plugin = std::make_unique<NnapiTestVendorPlugin>();
StatefulNnApiDelegate::Options options;
options.accelerator_name = "nnapi-reference";
options.vendor_plugin = vendor_plugin.get();
options.allow_dynamic_dimensions = true;
  auto tensor_data = TensorData{TensorType_FLOAT32,
                                /*shape=*/{1, 2, 2, 1},
                                /*min=*/0.0f,
                                /*max=*/0.0f,
                                /*scale=*/0.0f,
                                /*zero_point=*/0,
                                /*per_channel_quantization=*/false,
                                /*per_channel_quantization_scales=*/{},
                                /*per_channel_quantization_offsets=*/{},
                                /*channel_index=*/0,
                                /*traversal_order=*/{},
                                /*format=*/{},
                                /*block_size=*/{},
                                /*block_map=*/{},
                                /*shape_signature=*/{-1, 2, 2, 1}};
size_t max_batch_size = 2;
size_t tensor_max_size = max_batch_size * 2 * 2 * 1 * sizeof(float);
  CustomFloorOpModel m(options, tensor_data, tensor_data,
                       /*allow_fp32_relax_to_fp16=*/false,
                       /*apply_delegate=*/false);
m.SetTensorMaxSize(m.input(), tensor_max_size);
m.SetTensorMaxSize(m.output(), tensor_max_size);
m.ApplyNNAPIDelegate();
EXPECT_EQ(m.ResizeInputTensor(m.input(), {2, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input(), {0, 0.2, 1.7, 2.8, 3.4, 4.1, 5.9, 6.3});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(),
ElementsAreArray({0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0}));
EXPECT_EQ(m.ResizeInputTensor(m.input(), {1, 2, 2, 1}), kTfLiteOk);
EXPECT_EQ(m.AllocateTensors(), kTfLiteOk);
m.PopulateTensor<float>(m.input(), {1.7, 2.8, 3.4, 4.1});
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput(), ElementsAreArray({1.0, 2.0, 3.0, 4.0}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/nnapi_delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/nnapi/nnapi_delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ee3c1432-be08-420c-8fd2-095a457add68 | cpp | google/cel-cpp | names | internal/names.cc | internal/names_test.cc | #include "internal/names.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "internal/lexis.h"
namespace cel::internal {
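// A relative name is one or more '.'-separated identifiers.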
bool IsValidRelativeName(absl::string_view name) {
if (name.empty()) {
return false;
}
for (const auto& id : absl::StrSplit(name, '.')) {
if (!LexisIsIdentifier(id)) {
return false;
}
}
return true;
}
} | #include "internal/names.h"
#include "internal/testing.h"
namespace cel::internal {
namespace {
struct NamesTestCase final {
absl::string_view text;
bool ok;
};
using IsValidRelativeNameTest = testing::TestWithParam<NamesTestCase>;
TEST_P(IsValidRelativeNameTest, Compliance) {
const NamesTestCase& test_case = GetParam();
if (test_case.ok) {
EXPECT_TRUE(IsValidRelativeName(test_case.text));
} else {
EXPECT_FALSE(IsValidRelativeName(test_case.text));
}
}
INSTANTIATE_TEST_SUITE_P(IsValidRelativeNameTest, IsValidRelativeNameTest,
testing::ValuesIn<NamesTestCase>({{"foo", true},
{"foo.Bar", true},
{"", false},
{".", false},
{".foo", false},
{".foo.Bar", false},
{"foo..Bar", false},
{"foo.Bar.",
false}}));
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/names.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/names_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
15e3231c-49f1-4370-965b-db6587959c9c | cpp | tensorflow/tensorflow | fingerprinting_utils | tensorflow/cc/saved_model/fingerprinting_utils.cc | tensorflow/cc/saved_model/fingerprinting_utils_test.cc | #include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/records/record_reader.h"
#include "tensorflow/cc/saved_model/constants.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/file_system_helper.h"
#include "tensorflow/core/platform/fingerprint.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_model.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/core/util/tensor_bundle/naming.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/merge.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace tensorflow::saved_model::fingerprinting {
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using tools::proto_splitter::Field;
using tools::proto_splitter::FieldType;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetFieldTypes;
using tools::proto_splitter::GetMutableField;
using tools::proto_splitter::GetRiegeliReader;
using tools::proto_splitter::Merger;
using tools::proto_splitter::MutableFieldResult;
using tools::proto_splitter::ReadChunk;
namespace fingerprinting_utils_internal {
using ::tensorflow::protobuf::Map;
using ::tensorflow::protobuf::Message;
using ::tensorflow::protobuf::RepeatedPtrField;
using ::tensorflow::protobuf::io::CodedOutputStream;
using ::tensorflow::protobuf::io::StringOutputStream;
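// Returns the number of leading entries of `a` and `b` that match, comparing
// field numbers, indices, or map keys as appropriate.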
absl::StatusOr<int> fieldTagMatches(const RepeatedPtrField<FieldIndex>& a,
const RepeatedPtrField<FieldIndex>& b) {
int matches = 0;
for (int i = 0; i == matches && i < a.size() && i < b.size(); i++) {
switch (b[i].kind_case()) {
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kField:
if (a.at(i).has_field() && a.at(i).field() == b.at(i).field()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kIndex:
if (a.at(i).has_index() && a.at(i).index() == b.at(i).index()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::KindCase::kMapKey:
if (a.at(i).has_map_key()) {
const ::tensorflow::proto_splitter::FieldIndex_MapKey& key =
b.at(i).map_key();
const ::tensorflow::proto_splitter::FieldIndex_MapKey& chunked_key =
a.at(i).map_key();
switch (key.type_case()) {
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::kS:
if (chunked_key.has_s() && chunked_key.s() == key.s()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kBoolean:
if (chunked_key.has_boolean() &&
chunked_key.boolean() == key.boolean()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kUi32:
if (chunked_key.has_ui32() && chunked_key.ui32() == key.ui32()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kUi64:
if (chunked_key.has_ui64() && chunked_key.ui64() == key.ui64()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kI32:
if (chunked_key.has_i32() && chunked_key.i32() == key.i32()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
kI64:
if (chunked_key.has_i64() && chunked_key.i64() == key.i64()) {
matches += 1;
}
break;
case ::tensorflow::proto_splitter::FieldIndex::MapKey::TypeCase::
TYPE_NOT_SET:
default:
return absl::FailedPreconditionError(
"Encountered unknown field_tag.map_key type.");
}
}
break;
case FieldIndex::KindCase::KIND_NOT_SET:
default:
return absl::FailedPreconditionError(
"Encountered unknown field_tag kind.");
}
}
return matches;
}
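// Keeps only the chunked fields whose tag paths lie on one of the target
// field paths, recursing into their sub-messages.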
absl::StatusOr<::tensorflow::proto_splitter::ChunkedMessage>
PruneChunkedMessage(
const ::tensorflow::proto_splitter::ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
std::vector<ChunkInfo> chunks_info,
std::vector<RepeatedPtrField<FieldIndex>> target_fields_list) {
::tensorflow::proto_splitter::ChunkedMessage pruned_chunked_message;
if (chunked_message.has_chunk_index()) {
pruned_chunked_message.set_chunk_index(chunked_message.chunk_index());
}
for (const ChunkedField& chunked_field : chunked_message.chunked_fields()) {
for (const auto& target_fields : target_fields_list) {
TF_ASSIGN_OR_RETURN(
int matches,
fieldTagMatches(chunked_field.field_tag(), target_fields));
if (matches == chunked_field.field_tag_size()) {
auto cf = std::make_unique<proto_splitter::ChunkedField>();
cf->mutable_field_tag()->CopyFrom(chunked_field.field_tag());
TF_ASSIGN_OR_RETURN(
*cf->mutable_message(),
PruneChunkedMessage(chunked_field.message(), reader, chunks_info,
target_fields_list));
pruned_chunked_message.mutable_chunked_fields()->AddAllocated(
cf.release());
}
}
}
return pruned_chunked_message;
}
std::string SerializeProto(const Message& message) {
std::string serialized_message;
{
StringOutputStream stream(&serialized_message);
CodedOutputStream output(&stream);
output.SetSerializationDeterministic(true);
message.SerializeToCodedStream(&output);
}
return serialized_message;
}
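// Hashes the chunks reachable from chunked_message that match field_tags,
// parsing chunk contents into merged_message as needed along the way.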
absl::StatusOr<uint64_t> HashFields(
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info,
const RepeatedPtrField<FieldIndex>& field_tags, Message* merged_message) {
uint64_t field_checksum = 0;
for (const ChunkedField& chunked_field : chunked_message.chunked_fields()) {
const RepeatedPtrField<FieldIndex> chunked_field_tags =
chunked_field.field_tag();
const ChunkedMessage& chunked_message = chunked_field.message();
TF_ASSIGN_OR_RETURN(int matches,
fieldTagMatches(chunked_field_tags, field_tags));
if (chunked_message.has_chunk_index() && matches == field_tags.size()) {
TF_ASSIGN_OR_RETURN(
std::string chunk,
ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
field_checksum = FingerprintCat64(field_checksum, Fingerprint64(chunk));
} else if (matches == field_tags.size()) {
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(chunked_message, reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
} else if (chunked_message.has_chunk_index() &&
matches == chunked_field_tags.size()) {
TF_ASSIGN_OR_RETURN(std::vector<Field> fields,
GetFieldTypes(chunked_field_tags));
for (const auto& field : fields) {
TF_ASSIGN_OR_RETURN(MutableFieldResult mfr,
GetMutableField(merged_message, field));
merged_message =
mfr.parent->GetReflection()->MutableMessage(mfr.parent, mfr.field);
}
TF_ASSIGN_OR_RETURN(
std::string chunk,
ReadChunk(reader, chunks_info[chunked_message.chunk_index()]));
merged_message->ParseFromString(chunk);
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(chunked_message, reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
} else if (matches == chunked_field_tags.size()) {
for (const ChunkedField& cf : chunked_message.chunked_fields()) {
TF_ASSIGN_OR_RETURN(uint64_t hash,
HashFields(cf.message(), reader, chunks_info,
field_tags, merged_message));
field_checksum = FingerprintCat64(field_checksum, hash);
}
}
}
return field_checksum;
}
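// Field-tag paths into SavedModel for meta_graphs(0).graph_def,
// .signature_def, and .object_graph_def.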
inline RepeatedPtrField<FieldIndex> GraphDefFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex graph_def_field_tag;
graph_def_field_tag.set_field(2);
RepeatedPtrField<FieldIndex> graph_def_field_tags;
graph_def_field_tags.Add(FieldIndex(meta_graph_field_tag));
graph_def_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
graph_def_field_tags.Add(FieldIndex(graph_def_field_tag));
return graph_def_field_tags;
}
inline RepeatedPtrField<FieldIndex> SignatureDefFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex signature_def_field_tag;
signature_def_field_tag.set_field(5);
RepeatedPtrField<FieldIndex> signature_def_field_tags;
signature_def_field_tags.Add(FieldIndex(meta_graph_field_tag));
signature_def_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
signature_def_field_tags.Add(FieldIndex(signature_def_field_tag));
return signature_def_field_tags;
}
inline RepeatedPtrField<FieldIndex> SavedObjectGraphFieldTags() {
FieldIndex meta_graph_field_tag;
meta_graph_field_tag.set_field(2);
FieldIndex meta_graph_index_field_tag;
meta_graph_index_field_tag.set_index(0);
FieldIndex saved_object_graph_field_tag;
saved_object_graph_field_tag.set_field(7);
RepeatedPtrField<FieldIndex> saved_object_graph_field_tags;
saved_object_graph_field_tags.Add(FieldIndex(meta_graph_field_tag));
saved_object_graph_field_tags.Add(FieldIndex(meta_graph_index_field_tag));
saved_object_graph_field_tags.Add(FieldIndex(saved_object_graph_field_tag));
return saved_object_graph_field_tags;
}
absl::StatusOr<SavedModel> PrunedSavedModel(
absl::string_view export_dir,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info, ChunkMetadata& chunk_metadata) {
SavedModel saved_model;
ChunkMetadata pruned_chunk_metadata;
pruned_chunk_metadata.mutable_chunks()->CopyFrom(chunk_metadata.chunks());
TF_ASSIGN_OR_RETURN(
*pruned_chunk_metadata.mutable_message(),
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{GraphDefFieldTags(), SignatureDefFieldTags(),
SavedObjectGraphFieldTags()}));
TF_RETURN_IF_ERROR(
Merger::ReadPartial(io::JoinPath(export_dir, kSavedModelFilenamePrefix),
pruned_chunk_metadata, &saved_model));
return saved_model;
}
absl::StatusOr<uint64_t> HashMessage(
Message* message, const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info,
const RepeatedPtrField<FieldIndex>& field_tags) {
uint64_t total_message_hash = Fingerprint64(SerializeProto(*message));
TF_ASSIGN_OR_RETURN(
uint64_t message_hash,
HashFields(chunked_message, reader, chunks_info, field_tags, message));
return FingerprintCat64(total_message_hash, message_hash);
}
absl::StatusOr<uint64_t> HashGraphDef(
::tensorflow::GraphDef* graph_def, const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
return HashMessage(graph_def, chunked_message, reader, chunks_info,
GraphDefFieldTags());
}
absl::StatusOr<uint64_t> HashSignatureDef(
const Map<std::string, ::tensorflow::SignatureDef>& signature_def_map,
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
uint64_t signature_def_hash = 0;
std::vector<std::pair<std::string, ::tensorflow::SignatureDef>>
signature_def_sorted(signature_def_map.begin(), signature_def_map.end());
std::sort(signature_def_sorted.begin(), signature_def_sorted.end(),
[](const std::pair<std::string, ::tensorflow::SignatureDef>& a,
const std::pair<std::string, ::tensorflow::SignatureDef>& b) {
return a.first < b.first;
});
for (const auto& signature_def : signature_def_sorted) {
uint64_t signature_def_pair_hash =
FingerprintCat64(Fingerprint64(signature_def.first),
Fingerprint64(SerializeProto(signature_def.second)));
signature_def_hash =
FingerprintCat64(signature_def_hash, signature_def_pair_hash);
SignatureDef signature_def_val = signature_def.second;
TF_ASSIGN_OR_RETURN(
uint64_t signature_def_entry_hash,
HashFields(chunked_message, reader, chunks_info,
SignatureDefFieldTags(), &signature_def_val));
signature_def_hash =
FingerprintCat64(signature_def_hash, signature_def_entry_hash);
}
return signature_def_hash;
}
absl::StatusOr<uint64_t> HashSavedObjectGraph(
::tensorflow::SavedObjectGraph* saved_object_graph,
const ChunkedMessage& chunked_message,
riegeli::RecordReader<riegeli::FdReader<>>& reader,
const std::vector<ChunkInfo>& chunks_info) {
return HashMessage(saved_object_graph, chunked_message, reader, chunks_info,
SavedObjectGraphFieldTags());
}
}
using fingerprinting_utils_internal::HashFields;
using fingerprinting_utils_internal::HashGraphDef;
using fingerprinting_utils_internal::HashSavedObjectGraph;
using fingerprinting_utils_internal::HashSignatureDef;
using fingerprinting_utils_internal::PrunedSavedModel;
using fingerprinting_utils_internal::SerializeProto;
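// Hashes the checkpoint's variables index file; returns 0 if it cannot be
// read.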
uint64_t HashCheckpointIndexFile(absl::string_view model_dir) {
std::string meta_filename = MetaFilename(io::JoinPath(
model_dir, kSavedModelVariablesDirectory, kSavedModelVariablesFilename));
std::string data;
absl::Status read_status =
ReadFileToString(Env::Default(), meta_filename, &data);
if (read_status.ok()) {
return tensorflow::Fingerprint64(data);
} else {
return 0;
}
}
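// Builds a FingerprintDef from a chunked (.cpb) SavedModel by hashing the
// relevant chunks of the graph def, signature defs, and object graph, plus
// the checkpoint index file.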
absl::StatusOr<FingerprintDef> CreateFingerprintDefCpb(
absl::string_view export_dir, std::string cpb_file) {
const int kFingerprintProducer = 2;
TF_ASSIGN_OR_RETURN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
return absl::FailedPreconditionError(
absl::StrCat("Couldn't read ChunkMetadata from chunked proto.\n",
read_metadata.status().ToString()));
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FingerprintDef fingerprint_def;
SavedModel saved_model;
TF_ASSIGN_OR_RETURN(uint64_t saved_model_hash,
HashFields(chunk_metadata.message(), reader, chunks_info,
{}, &saved_model));
saved_model_hash = FingerprintCat64(
saved_model_hash, Fingerprint64(SerializeProto(saved_model)));
fingerprint_def.set_saved_model_checksum(saved_model_hash);
TF_ASSIGN_OR_RETURN(
saved_model,
PrunedSavedModel(export_dir, reader, chunks_info, chunk_metadata));
TF_ASSIGN_OR_RETURN(
uint64_t graph_def_program_hash,
HashGraphDef(saved_model.mutable_meta_graphs(0)->mutable_graph_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_graph_def_program_hash(graph_def_program_hash);
TF_ASSIGN_OR_RETURN(
uint64_t signature_def_hash,
HashSignatureDef(saved_model.meta_graphs(0).signature_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_signature_def_hash(signature_def_hash);
TF_ASSIGN_OR_RETURN(
uint64_t saved_object_graph_hash,
HashSavedObjectGraph(
saved_model.mutable_meta_graphs(0)->mutable_object_graph_def(),
chunk_metadata.message(), reader, chunks_info));
fingerprint_def.set_saved_object_graph_hash(saved_object_graph_hash);
fingerprint_def.set_checkpoint_hash(HashCheckpointIndexFile(export_dir));
reader.Close();
VersionDef* version = fingerprint_def.mutable_version();
version->set_producer(kFingerprintProducer);
return fingerprint_def;
}
} | #include "tensorflow/cc/saved_model/fingerprinting_utils.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saved_object_graph.pb.h"
#include "tensorflow/tools/proto_splitter/cc/util.h"
#include "tensorflow/tools/proto_splitter/chunk.pb.h"
#include "tensorflow/tools/proto_splitter/testdata/test_message.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace tensorflow::saved_model::fingerprinting {
namespace {
using fingerprinting_utils_internal::fieldTagMatches;
using fingerprinting_utils_internal::HashFields;
using fingerprinting_utils_internal::HashGraphDef;
using fingerprinting_utils_internal::HashSavedObjectGraph;
using fingerprinting_utils_internal::HashSignatureDef;
using fingerprinting_utils_internal::PruneChunkedMessage;
using fingerprinting_utils_internal::SerializeProto;
using ::tensorflow::proto_splitter::ChunkedField;
using ::tensorflow::proto_splitter::ChunkedMessage;
using ::tensorflow::proto_splitter::ChunkInfo;
using ::tensorflow::proto_splitter::ChunkMetadata;
using ::tensorflow::proto_splitter::FieldIndex;
using ::tensorflow::proto_splitter_testdata::ManyFields;
using ::tensorflow::protobuf::Message;
using ::tensorflow::protobuf::RepeatedPtrField;
using ::tensorflow::protobuf::TextFormat;
using ::tensorflow::protobuf::io::ArrayInputStream;
using ::tensorflow::protobuf::util::MessageDifferencer;
using tools::proto_splitter::GetChunkMetadata;
using tools::proto_splitter::GetRiegeliReader;
using tsl::testing::IsOkAndHolds;
using tsl::testing::TensorFlowSrcRoot;
absl::Status ParseTextProto(absl::string_view text_proto,
Message* parsed_proto) {
TextFormat::Parser parser;
ArrayInputStream input_stream(text_proto.data(), text_proto.size());
if (parser.Parse(&input_stream, parsed_proto)) {
return absl::OkStatus();
}
parsed_proto->Clear();
return absl::InvalidArgumentError(
absl::StrCat("Could not parse text proto: ", text_proto));
}
absl::StatusOr<RepeatedPtrField<::tensorflow::proto_splitter::FieldIndex>>
ExtractFieldTags(absl::string_view chunked_field_text_proto) {
ChunkedField chunked_field;
TF_RETURN_IF_ERROR(ParseTextProto(chunked_field_text_proto, &chunked_field));
return chunked_field.field_tag();
}
TEST(FingerprintingTest, TestFieldTagMatchesInitialSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.DeleteSubrange(2, 2);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(2));
}
TEST(FingerprintingTest, TestFieldTagMatchesNoninitialSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.DeleteSubrange(0, 2);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(0));
}
TEST(FingerprintingTest, TestFieldTagMatchesIdenticalSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(4));
}
TEST(FingerprintingTest, TestFieldTagMatchesSuperSubsequence) {
TF_ASSERT_OK_AND_ASSIGN(RepeatedPtrField<FieldIndex> field_tags,
ExtractFieldTags(R"pb(
field_tag { field: 2 }
field_tag { index: 1505 }
field_tag { field: 5 }
field_tag { map_key { ui32: 123 } }
)pb"));
RepeatedPtrField<FieldIndex> field_tags_sub;
field_tags_sub.CopyFrom(field_tags);
field_tags_sub.Add()->set_field(6);
EXPECT_THAT(fieldTagMatches(field_tags_sub, field_tags), IsOkAndHolds(4));
}
TEST(FingerprintingTest, TestPruneChunkedMessageSingleTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FieldIndex field_one_field_tag;
field_one_field_tag.set_field(1);
FieldIndex repeated_field_field_tag;
repeated_field_field_tag.set_field(2);
FieldIndex repeated_field_index_field_tag;
repeated_field_index_field_tag.set_index(1);
RepeatedPtrField<FieldIndex> target_field_tags;
target_field_tags.Add(FieldIndex(field_one_field_tag));
target_field_tags.Add(FieldIndex(repeated_field_field_tag));
target_field_tags.Add(FieldIndex(repeated_field_index_field_tag));
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{target_field_tags}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
chunked_fields {
field_tag { field: 1 }
message { chunk_index: 1 }
}
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestPruneChunkedMessageMultiTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
FieldIndex field_one_field_tag;
field_one_field_tag.set_field(1);
FieldIndex repeated_field_field_tag;
repeated_field_field_tag.set_field(2);
FieldIndex repeated_field_index_field_tag;
repeated_field_index_field_tag.set_index(1);
RepeatedPtrField<FieldIndex> target_one_field_tags;
target_one_field_tags.Add(FieldIndex(field_one_field_tag));
target_one_field_tags.Add(FieldIndex(repeated_field_field_tag));
target_one_field_tags.Add(FieldIndex(repeated_field_index_field_tag));
FieldIndex nested_map_bool_field_tag;
nested_map_bool_field_tag.set_field(7);
FieldIndex nested_map_bool_mapkey_field_tag;
nested_map_bool_mapkey_field_tag.mutable_map_key()->set_boolean(true);
FieldIndex string_field_field_tag;
string_field_field_tag.set_field(3);
RepeatedPtrField<FieldIndex> target_two_field_tags;
target_two_field_tags.Add(FieldIndex(nested_map_bool_field_tag));
target_two_field_tags.Add(FieldIndex(nested_map_bool_mapkey_field_tag));
target_two_field_tags.Add(FieldIndex(string_field_field_tag));
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info,
{target_one_field_tags, target_two_field_tags}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
chunked_fields {
field_tag { field: 1 }
message { chunk_index: 1 }
}
chunked_fields {
field_tag { field: 7 }
field_tag { map_key { boolean: true } }
message { chunk_index: 2 }
}
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestPruneChunkedMessageNoTarget) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
ChunkedMessage pruned_chunked_message;
TF_ASSERT_OK_AND_ASSIGN(
pruned_chunked_message,
PruneChunkedMessage(chunk_metadata.message(), reader, chunks_info, {}));
std::string expected_pruned_chunked_message_text_proto = R"pb(
chunk_index: 0
)pb";
ChunkedMessage expected_pruned_chunked_message;
TF_ASSERT_OK(ParseTextProto(expected_pruned_chunked_message_text_proto,
&expected_pruned_chunked_message));
ASSERT_TRUE(MessageDifferencer::Equals(pruned_chunked_message,
expected_pruned_chunked_message));
}
TEST(FingerprintingTest, TestSerializeProto) {
std::string many_fields_text_proto = R"pb(
string_field: "abc123"
)pb";
ManyFields many_fields;
TF_ASSERT_OK(ParseTextProto(many_fields_text_proto, &many_fields));
ASSERT_EQ(SerializeProto(many_fields), many_fields.SerializeAsString());
}
TEST(FingerprintingTest, TestHashFieldsV2) {
std::string cpb_file = io::JoinPath(
TensorFlowSrcRoot(), "tools/proto_splitter/testdata", "many-field.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
ManyFields many_fields;
TF_ASSERT_OK_AND_ASSIGN(uint64_t many_fields_hash,
HashFields(chunk_metadata.message(), reader,
chunks_info, {}, &many_fields));
ASSERT_EQ(many_fields_hash, 14850154939410192811U);
}
TEST(FingerprintingTest, TestHashGraphDef) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
GraphDef graph_def;
EXPECT_THAT(
HashGraphDef(&graph_def, chunk_metadata.message(), reader, chunks_info),
IsOkAndHolds(16782272393894422524U));
}
TEST(FingerprintingTest, TestHashSignatureDef) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
::tensorflow::protobuf::Map<std::string, SignatureDef> signature_def_map;
SignatureDef signature_def;
EXPECT_THAT(HashSignatureDef(signature_def_map, chunk_metadata.message(),
reader, chunks_info),
IsOkAndHolds(0));
}
TEST(FingerprintingTest, TestHashSavedObjectGraph) {
std::string cpb_file =
io::JoinPath(TensorFlowSrcRoot(), "tools/proto_splitter/testdata",
"split-standard.cpb");
TF_ASSERT_OK_AND_ASSIGN(auto reader, GetRiegeliReader(cpb_file));
auto read_metadata = GetChunkMetadata(reader);
if (!read_metadata.ok()) {
reader.Close();
TF_ASSERT_OK(read_metadata.status());
}
ChunkMetadata chunk_metadata = read_metadata.value();
std::vector<ChunkInfo> chunks_info = std::vector<ChunkInfo>(
chunk_metadata.chunks().begin(), chunk_metadata.chunks().end());
SavedObjectGraph saved_object_graph;
EXPECT_THAT(
HashSavedObjectGraph(&saved_object_graph, chunk_metadata.message(),
reader, chunks_info),
IsOkAndHolds(17454850744699451884U));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/fingerprinting_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/cc/saved_model/fingerprinting_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b6914a15-6630-4a43-882b-2ed081ace8fd | cpp | google/quiche | window_update_payload_decoder | quiche/http2/decoder/payload_decoders/window_update_payload_decoder.cc | quiche/http2/decoder/payload_decoders/window_update_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/window_update_payload_decoder.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_http2_structures.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
DecodeStatus WindowUpdatePayloadDecoder::StartDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
const Http2FrameHeader& frame_header = state->frame_header();
const uint32_t total_length = frame_header.payload_length;
QUICHE_DVLOG(2) << "WindowUpdatePayloadDecoder::StartDecodingPayload: "
<< frame_header;
QUICHE_DCHECK_EQ(Http2FrameType::WINDOW_UPDATE, frame_header.type);
QUICHE_DCHECK_LE(db->Remaining(), total_length);
QUICHE_DCHECK_EQ(0, frame_header.flags);
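  // Fast path: the entire 4-byte payload is already in the decode buffer and
  // the frame length is exactly right, so decode and report the window update
  // in a single step without entering the resumable path.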
if (db->Remaining() == Http2WindowUpdateFields::EncodedSize() &&
total_length == Http2WindowUpdateFields::EncodedSize()) {
DoDecode(&window_update_fields_, db);
state->listener()->OnWindowUpdate(
frame_header, window_update_fields_.window_size_increment);
return DecodeStatus::kDecodeDone;
}
state->InitializeRemainders();
return HandleStatus(state, state->StartDecodingStructureInPayload(
&window_update_fields_, db));
}
DecodeStatus WindowUpdatePayloadDecoder::ResumeDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "ResumeDecodingPayload: remaining_payload="
<< state->remaining_payload()
<< "; db->Remaining=" << db->Remaining();
QUICHE_DCHECK_EQ(Http2FrameType::WINDOW_UPDATE, state->frame_header().type);
QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
return HandleStatus(state, state->ResumeDecodingStructureInPayload(
&window_update_fields_, db));
}
DecodeStatus WindowUpdatePayloadDecoder::HandleStatus(FrameDecoderState* state,
DecodeStatus status) {
QUICHE_DVLOG(2) << "HandleStatus: status=" << status
<< "; remaining_payload=" << state->remaining_payload();
if (status == DecodeStatus::kDecodeDone) {
if (state->remaining_payload() == 0) {
state->listener()->OnWindowUpdate(
state->frame_header(), window_update_fields_.window_size_increment);
return DecodeStatus::kDecodeDone;
}
return state->ReportFrameSizeError();
}
QUICHE_DCHECK(
(status == DecodeStatus::kDecodeInProgress &&
state->remaining_payload() > 0) ||
(status == DecodeStatus::kDecodeError && state->remaining_payload() == 0))
<< "\n status=" << status
<< "; remaining_payload=" << state->remaining_payload();
return status;
}
} | #include "quiche/http2/decoder/payload_decoders/window_update_payload_decoder.h"
#include <stddef.h>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class WindowUpdatePayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() {
return Http2FrameType::WINDOW_UPDATE;
}
static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
struct Listener : public FramePartsCollector {
void OnWindowUpdate(const Http2FrameHeader& header,
uint32_t window_size_increment) override {
QUICHE_VLOG(1) << "OnWindowUpdate: " << header
<< "; window_size_increment=" << window_size_increment;
EXPECT_EQ(Http2FrameType::WINDOW_UPDATE, header.type);
StartAndEndFrame(header)->OnWindowUpdate(header, window_size_increment);
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class WindowUpdatePayloadDecoderTest
: public AbstractPayloadDecoderTest<WindowUpdatePayloadDecoder,
WindowUpdatePayloadDecoderPeer,
Listener> {
protected:
Http2WindowUpdateFields RandWindowUpdateFields() {
Http2WindowUpdateFields fields;
test::Randomize(&fields, RandomPtr());
QUICHE_VLOG(3) << "RandWindowUpdateFields: " << fields;
return fields;
}
};
TEST_F(WindowUpdatePayloadDecoderTest, WrongSize) {
auto approve_size = [](size_t size) {
return size != Http2WindowUpdateFields::EncodedSize();
};
Http2FrameBuilder fb;
fb.Append(RandWindowUpdateFields());
fb.Append(RandWindowUpdateFields());
fb.Append(RandWindowUpdateFields());
EXPECT_TRUE(VerifyDetectsFrameSizeError(0, fb.buffer(), approve_size));
}
TEST_F(WindowUpdatePayloadDecoderTest, VariousPayloads) {
for (int n = 0; n < 100; ++n) {
uint32_t stream_id = n == 0 ? 0 : RandStreamId();
Http2WindowUpdateFields fields = RandWindowUpdateFields();
Http2FrameBuilder fb;
fb.Append(fields);
Http2FrameHeader header(fb.size(), Http2FrameType::WINDOW_UPDATE,
RandFlags(), stream_id);
set_frame_header(header);
FrameParts expected(header);
expected.SetOptWindowUpdateIncrement(fields.window_size_increment);
EXPECT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/window_update_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/window_update_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
4cea8920-dc74-417f-9f7e-44a4c6a5967c | cpp | tensorflow/tensorflow | validator_runner_impl | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.cc | tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl_test.cc | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#ifndef _WIN32
#include <dlfcn.h>   // dladdr, dlsym, RTLD_DEFAULT used below.
#include <unistd.h>  // close, dup used below.
#endif
#include <iostream>
#include <memory>
#include <ostream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/file_lock.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/runner.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#include "tensorflow/lite/tools/model_loader.h"
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
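// Copies the model bytes backing `input` into a new buffer and wraps them in a
// MemoryAllocation; used below so the detached validation thread holds its own
// copy of the model, independent of the caller's allocation lifetime.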
std::pair<std::unique_ptr<Allocation>, std::vector<uint8_t>> CopyModel(
const Allocation* input, ErrorReporter* error_reporter) {
std::vector<uint8_t> copy;
if (!input) {
return {nullptr, copy};
}
copy.resize(input->bytes());
memcpy(copy.data(), input->base(), input->bytes());
return {std::make_unique<MemoryAllocation>(copy.data(), copy.size(),
error_reporter),
std::move(copy)};
}
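// RAII holder for a duplicated file descriptor; closes it on destruction.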
class FdHolder {
public:
explicit FdHolder(int fd) : fd_(fd) {}
FdHolder(FdHolder&& other) = default;
FdHolder& operator=(FdHolder&& other) = default;
~FdHolder() {
if (fd_ > 0) {
close(fd_);
}
}
private:
int fd_;
};
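// If `model_path` has the form "fd:<fd>:...", dup()s the embedded file
// descriptor, rewrites the path to reference the duplicate, and returns a
// holder that closes the duplicate when destroyed. Returns nullptr when the
// path is not fd-based or duplication fails.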
std::unique_ptr<FdHolder> UpdateModelPathIfUsingFd(std::string& model_path) {
if (!absl::StartsWith(model_path, "fd:")) {
return nullptr;
}
std::vector<std::string> parts = absl::StrSplit(model_path, ':');
int model_fd;
if (!absl::SimpleAtoi(parts[1], &model_fd)) {
TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
"Failed to parse file descriptor %s from model_path %s",
parts[1].c_str(), model_path.c_str());
return nullptr;
}
int new_fd = dup(model_fd);
if (new_fd < 0) {
TFLITE_LOG_PROD(
TFLITE_LOG_ERROR,
"Failed to dup() file descriptor. Original fd: %d errno: %d", model_fd,
errno);
return nullptr;
}
parts[1] = std::to_string(new_fd);
model_path = absl::StrJoin(parts, ":");
return std::make_unique<FdHolder>(new_fd);
}
}
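// Validates the runner options, loads (and optionally rewrites) the model,
// resolves the NNAPI / GPU module paths, checks that the validation
// entrypoint symbol can be resolved, and verifies that a ProcessRunner can be
// initialized.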
MinibenchmarkStatus ValidatorRunnerImpl::Init() {
if (storage_path_.empty()) {
TF_LITE_REPORT_ERROR(error_reporter_, "storage_path is empty.");
return kMinibenchmarkPreconditionNotMet;
}
if (data_directory_path_.empty()) {
TF_LITE_REPORT_ERROR(error_reporter_, "data_directory_path is empty.");
return kMinibenchmarkPreconditionNotMet;
}
if (benchmark_evaluator_ == nullptr) {
TF_LITE_REPORT_ERROR(error_reporter_, "benchmark_evaluator is null.");
return kMinibenchmarkPreconditionNotMet;
}
MinibenchmarkStatus status = storage_.Read();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Storage::Read failed.");
return status;
}
std::unique_ptr<tools::ModelLoader> model_loader =
tools::CreateModelLoaderFromPath(fd_or_model_path_);
if (!model_loader) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to parse model path.");
return kMinibenchmarkPreconditionNotMet;
}
if (!model_loader->Init() || !model_loader->GetModel()) {
TF_LITE_REPORT_ERROR(error_reporter_, "Could not load model.");
return kMinibenchmarkModelInitFailed;
}
if (custom_validation_embedder_) {
status = custom_validation_embedder_->BuildModel(
*model_loader->GetModel()->GetModel(), model_with_custom_input_);
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Failed to embed golden input to model: %d",
static_cast<int>(status));
return status;
}
model_allocation_ = std::make_unique<MemoryAllocation>(
model_with_custom_input_.GetBufferPointer(),
model_with_custom_input_.GetSize(), error_reporter_);
} else if (model_loader->type() ==
tools::ModelLoader::Type::kBufferModelLoader) {
const Allocation* alloc = model_loader->GetModel()->allocation();
if (!alloc || !alloc->valid() || !alloc->base() || alloc->bytes() <= 0) {
TF_LITE_REPORT_ERROR(error_reporter_,
"Internal error: BufferModelLoader doesn't have a "
"valid allocation.");
return kMinibenchmarkPreconditionNotMet;
}
model_allocation_ = std::make_unique<MemoryAllocation>(
alloc->base(), alloc->bytes(), error_reporter_);
}
status = nnapi_helper_.Load();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to load NNAPI SL: %d",
static_cast<int>(status));
return status;
}
status = gpu_helper_.Load();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Failed to load GPU Module: %d",
static_cast<int>(status));
return status;
}
status = validation_entrypoint_helper_.Validate();
if (status != kMinibenchmarkSuccess) {
return status;
}
ProcessRunner check_runner(data_directory_path_,
validation_entrypoint_helper_.name().c_str(),
validation_entrypoint_helper_.LoadEntrypoint(),
timeout_ms_, error_reporter_);
status = check_runner.Init();
if (status != kMinibenchmarkSuccess) {
TF_LITE_REPORT_ERROR(error_reporter_, "Runner::Init returned %d",
static_cast<int>(status));
return status;
}
return kMinibenchmarkSuccess;
}
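// Kicks off validation on a detached thread. The thread first takes a file
// lock on "<storage_path>.parent_lock" (returning early if another parent
// already holds it), then for each TFLiteSettings records a START event and
// runs the validation entrypoint in a separate process, appending an ERROR
// event to storage when setup or the run fails.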
void ValidatorRunnerImpl::TriggerValidationAsync(
std::vector<FlatBufferBuilder> tflite_settings,
absl::string_view storage_path) {
if (tflite_settings.empty()) {
return;
}
storage_ = FlatbufferStorage<BenchmarkEvent>(storage_path, error_reporter_);
std::thread detached_thread(
[original_model_path = fd_or_model_path_,
storage_path = std::string(storage_path),
data_directory_path = data_directory_path_,
tflite_settings = std::move(tflite_settings),
validation_entrypoint_name =
validation_entrypoint_helper_.name().c_str(),
validation_entrypoint = validation_entrypoint_helper_.LoadEntrypoint(),
nnapi_sl_path = nnapi_helper_.nnapi_sl_path(),
gpu_so_path = gpu_helper_.gpu_so_path(),
allocation_and_model =
CopyModel(model_allocation_.get(), error_reporter_),
timeout_ms = timeout_ms_]() {
FileLock lock(absl::StrCat(storage_path, ".parent_lock"));
if (!lock.TryLock()) {
return;
}
std::string model_path = original_model_path;
std::unique_ptr<FdHolder> fd_holder =
UpdateModelPathIfUsingFd(model_path);
for (auto& one_setting : tflite_settings) {
FlatbufferStorage<BenchmarkEvent> storage(storage_path);
TFLiteSettingsT tflite_settings_obj;
flatbuffers::GetRoot<TFLiteSettings>(one_setting.GetBufferPointer())
->UnPackTo(&tflite_settings_obj);
TFLITE_LOG_PROD(TFLITE_LOG_INFO,
"Run validation with entry point '%s' %s",
validation_entrypoint_name, storage_path.c_str());
ProcessRunner runner(data_directory_path, validation_entrypoint_name,
validation_entrypoint, timeout_ms);
int exitcode = 0;
int signal = 0;
MinibenchmarkStatus status = runner.Init();
if (status == kMinibenchmarkSuccess) {
flatbuffers::FlatBufferBuilder fbb;
status = storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_START, 0, 0,
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
if (status != kMinibenchmarkSuccess) {
flatbuffers::FlatBufferBuilder fbb;
storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_INITIALIZATION,
exitcode, signal, {},
status),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
continue;
}
std::vector<std::string> args;
if (!allocation_and_model.first) {
args.push_back(model_path);
}
args.push_back(storage_path);
args.push_back(data_directory_path);
if (tflite_settings_obj.delegate == tflite::Delegate_NNAPI &&
!nnapi_sl_path.empty()) {
TFLITE_LOG_PROD(
TFLITE_LOG_INFO,
"Running benchmark using NNAPI support library at path '%s'",
nnapi_sl_path.c_str());
args.push_back(nnapi_sl_path);
} else if (tflite_settings_obj.delegate == tflite::Delegate_GPU &&
!gpu_so_path.empty()) {
TFLITE_LOG_PROD(
TFLITE_LOG_INFO,
"Running benchmark using GPU Delegate Module at path '%s'",
gpu_so_path.c_str());
args.push_back(gpu_so_path);
}
std::string output;
status = runner.Run(allocation_and_model.first.get(), args, &output,
&exitcode, &signal);
if (status != kMinibenchmarkSuccess) {
std::cout << "Run() returned " << status << std::endl;
flatbuffers::FlatBufferBuilder fbb;
storage.Append(
&fbb,
CreateBenchmarkEvent(
fbb, CreateTFLiteSettings(fbb, &tflite_settings_obj),
BenchmarkEventType_ERROR, 0,
CreateBenchmarkError(fbb, BenchmarkStage_UNKNOWN, exitcode,
signal, {}, status),
Validator::BootTimeMicros(), Validator::WallTimeMicros()));
}
}
});
detached_thread.detach();
}
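// Re-reads the storage file and returns the events that the evaluator
// classifies as successful validations; ERROR events are only logged.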
std::vector<const BenchmarkEvent*>
ValidatorRunnerImpl::GetSuccessfulResultsFromStorage() {
std::vector<const BenchmarkEvent*> results;
storage_.Read();
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
TFLITE_LOG_PROD(TFLITE_LOG_WARNING, "Benchmark event(%d).",
event->event_type());
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
results.push_back(event);
} else if (event->event_type() == BenchmarkEventType_ERROR) {
TFLITE_LOG(
TFLITE_LOG_WARNING,
"Benchmark event failed with error code (%d), signal (%d), exit code "
"(%d), stage (%d), mini benchmark error code (%d).\n",
event->error()->error_code(), event->error()->signal(),
event->error()->exit_code(), event->error()->stage(),
event->error()->mini_benchmark_error_code());
}
}
return results;
}
std::vector<FlatBufferBuilder> ValidatorRunnerImpl::GetCompletedResults() {
storage_.Read();
std::vector<FlatBufferBuilder> results;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() != BenchmarkEventType_ERROR &&
event->event_type() != BenchmarkEventType_END) {
continue;
}
BenchmarkEventT event_obj;
event->UnPackTo(&event_obj);
if (benchmark_evaluator_->IsValidationSuccessEvent(*event)) {
event_obj.result->ok = true;
}
FlatBufferBuilder fbb;
fbb.Finish(CreateBenchmarkEvent(fbb, &event_obj));
results.emplace_back(std::move(fbb));
}
return results;
}
int ValidatorRunnerImpl::GetNumCompletedResults() {
storage_.Read();
int num_results = 0;
for (int i = 0; i < storage_.Count(); i++) {
const BenchmarkEvent* event = storage_.Get(i);
if (event->event_type() == BenchmarkEventType_ERROR ||
(event->event_type() == BenchmarkEventType_END && event->result())) {
num_results++;
}
}
return num_results;
}
MinibenchmarkStatus
ValidatorRunnerImpl::ValidationEntrypointHelper::Validate() {
#ifndef _WIN32
if (!LoadEntrypoint()) {
TF_LITE_REPORT_ERROR(error_reporter_, "Could not load symbol '%s': '%s'",
validation_entrypoint_name_.c_str(), dlerror());
return kMinibenchmarkValidationEntrypointSymbolNotFound;
}
return kMinibenchmarkSuccess;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
ValidatorRunnerImpl::ValidationEntrypointHelper::EntrypointFunc*
ValidatorRunnerImpl::ValidationEntrypointHelper::LoadEntrypoint() {
#ifndef _WIN32
return reinterpret_cast<int (*)(int, char**)>(
dlsym(RTLD_DEFAULT, validation_entrypoint_name_.c_str()));
#endif
return nullptr;
}
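// When an NNAPI support library was supplied, recovers the path of its shared
// object via dladdr() on one of its entry points so the path can be passed to
// the validation subprocess.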
MinibenchmarkStatus ValidatorRunnerImpl::NnapiHelper::Load() {
if (nnapi_sl_) {
#ifndef _WIN32
Dl_info dl_info;
if (!nnapi_sl_->ANeuralNetworks_getRuntimeFeatureLevel) {
return kMiniBenchmarkCannotLoadSupportLibrary;
}
int status = dladdr(reinterpret_cast<void*>(
nnapi_sl_->ANeuralNetworks_getRuntimeFeatureLevel),
&dl_info);
if (status == 0 || !dl_info.dli_fname) {
return kMiniBenchmarkCannotLoadSupportLibrary;
}
nnapi_sl_path_ = dl_info.dli_fname;
#else
return kMinibenchmarkUnsupportedPlatform;
#endif
}
return kMinibenchmarkSuccess;
}
MinibenchmarkStatus ValidatorRunnerImpl::GpuHelper::Load() {
if (gpu_plugin_handle_) {
#ifndef _WIN32
Dl_info dl_info;
int status = dladdr(gpu_plugin_handle_, &dl_info);
if (status == 0 || !dl_info.dli_fname) {
return kMinibenchmarkCannotLoadGpuModule;
}
gpu_so_path_ = dl_info.dli_fname;
}
#else   // _WIN32
    return kMinibenchmarkUnsupportedPlatform;
  }
#endif  // _WIN32
return kMinibenchmarkSuccess;
}
}
} | #include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.h"
#include <unistd.h>  // unlink, usleep used below.
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/time.h"
#include "flatbuffers/flatbuffer_builder.h"
#include "tensorflow/lite/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/benchmark_result_evaluator.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_mobilenet_validation_model.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/fb_storage.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/mini_benchmark_test_helper.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/model_modifier/custom_validation_embedder.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/nnapi_sl_fake_impl.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/status_codes.h"
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_options.h"
#include "tensorflow/lite/nnapi/sl/include/SupportLibrary.h"
#include "tensorflow/lite/stderr_reporter.h"
#ifdef __ANDROID__
#include <dlfcn.h>
#include "tensorflow/lite/experimental/acceleration/mini_benchmark/embedded_validator_runner_entrypoint.h"
#endif
namespace tflite {
namespace acceleration {
namespace {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::GetRoot;
constexpr absl::Duration kWaitBetweenRefresh = absl::Milliseconds(20);
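// Evaluator stub that reports every benchmark result as passing the accuracy
// check.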
class AlwaysTrueEvaluator : public AbstractBenchmarkResultEvaluator {
public:
bool HasPassedAccuracyCheck(const BenchmarkResult& result) override {
return true;
}
};
class ValidatorRunnerImplTest : public ::testing::Test {
protected:
void SetUp() override {
MiniBenchmarkTestHelper helper;
should_perform_test_ = helper.should_perform_test();
nnapi_sl_dump_path_ = helper.DumpToTempFile(
"libnnapi_fake.so", g_nnapi_sl_fake_impl, g_nnapi_sl_fake_impl_len);
options_.data_directory_path = ::testing::TempDir();
options_.storage_path = ::testing::TempDir() + "/storage_path.fb";
options_.validation_entrypoint_name =
"Java_org_tensorflow_lite_acceleration_validation_entrypoint";
options_.error_reporter = tflite::DefaultErrorReporter();
options_.benchmark_result_evaluator =
EmbeddedResultEvaluator::GetInstance();
options_.per_test_timeout_ms = 0;
options_.model_path = helper.DumpToTempFile(
"mobilenet_quant_with_validation.tflite",
g_tflite_acceleration_embedded_mobilenet_validation_model,
g_tflite_acceleration_embedded_mobilenet_validation_model_len);
ASSERT_TRUE(!options_.model_path.empty());
plain_model_path_ = MiniBenchmarkTestHelper::DumpToTempFile(
"mobilenet_quant.tflite",
g_tflite_acceleration_embedded_mobilenet_model,
g_tflite_acceleration_embedded_mobilenet_model_len);
ASSERT_TRUE(!plain_model_path_.empty());
}
void TearDown() override {
if (should_perform_test_) {
ASSERT_EQ(unlink(options_.storage_path.c_str()), 0);
}
}
ValidatorRunnerImpl CreateValidator() {
return ValidatorRunnerImpl(
CreateModelLoaderPath(options_), options_.storage_path,
options_.data_directory_path, options_.per_test_timeout_ms,
std::move(custom_validation_embedder_), options_.error_reporter,
options_.nnapi_sl, options_.gpu_plugin_handle,
options_.validation_entrypoint_name,
options_.benchmark_result_evaluator);
}
bool should_perform_test_;
ValidatorRunnerOptions options_{};
std::string plain_model_path_;
std::unique_ptr<CustomValidationEmbedder> custom_validation_embedder_ =
nullptr;
std::string nnapi_sl_dump_path_;
};
TEST_F(ValidatorRunnerImplTest,
GetSuccessfulResultsSucceedWithNnApiSlAndEmbeddedValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
AndroidInfo android_info;
auto status = RequestAndroidInfo(&android_info);
ASSERT_TRUE(status.ok());
InitNnApiSlInvocationStatus();
std::unique_ptr<const ::tflite::nnapi::NnApiSupportLibrary> fake_nnapi_sl =
::tflite::nnapi::loadNnApiSupportLibrary(nnapi_sl_dump_path_);
ASSERT_THAT(fake_nnapi_sl.get(), ::testing::NotNull());
options_.nnapi_sl = fake_nnapi_sl->getFL5();
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(
CreateTFLiteSettings(tflite_settings[0], Delegate_NNAPI,
CreateNNAPISettings(tflite_settings[0])));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<const BenchmarkEvent*> results =
validator.GetSuccessfulResultsFromStorage();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
ASSERT_THAT(result, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_THAT(result->result()->actual_output(),
testing::Pointee(testing::SizeIs(0)));
}
EXPECT_TRUE(WasNnApiSlInvoked());
}
TEST_F(ValidatorRunnerImplTest,
GetSuccessfulResultsSucceedWithBufferModelAndCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
options_.model_buffer = g_tflite_acceleration_embedded_mobilenet_model;
options_.model_size = g_tflite_acceleration_embedded_mobilenet_model_len;
options_.model_path.clear();
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
AlwaysTrueEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_TRUE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest,
GetCompletedResultsReturnsOkWithCustomValidation) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
options_.model_path = plain_model_path_;
AlwaysTrueEvaluator evaluator;
options_.benchmark_result_evaluator = &evaluator;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_TRUE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest,
GetCompletedResultsReturnsNotOkIfCustomValidationFailed) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
int batch_size = 3;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
batch_size, std::vector<std::vector<uint8_t>>{
std::vector<uint8_t>(batch_size * 224 * 224 * 3, 1)});
options_.model_path = plain_model_path_;
ValidatorRunnerImpl validator = CreateValidator();
ASSERT_EQ(validator.Init(), kMinibenchmarkSuccess);
std::vector<flatbuffers::FlatBufferBuilder> tflite_settings(1);
tflite_settings[0].Finish(CreateTFLiteSettings(tflite_settings[0]));
validator.TriggerValidationAsync(std::move(tflite_settings),
options_.storage_path);
FlatbufferStorage<BenchmarkEvent> storage(options_.storage_path,
options_.error_reporter);
while (validator.GetNumCompletedResults() < 1) {
usleep(absl::ToInt64Microseconds(kWaitBetweenRefresh));
}
std::vector<FlatBufferBuilder> results = validator.GetCompletedResults();
ASSERT_THAT(results, testing::Not(testing::IsEmpty()));
for (auto& result : results) {
const BenchmarkEvent* event =
GetRoot<BenchmarkEvent>(result.GetBufferPointer());
ASSERT_THAT(event, testing::Property(&BenchmarkEvent::event_type,
testing::Eq(BenchmarkEventType_END)));
EXPECT_FALSE(event->result()->ok());
EXPECT_THAT(event->result()->actual_output(),
testing::Pointee(testing::SizeIs(1)));
EXPECT_THAT(event->result()->actual_output()->Get(0)->value(),
testing::Pointee(testing::SizeIs(batch_size * 1001)));
}
}
TEST_F(ValidatorRunnerImplTest, FailIfItCannotFindNnApiSlPath) {
if (!should_perform_test_) {
std::cerr << "Skipping test";
return;
}
NnApiSLDriverImplFL5 wrong_handle_nnapi_sl{};
options_.nnapi_sl = &wrong_handle_nnapi_sl;
ValidatorRunnerImpl validator = CreateValidator();
EXPECT_EQ(validator.Init(), kMiniBenchmarkCannotLoadSupportLibrary);
}
TEST_F(ValidatorRunnerImplTest, FailWithInvalidEntrypoint) {
options_.validation_entrypoint_name = "invalid_name()";
EXPECT_EQ(CreateValidator().Init(),
kMinibenchmarkValidationEntrypointSymbolNotFound);
}
TEST_F(ValidatorRunnerImplTest, FailIfCannotLoadModel) {
options_.model_path = "invalid/path";
EXPECT_EQ(CreateValidator().Init(), kMinibenchmarkModelInitFailed);
}
TEST_F(ValidatorRunnerImplTest, FailIfCannotEmbedInputData) {
options_.model_path = plain_model_path_;
custom_validation_embedder_ = std::make_unique<CustomValidationEmbedder>(
1, std::vector<std::vector<uint8_t>>(2));
EXPECT_EQ(CreateValidator().Init(),
kMinibenchmarkValidationSubgraphBuildFailed);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/acceleration/mini_benchmark/validator_runner_impl_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e7903eb4-9985-4b6c-b0e5-c2348dbcc8ef | cpp | tensorflow/tensorflow | remote_device | tensorflow/core/distributed_runtime/remote_device.cc | tensorflow/core/distributed_runtime/remote_device_test.cc | #include "tensorflow/core/distributed_runtime/remote_device.h"
#include <stdlib.h>
#include <vector>
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/process_util.h"
#include "tensorflow/core/common_runtime/renamed_device.h"
#include "tensorflow/core/distributed_runtime/worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/protobuf/worker.pb.h"
#include "tensorflow/core/util/device_name_utils.h"
namespace tensorflow {
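// Device stub representing a device that lives in another task. It carries
// only the DeviceAttributes: it has no allocator, and accessing its resource
// manager is a fatal error.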
class RemoteDevice : public Device {
public:
RemoteDevice(Env* env, const DeviceAttributes& da)
: Device(env, da),
local_dev_name_(DeviceNameUtils::LocalName(da.name())) {}
Status Sync() override { return absl::OkStatus(); }
Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; }
ResourceMgr* resource_manager() override {
LOG(FATAL) << "Accessing the resource manager of a remote device is not "
<< "supported.";
std::abort();
}
bool IsLocal() const override { return false; }
bool IsRemoteCallAllowed() const override { return true; }
private:
const string local_dev_name_;
RemoteDevice(const RemoteDevice&) = delete;
void operator=(const RemoteDevice&) = delete;
};
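// Materializes Device objects from a list of DeviceAttributes. When
// `lookup_local_device` resolves a name to a local device, that device is
// wrapped via RenamedDevice instead of creating a remote stub.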
void AsRemoteDevices(
Env* env,
const protobuf::RepeatedPtrField<DeviceAttributes>& device_attributes,
LookupLocalDevice lookup_local_device,
std::vector<std::unique_ptr<Device>>* remote_devices) {
for (const auto& da : device_attributes) {
Device* local_device;
if (lookup_local_device != nullptr &&
lookup_local_device(da.name(), &local_device).ok()) {
      remote_devices->emplace_back(RenamedDevice::NewRenamedDevice(
          local_device->name(), local_device, /*owns_underlying=*/false,
          /*isolate_session_state=*/false));
} else {
auto d = new RemoteDevice(env, da);
remote_devices->emplace_back(d);
}
}
}
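// Asynchronously asks the worker named `worker_name` for its device
// attributes (via GetStatusAsync) and invokes `done` with freshly created
// RemoteDevice objects. Devices reported under a different job/replica/task
// are renamed into `worker_name`'s prefix.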
void NewRemoteDevices(Env* env, WorkerCacheInterface* worker_cache,
const string& worker_name, NewRemoteDevicesDone done) {
WorkerInterface* wi = worker_cache->GetOrCreateWorker(worker_name);
if (wi == nullptr) {
std::vector<Device*> empty;
done(errors::NotFound("Device ", worker_name, " is not found."), &empty);
return;
}
struct Call {
GetStatusRequest req;
GetStatusResponse resp;
};
Call* call = new Call;
auto cb = [env, worker_cache, worker_name, done, wi,
call](const Status& status) {
Status s = status;
std::vector<Device*> remote_devices;
auto cleanup = gtl::MakeCleanup(
[&worker_cache, &worker_name, &wi, &done, &remote_devices, &s, call] {
worker_cache->ReleaseWorker(worker_name, wi);
done(s, &remote_devices);
delete call;
});
if (!s.ok()) {
return;
}
DeviceNameUtils::ParsedName worker_name_parsed;
if (!DeviceNameUtils::ParseFullName(worker_name, &worker_name_parsed) ||
!worker_name_parsed.has_job || !worker_name_parsed.has_replica ||
!worker_name_parsed.has_task) {
s = errors::InvalidArgument("Could not parse worker name: ", worker_name);
LOG(WARNING) << s;
return;
}
remote_devices.reserve(call->resp.device_attributes_size());
for (const DeviceAttributes& da : call->resp.device_attributes()) {
DeviceNameUtils::ParsedName device_name_parsed;
CHECK(DeviceNameUtils::ParseFullName(da.name(), &device_name_parsed))
<< "Device attribute name '" << da.name() << "' could not be "
<< "parsed. Device Attribute: " << da.DebugString();
if (device_name_parsed.job == worker_name_parsed.job &&
device_name_parsed.replica == worker_name_parsed.replica &&
device_name_parsed.task == worker_name_parsed.task) {
auto d = new RemoteDevice(env, da);
remote_devices.push_back(d);
} else {
DeviceAttributes da_rewritten = da;
da_rewritten.set_name(DeviceNameUtils::FullName(
worker_name_parsed.job, worker_name_parsed.replica,
worker_name_parsed.task, device_name_parsed.type,
device_name_parsed.id));
auto d = new RemoteDevice(env, da_rewritten);
if (getenv("TPU_NO_POPULATE_DEVICE_LIST_FROM_CLUSTER_SPEC") !=
nullptr) {
if (worker_name_parsed.job == "worker" ||
device_name_parsed.type.find("TPU") == std::string::npos) {
remote_devices.push_back(d);
}
} else {
remote_devices.push_back(d);
}
}
}
};
  wi->GetStatusAsync(/*opts=*/nullptr, &call->req, &call->resp,
                     /*fail_fast=*/false, cb);
}
std::unique_ptr<Device> NewRemoteDevice(Env* env,
DeviceAttributes device_attribute) {
return std::make_unique<RemoteDevice>(env, device_attribute);
}
} | #include "tensorflow/core/distributed_runtime/remote_device.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_channel.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_testlib.h"
#include "tensorflow/core/distributed_runtime/rpc/grpc_worker_cache.h"
#include "tensorflow/core/distributed_runtime/worker_interface.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
const char* const kSession = "remote_session";
class RemoteDeviceTest : public ::testing::Test {
protected:
string remote_name_;
std::unique_ptr<WorkerCacheInterface> worker_cache_;
WorkerInterface* wi_;
std::vector<Device*> devices_;
std::unique_ptr<test::TestCluster> cluster_;
std::unique_ptr<GrpcWorkerEnv> grpc_worker_env_;
RemoteDeviceTest() {
SessionOptions options;
(*options.config.mutable_device_count())["CPU"] = 2;
TF_CHECK_OK(test::TestCluster::MakeTestCluster(options, 1, &cluster_));
const string& hostport = cluster_->targets()[0];
GrpcChannelSpec spec;
TF_CHECK_OK(spec.AddHostPortsJob("localhost", {hostport}));
ChannelCreationFunction channel_func =
ConvertToChannelCreationFunction(NewHostPortGrpcChannel);
std::shared_ptr<GrpcChannelCache> channel_cache(
NewGrpcChannelCache(spec, channel_func));
grpc_worker_env_.reset(CreateGrpcWorkerEnv());
worker_cache_.reset(
NewGrpcWorkerCache(channel_cache, grpc_worker_env_.get()));
remote_name_ = "/job:localhost/replica:0/task:0";
wi_ = worker_cache_->GetOrCreateWorker(remote_name_);
}
~RemoteDeviceTest() override {
worker_cache_->ReleaseWorker(remote_name_, wi_);
}
void SetUp() override {
Notification n;
NewRemoteDevices(Env::Default(), worker_cache_.get(), remote_name_,
[&n, this](const Status& s, std::vector<Device*>* found) {
TF_CHECK_OK(s);
devices_ = *found;
n.Notify();
});
n.WaitForNotification();
EXPECT_EQ(devices_.size(), 2);
std::sort(devices_.begin(), devices_.end(), [](Device* a, Device* b) {
return a->name().compare(b->name()) < 0;
});
}
void TearDown() override {
for (auto d : devices_) delete d;
}
};
TEST_F(RemoteDeviceTest, GetStatus) {
EXPECT_EQ(devices_[0]->name(),
strings::StrCat(remote_name_, "/device:CPU:0"));
EXPECT_EQ(devices_[0]->attributes().device_type(),
DeviceType(DEVICE_CPU).type());
EXPECT_EQ(devices_[0]->attributes().memory_limit(), 256 << 20);
EXPECT_EQ(devices_[1]->name(),
strings::StrCat(remote_name_, "/device:CPU:1"));
EXPECT_EQ(devices_[1]->attributes().memory_limit(), 256 << 20);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/remote_device.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/distributed_runtime/remote_device_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f1ff1941-455f-40bb-b02a-d14a5a2cd4cb | cpp | tensorflow/tensorflow | scoped_allocator_optimizer | tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc | tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc | #include "tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h"
#include "tensorflow/core/common_runtime/scoped_allocator.h"
#include "tensorflow/core/common_runtime/scoped_allocator_mgr.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils/frame.h"
#include "tensorflow/core/lib/gtl/inlined_vector.h"
#define LOG_WARNING_AND_RETURN_IF_ERROR(...) \
do { \
const ::tensorflow::Status _status = (__VA_ARGS__); \
if (TF_PREDICT_FALSE(!_status.ok())) { \
LOG(WARNING) << "error: " << _status; \
return _status; \
} \
} while (0)
namespace tensorflow {
namespace grappler {
namespace {
const char kScopedAllocatorAttrName[] = "_scoped_allocator";
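// Returns true if the last path component of `node_name`, after stripping an
// optional trailing "_<digits>" suffix, equals `op_name`.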
bool HasOpName(const string& node_name, const string& op_name) {
size_t begin = node_name.rfind('/');
if (begin == string::npos) {
begin = 0;
} else {
++begin;
}
size_t end = node_name.rfind('_');
if (end != string::npos) {
size_t p = end + 1;
while (p < node_name.size()) {
if (!isdigit(node_name[p])) {
end = node_name.size();
break;
}
++p;
}
} else {
end = node_name.size();
}
return node_name.substr(begin, end - begin) == op_name;
}
Status GetOutputDataType(
const std::vector<OpInfo::TensorProperties>& output_props, int output_index,
DataType* dtype) {
int output_props_size = output_props.size();
if (output_index >= output_props_size) {
return errors::Internal("Invalid output index ", output_index,
" size of output_props ", output_props.size());
}
*dtype = output_props[output_index].dtype();
return absl::OkStatus();
}
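// Verifies that every op in `ops` has exactly one output whose type matches
// its "T" attr and whose shape is fully known, that all ops share the same
// type, and collects the output shapes in order.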
Status CheckTypesAndGetShapes(const GraphProperties& graph_properties,
const std::vector<NodeDef*>& ops, DataType* type,
std::vector<TensorShape>* shapes) {
VLOG(1) << "CheckTypesAndGetShapes";
*type = DT_INVALID;
for (NodeDef* n : ops) {
AttrSlice n_attrs = AttrSlice(*n);
DataType dtype;
LOG_WARNING_AND_RETURN_IF_ERROR(GetNodeAttr(n_attrs, "T", &dtype));
VLOG(2) << "op " << n->name() << " has type " << dtype << " shapes.size() "
<< shapes->size();
if (!graph_properties.HasOutputProperties(n->name())) {
LOG(ERROR) << "Node " << n->DebugString() << " lacks output shape.";
return errors::Aborted("Node ", n->name(), " lacks output shape.");
}
const std::vector<OpInfo::TensorProperties>& prop_list =
graph_properties.GetOutputProperties(n->name());
if (prop_list.size() != 1) {
return errors::Aborted("Node ", n->name(),
" does not have exactly one output as expected "
"by ScopedAllocatorOptimizer");
}
const OpInfo::TensorProperties& props = prop_list[0];
if (shapes->empty()) {
*type = props.dtype();
} else if (*type != props.dtype()) {
return errors::Aborted("Group ops don't all have same type");
}
if (*type != dtype) {
return errors::Internal(
"Type mismatch: type in op attr = ", DataTypeString(dtype),
", type in output props = ", DataTypeString(*type));
}
if (!TensorShape::IsValid(props.shape()) || props.shape().unknown_rank()) {
return errors::Aborted("Complete shape not known for ", n->name());
}
VLOG(2) << "Adding shape " << props.shape().DebugString();
shapes->push_back(TensorShape(props.shape()));
}
return absl::OkStatus();
}
struct InputDesc {
NodeDef* from_node_def;
int output_slot;
NodeDef* to_node_def;
InputDesc(NodeDef* f, int os, NodeDef* t)
: from_node_def(f), output_slot(os), to_node_def(t) {}
};
void RemoveNode(NodeDef* nd, GraphDef* graph, NodeMap* node_map) {
node_map->RemoveNode(nd->name());
protobuf::RepeatedPtrField<NodeDef>* nodes = graph->mutable_node();
for (int i = 0; i < nodes->size(); ++i) {
if (nd->name() == (*nodes)[i].name()) {
nodes->SwapElements(i, nodes->size() - 1);
nodes->RemoveLast();
return;
}
}
LOG(FATAL) << "Failed to find node " << nd->name() << " in graph";
}
Status RemoveEdge(const string& input_edge_name, const string& from_node_name,
NodeDef* to_node, NodeMap* node_map) {
protobuf::RepeatedPtrField<string>* inputs = to_node->mutable_input();
int edge_index = -1;
for (edge_index = 0; edge_index < inputs->size(); ++edge_index) {
VLOG(2) << " consider edge " << (*inputs)[edge_index];
if ((*inputs)[edge_index] == input_edge_name) {
break;
}
}
if (edge_index >= inputs->size()) {
return errors::Internal("Could not find input name ", input_edge_name,
" at node ", to_node->name());
}
if (node_map) {
node_map->RemoveOutput(from_node_name, to_node->name());
}
inputs->DeleteSubrange(edge_index, 1);
return absl::OkStatus();
}
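// Inserts an Identity node between `input` and `op` when the input is a Const
// or Exit node, or when the same output feeds more than one grouped op
// (tracked in repeated_outputs()), then rewires op's first input to the new
// Identity. Otherwise passes the original input and output index through
// unchanged.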
Status MaybeRewriteInput(ScopedAllocatorOptimizer* sa_opti,
int64_t invocation_count, GraphDef* graph,
NodeMap* node_map, const DataType& dtype,
NodeDef* input, const string& edge_name,
int output_index, NodeDef* op, NodeDef** new_input,
int* new_output_index, bool* rewrite) {
*rewrite = IsConstant(*input) || IsExit(*input) ||
(sa_opti->repeated_outputs().find(edge_name) !=
sa_opti->repeated_outputs().end());
if (!(*rewrite)) {
*new_input = input;
*new_output_index = output_index;
return absl::OkStatus();
}
int unique_id;
LOG_WARNING_AND_RETURN_IF_ERROR(sa_opti->NewIdentityId(&unique_id));
string identity_name = strings::StrCat("scoped_allocator_identity_",
unique_id, "_", invocation_count);
NodeDefBuilder identity_builder(identity_name, "Identity");
identity_builder.Device(op->device());
identity_builder.Attr("T", dtype);
identity_builder.Input(
NodeDefBuilder::NodeOut(input->name(), output_index, dtype));
NodeDef* identity = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(identity_builder.Finalize(identity));
node_map->AddNode(identity_name, identity);
node_map->AddOutput(input->name(), identity_name);
node_map->UpdateInput(op->name(), input->name(), identity_name);
*op->mutable_input(0) = identity_name;
*new_input = identity;
*new_output_index = 0;
VLOG(1) << "Rewrite input " << edge_name << " op " << op->name()
<< " old output index " << output_index << " with identity "
<< identity_name << " new output index 0";
return absl::OkStatus();
}
Status GetInputs(ScopedAllocatorOptimizer* sa_opti, int64_t invocation_count,
GraphDef* graph, const GraphProperties& graph_properties,
NodeMap* node_map, const std::vector<NodeDef*>& ops,
DataType dtype, std::vector<InputDesc>* inputs) {
VLOG(1) << "Getinputs";
for (NodeDef* n : ops) {
NodeDef* inode = nullptr;
int output_index = 0;
DataType inode_dtype = DT_INVALID;
VLOG(2) << "for node " << n->name();
for (const auto& input_name : n->input()) {
if (!IsControlInput(input_name)) {
if (inode) {
return errors::Internal("Found more than one input for node ",
n->name());
}
ParseNodeName(input_name, &output_index);
inode = node_map->GetNode(input_name);
if (inode == nullptr) {
return errors::Internal("Did not find node ", input_name);
}
VLOG(2) << "inode " << inode->DebugString() << " output_index "
<< output_index;
bool rewrite;
LOG_WARNING_AND_RETURN_IF_ERROR(MaybeRewriteInput(
sa_opti, invocation_count, graph, node_map, dtype, inode,
input_name, output_index, n, &inode, &output_index, &rewrite));
if (rewrite) {
inode_dtype = dtype;
}
VLOG(2) << "inode after rewrite " << inode->DebugString()
<< " output_index " << output_index;
}
}
if (inode == nullptr) {
return errors::Internal("Did not find node");
}
if (inode_dtype == DT_INVALID) {
if (!graph_properties.HasOutputProperties(inode->name())) {
return errors::Internal("Input node ", inode->name(),
" does not have output properties");
}
const auto& inode_output_props =
graph_properties.GetOutputProperties(inode->name());
LOG_WARNING_AND_RETURN_IF_ERROR(
GetOutputDataType(inode_output_props, output_index, &inode_dtype));
}
if (inode_dtype != dtype) {
return errors::Aborted("ScopedAllocatorOptimizer expected input type ",
dtype, " but found ", inode_dtype);
}
inputs->emplace_back(inode, output_index, n);
}
return absl::OkStatus();
}
Status GetDataInputs(GraphDef* graph, NodeMap* node_map, NodeDef* op,
std::vector<InputDesc>* inputs) {
VLOG(2) << "GetDataInputs for node " << op->name();
NodeDef* inode = nullptr;
int output_index = 0;
for (const auto& input_name : op->input()) {
if (IsControlInput(input_name)) {
continue;
}
ParseNodeName(input_name, &output_index);
inode = nullptr;
inode = node_map->GetNode(input_name);
if (inode == nullptr) {
return errors::Internal("Did not find node ", input_name);
}
VLOG(2) << "inode " << inode->DebugString() << " output_index "
<< output_index;
inputs->emplace_back(inode, output_index, op);
}
return absl::OkStatus();
}
void DumpGraphToVLOG(const GraphDef& graph, int log_level) {
if (VLOG_IS_ON(log_level)) {
for (const auto& line : str_util::Split(graph.DebugString(), "\n\r")) {
VLOG(log_level) << line;
}
}
}
}
void ScopedAllocatorOptimizer::ExtendNodeAttr(StringPiece name,
const std::vector<int32>& values,
NodeDef* node_def) {
if (HasNodeAttr(*node_def, name)) {
VLOG(2) << "extending";
AttrValue* existing = &(*node_def->mutable_attr())[string(name)];
for (int32_t i : values) {
existing->mutable_list()->add_i(i);
}
} else {
VLOG(2) << "setting new attr value";
AddNodeAttr(name, values, node_def);
}
}
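// Rewrites a same-type group of unary elementwise ops (e.g. CollectiveReduce)
// into a single fused op: the originals' inputs are concatenated into a
// _ScopedAllocator-backed tensor and the fused result is split back out to the
// original consumers.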
class UnaryElementwiseRewriter : public ScopedAllocatorOptimizer::Rewriter {
public:
~UnaryElementwiseRewriter() override {}
Status CheckUsesAllocatorAttributes(const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
if (IsConstant(*nd.from_node_def)) {
return errors::Aborted(
"Abandoning ScopedAllocatorOptimizer because input ",
nd.from_node_def->name(),
" is a Const op which does not use AllocatorAttributes");
}
}
return absl::OkStatus();
}
Status CheckExistingScopedAllocator(const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
VLOG(2) << "get attrs for " << nd.from_node_def->name();
AttrSlice n_attrs = AttrSlice(*nd.from_node_def);
std::vector<int32> scope_ids;
Status ss = GetNodeAttr(n_attrs, kScopedAllocatorAttrName, &scope_ids);
if (ss.ok() && scope_ids[0] == nd.output_slot) {
LOG(INFO) << "Abandoning ScopedAllocatorOptimizer because input "
<< nd.from_node_def->name() << " output " << scope_ids[0]
<< " is already assigned to scope_id " << scope_ids[1];
return errors::Aborted(
"Abandoning ScopedAllocatorOptimizer because input ",
nd.from_node_def->name(), " output ", scope_ids[0], " is already ",
"assigned to scope_id ", scope_ids[1]);
}
}
return absl::OkStatus();
}
Status CheckInternalDataDependency(const std::set<string>& op_set,
const std::vector<InputDesc>& inputs) {
for (const InputDesc& nd : inputs) {
if (op_set.find(nd.from_node_def->name()) != op_set.end()) {
if (nd.output_slot != tensorflow::Graph::kControlSlot) {
return errors::Aborted("Data edge exists between ",
nd.from_node_def->name(),
" and another "
"node in the set");
}
}
}
return absl::OkStatus();
}
void ClearInternalControlInputs(const std::set<string>& op_set,
const std::vector<NodeDef*>& ops,
NodeMap* node_map) {
for (NodeDef* n : ops) {
for (const auto& input_name : n->input()) {
if (IsControlInput(input_name)) {
int position = 0;
string input_node_name = ParseNodeName(input_name, &position);
CHECK_EQ(position, -1);
if (op_set.find(input_node_name) != op_set.end()) {
VLOG(1) << "Remove control output from " << input_node_name
<< " via edge " << input_name << " to " << n->name();
TF_CHECK_OK(RemoveEdge(input_name, input_node_name, n, node_map));
}
}
}
}
}
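  // Checks types, shapes and input constraints for the candidate ops, strips
  // control edges internal to the set, and computes the flat shape of the
  // backing tensor for the scoped allocator.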
Status AnalyzeInputs(ScopedAllocatorOptimizer* sa_opti,
int64_t invocation_count, GraphDef* graph,
NodeMap* node_map, const std::vector<NodeDef*>& ops,
const std::set<string>& op_instance_names,
string* device_name, DataType* dtype,
std::vector<TensorShape>* input_shapes,
std::vector<InputDesc>* inputs, TensorShape* sa_shape) {
CHECK(graph_properties_);
LOG_WARNING_AND_RETURN_IF_ERROR(
CheckTypesAndGetShapes(*graph_properties_, ops, dtype, input_shapes));
LOG_WARNING_AND_RETURN_IF_ERROR(
GetInputs(sa_opti, invocation_count, graph, *graph_properties_,
sa_opti->node_map(), ops, *dtype, inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(CheckUsesAllocatorAttributes(*inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(CheckExistingScopedAllocator(*inputs));
LOG_WARNING_AND_RETURN_IF_ERROR(
CheckInternalDataDependency(op_instance_names, *inputs));
ClearInternalControlInputs(op_instance_names, ops, node_map);
*device_name = ops[0]->device();
CHECK(!device_name->empty());
CHECK(!input_shapes->empty());
CHECK_EQ(0, Allocator::kAllocatorAlignment % DataTypeSize(*dtype))
<< "ScopedAllocatorOptimizer only applies to types that evenly "
<< "divide kAllocatorAlignment";
std::vector<ScopedAllocator::Field> sa_fields;
int64_t num_bytes = ScopedAllocatorMgr::PopulateFields(
        0, *input_shapes, *dtype, &sa_fields);
int64_t num_elts = num_bytes / DataTypeSize(*dtype);
VLOG(2) << "num_bytes " << num_bytes << " num_elts=" << num_elts;
*sa_shape = TensorShape({num_elts});
return absl::OkStatus();
}
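  // Breadth-first collects the transitive fanout of `source_nodes`, without
  // expanding through nodes that modify frame info.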
Status TransitiveFanoutWithinFrame(
GraphDef* graph, NodeMap* node_map,
const std::vector<const NodeDef*>& source_nodes,
absl::flat_hash_set<const NodeDef*>* fanout) {
std::deque<const NodeDef*> queue(source_nodes.begin(), source_nodes.end());
absl::flat_hash_set<const NodeDef*> visited;
while (!queue.empty()) {
const NodeDef* node = queue.front();
queue.pop_front();
if (!visited.insert(node).second) {
continue;
}
fanout->insert(node);
for (const NodeDef* output : node_map->GetOutputs(node->name())) {
if (!ModifiesFrameInfo(*output)) {
queue.push_back(output);
}
VLOG(2) << "TransitiveFanout parent: " << node->name()
<< " child: " << output->name() << " of type " << output->op();
}
}
return absl::OkStatus();
}
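  // Creates the _ScopedAllocator node, makes every input-producing op depend
  // on it and tags them with the _scoped_allocator attr, then adds a control
  // edge from an upstream node so the allocation is not scheduled too early.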
Status ConstructScopedAllocatorNode(
ScopedAllocatorOptimizer* sa_opti, GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops, const string& device_name,
DataType dtype, int sa_id, const string& sa_name,
const std::vector<TensorShape>& input_shapes,
const std::vector<InputDesc>& inputs, const TensorShape& sa_shape) {
VLOG(2) << "ConstructScopedAllocatorNode " << sa_name;
NodeDefBuilder sa_builder(sa_name, "_ScopedAllocator");
sa_builder.Device(device_name);
sa_builder.Attr("sa_name", sa_name);
sa_builder.Attr("T", dtype);
sa_builder.Attr("id", sa_id);
sa_builder.Attr("shapes", input_shapes);
sa_builder.Attr("shape", sa_shape);
sa_builder.Attr("expected_call_count", static_cast<int64_t>(ops.size()));
NodeDef* sa_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(sa_builder.Finalize(sa_node));
node_map->AddNode(sa_name, sa_node);
std::vector<const NodeDef*> fanout_sources;
fanout_sources.reserve(inputs.size());
for (const auto& input : inputs) {
fanout_sources.push_back(input.from_node_def);
}
absl::flat_hash_set<const NodeDef*> fanout;
TF_RETURN_IF_ERROR(
TransitiveFanoutWithinFrame(graph, node_map, fanout_sources, &fanout));
for (int i = 0, end = inputs.size(); i < end; ++i) {
auto& nd = inputs[i];
if (IsArg(*nd.from_node_def)) {
return errors::Aborted(
"ScopedAllocatorOptimizer does not work well when the op inputs "
"are _Arg ops; skipping this optimizer for this function");
}
VLOG(2) << "To input " << i << ": " << nd.from_node_def->name()
<< " add control input "
<< "^" << sa_name;
nd.from_node_def->add_input(strings::StrCat("^", sa_name));
ScopedAllocatorOptimizer::ExtendNodeAttr(kScopedAllocatorAttrName,
{nd.output_slot, sa_id + 1 + i},
nd.from_node_def);
node_map->AddOutput(sa_name, nd.from_node_def->name());
}
bool added_delay_edge = false;
for (auto& nd : inputs) {
std::vector<InputDesc> inputs_to_first;
LOG_WARNING_AND_RETURN_IF_ERROR(GetDataInputs(
graph, sa_opti->node_map(), nd.from_node_def, &inputs_to_first));
for (int i = 0, end = inputs_to_first.size(); i < end; ++i) {
if (fanout.find(inputs_to_first[i].from_node_def) != fanout.end()) {
VLOG(2) << "Found node " << inputs_to_first[i].from_node_def->name()
<< " in the fanout of " << sa_name;
continue;
}
sa_node->add_input(
strings::StrCat("^", inputs_to_first[i].from_node_def->name()));
node_map->AddOutput(inputs_to_first[i].from_node_def->name(), sa_name);
added_delay_edge = true;
VLOG(2) << "Adding control dependency from "
<< inputs_to_first[i].from_node_def->name() << " to "
<< sa_node->name();
break;
}
if (added_delay_edge) {
break;
}
}
if (!added_delay_edge) {
LOG(WARNING) << "Found no node from which a control edge can be added to "
"scoped allocator node. If you run into issues with "
"graphs that contain control flow, turn off the "
"ScopedAllocatorOptimizer and file a bug.";
}
return absl::OkStatus();
}
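  // Creates the _ScopedAllocatorConcat node whose data inputs are the original
  // ops' inputs; external control inputs of the old ops are transferred to it.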
Status BuildSAConcatNode(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const std::set<string>& op_instance_names,
const string& device_name, DataType dtype, int sa_id,
const string& sa_name, const string& sac_name,
const TensorShape& sa_shape,
std::vector<NodeDefBuilder::NodeOut>* sac_inputs) {
VLOG(2) << "BuildSAConcatNode " << sac_name;
absl::flat_hash_map<string, string> sac_ctl_inputs;
for (int i = 0, end = ops.size(); i < end; ++i) {
NodeDef* old_op = ops[i];
for (const string& old_op_input : old_op->input()) {
int position = 0;
string input_name = ParseNodeName(old_op_input, &position);
if (position == -1) {
if (op_instance_names.find(old_op_input) == op_instance_names.end()) {
sac_ctl_inputs.emplace(old_op_input, input_name);
}
} else {
if (op_instance_names.find(old_op_input) != op_instance_names.end()) {
LOG(ERROR) << "Data edge between " << old_op_input << " and "
<< old_op->name() << " cannot build ScopedAllocator.";
return errors::Aborted("Data edge between ", old_op_input, " and ",
old_op->name(),
" cannot build ScopedAllocator.");
}
sac_inputs->push_back(
NodeDefBuilder::NodeOut(old_op_input, 0, dtype));
}
VLOG(3) << "from op " << i << ": " << old_op->name()
<< " sac_inputs append " << old_op_input;
}
}
NodeDefBuilder sac_builder(sac_name, "_ScopedAllocatorConcat");
VLOG(2) << "New sac_name " << sac_name << " shape "
<< sa_shape.DebugString();
sac_builder.Device(device_name);
sac_builder.Attr("sa_name", sa_name);
sac_builder.Attr("id", sa_id);
sac_builder.Attr("T", dtype);
sac_builder.Attr("shape", sa_shape);
sac_builder.Attr("N", static_cast<int>(sac_inputs->size()));
sac_builder.Input(NodeDefBuilder::NodeOut(sa_name, 0, dtype));
sac_builder.Input(*sac_inputs);
NodeDef* sac_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(sac_builder.Finalize(sac_node));
node_map->AddNode(sac_name, sac_node);
node_map->AddOutput(sa_name, sac_name);
for (const auto& ctl_input : sac_ctl_inputs) {
const auto& ctl_edge = ctl_input.first;
const auto& input_name = ctl_input.second;
sac_node->add_input(ctl_edge);
node_map->AddOutput(input_name, sac_node->name());
}
return absl::OkStatus();
}
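  // Creates the single fused replacement op, copying attrs from the first
  // original op and forwarding its input buffer to output 0 via
  // _forward_input.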
Status BuildReplacementOp(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const string& device_name, DataType dtype,
const string& op_name, const string& sac_name,
const string& sa_op_name) {
VLOG(2) << "BuildReplacementOp " << sa_op_name;
NodeDefBuilder op_builder(sa_op_name, op_name);
op_builder.Device(device_name);
AttrSlice first_slice(*ops[0]);
for (auto& it : first_slice) {
op_builder.Attr(it.first, it.second);
}
op_builder.Attr("_forward_input", {0, 0});
op_builder.Input(sac_name, 0, dtype);
NodeDef* sa_op_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(op_builder.Finalize(sa_op_node));
node_map->AddNode(sa_op_name, sa_op_node);
node_map->AddOutput(sac_name, sa_op_name);
return absl::OkStatus();
}
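  // Creates the _ScopedAllocatorSplit node that exposes per-op slices of the
  // fused result, using the original input shapes.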
Status BuildSplitNode(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const std::vector<TensorShape>& input_shapes,
const std::vector<NodeDefBuilder::NodeOut>& sac_inputs,
const string& device_name, DataType dtype,
const string& op_name, int sa_id,
const string& sas_name, const string& sa_name,
const string& sa_op_name) {
VLOG(2) << "new ScopedAllocatorSplit " << sas_name;
NodeDefBuilder sas_builder(sas_name, "_ScopedAllocatorSplit");
sas_builder.Device(device_name);
sas_builder.Attr("sa_name", sa_name);
sas_builder.Attr("id", sa_id);
sas_builder.Attr("T", dtype);
sas_builder.Attr("shapes", input_shapes);
std::vector<NodeDefBuilder::NodeOut> sas_inputs = sac_inputs;
sas_builder.Attr("N", static_cast<int>(sas_inputs.size()));
sas_builder.Input(NodeDefBuilder::NodeOut(sa_op_name, 0, dtype));
sas_builder.Input(sas_inputs);
NodeDef* sas_node = graph->add_node();
LOG_WARNING_AND_RETURN_IF_ERROR(sas_builder.Finalize(sas_node));
node_map->AddNode(sas_name, sas_node);
node_map->AddOutput(sa_op_name, sas_name);
for (const auto& input : sas_inputs) {
node_map->AddOutput(input.node, sas_name);
}
return absl::OkStatus();
}
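  // Redirects every consumer of the original ops to the split node's outputs
  // (or to a control edge on it) and deletes the original ops.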
Status RewireSubgraph(GraphDef* graph, NodeMap* node_map,
const std::vector<NodeDef*>& ops,
const std::set<string>& op_instance_names,
const string& op_name, const string& sas_name) {
VLOG(2) << "RewireSubgraph";
for (int op_idx = 0, idx_limit = ops.size(); op_idx < idx_limit; ++op_idx) {
NodeDef* old_op = ops[op_idx];
auto output_nodes = node_map->GetOutputs(old_op->name());
VLOG(3) << "old_op " << old_op->name() << " had " << output_nodes.size()
<< " outputs. Moving them to the ScopedAllocatorSplit node.";
if (VLOG_IS_ON(2)) {
for (NodeDef* n : output_nodes) {
VLOG(3) << " output: " << n->name();
}
}
for (NodeDef* n : output_nodes) {
VLOG(3) << "really checking old output " << n->name()
<< " for corresponding input.";
if (op_instance_names.find(n->name()) != op_instance_names.end()) {
VLOG(3) << "Dropping control output from " << old_op->name() << " to "
<< n->name();
Status ignore = RemoveEdge(strings::StrCat("^", old_op->name()),
old_op->name(), n, node_map);
continue;
}
bool found = false;
VLOG(3) << "about to iterate over " << n->input_size() << " inputs";
for (int i = 0; i < n->input_size(); ++i) {
VLOG(3) << "input " << n->input(i);
int position = 0;
string input_node = ParseNodeName(n->input(i), &position);
if (input_node == old_op->name()) {
found = true;
VLOG(3) << "match pos=" << position;
if (position == -1) {
*n->mutable_input(i) = strings::StrCat("^", sas_name);
} else {
CHECK_EQ(0, position)
<< "name " << n->input(i) << " pos " << position;
*n->mutable_input(i) = strings::StrCat(sas_name, ":", op_idx);
}
node_map->UpdateInput(n->name(), old_op->name(), sas_name);
VLOG(3) << "breaking on success";
break;
} else {
VLOG(3) << "other input " << n->input(i);
}
}
VLOG(3) << "before HasOp";
if (!HasOpName(n->name(), op_name)) {
CHECK(found) << "old_op " << old_op->name() << " node "
<< " could not find input edge on " << n->DebugString()
<< " to replace."
<< " " << op_name << " not in " << n->name();
}
VLOG(3) << "bottom of for output_nodes";
}
VLOG(3) << "Clearing all inputs of " << old_op->name();
node_map->RemoveInputs(old_op->name());
old_op->clear_input();
node_map->RemoveOutputs(old_op->name());
VLOG(3) << "after clear: " << old_op->DebugString();
RemoveNode(old_op, graph, node_map);
}
return absl::OkStatus();
}
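  // End-to-end rewrite of one op group: analyze inputs, then build the
  // allocator, concat, fused op and split nodes, and rewire all consumers.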
Status Rewrite(ScopedAllocatorOptimizer* sa_opti, int64_t invocation_count,
GraphDef* graph, const string& op_name,
const std::vector<NodeDef*>& ops, bool* applied) override {
if (VLOG_IS_ON(1)) {
VLOG(1) << "Rewrite";
string op_names;
for (auto& nd : ops) {
strings::StrAppend(&op_names, nd->name(), ", ");
}
VLOG(1) << "UnaryElementwiseRewriter::Rewrite " << op_name
<< " to: " << op_names;
}
NodeMap* node_map = sa_opti->node_map();
std::set<string> op_instance_names;
for (auto& nd : ops) {
op_instance_names.insert(nd->name());
VLOG(2) << "op_instance_name " << nd->name();
}
DataType dtype;
std::vector<TensorShape> input_shapes;
std::vector<InputDesc> inputs;
TensorShape sa_shape;
string device_name;
TF_RETURN_IF_ERROR(AnalyzeInputs(
sa_opti, invocation_count, graph, node_map, ops, op_instance_names,
&device_name, &dtype, &input_shapes, &inputs, &sa_shape));
int sa_id = sa_opti->NewScopedAllocatorId(input_shapes.size());
string sa_name =
strings::StrCat("scoped_allocator_", sa_id, "_", invocation_count);
TF_RETURN_IF_ERROR(ConstructScopedAllocatorNode(
sa_opti, graph, node_map, ops, device_name, dtype, sa_id, sa_name,
input_shapes, inputs, sa_shape));
std::vector<NodeDefBuilder::NodeOut> sac_inputs;
string sac_name = strings::StrCat("scoped_allocator_concat_", sa_id, "_",
invocation_count);
TF_RETURN_IF_ERROR(BuildSAConcatNode(
graph, node_map, ops, op_instance_names, device_name, dtype, sa_id,
sa_name, sac_name, sa_shape, &sac_inputs));
string sa_op_name = strings::StrCat(sa_name, "_", op_name);
TF_RETURN_IF_ERROR(BuildReplacementOp(graph, node_map, ops, device_name,
dtype, op_name, sac_name,
sa_op_name));
string sas_name = strings::StrCat("scoped_allocator_split_", sa_id, "_",
invocation_count);
TF_RETURN_IF_ERROR(BuildSplitNode(graph, node_map, ops, input_shapes,
sac_inputs, device_name, dtype, op_name,
sa_id, sas_name, sa_name, sa_op_name));
TF_RETURN_IF_ERROR(RewireSubgraph(graph, node_map, ops, op_instance_names,
op_name, sas_name));
*applied = true;
return absl::OkStatus();
}
};
ScopedAllocatorOptimizer::ScopedAllocatorOptimizer(
RewriterConfig::Toggle opt_level, const ScopedAllocatorOptions& opts)
: opt_level_(opt_level) {
VLOG(1) << "ScopedAllocatorOptimizer::ScopedAllocatorOptimizer";
Rewriter* r = new UnaryElementwiseRewriter();
to_delete_.push_back(r);
if (opts.enable_op_size() == 0) {
for (const auto& op_name : {"CollectiveReduce"}) {
op_name_set_.insert(op_name);
rewriters_[op_name] = r;
}
} else {
for (const auto& op_name : opts.enable_op()) {
op_name_set_.insert(op_name);
rewriters_[op_name] = r;
}
}
}
Status ScopedAllocatorOptimizer::Optimize(Cluster*,
const GrapplerItem& item,
GraphDef* optimized_graph) {
VLOG(3) << "Input graph:";
DumpGraphToVLOG(item.graph, 3);
nodes_to_preserve_ = item.NodesToPreserve();
GraphProperties graph_properties(item);
const bool assume_valid_feeds = opt_level_ == RewriterConfig::AGGRESSIVE;
LOG_WARNING_AND_RETURN_IF_ERROR(graph_properties.InferStatically(
assume_valid_feeds, false,
false));
*optimized_graph = item.graph;
node_map_ = std::make_unique<NodeMap>(optimized_graph);
LOG_WARNING_AND_RETURN_IF_ERROR(ScopedAllocatorOptimizer::ProcessGraphDef(
optimized_graph, graph_properties));
VLOG(1) << "ScopedAllocatorOptimizer::Optimize() done";
VLOG(3) << "Optimized graph:";
DumpGraphToVLOG(*optimized_graph, 3);
return absl::OkStatus();
}
ScopedAllocatorOptimizer::Rewriter* ScopedAllocatorOptimizer::GetRewriter(
const string& op_name) {
auto it = rewriters_.find(op_name);
if (it != rewriters_.end()) {
return it->second;
}
return nullptr;
}
int ScopedAllocatorOptimizer::NewScopedAllocatorId(int num_fields) {
CHECK_GT(num_fields, 0);
int id = next_sa_id_;
next_sa_id_ += (num_fields + 1);
CHECK_GT(next_sa_id_, 0);
return id;
}
Status ScopedAllocatorOptimizer::NewIdentityId(int* id) {
*id = next_identity_id_++;
if (next_identity_id_ < 0) {
return errors::Aborted("NewIdentityId overflow");
}
return absl::OkStatus();
}
ScopedAllocatorOptimizer::~ScopedAllocatorOptimizer() {
for (auto ptr : to_delete_) {
delete ptr;
}
}
void ScopedAllocatorOptimizer::FindOpOccurrences(GraphDef* graph,
const OpNameSet& op_names,
GraphOpOccurrences* occs) {
VLOG(1) << "FindOpOccurrences ";
for (const auto& it : op_names) {
VLOG(1) << "search target " << it;
}
for (int ni = 0; ni < graph->node_size(); ++ni) {
NodeDef* node = graph->mutable_node(ni);
const string& op_name = node->op();
if (op_names.find(op_name) != op_names.end()) {
VLOG(1) << "found " << op_name << " on dev " << node->device();
(*occs)[node->device()][op_name].push_back(node);
}
}
}
namespace {
struct OpNameOrder {
bool operator()(const NodeDef* a, const NodeDef* b) {
return a->name() <= b->name();
}
};
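// Trie over the '/'-separated prefixes of node names; each candidate op is
// stored in the subtree corresponding to its enclosing name scope.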
class Tree {
public:
Tree(const string& edge, int depth) : edge_(edge), depth_(depth) {}
~Tree() {
for (const auto& it : subtrees_) delete it.second;
}
Tree* GetSubTree(const string& edge) {
auto it = subtrees_.find(edge);
if (it != subtrees_.end()) {
return it->second;
}
Tree* t = new Tree(edge, depth_ + 1);
subtrees_[edge] = t;
return t;
}
void InsertNode(NodeDef* n) { nodes_.push_back(n); }
string edge_;
int depth_;
std::vector<NodeDef*> nodes_;
absl::flat_hash_map<string, Tree*> subtrees_;
};
Status ApplyToAll(Tree* tree, const std::function<Status(Tree*)>& func) {
Status s;
for (const auto& it : tree->subtrees_) {
s = ApplyToAll(it.second, func);
if (!s.ok()) return s;
}
s = func(tree);
return s;
}
Tree* ComputeScopeTree(const string& op_name,
const std::vector<NodeDef*>& node_vec) {
Tree* root = new Tree("", 0);
for (NodeDef* n : node_vec) {
std::vector<string> pieces = str_util::Split(n->name(), "/");
int depth = pieces.size() - 1;
Tree* subtree = root;
for (int i = 0; i < depth; ++i) {
subtree = subtree->GetSubTree(pieces[i]);
}
subtree->InsertNode(n);
}
return root;
}
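// Groups nodes by a hash of their loop-frame membership so that only ops in
// the same control-flow context are rewritten together.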
void PartitionByLoopStructure(const FrameView& frame_view,
std::vector<NodeDef*> nodes,
std::vector<std::vector<NodeDef*>>* loop_groups) {
absl::flat_hash_map<uint64, std::vector<NodeDef*>> loop_sets;
for (NodeDef* nd : nodes) {
uint64 hash = 0;
const std::vector<int>& loop_ids = frame_view.Frames(*nd);
for (int id : loop_ids) {
hash = Hash64Combine(hash, static_cast<uint64>(id));
}
loop_sets[hash].push_back(nd);
}
for (auto it : loop_sets) {
loop_groups->push_back(std::move(it.second));
}
}
void IdentifyRepeatedInputs(const std::vector<NodeDef*>& nodes,
absl::flat_hash_set<string>* seen_outputs,
absl::flat_hash_set<string>* repeated_outputs) {
for (NodeDef* node : nodes) {
for (const auto& input_name : node->input()) {
if (!seen_outputs->insert(input_name).second) {
repeated_outputs->insert(input_name);
}
}
}
}
}
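// For every device and enabled op type, builds a name-scope tree over the
// matching nodes, records outputs that are consumed more than once, and
// invokes the Rewriter on each group of two or more ops that share a scope
// and loop structure.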
Status ScopedAllocatorOptimizer::ProcessGraphDef(
GraphDef* graph, const GraphProperties& graph_properties) {
static std::atomic<int64_t> invocation_counter(1);
const int64_t invocation_count =
invocation_counter.fetch_add(1, std::memory_order_seq_cst);
VLOG(1) << "ProcessGraphDef " << invocation_count;
Status status;
GraphOpOccurrences occ;
FindOpOccurrences(graph, op_name_set_, &occ);
if (!occ.empty()) {
FrameView frame_view;
LOG_WARNING_AND_RETURN_IF_ERROR(frame_view.InferFromGraph(*graph));
for (auto& dt : occ) {
VLOG(2) << "Processing device " << dt.first;
const DevOpOccurrences& dev_occ = dt.second;
for (auto& it : dev_occ) {
string op_name = it.first;
VLOG(1) << "Processing " << op_name << " set size " << it.second.size();
Rewriter* rewriter = GetRewriter(op_name);
if (!rewriter) {
LOG(ERROR) << "Failed to find Rewriter in ScopedAllocatorOptimizer "
<< "for op_name " << op_name;
continue;
}
rewriter->SetGraphProperties(graph_properties);
std::unique_ptr<Tree> root(ComputeScopeTree(it.first, it.second));
absl::flat_hash_set<string> seen_outputs;
status = ApplyToAll(root.get(), [this, &seen_outputs](Tree* t) {
IdentifyRepeatedInputs(t->nodes_, &seen_outputs, &repeated_outputs_);
return absl::OkStatus();
});
if (!status.ok()) {
break;
}
status = ApplyToAll(root.get(), [this, rewriter, graph, &frame_view,
&op_name, invocation_count](Tree* t) {
VLOG(2) << "applied to tree node " << t->edge_ << " at depth "
<< t->depth_ << " of size " << t->nodes_.size();
if (t->nodes_.size() > 1) {
std::vector<std::vector<NodeDef*>> loop_groups;
PartitionByLoopStructure(frame_view, t->nodes_, &loop_groups);
for (auto& lg : loop_groups) {
if (lg.size() > 1) {
bool applied = false;
Status s = OrderNodeSet(&lg);
TF_RETURN_IF_ERROR(s);
VLOG(1) << "Applying Rewriter for " << op_name;
s = rewriter->Rewrite(this, invocation_count, graph, op_name,
lg, &applied);
LOG_WARNING_AND_RETURN_IF_ERROR(s);
}
}
}
return absl::OkStatus();
});
if (!status.ok()) {
break;
}
}
if (!status.ok()) {
break;
}
}
}
VLOG(1) << "ScopedAllocatorOptimizer returning " << status;
if (!status.ok()) {
LOG(ERROR) << "ScopedAllocatorOptimizer: " << status;
}
return status;
}
namespace {
struct InstanceKeyLess {
bool operator()(const NodeDef* a, const NodeDef* b) const {
AttrSlice a_attrs = AttrSlice(*a);
AttrSlice b_attrs = AttrSlice(*b);
int32_t a_key = -1;
int32_t b_key = -1;
Status s = GetNodeAttr(a_attrs, "instance_key", &a_key);
CHECK(s.ok());
s = GetNodeAttr(b_attrs, "instance_key", &b_key);
CHECK(s.ok());
return a_key < b_key;
}
};
struct NameLess {
bool operator()(const NodeDef* a, const NodeDef* b) const {
return a->name() < b->name();
}
};
bool IsCollectiveNode(const NodeDef& n) {
AttrSlice attrs = AttrSlice(n);
int key = -1;
if (!IsCollective(n)) return false;
Status s = GetNodeAttr(attrs, "instance_key", &key);
if (s.ok() && key >= 0) {
return true;
}
return false;
}
}
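// Sorts the group deterministically: collective ops by instance_key,
// everything else by node name.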
Status ScopedAllocatorOptimizer::OrderNodeSet(
std::vector<NodeDef*>* nodes) const {
if (nodes->size() <= 1) return absl::OkStatus();
if (IsCollectiveNode(*nodes->at(0))) {
std::sort(nodes->begin(), nodes->end(), InstanceKeyLess());
} else {
std::sort(nodes->begin(), nodes->end(), NameLess());
}
return absl::OkStatus();
}
}
}
#undef LOG_WARNING_AND_RETURN_IF_ERROR | #include "tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.h"
#include <unordered_set>
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/testlib.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/config.pb.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace grappler {
namespace {
class ScopedAllocatorOptimizerTest : public ::testing::Test {
public:
std::unique_ptr<Session> CreateSession(const GraphDef& graph,
const ConfigProto& config) {
SessionOptions options;
options.config = config;
(*options.config.mutable_device_count())["CPU"] = 2;
Session* session = NewSession(options);
TF_CHECK_OK(session->Create(graph));
return std::unique_ptr<Session>(session);
}
std::vector<Tensor> EvaluateNodes(const GraphDef& graph,
const std::vector<string>& fetch) {
SessionOptions options;
std::unique_ptr<Session> session(NewSession(options));
TF_CHECK_OK(session->Create(graph));
RunOptions run_options;
std::vector<Tensor> output_tensors;
TF_CHECK_OK(
session->Run(run_options, {}, fetch, fetch, &output_tensors, nullptr));
TF_CHECK_OK(session->Close());
return output_tensors;
}
void BuildAbsGraph(GraphDef* graph_def, bool forward) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a =
ops::Const<float>(s.WithOpName("a"), {1.0, 0.0, 0.0, -1.0}, {2, 2});
Output b =
ops::Const<float>(s.WithOpName("b"), {1.0, -2.0, 3.0, 4.0}, {2, 2});
Output c =
ops::Const<float>(s.WithOpName("c"), {-5.0, -2.0, 0.0, -2.0}, {2, 2});
Output s1 = ops::Add(s.WithOpName("s1"), a, b);
Output s2 = ops::Add(s.WithOpName("s2"), b, c);
Output int1, int2;
if (forward) {
int1 = ops::Identity(s.WithOpName("i1"), s1);
int2 = ops::Identity(s.WithOpName("i2"), s2);
} else {
int1 = s1;
int2 = s2;
}
Output a1 = ops::Abs(s.WithOpName("a1"), int1);
Output a2 = ops::Abs(s.WithOpName("a2"), int2);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
void BuildAbsGraphWithInputDependencies(GraphDef* graph_def) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output b = ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output c = ops::Placeholder(s.WithOpName("c"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output s1 = ops::Add(s.WithOpName("s1"), b, c);
Output a1 = ops::Abs(s.WithOpName("a1"), a);
Output a2 = ops::Abs(s.WithOpName("a2"), b);
Output a3 = ops::Abs(s.WithOpName("a3"), s1);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
Output r3 = ops::Reshape(s.WithOpName("r3"), a3, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
void BuildAbsGraphWithInputAndOutputControlEdges(GraphDef* graph_def) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Placeholder(s.WithOpName("a"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output b = ops::Placeholder(s.WithOpName("b"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output ctl1 = ops::Placeholder(s.WithOpName("ctl1"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output ctl2 = ops::Placeholder(s.WithOpName("ctl2"), DT_FLOAT,
ops::Placeholder::Shape({2, 2}));
Output a1 = ops::Abs(s.WithOpName("a1").WithControlDependencies({ctl1}), a);
Output a2 = ops::Abs(s.WithOpName("a2").WithControlDependencies({ctl2}), b);
Output o1 = ops::Reshape(s.WithOpName("o1"), a1, {1, 4});
Output o2 = ops::Reshape(s.WithOpName("o2"), a2, {4, 1});
Output ctl3 =
ops::Const<float>(s.WithOpName("ctl3").WithControlDependencies({a1}),
{0.0, 0.0, 0.0, 0.0}, {2, 2});
Output ctl4 =
ops::Const<float>(s.WithOpName("ctl4").WithControlDependencies({a2}),
{0.0, 0.0, 0.0, 0.0}, {2, 2});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
void BuildGraphWithMultipleScopes(GraphDef* graph_def) {
Scope root_scope = Scope::NewRootScope();
root_scope =
root_scope.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output a = ops::Const<float>(root_scope.WithOpName("a"),
{1.0, 0.0, 0.0, -1.0}, {2, 2});
Output b = ops::Const<float>(root_scope.WithOpName("b"),
{1.0, -2.0, 3.0, 4.0}, {2, 2});
Output c = ops::Const<float>(root_scope.WithOpName("c"),
{-5.0, -2.0, 0.0, -2.0}, {2, 2});
Output s1 = ops::Add(root_scope.WithOpName("s1"), a, b);
Output s2 = ops::Add(root_scope.WithOpName("s2"), b, c);
Output a1 = ops::Abs(root_scope.WithOpName("a1"), s1);
Output a2 = ops::Abs(root_scope.WithOpName("a2"), s2);
Output r1 = ops::Reshape(root_scope.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(root_scope.WithOpName("r2"), a2, {4, 1});
Scope sub_scope = root_scope.NewSubScope("sub");
Output s3 = ops::Add(sub_scope.WithOpName("s3"), a, b);
Output a3 = ops::Abs(sub_scope.WithOpName("a3"), s3);
Output a4 = ops::Abs(sub_scope.WithOpName("a4"), s2);
Output r3 = ops::Reshape(sub_scope.WithOpName("r3"), a3, {1, 4});
Output r4 = ops::Reshape(sub_scope.WithOpName("r4"), a4, {4, 1});
TF_CHECK_OK(root_scope.ToGraphDef(graph_def));
}
void BuildConstGraph(GraphDef* graph_def, bool forward) {
Scope s = Scope::NewRootScope();
s = s.WithDevice("/job:localhost/replica:0/task:0/device:CPU:0");
Output c1 =
ops::Const<float>(s.WithOpName("c1"), {1.0, 0.0, 0.0, -1.0}, {2, 2});
Output c2 =
ops::Const<float>(s.WithOpName("c2"), {1.0, -2.0, 3.0, 4.0}, {2, 2});
Output a1 = ops::Abs(s.WithOpName("a1"), c1);
Output a2 = ops::Abs(s.WithOpName("a2"), c2);
Output r1 = ops::Reshape(s.WithOpName("r1"), a1, {1, 4});
Output r2 = ops::Reshape(s.WithOpName("r2"), a2, {4, 1});
TF_CHECK_OK(s.ToGraphDef(graph_def));
}
void SetShapes(GraphDef* graph_def) {
TensorShapeProto shape_proto;
shape_proto.add_dim()->set_size(2);
shape_proto.add_dim()->set_size(2);
for (NodeDef& n : *graph_def->mutable_node()) {
if (n.op() == "Add" || n.op() == "Abs") {
AddNodeAttr("_output_shapes", {shape_proto}, &n);
}
}
}
void ExecuteGraph(const GraphDef& graph_def,
const std::vector<string>& output_names,
std::vector<Tensor>* outputs) {
ConfigProto config;
GraphOptions* gopt = config.mutable_graph_options();
OptimizerOptions* opts = gopt->mutable_optimizer_options();
opts->set_do_common_subexpression_elimination(false);
opts->set_do_constant_folding(false);
opts->set_do_function_inlining(false);
opts->set_opt_level(OptimizerOptions::L0);
RewriterConfig* rwcfg = gopt->mutable_rewrite_options();
rwcfg->clear_optimizers();
(*rwcfg->add_optimizers()) = "scoped_allocator";
rwcfg->mutable_scoped_allocator_opts()->add_enable_op("Abs");
std::unique_ptr<Session> session(CreateSession(graph_def, config));
std::vector<std::pair<string, Tensor>> inputs;
std::vector<string> target_nodes = {};
Status s = session->Run(inputs, output_names, target_nodes, outputs);
TF_ASSERT_OK(s);
ASSERT_EQ(outputs->size(), output_names.size());
}
void ValidateValues(const std::vector<Tensor>& outputs,
const std::vector<std::vector<float>>& expected) {
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(expected[i].size(), outputs[i].NumElements());
for (int j = 0; j < expected[i].size(); ++j) {
EXPECT_EQ(expected[i][j], outputs[i].flat<float>()(j));
}
}
}
void GetNode(NodeMap* node_map, const string& node_name, NodeDef** node_def) {
*node_def = node_map->GetNode(node_name);
ASSERT_TRUE(*node_def);
}
NodeDef* ValidateSAControlInput(GraphDef* graph, NodeMap* node_map,
const string& node_name) {
NodeDef* node = nullptr;
GetNode(node_map, node_name, &node);
int num_control_inputs = 0;
string control_input_name;
for (const auto& input : node->input()) {
if (IsControlInput(input)) {
++num_control_inputs;
control_input_name = input;
}
}
EXPECT_EQ(num_control_inputs, 1);
NodeDef* control_input_node = nullptr;
GetNode(node_map, control_input_name, &control_input_node);
EXPECT_EQ(control_input_node->op(), "_ScopedAllocator");
return control_input_node;
}
int NumControlInputs(NodeMap* node_map, const string& node_name) {
NodeDef* node = nullptr;
GetNode(node_map, node_name, &node);
int num_control_inputs = 0;
for (const auto& input : node->input()) {
if (IsControlInput(input)) {
++num_control_inputs;
}
}
return num_control_inputs;
}
};
#ifndef ENABLE_MKL
TEST_F(ScopedAllocatorOptimizerTest, UnaryRewriteOnly) {
GrapplerItem item;
BuildAbsGraph(&item.graph, false);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Abs");
GraphDef optimized_graph;
  TF_ASSERT_OK(sao.Optimize(nullptr, item, &optimized_graph));
NodeMap node_map(&optimized_graph);
NodeDef* nd = nullptr;
GetNode(&node_map, "scoped_allocator_1_1", &nd);
{
auto& nd_set = node_map.GetOutputs(nd->name());
ASSERT_EQ(3, nd_set.size());
std::unordered_set<string> expected = {"scoped_allocator_concat_1_1", "s1",
"s2"};
for (auto it : nd_set) {
ASSERT_NE(expected.find(it->name()), expected.end())
<< "Failed to find " << it->name();
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_concat_1_1");
ASSERT_EQ(1, nd_set.size());
for (auto it : nd_set) {
ASSERT_EQ("scoped_allocator_1_1_Abs", it->name());
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_1_1_Abs");
ASSERT_EQ(1, nd_set.size());
for (auto it : nd_set) {
ASSERT_EQ("scoped_allocator_split_1_1", it->name());
}
}
{
auto& nd_set = node_map.GetOutputs("scoped_allocator_split_1_1");
ASSERT_EQ(2, nd_set.size());
std::unordered_set<string> name_set;
for (auto it : nd_set) {
name_set.insert(it->name());
}
ASSERT_TRUE(name_set.find("r1") != name_set.end());
ASSERT_TRUE(name_set.find("r2") != name_set.end());
}
}
TEST_F(ScopedAllocatorOptimizerTest, UnaryExecute) {
GraphDef graph_def;
BuildAbsGraph(&graph_def, false);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
ExecuteGraph(graph_def,
{"r1:0", "r2:0"}, &outputs);
ValidateValues(outputs, {{2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, MultipleScopes) {
GraphDef graph_def;
BuildGraphWithMultipleScopes(&graph_def);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
ExecuteGraph(graph_def,
{"r1:0", "r2:0", "sub/r3:0", "sub/r4:0"},
&outputs);
ValidateValues(
outputs,
{{2, 2, 3, 3}, {4, 4, 3, 2}, {2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, Extend) {
NodeDef nd;
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {0, 2}, &nd);
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {6, 7}, &nd);
ScopedAllocatorOptimizer::ExtendNodeAttr("_scoped_allocator", {2, 3}, &nd);
VLOG(0) << "nd: " << nd.DebugString();
std::vector<int> scoped_allocator_attrs;
AttrSlice slice(nd);
Status sa_status =
GetNodeAttr(slice, "_scoped_allocator", &scoped_allocator_attrs);
for (int i : scoped_allocator_attrs) {
VLOG(0) << "extracted: " << i;
}
NodeDef nd2;
AddNodeAttr("_scoped_allocator", {0, 2}, &nd2);
AddNodeAttr("_scoped_allocator", {6, 7}, &nd2);
AddNodeAttr("_scoped_allocator", {2, 3}, &nd2);
VLOG(0) << "nd2: " << nd2.DebugString();
}
TEST_F(ScopedAllocatorOptimizerTest, ForwardInputToOutput) {
GraphDef graph_def;
BuildAbsGraph(&graph_def, true);
SetShapes(&graph_def);
std::vector<Tensor> outputs;
ExecuteGraph(graph_def, {"r1:0", "r2:0"}, &outputs);
ValidateValues(outputs, {{2, 2, 3, 3}, {4, 4, 3, 2}});
}
TEST_F(ScopedAllocatorOptimizerTest, InputDependencies) {
GrapplerItem item;
BuildAbsGraphWithInputDependencies(&item.graph);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Add");
GraphDef optimized_graph;
TF_ASSERT_OK(sao.Optimize(nullptr, item, &optimized_graph));
NodeMap node_map(&optimized_graph);
NodeDef* scoped_allocator_node =
ValidateSAControlInput(&optimized_graph, &node_map, "a");
VLOG(1) << scoped_allocator_node->DebugString();
EXPECT_TRUE(ValidateSAControlInput(&optimized_graph, &node_map, "b"));
EXPECT_TRUE(ValidateSAControlInput(&optimized_graph, &node_map, "s1"));
EXPECT_EQ(scoped_allocator_node->input_size(), 1);
EXPECT_EQ(scoped_allocator_node->input(0), "^c");
}
TEST_F(ScopedAllocatorOptimizerTest, ControlEdgeRewire) {
GrapplerItem item;
BuildAbsGraphWithInputAndOutputControlEdges(&item.graph);
SetShapes(&item.graph);
LOG(INFO) << item.graph.DebugString();
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Const");
GraphDef optimized_graph;
TF_ASSERT_OK(sao.Optimize(nullptr, item, &optimized_graph));
TF_ASSERT_OK(TopologicalSort(&optimized_graph));
NodeMap node_map(&optimized_graph);
LOG(INFO) << optimized_graph.DebugString();
NodeDef* ctl1 = nullptr;
GetNode(&node_map, "ctl1", &ctl1);
const auto& ctl1_outputs = node_map.GetOutputs("ctl1");
EXPECT_EQ(ctl1_outputs.size(), 1);
NodeDef* sa_concat = *ctl1_outputs.begin();
EXPECT_EQ(sa_concat->op(), "_ScopedAllocatorConcat");
NodeDef* ctl2 = nullptr;
GetNode(&node_map, "ctl2", &ctl2);
const auto& ctl2_outputs = node_map.GetOutputs("ctl2");
EXPECT_EQ(ctl2_outputs.size(), 1);
EXPECT_EQ(*ctl2_outputs.begin(), sa_concat);
EXPECT_EQ(NumControlInputs(&node_map, sa_concat->name()), 2);
const auto& sa_concat_outputs = node_map.GetOutputs(sa_concat->name());
EXPECT_EQ(sa_concat_outputs.size(), 1);
NodeDef* fused_abs = *sa_concat_outputs.begin();
EXPECT_EQ(NumControlInputs(&node_map, fused_abs->name()), 0);
const auto& fused_abs_outputs = node_map.GetOutputs(fused_abs->name());
EXPECT_EQ(fused_abs_outputs.size(), 1);
NodeDef* sa_split = *fused_abs_outputs.begin();
EXPECT_EQ(NumControlOutputs(*sa_split, node_map), 2);
EXPECT_EQ(NumControlInputs(&node_map, "ctl3"), 1);
EXPECT_EQ(NumControlInputs(&node_map, "ctl4"), 1);
}
TEST_F(ScopedAllocatorOptimizerTest, ConstInput) {
GrapplerItem item;
BuildConstGraph(&item.graph, false);
SetShapes(&item.graph);
ScopedAllocatorOptions opts;
opts.add_enable_op("Abs");
ScopedAllocatorOptimizer sao(RewriterConfig::ON, opts);
ScopedAllocatorOptimizer::OpNameSet ons;
ons.insert("Abs");
GraphDef optimized_graph;
  TF_ASSERT_OK(sao.Optimize(nullptr, item, &optimized_graph));
const NodeDef* sa_node = nullptr;
for (const NodeDef& node : optimized_graph.node()) {
if (node.op() == "_ScopedAllocator") {
sa_node = &node;
break;
}
}
ASSERT_NE(sa_node, nullptr);
int num_identity_ops = 0;
NodeMap node_map(&optimized_graph);
for (NodeDef* sa_output : node_map.GetOutputs(sa_node->name())) {
EXPECT_FALSE(IsConstant(*sa_output));
if (IsIdentity(*sa_output)) {
++num_identity_ops;
}
}
EXPECT_EQ(num_identity_ops, 2);
}
#endif
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/scoped_allocator_optimizer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
81a66a6c-8535-493f-811b-5d2d96798c77 | cpp | tensorflow/tensorflow | bfloat16 | tensorflow/core/framework/bfloat16.cc | tensorflow/core/framework/bfloat16_test.cc | #include "tensorflow/core/framework/bfloat16.h"
#include "Eigen/Core"
namespace tensorflow {
void RoundFloatToBFloat16(const float* src, bfloat16* dst, int64_t size) {
Eigen::Map<const Eigen::ArrayXf> src_eigen(src, size);
Eigen::Map<Eigen::Array<bfloat16, Eigen::Dynamic, 1>> dst_eigen(dst, size);
dst_eigen = src_eigen.cast<bfloat16>();
}
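// Truncating conversion: copies the 16 high-order bits of each float, unlike
// RoundFloatToBFloat16 above, which rounds through Eigen's bfloat16 cast.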
void FloatToBFloat16(const float* src, bfloat16* dst, int64_t size) {
for (; size != 0; src++, dst++, size--) {
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
memcpy(dst, src, sizeof(bfloat16));
#else
memcpy(
dst,
reinterpret_cast<const char*>(src) + sizeof(float) - sizeof(bfloat16),
sizeof(bfloat16));
#endif
}
}
void BFloat16ToFloat(const bfloat16* src, float* dst, int64_t size) {
Eigen::Map<const Eigen::Array<bfloat16, Eigen::Dynamic, 1>> src_eigen(src,
size);
Eigen::Map<Eigen::ArrayXf> dst_eigen(dst, size);
dst_eigen = src_eigen.cast<float>();
}
} | #include "tensorflow/core/framework/bfloat16.h"
#include "absl/base/casts.h"
#include "tensorflow/core/framework/numeric_types.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
TEST(Bfloat16Test, Conversion) {
float a[100];
for (int i = 0; i < 100; ++i) {
a[i] = i + 1.25;
}
bfloat16 b[100];
float c[100];
FloatToBFloat16(a, b, 100);
BFloat16ToFloat(b, c, 100);
for (int i = 0; i < 100; ++i) {
EXPECT_LE(fabs(c[i] - a[i]) / a[i], 1.0 / 128);
}
}
void BM_FloatToBFloat16(::testing::benchmark::State& state) {
static const int N = 32 << 20;
float* inp = new float[N];
bfloat16* out = new bfloat16[N];
for (auto s : state) {
FloatToBFloat16(inp, out, N);
}
const int64_t tot = static_cast<int64_t>(state.iterations()) * N;
state.SetItemsProcessed(tot);
state.SetBytesProcessed(tot * (sizeof(float) + sizeof(bfloat16)));
delete[] inp;
delete[] out;
}
BENCHMARK(BM_FloatToBFloat16);
void BM_RoundFloatToBFloat16(::testing::benchmark::State& state) {
static const int N = 32 << 20;
float* inp = new float[N];
bfloat16* out = new bfloat16[N];
for (auto s : state) {
RoundFloatToBFloat16(inp, out, N);
tensorflow::testing::DoNotOptimize(inp);
tensorflow::testing::DoNotOptimize(out);
}
const int64_t tot = static_cast<int64_t>(state.iterations()) * N;
state.SetItemsProcessed(tot);
state.SetBytesProcessed(tot * (sizeof(float) + sizeof(bfloat16)));
delete[] inp;
delete[] out;
}
BENCHMARK(BM_RoundFloatToBFloat16);
void BM_BFloat16ToFloat(::testing::benchmark::State& state) {
static const int N = 32 << 20;
bfloat16* inp = new bfloat16[N];
float* out = new float[N];
for (auto s : state) {
BFloat16ToFloat(inp, out, N);
}
const int64_t tot = static_cast<int64_t>(state.iterations()) * N;
state.SetItemsProcessed(tot);
state.SetBytesProcessed(tot * (sizeof(float) + sizeof(bfloat16)));
delete[] inp;
delete[] out;
}
BENCHMARK(BM_BFloat16ToFloat);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/bfloat16.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/bfloat16_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d7563ad5-4d68-4de2-a7be-a79d148bcbfa | cpp | google/tsl | integral_types | tsl/platform/default/integral_types.h | tsl/platform/integral_types_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
#define TENSORFLOW_TSL_PLATFORM_DEFAULT_INTEGRAL_TYPES_H_
#include <cstdint>
namespace tsl {
typedef signed char int8;
typedef short int16;
typedef int int32;
typedef ::std::int64_t int64;
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned int uint32;
typedef std::uint64_t uint64;
}
#endif | #include "tsl/platform/test.h"
#include "tsl/platform/types.h"
namespace tsl {
namespace {
TEST(IntegralTypes, Basic) {
EXPECT_EQ(1, sizeof(int8));
EXPECT_EQ(2, sizeof(int16));
EXPECT_EQ(4, sizeof(int32));
EXPECT_EQ(8, sizeof(int64_t));
EXPECT_EQ(1, sizeof(uint8));
EXPECT_EQ(2, sizeof(uint16));
EXPECT_EQ(4, sizeof(uint32));
EXPECT_EQ(8, sizeof(uint64));
}
TEST(IntegralTypes, MinAndMaxConstants) {
EXPECT_EQ(static_cast<uint8>(kint8min), static_cast<uint8>(kint8max) + 1);
EXPECT_EQ(static_cast<uint16>(kint16min), static_cast<uint16>(kint16max) + 1);
EXPECT_EQ(static_cast<uint32>(kint32min), static_cast<uint32>(kint32max) + 1);
EXPECT_EQ(static_cast<uint64>(kint64min), static_cast<uint64>(kint64max) + 1);
EXPECT_EQ(0, static_cast<uint8>(kuint8max + 1));
EXPECT_EQ(0, static_cast<uint16>(kuint16max + 1));
EXPECT_EQ(0, static_cast<uint32>(kuint32max + 1));
EXPECT_EQ(0, static_cast<uint64>(kuint64max + 1));
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/default/integral_types.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/integral_types_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
64710183-ea4a-40de-abc1-7e74867a301d | cpp | google/libaddressinput | language | cpp/src/language.cc | cpp/test/language_test.cc | #include "language.h"
#include <algorithm>
#include <cctype>
#include <string>
#include <vector>
#include "rule.h"
#include "util/string_split.h"
namespace i18n {
namespace addressinput {
Language::Language(const std::string& language_tag) : tag(language_tag),
base(),
has_latin_script(false) {
static const char kSubtagsSeparator = '-';
static const char kAlternativeSubtagsSeparator = '_';
std::replace(
tag.begin(), tag.end(), kAlternativeSubtagsSeparator, kSubtagsSeparator);
std::string lowercase = tag;
std::transform(
lowercase.begin(), lowercase.end(), lowercase.begin(), tolower);
base = lowercase.substr(0, lowercase.find(kSubtagsSeparator));
static const char kLowercaseLatinScript[] = "latn";
std::vector<std::string> subtags;
SplitString(lowercase, kSubtagsSeparator, &subtags);
has_latin_script =
(subtags.size() > 1 && subtags[1] == kLowercaseLatinScript) ||
(subtags.size() > 2 && subtags[2] == kLowercaseLatinScript);
}
Language::~Language() = default;
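// Picks the formatting language: the first listed language with a "-Latn"
// suffix when the rule has a Latin format and the UI language is Latin-script;
// otherwise the listed language whose base matches the UI language; otherwise
// the Latin variant (when a Latin format exists) or the first listed language.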
Language ChooseBestAddressLanguage(const Rule& address_region_rule,
const Language& ui_language) {
if (address_region_rule.GetLanguages().empty()) {
return ui_language;
}
std::vector<Language> available_languages;
for (const auto& language_tag : address_region_rule.GetLanguages()) {
available_languages.emplace_back(language_tag);
}
if (ui_language.tag.empty()) {
return available_languages.front();
}
bool has_latin_format = !address_region_rule.GetLatinFormat().empty();
static const char kLatinScriptSuffix[] = "-Latn";
Language latin_script_language(
available_languages.front().base + kLatinScriptSuffix);
if (has_latin_format && ui_language.has_latin_script) {
return latin_script_language;
}
for (const auto& language : available_languages) {
if (ui_language.base == language.base) {
return language;
}
}
return has_latin_format ? latin_script_language : available_languages.front();
}
}
} | #include "language.h"
#include <string>
#include <gtest/gtest.h>
namespace {
using i18n::addressinput::Language;
struct LanguageTestCase {
LanguageTestCase(const std::string& input_language_tag,
const std::string& expected_language_tag,
const std::string& expected_base_language,
bool expected_has_latin_script)
: input_language_tag(input_language_tag),
expected_language_tag(expected_language_tag),
expected_base_language(expected_base_language),
expected_has_latin_script(expected_has_latin_script) {}
~LanguageTestCase() = default;
const std::string input_language_tag;
const std::string expected_language_tag;
const std::string expected_base_language;
const bool expected_has_latin_script;
};
class LanguageTest : public testing::TestWithParam<LanguageTestCase> {
public:
LanguageTest(const LanguageTest&) = delete;
LanguageTest& operator=(const LanguageTest&) = delete;
protected:
LanguageTest() = default;
};
TEST_P(LanguageTest, ExtractedDataIsCorrect) {
Language language(GetParam().input_language_tag);
EXPECT_EQ(GetParam().expected_language_tag, language.tag);
EXPECT_EQ(GetParam().expected_base_language, language.base);
EXPECT_EQ(GetParam().expected_has_latin_script, language.has_latin_script);
}
INSTANTIATE_TEST_SUITE_P(
LanguageTestCases, LanguageTest,
testing::Values(LanguageTestCase("", "", "", false),
LanguageTestCase("en", "en", "en", false),
LanguageTestCase("zh-Latn-CN", "zh-Latn-CN", "zh", true),
LanguageTestCase("zh-cmn-Latn-CN", "zh-cmn-Latn-CN", "zh",
true),
LanguageTestCase("zh-Hans", "zh-Hans", "zh", false),
LanguageTestCase("en_GB", "en-GB", "en", false)));
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/language.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/language_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
e4cffad6-7c59-4630-b97b-acf78e9c302a | cpp | tensorflow/tensorflow | stablehlo_multiply | tensorflow/lite/kernels/stablehlo_multiply.cc | tensorflow/lite/kernels/stablehlo_multiply_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_elementwise.h"
namespace tflite::ops::builtin {
TfLiteRegistration* Register_STABLEHLO_MULTIPLY() {
static TfLiteRegistration r = {nullptr, nullptr, ElementwisePrepare,
ElementwiseEval<ComputationType::kMul>};
return &r;
}
} | #include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
class MultiplyOpModel : public SingleOpModel {
public:
MultiplyOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_STABLEHLO_MULTIPLY, BuiltinOptions_NONE, 0);
SetBypassDefaultDelegates();
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
std::vector<float> GetOutput() { return ExtractVector<float>(output_); }
protected:
int input1_;
int input2_;
int output_;
};
TEST(StablehloElementwise, MultiplyWorks) {
MultiplyOpModel model({TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {1, 2, 2, 1}},
{TensorType_FLOAT32, {}});
model.PopulateTensor<float>(model.input1(), {1.2, 2.5, -1.2, 1});
model.PopulateTensor<float>(model.input2(), {0.1, 3, 2, 0.5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::vector<float> expected_values = {0.12, 7.5, -2.4, 0.5};
std::vector<float> actual_values = model.GetOutput();
ASSERT_EQ(actual_values.size(), expected_values.size());
for (int idx = 0; idx < expected_values.size(); ++idx) {
ASSERT_NEAR(actual_values[idx], expected_values[idx], 1e-6);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_multiply.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_multiply_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2cc773d3-be6d-4d81-99e4-e20270add2d0 | cpp | tensorflow/tensorflow | common_subgraph_elimination | tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc | tensorflow/core/grappler/optimizers/common_subgraph_elimination_test.cc | #include "tensorflow/core/grappler/optimizers/common_subgraph_elimination.h"
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/grappler/graph_topology_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/graph_optimizer.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/canonicalizer.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/grappler/utils/traversal.h"
#include "tensorflow/core/lib/gtl/flatset.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/hash.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/platform/stringpiece.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace grappler {
class Cluster;
}
}
using tensorflow::strings::StrCat;
namespace tensorflow {
namespace grappler {
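// Buckets nodes by a signature hashed from op, device, inputs and attrs, and
// returns a canonical representative for structurally identical nodes.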
class UniqueNodes {
public:
NodeDef* FindOrAddRepresentative(NodeDef* node) {
uint64 sig = ComputeSignature(*node);
std::vector<NodeDef*>& candidates = rep_[sig];
for (auto& candidate : candidates) {
if ((candidate == node) || SameNode(*candidate, *node)) {
return candidate;
}
}
candidates.push_back(node);
return node;
}
void RemoveRepresentative(NodeDef* node) {
auto it = memoized_signatures_.find(node);
if (it == memoized_signatures_.end()) return;
std::vector<NodeDef*>& candidates = rep_[it->second];
for (int i = 0, end = candidates.size(); i < end; ++i) {
if (candidates[i] == node) {
std::swap(candidates[i], candidates[candidates.size() - 1]);
candidates.resize(candidates.size() - 1);
break;
}
}
memoized_signatures_.erase(node);
}
private:
uint64 ComputeSignature(const NodeDef& node);
bool SameNode(const NodeDef& node1, const NodeDef& node2) const;
absl::flat_hash_map<uint64, std::vector<NodeDef*>> rep_;
absl::flat_hash_map<const NodeDef*, uint64> memoized_signatures_;
};
uint64 UniqueNodes::ComputeSignature(const NodeDef& node) {
auto it = memoized_signatures_.find(&node);
if (it != memoized_signatures_.end()) return it->second;
uint64 h = Hash64(node.op());
h = Hash64Combine(Hash64(node.device()), h);
for (const auto& input : node.input()) {
const TensorId input_tensor = ParseTensorName(input);
uint64 input_hash = Hash64Combine(
Hash64(input_tensor.node().data(), input_tensor.node().size()),
std::hash<int>()(input_tensor.index()));
h = Hash64CombineUnordered(input_hash, h);
}
for (const auto& attr : node.attr()) {
uint64 attr_hash =
Hash64Combine(Hash64(attr.first), FastAttrValueHash(attr.second));
h = Hash64CombineUnordered(attr_hash, h);
}
memoized_signatures_.emplace(&node, h);
return h;
}
bool UniqueNodes::SameNode(const NodeDef& node1, const NodeDef& node2) const {
if (node1.op() != node2.op()) {
return false;
}
if (node1.device() != node2.device()) {
return false;
}
if (node1.input_size() != node2.input_size()) {
return false;
}
if (node1.attr_size() != node2.attr_size()) {
return false;
}
auto it1 = node1.input().begin();
auto it2 = node2.input().begin();
for (; it1 != node1.input().end(); ++it1, ++it2) {
if (*it1 != *it2) return false;
}
for (const auto& attr1 : node1.attr()) {
auto it = node2.attr().find(attr1.first);
if (it == node2.attr().end()) return false;
if (!AreAttrValuesEqual(attr1.second, it->second,
true)) {
return false;
}
}
return true;
}
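// A node is a dedup candidate only if it is not preserved, is not an
// Enter/Exit, is not placed on an "SPU" device, and is either an Assert/Print
// or free of side effects.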
bool CommonSubgraphElimination::CanDedup(const NodeDef& node) const {
if (nodes_to_preserve_.find(node.name()) != nodes_to_preserve_.end()) {
return false;
}
if (IsEnter(node) || IsExit(node)) {
return false;
}
if (node.device().find("SPU") != string::npos) {
return false;
}
if (IsAssert(node) || IsPrint(node)) {
return true;
}
return IsFreeOfSideEffect(node);
}
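// Canonicalizes the graph, excludes nodes that feed in-place ops, then
// repeatedly folds duplicates into their representative and erases the dead
// nodes once the fetch set is known.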
Status CommonSubgraphElimination::DedupComputations(GraphDef* optimized_graph) {
CanonicalizeGraph(optimized_graph);
GraphTopologyView graph_view;
if (!graph_view.InitializeFromGraph(*optimized_graph).ok()) {
LOG(WARNING) << "Failed to initialize GraphTopologyView.";
return absl::OkStatus();
}
absl::flat_hash_set<const NodeDef*> feeds_inplace_op;
for (int i = 0; i < optimized_graph->node_size(); ++i) {
const NodeDef& root = optimized_graph->node(i);
if (feeds_inplace_op.find(&root) != feeds_inplace_op.end()) continue;
if (ModifiesInputsInPlace(root)) {
const auto is_continue_traversal = [&](const NodeDef* node) -> bool {
return node->op() == root.op() || !NeverForwardsInputs(*node);
};
DfsTraversal(graph_view, {&root}, TraversalDirection::kFollowInputs,
DfsPredicates::Advance(is_continue_traversal),
DfsCallbacks::PreOrder([&](const NodeDef* node) {
feeds_inplace_op.insert(node);
}));
}
}
std::vector<bool> can_dedup(optimized_graph->node_size());
for (int i = 0; i < optimized_graph->node_size(); ++i) {
const NodeDef& node = optimized_graph->node(i);
can_dedup[i] = (feeds_inplace_op.find(&node) == feeds_inplace_op.end()) &&
CanDedup(node);
}
bool stop = true;
std::set<int> duplicates;
UniqueNodes nodes;
NodeMap node_map(optimized_graph);
do {
stop = true;
for (int i = 0; i < optimized_graph->node_size(); ++i) {
if (!can_dedup[i] || duplicates.find(i) != duplicates.end()) {
continue;
}
NodeDef* node = optimized_graph->mutable_node(i);
NodeDef* rep = nodes.FindOrAddRepresentative(node);
if (rep == node) {
continue;
}
const auto fanouts = node_map.GetOutputs(node->name());
for (NodeDef* fanout : fanouts) {
bool updated_fanout = false;
for (int i = 0; i < fanout->input_size(); ++i) {
string* fanout_input = fanout->mutable_input(i);
const int position =
NodePositionIfSameNode(*fanout_input, node->name());
if (position < -1) {
continue;
} else {
if (!updated_fanout) {
nodes.RemoveRepresentative(fanout);
}
updated_fanout = true;
if (position > 0) {
*fanout_input = StrCat(rep->name(), ":", position);
} else if (position == 0) {
*fanout_input = rep->name();
} else {
*fanout_input = StrCat("^", rep->name());
}
}
}
if (updated_fanout) {
node_map.UpdateInput(fanout->name(), node->name(), rep->name());
CanonicalizeNode(fanout);
}
}
if (fetch_nodes_known_) {
node->Clear();
}
duplicates.insert(i);
stop = false;
}
} while (!stop);
if (fetch_nodes_known_ && !duplicates.empty()) {
EraseNodesFromGraph(duplicates, optimized_graph);
}
return absl::OkStatus();
}
Status CommonSubgraphElimination::Optimize(Cluster* /*cluster*/,

const GrapplerItem& item,
GraphDef* optimized_graph) {
nodes_to_preserve_ = item.NodesToPreserve();
fetch_nodes_known_ = !item.fetch.empty();
*optimized_graph = item.graph;
TF_RETURN_IF_ERROR(TopologicalSort(optimized_graph));
GRAPPLER_RETURN_IF_DEADLINE_EXCEEDED();
return DedupComputations(optimized_graph);
}
}
} | #include "tensorflow/core/grappler/optimizers/common_subgraph_elimination.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/math_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/grappler/optimizers/arithmetic_optimizer_test_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
void VerifyGraphsMatch(const GraphDef& original_graph,
const GraphDef& optimized_graph, int line) {
EXPECT_EQ(original_graph.node_size(), optimized_graph.node_size()) << line;
for (int i = 0; i < original_graph.node_size(); ++i) {
const NodeDef& original = original_graph.node(i);
const NodeDef& optimized = optimized_graph.node(i);
EXPECT_EQ(original.name(), optimized.name()) << line;
EXPECT_EQ(original.op(), optimized.op()) << line;
EXPECT_EQ(original.input_size(), optimized.input_size()) << line;
for (int j = 0; j < original.input_size(); ++j) {
EXPECT_EQ(original.input(j), optimized.input(j)) << line;
}
}
}
}
class CommonSubgraphEliminationTest : public ArithmeticOptimizerTest {};
TEST_F(CommonSubgraphEliminationTest, NoOp) {
TrivialTestGraphInputYielder fake_input(4, 1, 10, false, {"CPU:0"});
GrapplerItem item;
CHECK(fake_input.NextItem(&item));
CommonSubgraphElimination optimizer;
GraphDef output;
Status status = optimizer.Optimize(nullptr, item, &output);
TF_EXPECT_OK(status);
VerifyGraphsMatch(item.graph, output, __LINE__);
}
TEST_F(CommonSubgraphEliminationTest, OpDedupping) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c1 = ops::Const(s.WithOpName("c1"), {3.14, 2.7}, {1, 2});
Output c2 = ops::Const(s.WithOpName("c2"), {3.14, 2.7}, {1, 2});
Output div = ops::Div(s.WithOpName("div"), c1, c2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 2);
const NodeDef* new_c1 = node_map.GetNode("c1");
ASSERT_NE(new_c1, nullptr);
const NodeDef* new_div = node_map.GetNode("div");
ASSERT_NE(new_div, nullptr);
ASSERT_EQ(new_div->input_size(), 2);
EXPECT_EQ(new_div->input(0), "c1");
EXPECT_EQ(new_div->input(1), "c1");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(CommonSubgraphEliminationTest, OpDeduppingAssertAndCheckNumerics) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output p = ops::Placeholder(s, DT_BOOL, ops::Placeholder::Shape({}));
Output c = ops::Const(s.WithOpName("c"), {3.14, 2.7}, {1, 2});
auto check1 = ops::CheckNumerics(s.WithOpName("check1"), c, "foo");
auto check2 = ops::CheckNumerics(s.WithOpName("check2"), c, "foo");
auto assert1 = ops::Assert(s.WithOpName("assert1"), p, {c});
auto assert2 = ops::Assert(s.WithOpName("assert2"), p, {c});
Output div = ops::Div(s.WithOpName("div").WithControlDependencies(
{assert1.operation, assert2.operation}),
check1, check2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div"};
Tensor bool_t(DT_BOOL, TensorShape({}));
bool_t.scalar<bool>().setConstant(true);
auto tensors_expected =
EvaluateNodes(item.graph, item.fetch, {{"Placeholder", bool_t}});
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 6);
const NodeDef* new_div = node_map.GetNode("div");
ASSERT_NE(new_div, nullptr);
ASSERT_EQ(new_div->input_size(), 3);
EXPECT_EQ(new_div->input(0), "check1");
EXPECT_EQ(new_div->input(1), "check2");
EXPECT_EQ(new_div->input(2), "^assert1");
auto tensors = EvaluateNodes(output, item.fetch, {{"Placeholder", bool_t}});
EXPECT_EQ(tensors.size(), 1);
test::ExpectTensorNear<double>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(CommonSubgraphEliminationTest, OpDedupCommutative) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output c1 = ops::Const(s.WithOpName("c1"), {1.0f, 2.0f}, {1, 2});
Output c2 = ops::Const(s.WithOpName("c2"), {3.0f, 4.0f}, {1, 2});
Output mul1 = ops::Mul(s.WithOpName("mul1"), c1, c2);
Output mul2 = ops::Mul(s.WithOpName("mul2"), c2, c1);
Output div1 = ops::Div(s.WithOpName("div1"), mul1, mul2);
GrapplerItem item;
TF_CHECK_OK(s.ToGraphDef(&item.graph));
item.fetch = {"div1"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
CommonSubgraphElimination optimizer;
GraphDef output;
OptimizeTwice(&optimizer, &item, &output);
NodeMap node_map(&output);
EXPECT_EQ(output.node_size(), 4);
const NodeDef* new_c1 = node_map.GetNode("c1");
ASSERT_NE(new_c1, nullptr);
const NodeDef* new_c2 = node_map.GetNode("c2");
ASSERT_NE(new_c2, nullptr);
const NodeDef* new_mul1 = node_map.GetNode("mul1");
ASSERT_NE(new_mul1, nullptr);
ASSERT_EQ(new_mul1->input_size(), 2);
EXPECT_EQ(new_mul1->input(0), "c1");
EXPECT_EQ(new_mul1->input(1), "c2");
const NodeDef* new_div1 = node_map.GetNode("div1");
ASSERT_NE(new_div1, nullptr);
ASSERT_EQ(new_div1->input_size(), 2);
EXPECT_EQ(new_div1->input(0), "mul1");
EXPECT_EQ(new_div1->input(1), "mul1");
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/common_subgraph_elimination.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/common_subgraph_elimination_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cb8cdf4-cddc-43a3-bd8c-aee1ea0653d4 | cpp | tensorflow/tensorflow | op_gen_lib | tensorflow/core/framework/op_gen_lib.cc | tensorflow/core/framework/op_gen_lib_test.cc | #include "tensorflow/core/framework/op_gen_lib.h"
#include <algorithm>
#include <vector>
#include "absl/strings/escaping.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/util/proto/proto_utils.h"
namespace tensorflow {
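// Wraps `str` at `width` columns. The first line is prefixed with `prefix`,
// and continuation lines are indented to align under it.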
string WordWrap(StringPiece prefix, StringPiece str, int width) {
const string indent_next_line = "\n" + Spaces(prefix.size());
width -= prefix.size();
string result;
strings::StrAppend(&result, prefix);
while (!str.empty()) {
if (static_cast<int>(str.size()) <= width) {
strings::StrAppend(&result, str);
break;
}
auto space = str.rfind(' ', width);
if (space == StringPiece::npos) {
space = str.find(' ');
if (space == StringPiece::npos) {
strings::StrAppend(&result, str);
break;
}
}
StringPiece to_append = str.substr(0, space);
str.remove_prefix(space + 1);
while (absl::EndsWith(to_append, " ")) {
to_append.remove_suffix(1);
}
while (absl::ConsumePrefix(&str, " ")) {
}
strings::StrAppend(&result, to_append);
if (!str.empty()) strings::StrAppend(&result, indent_next_line);
}
return result;
}
bool ConsumeEquals(StringPiece* description) {
if (absl::ConsumePrefix(description, "=")) {
    while (absl::ConsumePrefix(description, " ")) {
}
return true;
}
return false;
}
static bool SplitAt(char split_ch, StringPiece* orig,
StringPiece* before_split) {
auto pos = orig->find(split_ch);
if (pos == StringPiece::npos) {
*before_split = *orig;
*orig = StringPiece();
return false;
} else {
*before_split = orig->substr(0, pos);
orig->remove_prefix(pos + 1);
return true;
}
}
static bool StartsWithFieldName(StringPiece line,
const std::vector<string>& multi_line_fields) {
StringPiece up_to_colon;
if (!SplitAt(':', &line, &up_to_colon)) return false;
while (absl::ConsumePrefix(&up_to_colon, " "))
;
for (const auto& field : multi_line_fields) {
if (up_to_colon == field) {
return true;
}
}
return false;
}
static bool ConvertLine(StringPiece line,
const std::vector<string>& multi_line_fields,
string* ml) {
if (!StartsWithFieldName(line, multi_line_fields)) {
return false;
}
StringPiece up_to_colon;
StringPiece after_colon = line;
SplitAt(':', &after_colon, &up_to_colon);
while (absl::ConsumePrefix(&after_colon, " "))
;
if (!absl::ConsumePrefix(&after_colon, "\"")) {
return false;
}
auto last_quote = after_colon.rfind('\"');
if (last_quote == StringPiece::npos) {
return false;
}
StringPiece escaped = after_colon.substr(0, last_quote);
StringPiece suffix = after_colon.substr(last_quote + 1);
string unescaped;
if (!absl::CUnescape(escaped, &unescaped, nullptr)) {
return false;
}
string end = "END";
for (int s = 0; unescaped.find(end) != string::npos; ++s) {
end = strings::StrCat("END", s);
}
strings::StrAppend(ml, up_to_colon, ": <<", end, "\n", unescaped, "\n", end);
if (!suffix.empty()) {
strings::StrAppend(ml, suffix);
}
strings::StrAppend(ml, "\n");
return true;
}
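// Rewrites string-valued fields named in `multi_line_fields` from escaped
// pbtxt strings into the multiline `<<END ... END` form; all other lines are
// copied through unchanged.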
string PBTxtToMultiline(StringPiece pbtxt,
const std::vector<string>& multi_line_fields) {
string ml;
ml.reserve(pbtxt.size() * (17. / 16));
StringPiece line;
while (!pbtxt.empty()) {
SplitAt('\n', &pbtxt, &line);
if (!ConvertLine(line, multi_line_fields, &ml)) {
strings::StrAppend(&ml, line, "\n");
}
}
return ml;
}
static bool FindMultiline(StringPiece line, size_t colon, string* end) {
if (colon == StringPiece::npos) return false;
line.remove_prefix(colon + 1);
while (absl::ConsumePrefix(&line, " ")) {
}
if (absl::ConsumePrefix(&line, "<<")) {
*end = string(line);
return true;
}
return false;
}
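// Inverse of PBTxtToMultiline: collapses `<<END ... END` blocks back into
// escaped, quoted pbtxt strings.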
string PBTxtFromMultiline(StringPiece multiline_pbtxt) {
string pbtxt;
pbtxt.reserve(multiline_pbtxt.size() * (33. / 32));
StringPiece line;
while (!multiline_pbtxt.empty()) {
if (!SplitAt('\n', &multiline_pbtxt, &line)) {
strings::StrAppend(&pbtxt, line);
break;
}
string end;
auto colon = line.find(':');
if (!FindMultiline(line, colon, &end)) {
strings::StrAppend(&pbtxt, line, "\n");
continue;
}
strings::StrAppend(&pbtxt, line.substr(0, colon + 1));
string unescaped;
bool first = true;
while (!multiline_pbtxt.empty()) {
SplitAt('\n', &multiline_pbtxt, &line);
if (absl::ConsumePrefix(&line, end)) break;
if (first) {
first = false;
} else {
unescaped.push_back('\n');
}
strings::StrAppend(&unescaped, line);
line = StringPiece();
}
strings::StrAppend(&pbtxt, " \"", absl::CEscape(unescaped), "\"", line,
"\n");
}
return pbtxt;
}
static void StringReplace(const string& from, const string& to, string* s) {
std::vector<string> split;
string::size_type pos = 0;
while (pos < s->size()) {
auto found = s->find(from, pos);
if (found == string::npos) {
split.push_back(s->substr(pos));
break;
} else {
split.push_back(s->substr(pos, found - pos));
pos = found + from.size();
if (pos == s->size()) {
split.push_back("");
}
}
}
*s = absl::StrJoin(split, to);
}
static void RenameInDocs(const string& from, const string& to,
ApiDef* api_def) {
const string from_quoted = strings::StrCat("`", from, "`");
const string to_quoted = strings::StrCat("`", to, "`");
for (int i = 0; i < api_def->in_arg_size(); ++i) {
if (!api_def->in_arg(i).description().empty()) {
StringReplace(from_quoted, to_quoted,
api_def->mutable_in_arg(i)->mutable_description());
}
}
for (int i = 0; i < api_def->out_arg_size(); ++i) {
if (!api_def->out_arg(i).description().empty()) {
StringReplace(from_quoted, to_quoted,
api_def->mutable_out_arg(i)->mutable_description());
}
}
for (int i = 0; i < api_def->attr_size(); ++i) {
if (!api_def->attr(i).description().empty()) {
StringReplace(from_quoted, to_quoted,
api_def->mutable_attr(i)->mutable_description());
}
}
if (!api_def->summary().empty()) {
StringReplace(from_quoted, to_quoted, api_def->mutable_summary());
}
if (!api_def->description().empty()) {
StringReplace(from_quoted, to_quoted, api_def->mutable_description());
}
}
namespace {
void InitApiDefFromOpDef(const OpDef& op_def, ApiDef* api_def) {
api_def->set_graph_op_name(op_def.name());
api_def->set_visibility(ApiDef::VISIBLE);
auto* endpoint = api_def->add_endpoint();
endpoint->set_name(op_def.name());
for (const auto& op_in_arg : op_def.input_arg()) {
auto* api_in_arg = api_def->add_in_arg();
api_in_arg->set_name(op_in_arg.name());
api_in_arg->set_rename_to(op_in_arg.name());
api_in_arg->set_description(op_in_arg.description());
*api_def->add_arg_order() = op_in_arg.name();
}
for (const auto& op_out_arg : op_def.output_arg()) {
auto* api_out_arg = api_def->add_out_arg();
api_out_arg->set_name(op_out_arg.name());
api_out_arg->set_rename_to(op_out_arg.name());
api_out_arg->set_description(op_out_arg.description());
}
for (const auto& op_attr : op_def.attr()) {
auto* api_attr = api_def->add_attr();
api_attr->set_name(op_attr.name());
api_attr->set_rename_to(op_attr.name());
if (op_attr.has_default_value()) {
*api_attr->mutable_default_value() = op_attr.default_value();
}
api_attr->set_description(op_attr.description());
}
api_def->set_summary(op_def.summary());
api_def->set_description(op_def.description());
}
void MergeArg(ApiDef::Arg* base_arg, const ApiDef::Arg& new_arg) {
if (!new_arg.rename_to().empty()) {
base_arg->set_rename_to(new_arg.rename_to());
}
if (!new_arg.description().empty()) {
base_arg->set_description(new_arg.description());
}
}
void MergeAttr(ApiDef::Attr* base_attr, const ApiDef::Attr& new_attr) {
if (!new_attr.rename_to().empty()) {
base_attr->set_rename_to(new_attr.rename_to());
}
if (new_attr.has_default_value()) {
*base_attr->mutable_default_value() = new_attr.default_value();
}
if (!new_attr.description().empty()) {
base_attr->set_description(new_attr.description());
}
}
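// Overlays `new_api_def` onto `base_api_def`: visibility, endpoints, arg and
// attr renames/descriptions, arg_order, summary, and description (with
// optional prefix/suffix) are merged in place. Args or attrs that do not
// exist in the base ApiDef are reported as errors.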
Status MergeApiDefs(ApiDef* base_api_def, const ApiDef& new_api_def) {
if (new_api_def.visibility() != ApiDef::DEFAULT_VISIBILITY) {
base_api_def->set_visibility(new_api_def.visibility());
}
if (new_api_def.endpoint_size() > 0) {
base_api_def->clear_endpoint();
std::copy(
new_api_def.endpoint().begin(), new_api_def.endpoint().end(),
protobuf::RepeatedFieldBackInserter(base_api_def->mutable_endpoint()));
}
for (const auto& new_arg : new_api_def.in_arg()) {
bool found_base_arg = false;
for (int i = 0; i < base_api_def->in_arg_size(); ++i) {
auto* base_arg = base_api_def->mutable_in_arg(i);
if (base_arg->name() == new_arg.name()) {
MergeArg(base_arg, new_arg);
found_base_arg = true;
break;
}
}
if (!found_base_arg) {
return errors::FailedPrecondition("Argument ", new_arg.name(),
" not defined in base api for ",
base_api_def->graph_op_name());
}
}
for (const auto& new_arg : new_api_def.out_arg()) {
bool found_base_arg = false;
for (int i = 0; i < base_api_def->out_arg_size(); ++i) {
auto* base_arg = base_api_def->mutable_out_arg(i);
if (base_arg->name() == new_arg.name()) {
MergeArg(base_arg, new_arg);
found_base_arg = true;
break;
}
}
if (!found_base_arg) {
return errors::FailedPrecondition("Argument ", new_arg.name(),
" not defined in base api for ",
base_api_def->graph_op_name());
}
}
if (new_api_def.arg_order_size() > 0) {
if (new_api_def.arg_order_size() != base_api_def->arg_order_size()) {
return errors::FailedPrecondition(
"Invalid number of arguments ", new_api_def.arg_order_size(), " for ",
base_api_def->graph_op_name(),
". Expected: ", base_api_def->arg_order_size());
}
if (!std::is_permutation(new_api_def.arg_order().begin(),
new_api_def.arg_order().end(),
base_api_def->arg_order().begin())) {
return errors::FailedPrecondition(
"Invalid arg_order: ", absl::StrJoin(new_api_def.arg_order(), ", "),
" for ", base_api_def->graph_op_name(),
". All elements in arg_order override must match base arg_order: ",
absl::StrJoin(base_api_def->arg_order(), ", "));
}
base_api_def->clear_arg_order();
std::copy(
new_api_def.arg_order().begin(), new_api_def.arg_order().end(),
protobuf::RepeatedFieldBackInserter(base_api_def->mutable_arg_order()));
}
for (const auto& new_attr : new_api_def.attr()) {
bool found_base_attr = false;
for (int i = 0; i < base_api_def->attr_size(); ++i) {
auto* base_attr = base_api_def->mutable_attr(i);
if (base_attr->name() == new_attr.name()) {
MergeAttr(base_attr, new_attr);
found_base_attr = true;
break;
}
}
if (!found_base_attr) {
return errors::FailedPrecondition("Attribute ", new_attr.name(),
" not defined in base api for ",
base_api_def->graph_op_name());
}
}
if (!new_api_def.summary().empty()) {
base_api_def->set_summary(new_api_def.summary());
}
auto description = new_api_def.description().empty()
? base_api_def->description()
: new_api_def.description();
if (!new_api_def.description_prefix().empty()) {
description =
strings::StrCat(new_api_def.description_prefix(), "\n", description);
}
if (!new_api_def.description_suffix().empty()) {
description =
strings::StrCat(description, "\n", new_api_def.description_suffix());
}
base_api_def->set_description(description);
return absl::OkStatus();
}
}
ApiDefMap::ApiDefMap(const OpList& op_list) {
for (const auto& op : op_list.op()) {
ApiDef api_def;
InitApiDefFromOpDef(op, &api_def);
map_[op.name()] = api_def;
}
}
ApiDefMap::~ApiDefMap() {}
Status ApiDefMap::LoadFileList(Env* env, const std::vector<string>& filenames) {
for (const auto& filename : filenames) {
TF_RETURN_IF_ERROR(LoadFile(env, filename));
}
return absl::OkStatus();
}
Status ApiDefMap::LoadFile(Env* env, const string& filename) {
if (filename.empty()) return absl::OkStatus();
string contents;
TF_RETURN_IF_ERROR(ReadFileToString(env, filename, &contents));
Status status = LoadApiDef(contents);
if (!status.ok()) {
return errors::CreateWithUpdatedMessage(
status, strings::StrCat("Error parsing ApiDef file ", filename, ": ",
status.message()));
}
return absl::OkStatus();
}
Status ApiDefMap::LoadApiDef(const string& api_def_file_contents) {
const string contents = PBTxtFromMultiline(api_def_file_contents);
ApiDefs api_defs;
TF_RETURN_IF_ERROR(
proto_utils::ParseTextFormatFromString(contents, &api_defs));
for (const auto& api_def : api_defs.op()) {
if (map_.find(api_def.graph_op_name()) != map_.end()) {
TF_RETURN_IF_ERROR(MergeApiDefs(&map_[api_def.graph_op_name()], api_def));
}
}
return absl::OkStatus();
}
void ApiDefMap::UpdateDocs() {
for (auto& name_and_api_def : map_) {
auto& api_def = name_and_api_def.second;
CHECK_GT(api_def.endpoint_size(), 0);
const string canonical_name = api_def.endpoint(0).name();
if (api_def.graph_op_name() != canonical_name) {
RenameInDocs(api_def.graph_op_name(), canonical_name, &api_def);
}
for (const auto& in_arg : api_def.in_arg()) {
if (in_arg.name() != in_arg.rename_to()) {
RenameInDocs(in_arg.name(), in_arg.rename_to(), &api_def);
}
}
for (const auto& out_arg : api_def.out_arg()) {
if (out_arg.name() != out_arg.rename_to()) {
RenameInDocs(out_arg.name(), out_arg.rename_to(), &api_def);
}
}
for (const auto& attr : api_def.attr()) {
if (attr.name() != attr.rename_to()) {
RenameInDocs(attr.name(), attr.rename_to(), &api_def);
}
}
}
}
const tensorflow::ApiDef* ApiDefMap::GetApiDef(const string& name) const {
return gtl::FindOrNull(map_, name);
}
} | #include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace {
constexpr char kTestOpList[] = R"(op {
name: "testop"
input_arg {
name: "arg_a"
}
input_arg {
name: "arg_b"
}
output_arg {
name: "arg_c"
}
attr {
name: "attr_a"
}
deprecation {
version: 123
explanation: "foo"
}
})";
constexpr char kTestApiDef[] = R"(op {
graph_op_name: "testop"
visibility: VISIBLE
endpoint {
name: "testop1"
}
in_arg {
name: "arg_a"
}
in_arg {
name: "arg_b"
}
out_arg {
name: "arg_c"
}
attr {
name: "attr_a"
}
summary: "Mock op for testing."
description: <<END
Description for the
testop.
END
arg_order: "arg_a"
arg_order: "arg_b"
}
)";
TEST(OpGenLibTest, MultilinePBTxt) {
const string pbtxt = R"(foo: "abc"
foo: ""
foo: "\n\n"
foo: "abc\nEND"
foo: "ghi\njkl\n"
bar: "quotes:\""
)";
const string ml_foo = R"(foo: <<END
abc
END
foo: <<END
END
foo: <<END
END
foo: <<END0
abc
END
END0
foo: <<END
ghi
jkl
END
bar: "quotes:\""
)";
const string ml_foo_bar = R"(foo: <<END
abc
END
foo: <<END
END
foo: <<END
END
foo: <<END0
abc
END
END0
foo: <<END
ghi
jkl
END
bar: <<END
quotes:"
END
)";
EXPECT_EQ(ml_foo, PBTxtToMultiline(pbtxt, {"foo"}));
EXPECT_EQ(pbtxt, PBTxtToMultiline(pbtxt, {"baz"}));
EXPECT_EQ(ml_foo_bar, PBTxtToMultiline(pbtxt, {"foo", "bar"}));
EXPECT_EQ(pbtxt, PBTxtFromMultiline(pbtxt));
EXPECT_EQ(pbtxt, PBTxtFromMultiline(ml_foo));
EXPECT_EQ(pbtxt, PBTxtFromMultiline(ml_foo_bar));
}
TEST(OpGenLibTest, PBTxtToMultilineErrorCases) {
EXPECT_EQ("f: <<END\n7\nEND\n", PBTxtToMultiline("f: \"7\"\n", {"f"}));
EXPECT_EQ("f \"7\"\n", PBTxtToMultiline("f \"7\"\n", {"f"}));
EXPECT_EQ("f: 7\n", PBTxtToMultiline("f: 7\n", {"f"}));
EXPECT_EQ("f: 7\"\n", PBTxtToMultiline("f: 7\"\n", {"f"}));
EXPECT_EQ("f: \"7\n", PBTxtToMultiline("f: \"7\n", {"f"}));
EXPECT_EQ("f: \"7\\\"\n", PBTxtToMultiline("f: \"7\\\"\n", {"f"}));
}
TEST(OpGenLibTest, PBTxtToMultilineComments) {
const string pbtxt = R"(f: "bar" # Comment 1
f: "\n" # Comment 2
)";
const string ml = R"(f: <<END
bar
END # Comment 1
f: <<END
END # Comment 2
)";
EXPECT_EQ(ml, PBTxtToMultiline(pbtxt, {"f"}));
EXPECT_EQ(pbtxt, PBTxtFromMultiline(ml));
}
TEST(OpGenLibTest, ApiDefAccessInvalidName) {
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
ASSERT_EQ(nullptr, api_map.GetApiDef("testop5"));
}
TEST(OpGenLibTest, ApiDefInitializedFromOpDef) {
tensorflow::ApiDef expected_api_def;
protobuf::TextFormat::ParseFromString(
R"(graph_op_name: "testop"
visibility: VISIBLE
endpoint {
name: "testop"
}
in_arg {
name: "arg_a"
rename_to: "arg_a"
}
in_arg {
name: "arg_b"
rename_to: "arg_b"
}
out_arg {
name: "arg_c"
rename_to: "arg_c"
}
attr {
name: "attr_a"
rename_to: "attr_a"
}
arg_order: "arg_a"
arg_order: "arg_b"
)",
&expected_api_def);
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
const auto* api_def = api_map.GetApiDef("testop");
ASSERT_EQ(api_def->DebugString(), expected_api_def.DebugString());
}
TEST(OpGenLibTest, ApiDefLoadSingleApiDef) {
tensorflow::ApiDefs expected_api_defs;
protobuf::TextFormat::ParseFromString(R"(op {
graph_op_name: "testop"
visibility: VISIBLE
endpoint {
name: "testop1"
}
in_arg {
name: "arg_a"
rename_to: "arg_a"
}
in_arg {
name: "arg_b"
rename_to: "arg_b"
}
out_arg {
name: "arg_c"
rename_to: "arg_c"
}
attr {
name: "attr_a"
rename_to: "attr_a"
}
summary: "Mock op for testing."
description: "Description for the\ntestop."
arg_order: "arg_a"
arg_order: "arg_b"
}
)",
&expected_api_defs);
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
const auto* api_def = api_map.GetApiDef("testop");
EXPECT_EQ(1, api_def->endpoint_size());
EXPECT_EQ("testop1", api_def->endpoint(0).name());
ApiDefs api_defs;
*api_defs.add_op() = *api_def;
EXPECT_EQ(api_defs.DebugString(), expected_api_defs.DebugString());
}
TEST(OpGenLibTest, ApiDefOverrideVisibility) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
endpoint {
name: "testop2"
}
}
)";
const string api_def2 = R"(
op {
graph_op_name: "testop"
visibility: HIDDEN
endpoint {
name: "testop2"
}
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
auto* api_def = api_map.GetApiDef("testop");
EXPECT_EQ(ApiDef::VISIBLE, api_def->visibility());
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
EXPECT_EQ(ApiDef::VISIBLE, api_def->visibility());
TF_CHECK_OK(api_map.LoadApiDef(api_def2));
EXPECT_EQ(ApiDef::HIDDEN, api_def->visibility());
}
TEST(OpGenLibTest, ApiDefOverrideEndpoints) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
endpoint {
name: "testop2"
}
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
auto* api_def = api_map.GetApiDef("testop");
ASSERT_EQ(1, api_def->endpoint_size());
EXPECT_EQ("testop1", api_def->endpoint(0).name());
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
ASSERT_EQ(1, api_def->endpoint_size());
EXPECT_EQ("testop2", api_def->endpoint(0).name());
}
TEST(OpGenLibTest, ApiDefOverrideArgs) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
in_arg {
name: "arg_a"
rename_to: "arg_aa"
}
out_arg {
name: "arg_c"
rename_to: "arg_cc"
}
arg_order: "arg_b"
arg_order: "arg_a"
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
const auto* api_def = api_map.GetApiDef("testop");
ASSERT_EQ(2, api_def->in_arg_size());
EXPECT_EQ("arg_aa", api_def->in_arg(0).rename_to());
EXPECT_EQ("arg_b", api_def->in_arg(1).rename_to());
ASSERT_EQ(1, api_def->out_arg_size());
EXPECT_EQ("arg_cc", api_def->out_arg(0).rename_to());
ASSERT_EQ(2, api_def->arg_order_size());
EXPECT_EQ("arg_b", api_def->arg_order(0));
EXPECT_EQ("arg_a", api_def->arg_order(1));
}
TEST(OpGenLibTest, ApiDefOverrideDescriptions) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
summary: "New summary"
description: <<END
New description
END
description_prefix: "A"
description_suffix: "Z"
}
)";
const string api_def2 = R"(
op {
graph_op_name: "testop"
description_prefix: "B"
description_suffix: "Y"
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
const auto* api_def = api_map.GetApiDef("testop");
EXPECT_EQ("New summary", api_def->summary());
EXPECT_EQ("A\nNew description\nZ", api_def->description());
EXPECT_EQ("", api_def->description_prefix());
EXPECT_EQ("", api_def->description_suffix());
TF_CHECK_OK(api_map.LoadApiDef(api_def2));
EXPECT_EQ("B\nA\nNew description\nZ\nY", api_def->description());
EXPECT_EQ("", api_def->description_prefix());
EXPECT_EQ("", api_def->description_suffix());
}
TEST(OpGenLibTest, ApiDefInvalidOpInOverride) {
const string api_def1 = R"(
op {
graph_op_name: "different_testop"
endpoint {
name: "testop2"
}
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
ASSERT_EQ(nullptr, api_map.GetApiDef("different_testop"));
}
TEST(OpGenLibTest, ApiDefInvalidArgOrder) {
const string api_def1 = R"(
op {
graph_op_name: "testop"
arg_order: "arg_a"
arg_order: "unexpected_arg"
}
)";
const string api_def2 = R"(
op {
graph_op_name: "testop"
arg_order: "arg_a"
}
)";
const string api_def3 = R"(
op {
graph_op_name: "testop"
arg_order: "arg_a"
arg_order: "arg_a"
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(kTestOpList, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(kTestApiDef));
auto status = api_map.LoadApiDef(api_def1);
ASSERT_EQ(tensorflow::error::FAILED_PRECONDITION, status.code());
status = api_map.LoadApiDef(api_def2);
ASSERT_EQ(tensorflow::error::FAILED_PRECONDITION, status.code());
status = api_map.LoadApiDef(api_def3);
ASSERT_EQ(tensorflow::error::FAILED_PRECONDITION, status.code());
}
TEST(OpGenLibTest, ApiDefInvalidSyntax) {
const string api_def = R"pb(
op { bad_op_name: "testop" }
)pb";
OpList op_list;
ApiDefMap api_map(op_list);
auto status = api_map.LoadApiDef(api_def);
ASSERT_EQ(absl::StatusCode::kInvalidArgument, status.code());
}
TEST(OpGenLibTest, ApiDefUpdateDocs) {
const string op_list1 = R"(op {
name: "testop"
input_arg {
name: "arg_a"
description: "`arg_a`, `arg_c`, `attr_a`, `testop`"
}
output_arg {
name: "arg_c"
description: "`arg_a`, `arg_c`, `attr_a`, `testop`"
}
attr {
name: "attr_a"
description: "`arg_a`, `arg_c`, `attr_a`, `testop`"
}
description: "`arg_a`, `arg_c`, `attr_a`, `testop`"
}
)";
const string api_def1 = R"(
op {
graph_op_name: "testop"
endpoint {
name: "testop2"
}
in_arg {
name: "arg_a"
rename_to: "arg_aa"
}
out_arg {
name: "arg_c"
rename_to: "arg_cc"
description: "New description: `arg_a`, `arg_c`, `attr_a`, `testop`"
}
attr {
name: "attr_a"
rename_to: "attr_aa"
}
}
)";
OpList op_list;
protobuf::TextFormat::ParseFromString(op_list1, &op_list);
ApiDefMap api_map(op_list);
TF_CHECK_OK(api_map.LoadApiDef(api_def1));
api_map.UpdateDocs();
const string expected_description =
"`arg_aa`, `arg_cc`, `attr_aa`, `testop2`";
EXPECT_EQ(expected_description, api_map.GetApiDef("testop")->description());
EXPECT_EQ(expected_description,
api_map.GetApiDef("testop")->in_arg(0).description());
EXPECT_EQ("New description: " + expected_description,
api_map.GetApiDef("testop")->out_arg(0).description());
EXPECT_EQ(expected_description,
api_map.GetApiDef("testop")->attr(0).description());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_gen_lib.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/op_gen_lib_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
901d41c8-873e-4d38-b328-5d78ce6e163b | cpp | tensorflow/tensorflow | dtype | third_party/xla/xla/python/ifrt/dtype.cc | third_party/xla/xla/python/ifrt/dtype_test.cc | #include "xla/python/ifrt/dtype.h"
#include <optional>
#include <ostream>
#include <string>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/python/ifrt/dtype.pb.h"
namespace xla {
namespace ifrt {
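// Returns the storage size in whole bytes, or std::nullopt for sub-byte kinds
// (e.g. kS4/kU4), variable-length kinds (kString), and non-data kinds.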
std::optional<int> DType::byte_size() const {
switch (kind_) {
case kS2:
case kU2:
case kS4:
case kU4:
return std::nullopt;
case kPred:
case kS8:
case kU8:
case kF8E3M4:
case kF8E4M3:
case kF8E4M3FN:
case kF8E4M3B11FNUZ:
case kF8E4M3FNUZ:
case kF8E5M2:
case kF8E5M2FNUZ:
return 1;
case kS16:
case kU16:
case kF16:
case kBF16:
return 2;
case kS32:
case kU32:
case kF32:
return 4;
case kS64:
case kU64:
case kF64:
case kC64:
return 8;
case kC128:
return 16;
case kToken:
case kInvalid:
case kString:
return std::nullopt;
}
}
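// Returns the size in bits, or std::nullopt for variable-length and non-data
// kinds.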
std::optional<int> DType::bit_size() const {
switch (kind_) {
case kS2:
case kU2:
return 2;
case kS4:
case kU4:
return 4;
case kPred:
case kS8:
case kU8:
case kF8E3M4:
case kF8E4M3:
case kF8E4M3FN:
case kF8E4M3B11FNUZ:
case kF8E4M3FNUZ:
case kF8E5M2:
case kF8E5M2FNUZ:
return 8;
case kS16:
case kU16:
case kF16:
case kBF16:
return 16;
case kS32:
case kU32:
case kF32:
return 32;
case kS64:
case kU64:
case kF64:
case kC64:
return 64;
case kC128:
return 128;
case kToken:
case kInvalid:
case kString:
return std::nullopt;
}
}
absl::StatusOr<DType> DType::FromProto(const DTypeProto& dtype_proto) {
switch (dtype_proto.kind()) {
case DTypeProto::KIND_PRED:
return DType(DType::Kind::kPred);
case DTypeProto::KIND_TOKEN:
return DType(DType::Kind::kToken);
#define CASE(X) \
case DTypeProto::KIND_##X: \
return DType(DType::Kind::k##X);
CASE(S4);
CASE(S8);
CASE(S16);
CASE(S32);
CASE(S64);
CASE(U4);
CASE(U8);
CASE(U16);
CASE(U32);
CASE(U64);
CASE(F16);
CASE(F32);
CASE(F64);
CASE(BF16);
CASE(C64);
CASE(C128);
CASE(F8E4M3FN);
CASE(F8E4M3B11FNUZ);
CASE(F8E4M3FNUZ);
CASE(F8E5M2);
CASE(F8E5M2FNUZ);
#undef CASE
case DTypeProto::KIND_STRING:
return DType(DType::Kind::kString);
default:
return DType(DType::Kind::kInvalid);
}
}
DTypeProto DType::ToProto() const {
DTypeProto dtype_proto;
switch (kind()) {
case DType::Kind::kPred:
dtype_proto.set_kind(DTypeProto::KIND_PRED);
break;
case DType::Kind::kToken:
dtype_proto.set_kind(DTypeProto::KIND_TOKEN);
break;
#define CASE(X) \
case DType::Kind::k##X: \
dtype_proto.set_kind(DTypeProto::KIND_##X); \
break;
CASE(S4);
CASE(S8);
CASE(S16);
CASE(S32);
CASE(S64);
CASE(U4);
CASE(U8);
CASE(U16);
CASE(U32);
CASE(U64);
CASE(F16);
CASE(F32);
CASE(F64);
CASE(BF16);
CASE(C64);
CASE(C128);
CASE(F8E4M3FN);
CASE(F8E4M3B11FNUZ);
CASE(F8E4M3FNUZ);
CASE(F8E5M2);
CASE(F8E5M2FNUZ);
#undef CASE
case DType::Kind::kString:
dtype_proto.set_kind(DTypeProto::KIND_STRING);
break;
default:
dtype_proto.set_kind(DTypeProto::KIND_UNSPECIFIED);
break;
}
return dtype_proto;
}
std::string DType::DebugString() const {
switch (kind_) {
case kInvalid:
return "INVALID";
case kPred:
return "PRED";
case kS8:
return "S8";
case kS16:
return "S16";
case kS32:
return "S32";
case kS64:
return "S64";
case kU8:
return "U8";
case kU16:
return "U16";
case kU32:
return "U32";
case kU64:
return "U64";
case kF16:
return "F16";
case kF32:
return "F32";
case kF64:
return "F64";
case kBF16:
return "BF16";
case kC64:
return "C64";
case kC128:
return "C128";
case kToken:
return "TOKEN";
case kString:
return "STRING";
default:
return absl::StrCat("UNKNOWN(", static_cast<int>(kind_), ")");
}
}
std::ostream& operator<<(std::ostream& os, const DType& dtype) {
return os << dtype.DebugString();
}
}
} | #include "xla/python/ifrt/dtype.h"
#include <optional>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "xla/python/ifrt/dtype.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace ifrt {
namespace {
TEST(DTypeTest, FromToFromProto) {
for (int i = 0; i < DTypeProto::Kind_descriptor()->value_count(); ++i) {
DTypeProto proto;
proto.set_kind(static_cast<DTypeProto::Kind>(
DTypeProto::Kind_descriptor()->value(i)->number()));
TF_ASSERT_OK_AND_ASSIGN(DType dtype, DType::FromProto(proto));
TF_ASSERT_OK_AND_ASSIGN(DType dtype_copy,
DType::FromProto(dtype.ToProto()));
EXPECT_EQ(dtype_copy, dtype);
}
}
TEST(DTypeTest, ByteSize) {
for (const auto& [kind, byte_size] :
std::vector<std::tuple<DType::Kind, int>>({
{DType::kS2, -1},
{DType::kU2, -1},
{DType::kS4, -1},
{DType::kU4, -1},
{DType::kPred, 1},
{DType::kS8, 1},
{DType::kU8, 1},
{DType::kF8E3M4, 1},
{DType::kF8E4M3, 1},
{DType::kF8E4M3FN, 1},
{DType::kF8E4M3B11FNUZ, 1},
{DType::kF8E4M3FNUZ, 1},
{DType::kF8E5M2, 1},
{DType::kF8E5M2FNUZ, 1},
{DType::kS16, 2},
{DType::kU16, 2},
{DType::kF16, 2},
{DType::kBF16, 2},
{DType::kS32, 4},
{DType::kU32, 4},
{DType::kF32, 4},
{DType::kS64, 8},
{DType::kU64, 8},
{DType::kF64, 8},
{DType::kC64, 8},
{DType::kC128, 16},
{DType::kToken, -1},
{DType::kInvalid, -1},
{DType::kString, -1},
})) {
EXPECT_EQ(DType(kind).byte_size(),
byte_size == -1 ? std::nullopt : std::make_optional(byte_size));
}
}
TEST(DTypeTest, BitSize) {
for (const auto& [kind, bit_size] :
std::vector<std::tuple<DType::Kind, int>>({
{DType::kS2, 2},
{DType::kU2, 2},
{DType::kS4, 4},
{DType::kU4, 4},
{DType::kPred, 8},
{DType::kS8, 8},
{DType::kU8, 8},
{DType::kF8E3M4, 8},
{DType::kF8E4M3, 8},
{DType::kF8E4M3FN, 8},
{DType::kF8E4M3B11FNUZ, 8},
{DType::kF8E4M3FNUZ, 8},
{DType::kF8E5M2, 8},
{DType::kF8E5M2FNUZ, 8},
{DType::kS16, 16},
{DType::kU16, 16},
{DType::kF16, 16},
{DType::kBF16, 16},
{DType::kS32, 32},
{DType::kU32, 32},
{DType::kF32, 32},
{DType::kS64, 64},
{DType::kU64, 64},
{DType::kF64, 64},
{DType::kC64, 64},
{DType::kC128, 128},
{DType::kToken, -1},
{DType::kInvalid, -1},
{DType::kString, -1},
})) {
EXPECT_EQ(DType(kind).bit_size(),
bit_size == -1 ? std::nullopt : std::make_optional(bit_size));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/dtype.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/ifrt/dtype_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b06baea6-f014-46e5-b82a-6472409090f8 | cpp | tensorflow/tensorflow | cross_trainer_cache | tensorflow/core/data/service/cross_trainer_cache.h | tensorflow/core/data/service/cross_trainer_cache_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_CROSS_TRAINER_CACHE_H_
#define TENSORFLOW_CORE_DATA_SERVICE_CROSS_TRAINER_CACHE_H_
#include <cstddef>
#include <deque>
#include <functional>
#include <limits>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "tensorflow/core/data/service/byte_size.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
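// An ordered sequence whose elements can be produced on demand and whose
// per-element size in bytes can be measured, so the cache can account for its
// memory footprint.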
template <class ElementType>
class CachableSequence {
public:
virtual ~CachableSequence() = default;
virtual StatusOr<ElementType> GetNext() = 0;
virtual size_t GetElementSizeBytes(const ElementType&) const = 0;
};
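// A bounded, sliding-window cache shared by multiple trainers. Each trainer
// reads elements in order; older elements are evicted to stay within
// `max_cache_size_bytes`, so slow trainers may skip data rather than block
// fast ones. All public methods are thread-safe.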
template <class ElementType>
class CrossTrainerCache {
public:
explicit CrossTrainerCache(
size_t max_cache_size_bytes,
std::unique_ptr<CachableSequence<ElementType>> cachable_sequence);
virtual ~CrossTrainerCache() = default;
CrossTrainerCache(const CrossTrainerCache&) = delete;
CrossTrainerCache& operator=(const CrossTrainerCache&) = delete;
StatusOr<std::shared_ptr<const ElementType>> Get(
const std::string& trainer_id);
void Cancel(Status status);
bool IsCancelled() const;
private:
struct CacheQueryResult {
std::shared_ptr<const ElementType> element;
bool cache_hit;
};
StatusOr<CacheQueryResult> GetCacheQueryResult(const std::string& trainer_id);
bool IsElementReady(const std::string& trainer_id);
size_t GetElementIndex(const std::string& trainer_id);
StatusOr<std::shared_ptr<const ElementType>> GetElement(
const std::string& trainer_id);
Status ExtendCache();
void FreeSpace(size_t new_element_size_bytes);
void RecordMetrics(const CacheQueryResult& result);
const size_t max_cache_size_bytes_;
std::unique_ptr<CachableSequence<ElementType>> cachable_sequence_;
mutable mutex mu_;
mutable condition_variable cv_;
Status status_ TF_GUARDED_BY(mu_) = absl::OkStatus();
std::deque<std::shared_ptr<const ElementType>> cache_ TF_GUARDED_BY(mu_);
size_t cache_size_bytes_ TF_GUARDED_BY(mu_) = 0;
size_t cache_start_index_ TF_GUARDED_BY(mu_) = 0;
bool extending_cache_ TF_GUARDED_BY(mu_) = false;
absl::flat_hash_map<std::string, size_t> trainer_to_element_index_map_
TF_GUARDED_BY(mu_);
};
template <class ElementType>
CrossTrainerCache<ElementType>::CrossTrainerCache(
size_t max_cache_size_bytes,
std::unique_ptr<CachableSequence<ElementType>> cachable_sequence)
: max_cache_size_bytes_(max_cache_size_bytes),
cachable_sequence_(std::move(cachable_sequence)) {
DCHECK_GT(max_cache_size_bytes, 0)
<< "CrossTrainerCache size must be greater than 0.";
VLOG(2) << "Initialized tf.data service cross-trainer cache with "
<< ByteSize::Bytes(max_cache_size_bytes) << " of memory.";
}
template <class ElementType>
StatusOr<std::shared_ptr<const ElementType>>
CrossTrainerCache<ElementType>::Get(const std::string& trainer_id)
TF_LOCKS_EXCLUDED(mu_) {
if (trainer_id.empty()) {
return errors::InvalidArgument(
"tf.data service cross-trainer cache requires a non-empty trainer ID.");
}
TF_ASSIGN_OR_RETURN(CacheQueryResult result, GetCacheQueryResult(trainer_id));
RecordMetrics(result);
return result.element;
}
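// Returns the next element for `trainer_id` along with whether it was a cache
// hit. If the element is not yet cached, one caller extends the cache while
// the others wait on `cv_`.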
template <class ElementType>
StatusOr<typename CrossTrainerCache<ElementType>::CacheQueryResult>
CrossTrainerCache<ElementType>::GetCacheQueryResult(
const std::string& trainer_id) {
bool should_extend_cache = false;
while (true) {
{
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(status_);
if (IsElementReady(trainer_id)) {
TF_ASSIGN_OR_RETURN(std::shared_ptr<const ElementType> element,
GetElement(trainer_id));
return CacheQueryResult{element,
!should_extend_cache};
}
if (extending_cache_) {
should_extend_cache = false;
cv_.wait(l);
} else {
should_extend_cache = true;
extending_cache_ = true;
}
}
if (should_extend_cache) {
Status s = ExtendCache();
mutex_lock l(mu_);
extending_cache_ = false;
cv_.notify_all();
TF_RETURN_IF_ERROR(s);
}
}
}
template <class ElementType>
bool CrossTrainerCache<ElementType>::IsElementReady(
const std::string& trainer_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return GetElementIndex(trainer_id) < cache_start_index_ + cache_.size();
}
template <class ElementType>
StatusOr<std::shared_ptr<const ElementType>>
CrossTrainerCache<ElementType>::GetElement(const std::string& trainer_id)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
size_t element_index = GetElementIndex(trainer_id);
if (element_index >= std::numeric_limits<size_t>::max()) {
return errors::Internal(
"tf.data service caching element index exceeds integer limit. Got ",
element_index);
}
std::shared_ptr<const ElementType> result =
cache_[element_index - cache_start_index_];
trainer_to_element_index_map_[trainer_id] = element_index + 1;
return result;
}
template <class ElementType>
size_t CrossTrainerCache<ElementType>::GetElementIndex(
const std::string& trainer_id) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
size_t element_index = trainer_to_element_index_map_[trainer_id];
if (element_index < cache_start_index_) {
element_index = cache_start_index_;
}
return element_index;
}
template <class ElementType>
Status CrossTrainerCache<ElementType>::ExtendCache() TF_LOCKS_EXCLUDED(mu_) {
TF_ASSIGN_OR_RETURN(ElementType element, cachable_sequence_->GetNext());
size_t new_element_size_bytes =
cachable_sequence_->GetElementSizeBytes(element);
if (new_element_size_bytes > max_cache_size_bytes_) {
return errors::InvalidArgument(
"tf.data service element size is larger than cache size in bytes. Got ",
"element size: ", new_element_size_bytes,
" and cache size: ", max_cache_size_bytes_);
}
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(status_);
FreeSpace(new_element_size_bytes);
cache_.push_back(std::make_shared<ElementType>(std::move(element)));
cache_size_bytes_ += new_element_size_bytes;
return absl::OkStatus();
}
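// Evicts elements from the front of the cache until a new element of
// `new_element_size_bytes` fits within `max_cache_size_bytes_`.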
template <class ElementType>
void CrossTrainerCache<ElementType>::FreeSpace(size_t new_element_size_bytes)
TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
size_t num_elements_discarded = 0;
while (!cache_.empty() &&
cache_size_bytes_ + new_element_size_bytes > max_cache_size_bytes_) {
size_t free_bytes =
cachable_sequence_->GetElementSizeBytes(*cache_.front());
cache_.pop_front();
cache_size_bytes_ -= free_bytes;
++cache_start_index_;
++num_elements_discarded;
}
VLOG(3) << "Freed " << num_elements_discarded << " element(s) from "
<< "tf.data service cross-trainer cache. Memory usage: "
<< ByteSize::Bytes(cache_size_bytes_) << ".";
}
template <class ElementType>
void CrossTrainerCache<ElementType>::Cancel(Status status)
TF_LOCKS_EXCLUDED(mu_) {
DCHECK(!status.ok())
<< "Cancelling CrossTrainerCache requires a non-OK status. Got "
<< status;
VLOG(2) << "Cancel tf.data service cross-trainer cache with status "
<< status;
mutex_lock l(mu_);
status_ = std::move(status);
cv_.notify_all();
}
template <class ElementType>
bool CrossTrainerCache<ElementType>::IsCancelled() const
TF_LOCKS_EXCLUDED(mu_) {
mutex_lock l(mu_);
return !status_.ok();
}
template <class ElementType>
void CrossTrainerCache<ElementType>::RecordMetrics(
const CacheQueryResult& result) {
metrics::RecordTFDataServiceCrossTrainerCacheQuery(result.cache_hit);
size_t cache_size_bytes = 0;
{
mutex_lock l(mu_);
cache_size_bytes = cache_size_bytes_;
}
metrics::RecordTFDataServiceCrossTrainerCacheSizeBytes(cache_size_bytes);
}
}
}
#endif | #include "tensorflow/core/data/service/cross_trainer_cache.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/monitoring/cell_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/random.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::monitoring::testing::CellReader;
using ::tensorflow::testing::IsOkAndHolds;
using ::tensorflow::testing::StatusIs;
using ::testing::Gt;
using ::testing::HasSubstr;
using ::testing::Pointee;
using ::testing::UnorderedElementsAreArray;
class InfiniteRange : public CachableSequence<int64_t> {
public:
absl::StatusOr<int64_t> GetNext() override { return next_++; }
size_t GetElementSizeBytes(const int64_t& element) const override {
return sizeof(element);
}
private:
int64_t next_ = 0;
};
class TensorDataset : public CachableSequence<Tensor> {
public:
absl::StatusOr<Tensor> GetNext() override { return Tensor("Test Tensor"); }
size_t GetElementSizeBytes(const Tensor& element) const override {
return element.TotalBytes();
}
};
class SlowDataset : public CachableSequence<Tensor> {
public:
explicit SlowDataset(absl::Duration delay) : delay_(delay) {}
absl::StatusOr<Tensor> GetNext() override {
Env::Default()->SleepForMicroseconds(absl::ToInt64Microseconds(delay_));
return Tensor("Test Tensor");
}
size_t GetElementSizeBytes(const Tensor& element) const override {
return element.TotalBytes();
}
private:
absl::Duration delay_;
};
template <class T>
class ElementOrErrorDataset : public CachableSequence<T> {
public:
explicit ElementOrErrorDataset(const std::vector<StatusOr<T>>& elements)
: elements_(elements) {}
StatusOr<T> GetNext() override {
if (next_ >= elements_.size()) {
return errors::OutOfRange("Out of range.");
}
return elements_[next_++];
}
size_t GetElementSizeBytes(const T& element) const override {
return sizeof(element);
}
private:
const std::vector<StatusOr<T>> elements_;
int64_t next_ = 0;
};
template <>
size_t ElementOrErrorDataset<std::string>::GetElementSizeBytes(
const std::string& element) const {
return element.size();
}
template <>
size_t ElementOrErrorDataset<Tensor>::GetElementSizeBytes(
const Tensor& element) const {
return element.TotalBytes();
}
std::vector<int64_t> GetRange(const size_t range) {
std::vector<int64_t> result;
for (int64_t i = 0; i < range; ++i) {
result.push_back(i);
}
return result;
}
bool SequenceIsIncreasing(const std::vector<int64_t> sequence) {
for (int i = 1; i < sequence.size(); ++i) {
    if (sequence[i - 1] > sequence[i]) {
return false;
}
}
return true;
}
TEST(CrossTrainerCacheTest, GetFromOneTrainer) {
const size_t num_elements = 10;
CrossTrainerCache<int64_t> cache(
1024, std::make_unique<InfiniteRange>());
for (size_t i = 0; i < num_elements; ++i) {
EXPECT_THAT(cache.Get("Trainer ID"), IsOkAndHolds(Pointee(i)));
}
}
TEST(CrossTrainerCacheTest, GetFromMultipleTrainers) {
const size_t num_elements = 10;
const size_t num_trainers = 10;
CrossTrainerCache<int64_t> cache(
1024, std::make_unique<InfiniteRange>());
for (size_t i = 0; i < num_elements; ++i) {
for (size_t j = 0; j < num_trainers; ++j) {
const std::string trainer_id = absl::StrCat("Trainer ", j);
EXPECT_THAT(cache.Get(trainer_id), IsOkAndHolds(Pointee(i)));
}
}
}
TEST(CrossTrainerCacheTest, SlowTrainersSkipData) {
CrossTrainerCache<int64_t> cache(
5 * sizeof(int64_t),
std::make_unique<InfiniteRange>());
EXPECT_THAT(cache.Get("Fast trainer 1"), IsOkAndHolds(Pointee(0)));
EXPECT_THAT(cache.Get("Fast trainer 2"), IsOkAndHolds(Pointee(0)));
EXPECT_THAT(cache.Get("Slow trainer 1"), IsOkAndHolds(Pointee(0)));
EXPECT_THAT(cache.Get("Slow trainer 2"), IsOkAndHolds(Pointee(0)));
for (int i = 1; i < 20; ++i) {
EXPECT_THAT(cache.Get("Fast trainer 1"), IsOkAndHolds(Pointee(i)));
EXPECT_THAT(cache.Get("Fast trainer 2"), IsOkAndHolds(Pointee(i)));
}
EXPECT_THAT(cache.Get("Slow trainer 1"), IsOkAndHolds(Pointee(Gt(14))));
EXPECT_THAT(cache.Get("Slow trainer 2"), IsOkAndHolds(Pointee(Gt(14))));
for (int i = 20; i < 100; ++i) {
EXPECT_THAT(cache.Get("Fast trainer 1"), IsOkAndHolds(Pointee(i)));
EXPECT_THAT(cache.Get("Fast trainer 2"), IsOkAndHolds(Pointee(i)));
}
EXPECT_THAT(cache.Get("Slow trainer 1"), IsOkAndHolds(Pointee(Gt(94))));
EXPECT_THAT(cache.Get("Slow trainer 2"), IsOkAndHolds(Pointee(Gt(94))));
}
TEST(CrossTrainerCacheTest, NewTrainersStartLate) {
CrossTrainerCache<int64_t> cache(
5 * sizeof(int64_t),
std::make_unique<InfiniteRange>());
for (int i = 0; i < 100; ++i) {
EXPECT_THAT(cache.Get("Old trainer"), IsOkAndHolds(Pointee(i)));
}
for (int j = 0; j < 100; ++j) {
EXPECT_THAT(cache.Get(absl::StrCat("New trainer ", j)),
IsOkAndHolds(Pointee(Gt(94))));
}
}
TEST(CrossTrainerCacheTest, AlternateTrainerExtendsCache) {
CrossTrainerCache<int64_t> cache(
sizeof(int64_t),
std::make_unique<InfiniteRange>());
EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(0)));
EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(1)));
EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(2)));
EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(0))));
EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(1))));
EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(2))));
EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(Gt(1))));
EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(Gt(2))));
EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(Gt(3))));
EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(2))));
EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(3))));
EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(Gt(4))));
EXPECT_THAT(cache.Get("Trainer 3"), IsOkAndHolds(Pointee(Gt(3))));
EXPECT_THAT(cache.Get("Trainer 3"), IsOkAndHolds(Pointee(Gt(4))));
EXPECT_THAT(cache.Get("Trainer 3"), IsOkAndHolds(Pointee(Gt(5))));
}
TEST(CrossTrainerCacheTest, CacheHitMetrics) {
CellReader<int64_t> cell_reader(
"/tensorflow/data/service/cross_trainer_cache_queries");
EXPECT_EQ(cell_reader.Delta("true"), 0);
EXPECT_EQ(cell_reader.Delta("false"), 0);
EXPECT_EQ(cell_reader.Read("true"), 0);
EXPECT_EQ(cell_reader.Read("false"), 0);
const size_t num_elements = 10;
CrossTrainerCache<int64_t> cache(
1024, std::make_unique<InfiniteRange>());
for (size_t i = 0; i < num_elements; ++i) {
EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(i)));
}
EXPECT_EQ(cell_reader.Delta("true"), 0);
EXPECT_EQ(cell_reader.Delta("false"), 10);
EXPECT_EQ(cell_reader.Read("true"), 0);
EXPECT_EQ(cell_reader.Read("false"), 10);
for (size_t i = 0; i < num_elements; ++i) {
EXPECT_THAT(cache.Get("Trainer 2"), IsOkAndHolds(Pointee(i)));
}
EXPECT_EQ(cell_reader.Delta("true"), 10);
EXPECT_EQ(cell_reader.Delta("false"), 0);
EXPECT_EQ(cell_reader.Read("true"), 10);
EXPECT_EQ(cell_reader.Read("false"), 10);
}
TEST(CrossTrainerCacheTest, CacheSizeMetrics) {
CellReader<int64_t> cell_reader(
"/tensorflow/data/service/cross_trainer_cache_size_bytes");
const size_t num_elements = 5;
CrossTrainerCache<int64_t> cache(
num_elements * sizeof(int64_t),
std::make_unique<InfiniteRange>());
for (size_t i = 0; i < num_elements; ++i) {
EXPECT_THAT(cache.Get("Trainer 1"), IsOkAndHolds(Pointee(i)));
EXPECT_EQ(cell_reader.Read(), (i + 1) * sizeof(int64_t));
}
for (size_t i = 0; i < 100; ++i) {
EXPECT_THAT(cache.Get("Trainer 1"),
IsOkAndHolds(Pointee(num_elements + i)));
EXPECT_EQ(cell_reader.Read(), 5 * sizeof(int64_t));
}
}
TEST(CrossTrainerCacheTest, ConcurrentReaders) {
size_t num_trainers = 10;
size_t num_elements_to_read = 200;
CrossTrainerCache<int64_t> cache(
3 * sizeof(int64_t),
std::make_unique<InfiniteRange>());
std::vector<std::vector<int64_t>> results;
std::vector<std::unique_ptr<Thread>> reader_threads;
results.reserve(num_trainers);
for (size_t i = 0; i < num_trainers; ++i) {
results.emplace_back();
std::vector<int64_t>& result = results.back();
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&cache, num_elements_to_read, &result]() {
for (size_t i = 0; i < num_elements_to_read; ++i) {
if (random::New64() % 5 == 0) {
Env::Default()->SleepForMicroseconds(2000);
}
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<const int64_t> next,
cache.Get(absl::StrCat("Trainer_", i)));
result.push_back(*next);
}
})));
}
reader_threads.clear();
EXPECT_EQ(results.size(), num_trainers);
for (const std::vector<int64_t>& result : results) {
EXPECT_EQ(result.size(), num_elements_to_read);
EXPECT_TRUE(SequenceIsIncreasing(result));
}
}
TEST(CrossTrainerCacheTest, ConcurrentReadersFromOneTrainer) {
size_t num_trainers = 10;
size_t num_elements_to_read = 100;
CrossTrainerCache<int64_t> cache(
3 * sizeof(int64_t),
std::make_unique<InfiniteRange>());
mutex mu;
std::vector<int64_t> results;
std::vector<std::unique_ptr<Thread>> reader_threads;
for (size_t i = 0; i < num_trainers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Thread_", i),
[&cache, num_elements_to_read, &results, &mu]() {
for (size_t i = 0; i < num_elements_to_read; ++i) {
if (random::New64() % 5 == 0) {
Env::Default()->SleepForMicroseconds(1000);
}
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<const int64_t> next,
cache.Get("Trainer ID"));
mutex_lock l(mu);
results.push_back(*next);
}
})));
}
reader_threads.clear();
EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(1000)));
}
TEST(CrossTrainerCacheTest, Cancel) {
size_t num_trainers = 10;
CrossTrainerCache<Tensor> cache(
1000, std::make_unique<TensorDataset>());
EXPECT_FALSE(cache.IsCancelled());
mutex mu;
Status status;
std::vector<std::unique_ptr<Thread>> reader_threads;
for (size_t i = 0; i < num_trainers; ++i) {
reader_threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("Trainer_", i),
[&cache, &status, &mu]() {
for (int j = 0; true; ++j) {
absl::StatusOr<std::shared_ptr<const Tensor>> tensor =
cache.Get(absl::StrCat("Trainer_", j % 1000));
{
mutex_lock l(mu);
status = tensor.status();
}
if (!tensor.status().ok()) {
return;
}
test::ExpectEqual(*tensor.value(), Tensor("Test Tensor"));
}
})));
}
Env::Default()->SleepForMicroseconds(1000000);
cache.Cancel(errors::Cancelled("Cancelled"));
reader_threads.clear();
mutex_lock l(mu);
EXPECT_THAT(status, StatusIs(error::CANCELLED));
EXPECT_THAT(cache.Get("New trainer"), StatusIs(error::CANCELLED));
EXPECT_TRUE(cache.IsCancelled());
}
TEST(CrossTrainerCacheTest, Errors) {
auto elements = std::make_unique<ElementOrErrorDataset<std::string>>(
std::vector<absl::StatusOr<std::string>>{
std::string("First element"),
errors::Cancelled("Cancelled"),
std::string("Second element"),
errors::InvalidArgument("InvalidArgument"),
std::string("Third element"),
errors::Unavailable("Unavailable"),
});
CrossTrainerCache<std::string> cache(
1000, std::move(elements));
EXPECT_THAT(cache.Get("Trainer ID"),
IsOkAndHolds(Pointee(std::string("First element"))));
EXPECT_THAT(cache.Get("Trainer ID"), StatusIs(error::CANCELLED));
EXPECT_THAT(cache.Get("Trainer ID"),
IsOkAndHolds(Pointee(std::string("Second element"))));
EXPECT_THAT(cache.Get("Trainer ID"), StatusIs(error::INVALID_ARGUMENT));
EXPECT_THAT(cache.Get("Trainer ID"),
IsOkAndHolds(Pointee(std::string("Third element"))));
EXPECT_THAT(cache.Get("Trainer ID"), StatusIs(error::UNAVAILABLE));
EXPECT_THAT(cache.Get("New Trainer"),
IsOkAndHolds(Pointee(std::string("First element"))));
EXPECT_THAT(cache.Get("New Trainer"),
IsOkAndHolds(Pointee(std::string("Second element"))));
EXPECT_THAT(cache.Get("New Trainer"),
IsOkAndHolds(Pointee(std::string("Third element"))));
}
TEST(CrossTrainerCacheTest, CacheSizeIsTooSmall) {
CrossTrainerCache<Tensor> cache(
1, std::make_unique<TensorDataset>());
EXPECT_THAT(cache.Get("Trainer ID"),
StatusIs(error::INVALID_ARGUMENT,
HasSubstr("tf.data service element size is larger than "
"cache size in bytes.")));
}
TEST(CrossTrainerCacheTest, TrainerIDMustBeNonEmpty) {
CrossTrainerCache<Tensor> cache(
1000, std::make_unique<TensorDataset>());
EXPECT_THAT(cache.Get(""), StatusIs(error::INVALID_ARGUMENT,
"tf.data service cross-trainer cache "
"requires a non-empty trainer ID."));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/cross_trainer_cache.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/cross_trainer_cache_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
18bfc23a-196f-4d42-a64d-c968866588d8 | cpp | google/cel-cpp | int_value | common/values/int_value.cc | common/values/int_value_test.cc | #include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/number.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
namespace cel {
namespace {
std::string IntDebugString(int64_t value) { return absl::StrCat(value); }
}
std::string IntValue::DebugString() const {
return IntDebugString(NativeValue());
}
absl::Status IntValue::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
return internal::SerializeInt64Value(NativeValue(), value);
}
absl::StatusOr<Json> IntValue::ConvertToJson(AnyToJsonConverter&) const {
return JsonInt(NativeValue());
}
absl::Status IntValue::Equal(ValueManager&, const Value& other,
Value& result) const {
if (auto other_value = As<IntValue>(other); other_value.has_value()) {
result = BoolValue{NativeValue() == other_value->NativeValue()};
return absl::OkStatus();
}
if (auto other_value = As<DoubleValue>(other); other_value.has_value()) {
result =
BoolValue{internal::Number::FromInt64(NativeValue()) ==
internal::Number::FromDouble(other_value->NativeValue())};
return absl::OkStatus();
}
if (auto other_value = As<UintValue>(other); other_value.has_value()) {
result =
BoolValue{internal::Number::FromInt64(NativeValue()) ==
internal::Number::FromUint64(other_value->NativeValue())};
return absl::OkStatus();
}
result = BoolValue{false};
return absl::OkStatus();
}
absl::StatusOr<Value> IntValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
} | #include <cstdint>
#include <sstream>
#include "absl/hash/hash.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::An;
using ::testing::Ne;
using IntValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(IntValueTest, Kind) {
EXPECT_EQ(IntValue(1).kind(), IntValue::kKind);
EXPECT_EQ(Value(IntValue(1)).kind(), IntValue::kKind);
}
TEST_P(IntValueTest, DebugString) {
{
std::ostringstream out;
out << IntValue(1);
EXPECT_EQ(out.str(), "1");
}
{
std::ostringstream out;
out << Value(IntValue(1));
EXPECT_EQ(out.str(), "1");
}
}
TEST_P(IntValueTest, ConvertToJson) {
EXPECT_THAT(IntValue(1).ConvertToJson(value_manager()),
IsOkAndHolds(Json(1.0)));
}
TEST_P(IntValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(IntValue(1)), NativeTypeId::For<IntValue>());
EXPECT_EQ(NativeTypeId::Of(Value(IntValue(1))),
NativeTypeId::For<IntValue>());
}
TEST_P(IntValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<IntValue>(IntValue(1)));
EXPECT_TRUE(InstanceOf<IntValue>(Value(IntValue(1))));
}
TEST_P(IntValueTest, Cast) {
EXPECT_THAT(Cast<IntValue>(IntValue(1)), An<IntValue>());
EXPECT_THAT(Cast<IntValue>(Value(IntValue(1))), An<IntValue>());
}
TEST_P(IntValueTest, As) {
EXPECT_THAT(As<IntValue>(Value(IntValue(1))), Ne(absl::nullopt));
}
TEST_P(IntValueTest, HashValue) {
EXPECT_EQ(absl::HashOf(IntValue(1)), absl::HashOf(int64_t{1}));
}
TEST_P(IntValueTest, Equality) {
EXPECT_NE(IntValue(0), 1);
EXPECT_NE(1, IntValue(0));
EXPECT_NE(IntValue(0), IntValue(1));
}
TEST_P(IntValueTest, LessThan) {
EXPECT_LT(IntValue(0), 1);
EXPECT_LT(0, IntValue(1));
EXPECT_LT(IntValue(0), IntValue(1));
}
INSTANTIATE_TEST_SUITE_P(
IntValueTest, IntValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
IntValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/int_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/int_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
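A small illustrative sketch of the IntValue surface exercised above; it uses only construction, streaming (which goes through DebugString()), and hashing, all of which appear in the test, while the main() scaffolding is an assumption for illustration.
#include <cstddef>
#include <cstdint>
#include <sstream>

#include "absl/hash/hash.h"
#include "common/value.h"

int main() {
  cel::IntValue value(42);

  // operator<< streams DebugString(), so this produces "42".
  std::ostringstream out;
  out << value;

  // IntValue hashes like the underlying int64_t, as the test above checks.
  const size_t h = absl::HashOf(value);

  return (out.str() == "42" && h == absl::HashOf(int64_t{42})) ? 0 : 1;
}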
3718fbeb-f057-4522-8c2c-1f4e4ab2e411 | cpp | tensorflow/tensorflow | work_sharder | tensorflow/core/util/work_sharder.cc | tensorflow/core/util/work_sharder_test.cc | #include "tensorflow/core/util/work_sharder.h"
#include <algorithm>
#include <functional>
#include "xla/tsl/util/env_var.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/logging.h"
#include "tsl/profiler/lib/traceme.h"
namespace tensorflow {
namespace {
bool UseEigenParallelFor() {
static bool result = []() {
bool result = true;
if (auto status =
tsl::ReadBoolFromEnvVar("TF_USE_EIGEN_PARALLEL_FOR_IN_WORK_SHARDER",
true, &result);
status.ok()) {
return result;
}
return true;
}();
return result;
}
}
thread_local int per_thread_max_parallelism = 1000000;
void SetPerThreadMaxParallelism(int max_parallelism) {
CHECK_LE(0, max_parallelism);
per_thread_max_parallelism = max_parallelism;
}
int GetPerThreadMaxParallelism() { return per_thread_max_parallelism; }
void Shard(int max_parallelism, thread::ThreadPool* workers, int64_t total,
int64_t cost_per_unit, std::function<void(int64_t, int64_t)> work) {
CHECK_GE(total, 0);
if (total == 0) {
return;
}
max_parallelism = std::min(max_parallelism, GetPerThreadMaxParallelism());
if (max_parallelism <= 1) {
work(0, total);
return;
}
if (UseEigenParallelFor() && max_parallelism >= workers->NumThreads()) {
tsl::profiler::TraceMe trace_me([=, num_threads = workers->NumThreads()]() {
return tsl::profiler::TraceMeEncode("ParallelFor",
{{"cost_per_unit", cost_per_unit},
{"total", total},
{"max_parallelism", max_parallelism},
{"num_threads", num_threads}});
});
workers->ParallelFor(total, cost_per_unit, work);
return;
}
Sharder::Do(
total, cost_per_unit, work,
[&workers](Sharder::Closure c) { workers->Schedule(c); },
max_parallelism);
}
void Sharder::Do(int64_t total, int64_t cost_per_unit, const Work& work,
const Runner& runner, int max_parallelism) {
tsl::profiler::TraceMe trace_me([=]() {
return tsl::profiler::TraceMeEncode("Sharder::Do",
{{"cost_per_unit", cost_per_unit},
{"total", total},
{"max_parallelism", max_parallelism}});
});
cost_per_unit = std::max(int64_t{1}, cost_per_unit);
static const int64_t kMinCostPerShard = 10000;
const int num_shards =
std::max<int>(1, std::min(static_cast<int64_t>(max_parallelism),
total * cost_per_unit / kMinCostPerShard));
const int64_t block_size = (total + num_shards - 1) / num_shards;
CHECK_GT(block_size, 0);
if (block_size >= total) {
work(0, total);
return;
}
const int num_shards_used = (total + block_size - 1) / block_size;
BlockingCounter counter(num_shards_used - 1);
for (int64_t start = block_size; start < total; start += block_size) {
auto limit = std::min(start + block_size, total);
runner([&work, &counter, start, limit]() {
work(start, limit);
counter.DecrementCount();
});
}
work(0, std::min(block_size, total));
counter.Wait();
}
} | #include "tensorflow/core/util/work_sharder.h"
#include <algorithm>
#include <atomic>
#include <functional>
#include <vector>
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
void RunSharding(int64_t num_workers, int64_t total, int64_t cost_per_unit,
int64_t per_thread_max_parallelism,
thread::ThreadPool* threads) {
mutex mu;
int64_t num_shards = 0;
int64_t num_done_work = 0;
std::vector<bool> work(total, false);
Shard(num_workers, threads, total, cost_per_unit,
[=, &mu, &num_shards, &num_done_work, &work](int64_t start,
int64_t limit) {
VLOG(1) << "Shard [" << start << "," << limit << ")";
EXPECT_GE(start, 0);
EXPECT_LE(limit, total);
mutex_lock l(mu);
++num_shards;
for (; start < limit; ++start) {
EXPECT_FALSE(work[start]);
++num_done_work;
work[start] = true;
}
});
LOG(INFO) << num_workers << " " << total << " " << cost_per_unit << " "
<< num_shards;
EXPECT_EQ(num_done_work, total);
if (std::min(num_workers, per_thread_max_parallelism) <
threads->NumThreads()) {
EXPECT_LE(num_shards, 1 + per_thread_max_parallelism);
}
}
TEST(Shard, Basic) {
thread::ThreadPool threads(Env::Default(), "test", 16);
for (auto workers : {0, 1, 2, 3, 5, 7, 10, 11, 15, 100, 1000}) {
for (auto total : {0, 1, 7, 10, 64, 100, 256, 1000, 9999}) {
for (auto cost_per_unit : {0, 1, 11, 102, 1003, 10005, 1000007}) {
for (auto maxp : {1, 2, 4, 8, 100}) {
ScopedPerThreadMaxParallelism s(maxp);
RunSharding(workers, total, cost_per_unit, maxp, &threads);
}
}
}
}
}
TEST(Shard, OverflowTest) {
thread::ThreadPool threads(Env::Default(), "test", 3);
for (auto workers : {1, 2, 3}) {
const int64_t total_elements = 1LL << 32;
const int64_t cost_per_unit = 10;
std::atomic<int64_t> num_elements(0);
Shard(workers, &threads, total_elements, cost_per_unit,
[&num_elements](int64_t start, int64_t limit) {
num_elements += limit - start;
});
EXPECT_EQ(num_elements.load(), total_elements);
}
}
void BM_Sharding(::testing::benchmark::State& state) {
const int arg = state.range(0);
thread::ThreadPool threads(Env::Default(), "test", 16);
const int64_t total = 1LL << 30;
auto lambda = [](int64_t start, int64_t limit) {};
auto work = std::cref(lambda);
for (auto s : state) {
Shard(arg - 1, &threads, total, 1, work);
}
}
BENCHMARK(BM_Sharding)->Range(1, 128);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/work_sharder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/work_sharder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
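An illustrative stand-alone sketch of calling Shard() with a thread pool, following the same pattern as the test above; the pool name, unit count, and cost estimate are arbitrary placeholder values.
#include <atomic>
#include <cstdint>

#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/util/work_sharder.h"

int main() {
  tensorflow::thread::ThreadPool threads(tensorflow::Env::Default(), "demo",
                                         /*num_threads=*/8);

  const int64_t total = 1 << 20;       // number of work units (arbitrary)
  const int64_t cost_per_unit = 1000;  // rough per-unit cost estimate (arbitrary)

  std::atomic<int64_t> processed{0};

  // Shard splits [0, total) into contiguous [start, limit) ranges and runs
  // the closure on the pool, capped at max_parallelism concurrent shards.
  tensorflow::Shard(/*max_parallelism=*/8, &threads, total, cost_per_unit,
                    [&processed](int64_t start, int64_t limit) {
                      processed += limit - start;
                    });

  return processed.load() == total ? 0 : 1;
}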
4d1025af-be87-4f2f-be05-74e276e9db38 | cpp | google/quiche | print_elements | quiche/common/print_elements.h | quiche/common/print_elements_test.cc | #ifndef QUICHE_COMMON_PRINT_ELEMENTS_H_
#define QUICHE_COMMON_PRINT_ELEMENTS_H_
#include <ostream>
#include <sstream>
#include <string>
#include "quiche/common/platform/api/quiche_export.h"
namespace quiche {
template <typename T>
QUICHE_NO_EXPORT inline std::string PrintElements(const T& container) {
std::stringstream debug_string;
debug_string << "{";
auto it = container.cbegin();
if (it != container.cend()) {
debug_string << *it;
++it;
while (it != container.cend()) {
debug_string << ", " << *it;
++it;
}
}
debug_string << "}";
return debug_string.str();
}
}
#endif | #include "quiche/common/print_elements.h"
#include <deque>
#include <list>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/common/platform/api/quiche_test.h"
using quic::QuicIetfTransportErrorCodes;
namespace quiche {
namespace test {
namespace {
TEST(PrintElementsTest, Empty) {
std::vector<std::string> empty{};
EXPECT_EQ("{}", PrintElements(empty));
}
TEST(PrintElementsTest, StdContainers) {
std::vector<std::string> one{"foo"};
EXPECT_EQ("{foo}", PrintElements(one));
std::list<std::string> two{"foo", "bar"};
EXPECT_EQ("{foo, bar}", PrintElements(two));
std::deque<absl::string_view> three{"foo", "bar", "baz"};
EXPECT_EQ("{foo, bar, baz}", PrintElements(three));
}
TEST(PrintElementsTest, CustomPrinter) {
std::vector<QuicIetfTransportErrorCodes> empty{};
EXPECT_EQ("{}", PrintElements(empty));
std::list<QuicIetfTransportErrorCodes> one{
QuicIetfTransportErrorCodes::NO_IETF_QUIC_ERROR};
EXPECT_EQ("{NO_IETF_QUIC_ERROR}", PrintElements(one));
std::vector<QuicIetfTransportErrorCodes> two{
QuicIetfTransportErrorCodes::FLOW_CONTROL_ERROR,
QuicIetfTransportErrorCodes::STREAM_LIMIT_ERROR};
EXPECT_EQ("{FLOW_CONTROL_ERROR, STREAM_LIMIT_ERROR}", PrintElements(two));
std::list<QuicIetfTransportErrorCodes> three{
QuicIetfTransportErrorCodes::CONNECTION_ID_LIMIT_ERROR,
QuicIetfTransportErrorCodes::PROTOCOL_VIOLATION,
QuicIetfTransportErrorCodes::INVALID_TOKEN};
EXPECT_EQ("{CONNECTION_ID_LIMIT_ERROR, PROTOCOL_VIOLATION, INVALID_TOKEN}",
PrintElements(three));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/print_elements.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/print_elements_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
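A minimal, illustrative usage sketch for the PrintElements helper above; it assumes only the quiche header shown here plus standard containers, and the specific container contents are placeholders.
#include <iostream>
#include <set>
#include <string>
#include <vector>

#include "quiche/common/print_elements.h"

int main() {
  // PrintElements accepts any container exposing cbegin()/cend() whose
  // elements can be streamed into a std::ostream.
  std::vector<int> numbers{1, 2, 3};
  std::set<std::string> names{"alice", "bob"};

  std::cout << quiche::PrintElements(numbers) << "\n";  // prints {1, 2, 3}
  std::cout << quiche::PrintElements(names) << "\n";    // prints {alice, bob}
  return 0;
}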
5b701727-1be1-444f-9d6e-3f418b5d3490 | cpp | abseil/abseil-cpp | flags | absl/log/flags.cc | absl/log/flags_test.cc | #include "absl/log/internal/flags.h"
#include <stddef.h>
#include <algorithm>
#include <cstdlib>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/log_severity.h"
#include "absl/flags/flag.h"
#include "absl/flags/marshalling.h"
#include "absl/log/globals.h"
#include "absl/log/internal/config.h"
#include "absl/log/internal/vlog_config.h"
#include "absl/strings/numbers.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace log_internal {
namespace {
void SyncLoggingFlags() {
absl::SetFlag(&FLAGS_minloglevel, static_cast<int>(absl::MinLogLevel()));
absl::SetFlag(&FLAGS_log_prefix, absl::ShouldPrependLogPrefix());
}
bool RegisterSyncLoggingFlags() {
log_internal::SetLoggingGlobalsListener(&SyncLoggingFlags);
return true;
}
ABSL_ATTRIBUTE_UNUSED const bool unused = RegisterSyncLoggingFlags();
template <typename T>
T GetFromEnv(const char* varname, T dflt) {
const char* val = ::getenv(varname);
if (val != nullptr) {
std::string err;
ABSL_INTERNAL_CHECK(absl::ParseFlag(val, &dflt, &err), err.c_str());
}
return dflt;
}
constexpr absl::LogSeverityAtLeast StderrThresholdDefault() {
return absl::LogSeverityAtLeast::kError;
}
}
}
ABSL_NAMESPACE_END
}
ABSL_FLAG(int, stderrthreshold,
static_cast<int>(absl::log_internal::StderrThresholdDefault()),
"Log messages at or above this threshold level are copied to stderr.")
.OnUpdate([] {
absl::log_internal::RawSetStderrThreshold(
static_cast<absl::LogSeverityAtLeast>(
absl::GetFlag(FLAGS_stderrthreshold)));
});
ABSL_FLAG(int, minloglevel, static_cast<int>(absl::LogSeverityAtLeast::kInfo),
"Messages logged at a lower level than this don't actually "
"get logged anywhere")
.OnUpdate([] {
absl::log_internal::RawSetMinLogLevel(
static_cast<absl::LogSeverityAtLeast>(
absl::GetFlag(FLAGS_minloglevel)));
});
ABSL_FLAG(std::string, log_backtrace_at, "",
"Emit a backtrace when logging at file:linenum.")
.OnUpdate([] {
const std::string log_backtrace_at =
absl::GetFlag(FLAGS_log_backtrace_at);
if (log_backtrace_at.empty()) {
absl::ClearLogBacktraceLocation();
return;
}
const size_t last_colon = log_backtrace_at.rfind(':');
if (last_colon == log_backtrace_at.npos) {
absl::ClearLogBacktraceLocation();
return;
}
const absl::string_view file =
absl::string_view(log_backtrace_at).substr(0, last_colon);
int line;
if (!absl::SimpleAtoi(
absl::string_view(log_backtrace_at).substr(last_colon + 1),
&line)) {
absl::ClearLogBacktraceLocation();
return;
}
absl::SetLogBacktraceLocation(file, line);
});
ABSL_FLAG(bool, log_prefix, true,
"Prepend the log prefix to the start of each log line")
.OnUpdate([] {
absl::log_internal::RawEnableLogPrefix(absl::GetFlag(FLAGS_log_prefix));
});
ABSL_FLAG(int, v, 0,
"Show all VLOG(m) messages for m <= this. Overridable by --vmodule.")
.OnUpdate([] {
absl::log_internal::UpdateGlobalVLogLevel(absl::GetFlag(FLAGS_v));
});
ABSL_FLAG(
std::string, vmodule, "",
"per-module log verbosity level."
" Argument is a comma-separated list of <module name>=<log level>."
" <module name> is a glob pattern, matched against the filename base"
" (that is, name ignoring .cc/.h./-inl.h)."
" A pattern without slashes matches just the file name portion, otherwise"
" the whole file path below the workspace root"
" (still without .cc/.h./-inl.h) is matched."
" ? and * in the glob pattern match any single or sequence of characters"
" respectively including slashes."
" <log level> overrides any value given by --v.")
.OnUpdate([] {
absl::log_internal::UpdateVModule(absl::GetFlag(FLAGS_vmodule));
}); | #include "absl/log/internal/flags.h"
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/base/log_severity.h"
#include "absl/flags/flag.h"
#include "absl/flags/reflection.h"
#include "absl/log/globals.h"
#include "absl/log/internal/test_helpers.h"
#include "absl/log/internal/test_matchers.h"
#include "absl/log/log.h"
#include "absl/log/scoped_mock_log.h"
#include "absl/strings/str_cat.h"
namespace {
using ::absl::log_internal::TextMessage;
using ::testing::HasSubstr;
using ::testing::Not;
auto* test_env ABSL_ATTRIBUTE_UNUSED = ::testing::AddGlobalTestEnvironment(
new absl::log_internal::LogTestEnvironment);
constexpr static absl::LogSeverityAtLeast DefaultStderrThreshold() {
return absl::LogSeverityAtLeast::kError;
}
class LogFlagsTest : public ::testing::Test {
protected:
absl::FlagSaver flag_saver_;
};
TEST_F(LogFlagsTest, DISABLED_StderrKnobsDefault) {
EXPECT_EQ(absl::StderrThreshold(), DefaultStderrThreshold());
}
TEST_F(LogFlagsTest, SetStderrThreshold) {
absl::SetFlag(&FLAGS_stderrthreshold,
static_cast<int>(absl::LogSeverityAtLeast::kInfo));
EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kInfo);
absl::SetFlag(&FLAGS_stderrthreshold,
static_cast<int>(absl::LogSeverityAtLeast::kError));
EXPECT_EQ(absl::StderrThreshold(), absl::LogSeverityAtLeast::kError);
}
TEST_F(LogFlagsTest, SetMinLogLevel) {
absl::SetFlag(&FLAGS_minloglevel,
static_cast<int>(absl::LogSeverityAtLeast::kError));
EXPECT_EQ(absl::MinLogLevel(), absl::LogSeverityAtLeast::kError);
absl::log_internal::ScopedMinLogLevel scoped_min_log_level(
absl::LogSeverityAtLeast::kWarning);
EXPECT_EQ(absl::GetFlag(FLAGS_minloglevel),
static_cast<int>(absl::LogSeverityAtLeast::kWarning));
}
TEST_F(LogFlagsTest, PrependLogPrefix) {
absl::SetFlag(&FLAGS_log_prefix, false);
EXPECT_EQ(absl::ShouldPrependLogPrefix(), false);
absl::EnableLogPrefix(true);
EXPECT_EQ(absl::GetFlag(FLAGS_log_prefix), true);
}
TEST_F(LogFlagsTest, EmptyBacktraceAtFlag) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
absl::SetFlag(&FLAGS_log_backtrace_at, "");
LOG(INFO) << "hello world";
}
TEST_F(LogFlagsTest, BacktraceAtNonsense) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
absl::SetFlag(&FLAGS_log_backtrace_at, "gibberish");
LOG(INFO) << "hello world";
}
TEST_F(LogFlagsTest, BacktraceAtWrongFile) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
absl::SetFlag(&FLAGS_log_backtrace_at,
absl::StrCat("some_other_file.cc:", log_line));
do_log();
}
TEST_F(LogFlagsTest, BacktraceAtWrongLine) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
absl::SetFlag(&FLAGS_log_backtrace_at,
absl::StrCat("flags_test.cc:", log_line + 1));
do_log();
}
TEST_F(LogFlagsTest, BacktraceAtWholeFilename) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
absl::SetFlag(&FLAGS_log_backtrace_at, absl::StrCat(__FILE__, ":", log_line));
do_log();
}
TEST_F(LogFlagsTest, BacktraceAtNonmatchingSuffix) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
absl::SetFlag(&FLAGS_log_backtrace_at,
absl::StrCat("flags_test.cc:", log_line, "gibberish"));
do_log();
}
TEST_F(LogFlagsTest, LogsBacktrace) {
absl::SetMinLogLevel(absl::LogSeverityAtLeast::kInfo);
const int log_line = __LINE__ + 1;
auto do_log = [] { LOG(INFO) << "hello world"; };
absl::ScopedMockLog test_sink(absl::MockLogDefault::kDisallowUnexpected);
testing::InSequence seq;
EXPECT_CALL(test_sink, Send(TextMessage(HasSubstr("(stacktrace:"))));
EXPECT_CALL(test_sink, Send(TextMessage(Not(HasSubstr("(stacktrace:")))));
test_sink.StartCapturingLogs();
absl::SetFlag(&FLAGS_log_backtrace_at,
absl::StrCat("flags_test.cc:", log_line));
do_log();
absl::SetFlag(&FLAGS_log_backtrace_at, "");
do_log();
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/flags.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/log/flags_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
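An illustrative sketch of flipping these flags programmatically, mirroring what the test above does; it assumes the internal flags header is an acceptable include for the caller, as it is in the test.
#include "absl/base/log_severity.h"
#include "absl/flags/flag.h"
#include "absl/log/globals.h"
#include "absl/log/internal/flags.h"
#include "absl/log/log.h"

int main() {
  // Writing a flag fires its OnUpdate hook, which pushes the value into the
  // corresponding logging global.
  absl::SetFlag(&FLAGS_stderrthreshold,
                static_cast<int>(absl::LogSeverityAtLeast::kInfo));
  absl::SetFlag(&FLAGS_log_prefix, false);

  // Changing a global goes the other way: the registered listener
  // (SyncLoggingFlags above) mirrors it back into the flag.
  absl::EnableLogPrefix(true);

  LOG(INFO) << "log_prefix flag is now " << absl::GetFlag(FLAGS_log_prefix);
  return 0;
}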
27394261-aac9-4dc4-be63-5a2f30536a7e | cpp | abseil/abseil-cpp | distributions | absl/random/distributions.h | absl/random/distributions_test.cc | #ifndef ABSL_RANDOM_DISTRIBUTIONS_H_
#define ABSL_RANDOM_DISTRIBUTIONS_H_
#include <limits>
#include <type_traits>
#include "absl/base/config.h"
#include "absl/base/internal/inline_variable.h"
#include "absl/meta/type_traits.h"
#include "absl/random/bernoulli_distribution.h"
#include "absl/random/beta_distribution.h"
#include "absl/random/exponential_distribution.h"
#include "absl/random/gaussian_distribution.h"
#include "absl/random/internal/distribution_caller.h"
#include "absl/random/internal/traits.h"
#include "absl/random/internal/uniform_helper.h"
#include "absl/random/log_uniform_int_distribution.h"
#include "absl/random/poisson_distribution.h"
#include "absl/random/uniform_int_distribution.h"
#include "absl/random/uniform_real_distribution.h"
#include "absl/random/zipf_distribution.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosedClosed,
{});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedClosedTag, IntervalClosed, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalClosedOpenTag, IntervalClosedOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpenOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenOpenTag, IntervalOpen, {});
ABSL_INTERNAL_INLINE_CONSTEXPR(IntervalOpenClosedTag, IntervalOpenClosed, {});
template <typename R = void, typename TagType, typename URBG>
typename absl::enable_if_t<!std::is_same<R, void>::value, R>
Uniform(TagType tag,
URBG&& urbg,
R lo, R hi) {
using gen_t = absl::decay_t<URBG>;
using distribution_t = random_internal::UniformDistributionWrapper<R>;
auto a = random_internal::uniform_lower_bound(tag, lo, hi);
auto b = random_internal::uniform_upper_bound(tag, lo, hi);
if (!random_internal::is_uniform_range_valid(a, b)) return lo;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, tag, lo, hi);
}
template <typename R = void, typename URBG>
typename absl::enable_if_t<!std::is_same<R, void>::value, R>
Uniform(URBG&& urbg,
R lo, R hi) {
using gen_t = absl::decay_t<URBG>;
using distribution_t = random_internal::UniformDistributionWrapper<R>;
constexpr auto tag = absl::IntervalClosedOpen;
auto a = random_internal::uniform_lower_bound(tag, lo, hi);
auto b = random_internal::uniform_upper_bound(tag, lo, hi);
if (!random_internal::is_uniform_range_valid(a, b)) return lo;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, lo, hi);
}
template <typename R = void, typename TagType, typename URBG, typename A,
typename B>
typename absl::enable_if_t<std::is_same<R, void>::value,
random_internal::uniform_inferred_return_t<A, B>>
Uniform(TagType tag,
URBG&& urbg,
A lo, B hi) {
using gen_t = absl::decay_t<URBG>;
using return_t = typename random_internal::uniform_inferred_return_t<A, B>;
using distribution_t = random_internal::UniformDistributionWrapper<return_t>;
auto a = random_internal::uniform_lower_bound<return_t>(tag, lo, hi);
auto b = random_internal::uniform_upper_bound<return_t>(tag, lo, hi);
if (!random_internal::is_uniform_range_valid(a, b)) return lo;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, tag, static_cast<return_t>(lo),
static_cast<return_t>(hi));
}
template <typename R = void, typename URBG, typename A, typename B>
typename absl::enable_if_t<std::is_same<R, void>::value,
random_internal::uniform_inferred_return_t<A, B>>
Uniform(URBG&& urbg,
A lo, B hi) {
using gen_t = absl::decay_t<URBG>;
using return_t = typename random_internal::uniform_inferred_return_t<A, B>;
using distribution_t = random_internal::UniformDistributionWrapper<return_t>;
constexpr auto tag = absl::IntervalClosedOpen;
auto a = random_internal::uniform_lower_bound<return_t>(tag, lo, hi);
auto b = random_internal::uniform_upper_bound<return_t>(tag, lo, hi);
if (!random_internal::is_uniform_range_valid(a, b)) return lo;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, static_cast<return_t>(lo),
static_cast<return_t>(hi));
}
template <typename R, typename URBG>
typename absl::enable_if_t<!std::numeric_limits<R>::is_signed, R>
Uniform(URBG&& urbg) {
using gen_t = absl::decay_t<URBG>;
using distribution_t = random_internal::UniformDistributionWrapper<R>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg);
}
template <typename URBG>
bool Bernoulli(URBG&& urbg,
double p) {
using gen_t = absl::decay_t<URBG>;
using distribution_t = absl::bernoulli_distribution;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, p);
}
template <typename RealType, typename URBG>
RealType Beta(URBG&& urbg,
RealType alpha, RealType beta) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Beta<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::beta_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, alpha, beta);
}
template <typename RealType, typename URBG>
RealType Exponential(URBG&& urbg,
RealType lambda = 1) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Exponential<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::exponential_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, lambda);
}
template <typename RealType, typename URBG>
RealType Gaussian(URBG&& urbg,
RealType mean = 0, RealType stddev = 1) {
static_assert(
std::is_floating_point<RealType>::value,
"Template-argument 'RealType' must be a floating-point type, in "
"absl::Gaussian<RealType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::gaussian_distribution<RealType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, mean, stddev);
}
template <typename IntType, typename URBG>
IntType LogUniform(URBG&& urbg,
IntType lo, IntType hi, IntType base = 2) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::LogUniform<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::log_uniform_int_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, lo, hi, base);
}
template <typename IntType, typename URBG>
IntType Poisson(URBG&& urbg,
double mean = 1.0) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Poisson<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::poisson_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, mean);
}
template <typename IntType, typename URBG>
IntType Zipf(URBG&& urbg,
IntType hi = (std::numeric_limits<IntType>::max)(), double q = 2.0,
double v = 1.0) {
static_assert(random_internal::IsIntegral<IntType>::value,
"Template-argument 'IntType' must be an integral type, in "
"absl::Zipf<IntType, URBG>(...)");
using gen_t = absl::decay_t<URBG>;
using distribution_t = typename absl::zipf_distribution<IntType>;
return random_internal::DistributionCaller<gen_t>::template Call<
distribution_t>(&urbg, hi, q, v);
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/distributions.h"
#include <cfloat>
#include <cmath>
#include <cstdint>
#include <limits>
#include <type_traits>
#include <utility>
#include <vector>
#include "gtest/gtest.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/int128.h"
#include "absl/random/internal/distribution_test_util.h"
#include "absl/random/random.h"
namespace {
constexpr int kSize = 400000;
class RandomDistributionsTest : public testing::Test {};
struct Invalid {};
template <typename A, typename B>
auto InferredUniformReturnT(int)
-> decltype(absl::Uniform(std::declval<absl::InsecureBitGen&>(),
std::declval<A>(), std::declval<B>()));
template <typename, typename>
Invalid InferredUniformReturnT(...);
template <typename TagType, typename A, typename B>
auto InferredTaggedUniformReturnT(int)
-> decltype(absl::Uniform(std::declval<TagType>(),
std::declval<absl::InsecureBitGen&>(),
std::declval<A>(), std::declval<B>()));
template <typename, typename, typename>
Invalid InferredTaggedUniformReturnT(...);
template <typename A, typename B, typename Expect>
void CheckArgsInferType() {
static_assert(
absl::conjunction<
std::is_same<Expect, decltype(InferredUniformReturnT<A, B>(0))>,
std::is_same<Expect,
decltype(InferredUniformReturnT<B, A>(0))>>::value,
"");
static_assert(
absl::conjunction<
std::is_same<Expect, decltype(InferredTaggedUniformReturnT<
absl::IntervalOpenOpenTag, A, B>(0))>,
std::is_same<Expect,
decltype(InferredTaggedUniformReturnT<
absl::IntervalOpenOpenTag, B, A>(0))>>::value,
"");
}
template <typename A, typename B, typename ExplicitRet>
auto ExplicitUniformReturnT(int) -> decltype(absl::Uniform<ExplicitRet>(
std::declval<absl::InsecureBitGen&>(),
std::declval<A>(), std::declval<B>()));
template <typename, typename, typename ExplicitRet>
Invalid ExplicitUniformReturnT(...);
template <typename TagType, typename A, typename B, typename ExplicitRet>
auto ExplicitTaggedUniformReturnT(int)
-> decltype(absl::Uniform<ExplicitRet>(
std::declval<TagType>(), std::declval<absl::InsecureBitGen&>(),
std::declval<A>(), std::declval<B>()));
template <typename, typename, typename, typename ExplicitRet>
Invalid ExplicitTaggedUniformReturnT(...);
template <typename A, typename B, typename Expect>
void CheckArgsReturnExpectedType() {
static_assert(
absl::conjunction<
std::is_same<Expect,
decltype(ExplicitUniformReturnT<A, B, Expect>(0))>,
std::is_same<Expect, decltype(ExplicitUniformReturnT<B, A, Expect>(
0))>>::value,
"");
static_assert(
absl::conjunction<
std::is_same<Expect,
decltype(ExplicitTaggedUniformReturnT<
absl::IntervalOpenOpenTag, A, B, Expect>(0))>,
std::is_same<Expect, decltype(ExplicitTaggedUniformReturnT<
absl::IntervalOpenOpenTag, B, A,
Expect>(0))>>::value,
"");
}
template <typename R>
auto UniformNoBoundsReturnT(int)
-> decltype(absl::Uniform<R>(std::declval<absl::InsecureBitGen&>()));
template <typename>
Invalid UniformNoBoundsReturnT(...);
TEST_F(RandomDistributionsTest, UniformTypeInference) {
CheckArgsInferType<uint16_t, uint16_t, uint16_t>();
CheckArgsInferType<uint32_t, uint32_t, uint32_t>();
CheckArgsInferType<uint64_t, uint64_t, uint64_t>();
CheckArgsInferType<int16_t, int16_t, int16_t>();
CheckArgsInferType<int32_t, int32_t, int32_t>();
CheckArgsInferType<int64_t, int64_t, int64_t>();
CheckArgsInferType<float, float, float>();
CheckArgsInferType<double, double, double>();
CheckArgsReturnExpectedType<int16_t, int16_t, int32_t>();
CheckArgsReturnExpectedType<uint16_t, uint16_t, int32_t>();
CheckArgsReturnExpectedType<int16_t, int16_t, int64_t>();
CheckArgsReturnExpectedType<int16_t, int32_t, int64_t>();
CheckArgsReturnExpectedType<int16_t, int32_t, double>();
CheckArgsReturnExpectedType<float, float, double>();
CheckArgsReturnExpectedType<int, int, int16_t>();
CheckArgsInferType<uint16_t, uint32_t, uint32_t>();
CheckArgsInferType<uint16_t, uint64_t, uint64_t>();
CheckArgsInferType<uint16_t, int32_t, int32_t>();
CheckArgsInferType<uint16_t, int64_t, int64_t>();
CheckArgsInferType<uint16_t, float, float>();
CheckArgsInferType<uint16_t, double, double>();
CheckArgsInferType<int16_t, int32_t, int32_t>();
CheckArgsInferType<int16_t, int64_t, int64_t>();
CheckArgsInferType<int16_t, float, float>();
CheckArgsInferType<int16_t, double, double>();
CheckArgsInferType<uint16_t, int16_t, Invalid>();
CheckArgsInferType<int16_t, uint32_t, Invalid>();
CheckArgsInferType<int16_t, uint64_t, Invalid>();
CheckArgsInferType<uint32_t, uint64_t, uint64_t>();
CheckArgsInferType<uint32_t, int64_t, int64_t>();
CheckArgsInferType<uint32_t, double, double>();
CheckArgsInferType<int32_t, int64_t, int64_t>();
CheckArgsInferType<int32_t, double, double>();
CheckArgsInferType<uint32_t, int32_t, Invalid>();
CheckArgsInferType<int32_t, uint64_t, Invalid>();
CheckArgsInferType<int32_t, float, Invalid>();
CheckArgsInferType<uint32_t, float, Invalid>();
CheckArgsInferType<uint64_t, int64_t, Invalid>();
CheckArgsInferType<int64_t, float, Invalid>();
CheckArgsInferType<int64_t, double, Invalid>();
CheckArgsInferType<float, double, double>();
}
TEST_F(RandomDistributionsTest, UniformExamples) {
absl::InsecureBitGen gen;
EXPECT_NE(1, absl::Uniform(gen, static_cast<uint16_t>(0), 1.0f));
EXPECT_NE(1, absl::Uniform(gen, 0, 1.0));
EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen,
static_cast<uint16_t>(0), 1.0f));
EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen, 0, 1.0));
EXPECT_NE(1, absl::Uniform(absl::IntervalOpenOpen, gen, -1, 1.0));
EXPECT_NE(1, absl::Uniform<double>(absl::IntervalOpenOpen, gen, -1, 1));
EXPECT_NE(1, absl::Uniform<float>(absl::IntervalOpenOpen, gen, 0, 1));
EXPECT_NE(1, absl::Uniform<float>(gen, 0, 1));
}
TEST_F(RandomDistributionsTest, UniformNoBounds) {
absl::InsecureBitGen gen;
absl::Uniform<uint8_t>(gen);
absl::Uniform<uint16_t>(gen);
absl::Uniform<uint32_t>(gen);
absl::Uniform<uint64_t>(gen);
absl::Uniform<absl::uint128>(gen);
testing::StaticAssertTypeEq<uint8_t,
decltype(UniformNoBoundsReturnT<uint8_t>(0))>();
testing::StaticAssertTypeEq<uint16_t,
decltype(UniformNoBoundsReturnT<uint16_t>(0))>();
testing::StaticAssertTypeEq<uint32_t,
decltype(UniformNoBoundsReturnT<uint32_t>(0))>();
testing::StaticAssertTypeEq<uint64_t,
decltype(UniformNoBoundsReturnT<uint64_t>(0))>();
testing::StaticAssertTypeEq<
absl::uint128, decltype(UniformNoBoundsReturnT<absl::uint128>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<int8_t>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<int16_t>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<int32_t>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<int64_t>(0))>();
testing::StaticAssertTypeEq<
Invalid, decltype(UniformNoBoundsReturnT<absl::int128>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<float>(0))>();
testing::StaticAssertTypeEq<Invalid,
decltype(UniformNoBoundsReturnT<double>(0))>();
}
TEST_F(RandomDistributionsTest, UniformNonsenseRanges) {
#if (defined(__i386__) || defined(_M_IX86)) && FLT_EVAL_METHOD != 0
GTEST_SKIP()
<< "Skipping the test because we detected x87 floating-point semantics";
#endif
absl::InsecureBitGen gen;
EXPECT_EQ(0, absl::Uniform<uint64_t>(gen, 0, 0));
EXPECT_EQ(1, absl::Uniform<uint64_t>(gen, 1, 0));
EXPECT_EQ(0, absl::Uniform<uint64_t>(absl::IntervalOpenOpen, gen, 0, 0));
EXPECT_EQ(1, absl::Uniform<uint64_t>(absl::IntervalOpenOpen, gen, 1, 0));
constexpr auto m = (std::numeric_limits<uint64_t>::max)();
EXPECT_EQ(m, absl::Uniform(gen, m, m));
EXPECT_EQ(m, absl::Uniform(gen, m, m - 1));
EXPECT_EQ(m - 1, absl::Uniform(gen, m - 1, m));
EXPECT_EQ(m, absl::Uniform(absl::IntervalOpenOpen, gen, m, m));
EXPECT_EQ(m, absl::Uniform(absl::IntervalOpenOpen, gen, m, m - 1));
EXPECT_EQ(m - 1, absl::Uniform(absl::IntervalOpenOpen, gen, m - 1, m));
EXPECT_EQ(0, absl::Uniform<int64_t>(gen, 0, 0));
EXPECT_EQ(1, absl::Uniform<int64_t>(gen, 1, 0));
EXPECT_EQ(0, absl::Uniform<int64_t>(absl::IntervalOpenOpen, gen, 0, 0));
EXPECT_EQ(1, absl::Uniform<int64_t>(absl::IntervalOpenOpen, gen, 1, 0));
constexpr auto l = (std::numeric_limits<int64_t>::min)();
constexpr auto r = (std::numeric_limits<int64_t>::max)();
EXPECT_EQ(l, absl::Uniform(gen, l, l));
EXPECT_EQ(r, absl::Uniform(gen, r, r));
EXPECT_EQ(r, absl::Uniform(gen, r, r - 1));
EXPECT_EQ(r - 1, absl::Uniform(gen, r - 1, r));
EXPECT_EQ(l, absl::Uniform(absl::IntervalOpenOpen, gen, l, l));
EXPECT_EQ(r, absl::Uniform(absl::IntervalOpenOpen, gen, r, r));
EXPECT_EQ(r, absl::Uniform(absl::IntervalOpenOpen, gen, r, r - 1));
EXPECT_EQ(r - 1, absl::Uniform(absl::IntervalOpenOpen, gen, r - 1, r));
const double e = std::nextafter(1.0, 2.0);
const double f = std::nextafter(1.0, 0.0);
const double g = std::numeric_limits<double>::denorm_min();
EXPECT_EQ(1.0, absl::Uniform(gen, 1.0, e));
EXPECT_EQ(1.0, absl::Uniform(gen, 1.0, f));
EXPECT_EQ(0.0, absl::Uniform(gen, 0.0, g));
EXPECT_EQ(e, absl::Uniform(absl::IntervalOpenOpen, gen, 1.0, e));
EXPECT_EQ(f, absl::Uniform(absl::IntervalOpenOpen, gen, 1.0, f));
EXPECT_EQ(g, absl::Uniform(absl::IntervalOpenOpen, gen, 0.0, g));
}
TEST_F(RandomDistributionsTest, UniformReal) {
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Uniform(gen, 0, 1.0);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(0.5, moments.mean, 0.02);
EXPECT_NEAR(1 / 12.0, moments.variance, 0.02);
EXPECT_NEAR(0.0, moments.skewness, 0.02);
EXPECT_NEAR(9 / 5.0, moments.kurtosis, 0.02);
}
TEST_F(RandomDistributionsTest, UniformInt) {
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
const int64_t kMax = 1000000000000ll;
int64_t j = absl::Uniform(absl::IntervalClosedClosed, gen, 0, kMax);
values[i] = static_cast<double>(j) / static_cast<double>(kMax);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(0.5, moments.mean, 0.02);
EXPECT_NEAR(1 / 12.0, moments.variance, 0.02);
EXPECT_NEAR(0.0, moments.skewness, 0.02);
EXPECT_NEAR(9 / 5.0, moments.kurtosis, 0.02);
}
TEST_F(RandomDistributionsTest, Exponential) {
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Exponential<double>(gen);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(1.0, moments.mean, 0.02);
EXPECT_NEAR(1.0, moments.variance, 0.025);
EXPECT_NEAR(2.0, moments.skewness, 0.1);
EXPECT_LT(5.0, moments.kurtosis);
}
TEST_F(RandomDistributionsTest, PoissonDefault) {
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Poisson<int64_t>(gen);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(1.0, moments.mean, 0.02);
EXPECT_NEAR(1.0, moments.variance, 0.02);
EXPECT_NEAR(1.0, moments.skewness, 0.025);
EXPECT_LT(2.0, moments.kurtosis);
}
TEST_F(RandomDistributionsTest, PoissonLarge) {
constexpr double kMean = 100000000.0;
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Poisson<int64_t>(gen, kMean);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(kMean, moments.mean, kMean * 0.015);
EXPECT_NEAR(kMean, moments.variance, kMean * 0.015);
EXPECT_NEAR(std::sqrt(kMean), moments.skewness, kMean * 0.02);
EXPECT_LT(2.0, moments.kurtosis);
}
TEST_F(RandomDistributionsTest, Bernoulli) {
constexpr double kP = 0.5151515151;
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Bernoulli(gen, kP);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(kP, moments.mean, 0.01);
}
TEST_F(RandomDistributionsTest, Beta) {
constexpr double kAlpha = 2.0;
constexpr double kBeta = 3.0;
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Beta(gen, kAlpha, kBeta);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(0.4, moments.mean, 0.01);
}
TEST_F(RandomDistributionsTest, Zipf) {
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Zipf<int64_t>(gen, 100);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(6.5944, moments.mean, 2000) << moments;
}
TEST_F(RandomDistributionsTest, Gaussian) {
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::Gaussian<double>(gen);
}
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(0.0, moments.mean, 0.02);
EXPECT_NEAR(1.0, moments.variance, 0.04);
EXPECT_NEAR(0, moments.skewness, 0.2);
EXPECT_NEAR(3.0, moments.kurtosis, 0.5);
}
TEST_F(RandomDistributionsTest, LogUniform) {
std::vector<double> values(kSize);
absl::InsecureBitGen gen;
for (int i = 0; i < kSize; i++) {
values[i] = absl::LogUniform<int64_t>(gen, 0, (1 << 10) - 1);
}
const double mean = (0 + 1 + 1 + 2 + 3 + 4 + 7 + 8 + 15 + 16 + 31 + 32 + 63 +
64 + 127 + 128 + 255 + 256 + 511 + 512 + 1023) /
(2.0 * 11.0);
const auto moments =
absl::random_internal::ComputeDistributionMoments(values);
EXPECT_NEAR(mean, moments.mean, 2) << moments;
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/distributions.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/distributions_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
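An illustrative sketch of calling the distribution functions declared above from application code; it drives them with absl::BitGen from absl/random/random.h, which is one common URBG choice rather than something mandated by this header, and the numeric arguments are placeholders.
#include <cstdint>
#include <iostream>

#include "absl/random/distributions.h"
#include "absl/random/random.h"

int main() {
  absl::BitGen gen;

  // Default interval is closed-open [lo, hi); the tag overloads change it.
  int bucket = absl::Uniform<int>(gen, 0, 100);
  double unit = absl::Uniform(absl::IntervalClosedClosed, gen, 0.0, 1.0);

  // The other distributions follow the same URBG-first calling convention.
  bool coin = absl::Bernoulli(gen, 0.5);
  int64_t arrivals = absl::Poisson<int64_t>(gen, 3.0);
  double noise = absl::Gaussian<double>(gen, /*mean=*/0.0, /*stddev=*/1.0);

  std::cout << bucket << " " << unit << " " << coin << " " << arrivals << " "
            << noise << "\n";
  return 0;
}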
8048927d-21bf-46dd-b380-1c7f05f9afa0 | cpp | google/quiche | qpack_decoder_stream_receiver | quiche/quic/core/qpack/qpack_decoder_stream_receiver.cc | quiche/quic/core/qpack/qpack_decoder_stream_receiver_test.cc | #include "quiche/quic/core/qpack/qpack_decoder_stream_receiver.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/quic/core/qpack/qpack_instructions.h"
namespace quic {
QpackDecoderStreamReceiver::QpackDecoderStreamReceiver(Delegate* delegate)
: instruction_decoder_(QpackDecoderStreamLanguage(), this),
delegate_(delegate),
error_detected_(false) {
QUICHE_DCHECK(delegate_);
}
void QpackDecoderStreamReceiver::Decode(absl::string_view data) {
if (data.empty() || error_detected_) {
return;
}
instruction_decoder_.Decode(data);
}
bool QpackDecoderStreamReceiver::OnInstructionDecoded(
const QpackInstruction* instruction) {
if (instruction == InsertCountIncrementInstruction()) {
delegate_->OnInsertCountIncrement(instruction_decoder_.varint());
return true;
}
if (instruction == HeaderAcknowledgementInstruction()) {
delegate_->OnHeaderAcknowledgement(instruction_decoder_.varint());
return true;
}
QUICHE_DCHECK_EQ(instruction, StreamCancellationInstruction());
delegate_->OnStreamCancellation(instruction_decoder_.varint());
return true;
}
void QpackDecoderStreamReceiver::OnInstructionDecodingError(
QpackInstructionDecoder::ErrorCode error_code,
absl::string_view error_message) {
QUICHE_DCHECK(!error_detected_);
error_detected_ = true;
QuicErrorCode quic_error_code =
(error_code == QpackInstructionDecoder::ErrorCode::INTEGER_TOO_LARGE)
? QUIC_QPACK_DECODER_STREAM_INTEGER_TOO_LARGE
: QUIC_INTERNAL_ERROR;
delegate_->OnErrorDetected(quic_error_code, error_message);
}
} | #include "quiche/quic/core/qpack/qpack_decoder_stream_receiver.h"
#include <string>
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_test.h"
using testing::Eq;
using testing::StrictMock;
namespace quic {
namespace test {
namespace {
class MockDelegate : public QpackDecoderStreamReceiver::Delegate {
public:
~MockDelegate() override = default;
MOCK_METHOD(void, OnInsertCountIncrement, (uint64_t increment), (override));
MOCK_METHOD(void, OnHeaderAcknowledgement, (QuicStreamId stream_id),
(override));
MOCK_METHOD(void, OnStreamCancellation, (QuicStreamId stream_id), (override));
MOCK_METHOD(void, OnErrorDetected,
(QuicErrorCode error_code, absl::string_view error_message),
(override));
};
class QpackDecoderStreamReceiverTest : public QuicTest {
protected:
QpackDecoderStreamReceiverTest() : stream_(&delegate_) {}
~QpackDecoderStreamReceiverTest() override = default;
QpackDecoderStreamReceiver stream_;
StrictMock<MockDelegate> delegate_;
};
TEST_F(QpackDecoderStreamReceiverTest, InsertCountIncrement) {
std::string encoded_data;
EXPECT_CALL(delegate_, OnInsertCountIncrement(0));
ASSERT_TRUE(absl::HexStringToBytes("00", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnInsertCountIncrement(10));
ASSERT_TRUE(absl::HexStringToBytes("0a", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnInsertCountIncrement(63));
ASSERT_TRUE(absl::HexStringToBytes("3f00", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnInsertCountIncrement(200));
ASSERT_TRUE(absl::HexStringToBytes("3f8901", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_,
OnErrorDetected(QUIC_QPACK_DECODER_STREAM_INTEGER_TOO_LARGE,
Eq("Encoded integer too large.")));
ASSERT_TRUE(absl::HexStringToBytes("3fffffffffffffffffffff", &encoded_data));
stream_.Decode(encoded_data);
}
TEST_F(QpackDecoderStreamReceiverTest, HeaderAcknowledgement) {
std::string encoded_data;
EXPECT_CALL(delegate_, OnHeaderAcknowledgement(0));
ASSERT_TRUE(absl::HexStringToBytes("80", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnHeaderAcknowledgement(37));
ASSERT_TRUE(absl::HexStringToBytes("a5", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnHeaderAcknowledgement(127));
ASSERT_TRUE(absl::HexStringToBytes("ff00", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnHeaderAcknowledgement(503));
ASSERT_TRUE(absl::HexStringToBytes("fff802", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_,
OnErrorDetected(QUIC_QPACK_DECODER_STREAM_INTEGER_TOO_LARGE,
Eq("Encoded integer too large.")));
ASSERT_TRUE(absl::HexStringToBytes("ffffffffffffffffffffff", &encoded_data));
stream_.Decode(encoded_data);
}
TEST_F(QpackDecoderStreamReceiverTest, StreamCancellation) {
std::string encoded_data;
EXPECT_CALL(delegate_, OnStreamCancellation(0));
ASSERT_TRUE(absl::HexStringToBytes("40", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnStreamCancellation(19));
ASSERT_TRUE(absl::HexStringToBytes("53", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnStreamCancellation(63));
ASSERT_TRUE(absl::HexStringToBytes("7f00", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_, OnStreamCancellation(110));
ASSERT_TRUE(absl::HexStringToBytes("7f2f", &encoded_data));
stream_.Decode(encoded_data);
EXPECT_CALL(delegate_,
OnErrorDetected(QUIC_QPACK_DECODER_STREAM_INTEGER_TOO_LARGE,
Eq("Encoded integer too large.")));
ASSERT_TRUE(absl::HexStringToBytes("7fffffffffffffffffffff", &encoded_data));
stream_.Decode(encoded_data);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_decoder_stream_receiver.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/qpack/qpack_decoder_stream_receiver_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
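An illustrative sketch of wiring a concrete delegate to QpackDecoderStreamReceiver; LoggingDelegate is a made-up delegate whose methods mirror the mock in the test above, and the single byte fed to Decode() is the "insert count increment 10" encoding used there.
#include <cstdint>
#include <iostream>

#include "absl/strings/string_view.h"
#include "quiche/quic/core/qpack/qpack_decoder_stream_receiver.h"

namespace {

// Toy delegate that just logs each decoded decoder-stream instruction.
class LoggingDelegate : public quic::QpackDecoderStreamReceiver::Delegate {
 public:
  void OnInsertCountIncrement(uint64_t increment) override {
    std::cout << "insert count increment: " << increment << "\n";
  }
  void OnHeaderAcknowledgement(quic::QuicStreamId stream_id) override {
    std::cout << "header ack for stream " << stream_id << "\n";
  }
  void OnStreamCancellation(quic::QuicStreamId stream_id) override {
    std::cout << "cancellation for stream " << stream_id << "\n";
  }
  void OnErrorDetected(quic::QuicErrorCode /*error_code*/,
                       absl::string_view error_message) override {
    std::cout << "error: " << error_message << "\n";
  }
};

}  // namespace

int main() {
  LoggingDelegate delegate;
  quic::QpackDecoderStreamReceiver receiver(&delegate);

  // 0x0a encodes "Insert Count Increment 10", as in the unit test above.
  receiver.Decode("\x0a");
  return 0;
}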
106e29d3-9c69-48ae-b424-79984c7aed25 | cpp | tensorflow/tensorflow | basic_batch_scheduler | tensorflow/core/kernels/batching_util/basic_batch_scheduler.h | tensorflow/core/kernels/batching_util/basic_batch_scheduler_test.cc | #ifndef TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BASIC_BATCH_SCHEDULER_H_
#define TENSORFLOW_CORE_KERNELS_BATCHING_UTIL_BASIC_BATCH_SCHEDULER_H_
#include <stddef.h>
#include <cstddef>
#include <functional>
#include <memory>
#include <string>
#include "tensorflow/core/kernels/batching_util/shared_batch_scheduler.h"
namespace tensorflow {
namespace serving {
template <typename TaskType>
class BasicBatchScheduler : public BatchScheduler<TaskType> {
public:
struct Options {
string thread_pool_name = {"batch_threads"};
int num_batch_threads = port::MaxParallelism();
std::shared_ptr<SharedBatchScheduler<TaskType>> shared_batch_scheduler =
nullptr;
int max_batch_size = 1000;
int64_t batch_timeout_micros = 0;
int max_enqueued_batches = 10;
bool enable_large_batch_splitting = false;
std::function<Status(std::unique_ptr<TaskType>* input_task,
int first_output_task_size, int input_batch_size_limit,
std::vector<std::unique_ptr<TaskType>>* output_tasks)>
split_input_task_func;
int max_execution_batch_size = 10;
Env* env = Env::Default();
};
static Status Create(const Options& options,
std::function<void(std::unique_ptr<Batch<TaskType>>)>
process_batch_callback,
std::unique_ptr<BasicBatchScheduler>* scheduler);
~BasicBatchScheduler() override = default;
Status Schedule(std::unique_ptr<TaskType>* task) override;
size_t NumEnqueuedTasks() const override;
size_t SchedulingCapacity() const override;
size_t max_task_size() const override {
return shared_scheduler_queue_->max_task_size();
}
private:
explicit BasicBatchScheduler(
std::unique_ptr<BatchScheduler<TaskType>> shared_scheduler_queue);
std::unique_ptr<BatchScheduler<TaskType>> shared_scheduler_queue_;
BasicBatchScheduler(const BasicBatchScheduler&) = delete;
void operator=(const BasicBatchScheduler&) = delete;
};
template <typename TaskType>
Status BasicBatchScheduler<TaskType>::Create(
const Options& options,
std::function<void(std::unique_ptr<Batch<TaskType>>)>
process_batch_callback,
std::unique_ptr<BasicBatchScheduler>* scheduler) {
std::shared_ptr<SharedBatchScheduler<TaskType>> shared_scheduler;
if (options.shared_batch_scheduler == nullptr) {
typename SharedBatchScheduler<TaskType>::Options shared_scheduler_options;
shared_scheduler_options.thread_pool_name = options.thread_pool_name;
shared_scheduler_options.num_batch_threads = options.num_batch_threads;
shared_scheduler_options.env = options.env;
TF_RETURN_IF_ERROR(SharedBatchScheduler<TaskType>::Create(
shared_scheduler_options, &shared_scheduler));
} else {
shared_scheduler = options.shared_batch_scheduler;
}
typename SharedBatchScheduler<TaskType>::QueueOptions
shared_scheduler_queue_options;
shared_scheduler_queue_options.input_batch_size_limit =
options.max_batch_size;
shared_scheduler_queue_options.batch_timeout_micros =
options.batch_timeout_micros;
shared_scheduler_queue_options.max_enqueued_batches =
options.max_enqueued_batches;
shared_scheduler_queue_options.enable_large_batch_splitting =
options.enable_large_batch_splitting;
shared_scheduler_queue_options.split_input_task_func =
options.split_input_task_func;
shared_scheduler_queue_options.max_execution_batch_size =
options.max_execution_batch_size;
std::unique_ptr<BatchScheduler<TaskType>> shared_scheduler_queue;
TF_RETURN_IF_ERROR(shared_scheduler->AddQueue(shared_scheduler_queue_options,
process_batch_callback,
&shared_scheduler_queue));
scheduler->reset(
new BasicBatchScheduler<TaskType>(std::move(shared_scheduler_queue)));
return absl::OkStatus();
}
template <typename TaskType>
Status BasicBatchScheduler<TaskType>::Schedule(
std::unique_ptr<TaskType>* task) {
return shared_scheduler_queue_->Schedule(task);
}
template <typename TaskType>
size_t BasicBatchScheduler<TaskType>::NumEnqueuedTasks() const {
return shared_scheduler_queue_->NumEnqueuedTasks();
}
template <typename TaskType>
size_t BasicBatchScheduler<TaskType>::SchedulingCapacity() const {
return shared_scheduler_queue_->SchedulingCapacity();
}
template <typename TaskType>
BasicBatchScheduler<TaskType>::BasicBatchScheduler(
std::unique_ptr<BatchScheduler<TaskType>> shared_scheduler_queue)
: shared_scheduler_queue_(std::move(shared_scheduler_queue)) {}
}
}
#endif | #include "tensorflow/core/kernels/batching_util/basic_batch_scheduler.h"
#include <utility>
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace serving {
namespace {
class FakeTask : public BatchTask {
public:
explicit FakeTask(size_t size) : size_(size) {}
~FakeTask() override = default;
size_t size() const override { return size_; }
private:
const size_t size_;
FakeTask(const FakeTask&) = delete;
void operator=(const FakeTask&) = delete;
};
Status ScheduleTask(size_t task_size, BatchScheduler<FakeTask>* scheduler) {
std::unique_ptr<FakeTask> task(new FakeTask(task_size));
Status status = scheduler->Schedule(&task);
CHECK_EQ(status.ok(), task == nullptr);
return status;
}
TEST(BasicBatchSchedulerTest, Basic) {
bool callback_called = false;
auto callback = [&callback_called](std::unique_ptr<Batch<FakeTask>> batch) {
callback_called = true;
ASSERT_TRUE(batch->IsClosed());
ASSERT_EQ(2, batch->num_tasks());
EXPECT_EQ(3, batch->task(0).size());
EXPECT_EQ(5, batch->task(1).size());
};
{
BasicBatchScheduler<FakeTask>::Options options;
options.max_batch_size = 10;
options.batch_timeout_micros = 100 * 1000;
options.num_batch_threads = 1;
options.max_enqueued_batches = 3;
std::unique_ptr<BasicBatchScheduler<FakeTask>> scheduler;
TF_ASSERT_OK(
BasicBatchScheduler<FakeTask>::Create(options, callback, &scheduler));
EXPECT_EQ(10, scheduler->max_task_size());
EXPECT_EQ(0, scheduler->NumEnqueuedTasks());
EXPECT_EQ(3 * 10, scheduler->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(3, scheduler.get()));
EXPECT_EQ(1, scheduler->NumEnqueuedTasks());
EXPECT_EQ((3 * 10) - 3, scheduler->SchedulingCapacity());
TF_ASSERT_OK(ScheduleTask(5, scheduler.get()));
EXPECT_EQ(2, scheduler->NumEnqueuedTasks());
EXPECT_EQ((3 * 10) - (3 + 5), scheduler->SchedulingCapacity());
}
EXPECT_TRUE(callback_called);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/basic_batch_scheduler.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/batching_util/basic_batch_scheduler_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
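An illustrative caller sketch for BasicBatchScheduler modeled on the test above; EchoTask is a hypothetical task type standing in for whatever BatchTask subclass a real user would supply, and the option values are placeholders.
#include <memory>

#include "tensorflow/core/kernels/batching_util/basic_batch_scheduler.h"
#include "tensorflow/core/kernels/batching_util/batch_scheduler.h"

namespace {

// Hypothetical task type; a BatchTask only has to report its size.
class EchoTask : public tensorflow::serving::BatchTask {
 public:
  explicit EchoTask(size_t size) : size_(size) {}
  size_t size() const override { return size_; }

 private:
  const size_t size_;
};

}  // namespace

int main() {
  using Scheduler = tensorflow::serving::BasicBatchScheduler<EchoTask>;

  Scheduler::Options options;
  options.max_batch_size = 32;
  options.batch_timeout_micros = 10 * 1000;  // flush partial batches after 10ms
  options.num_batch_threads = 2;

  std::unique_ptr<Scheduler> scheduler;
  auto status = Scheduler::Create(
      options,
      [](std::unique_ptr<tensorflow::serving::Batch<EchoTask>> batch) {
        // Process the closed batch here; this runs on a batch thread.
      },
      &scheduler);
  if (!status.ok()) return 1;

  auto task = std::make_unique<EchoTask>(/*size=*/4);
  status = scheduler->Schedule(&task);  // on success, ownership transfers
  return status.ok() ? 0 : 1;
}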
d372ca5e-0ee3-4c33-b46f-25c5dac29903 | cpp | tensorflow/tensorflow | rocm_kernel | third_party/xla/xla/stream_executor/rocm/rocm_kernel.cc | third_party/xla/xla/stream_executor/rocm/rocm_kernel_test.cc | #include "xla/stream_executor/rocm/rocm_kernel.h"
#include <cstddef>
#include <cstdint>
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/gpu/gpu_driver.h"
#include "xla/stream_executor/launch_dim.h"
namespace stream_executor {
namespace gpu {
absl::StatusOr<int32_t> RocmKernel::GetMaxOccupiedBlocksPerCore(
ThreadDim threads, size_t dynamic_shared_memory_bytes) const {
int32_t threads_per_block = threads.x * threads.y * threads.z;
VLOG(0) << "Get kernel block occupancy: " << name()
<< "; threads_per_block: " << threads_per_block
<< "; dynamic_shared_memory_bytes: " << dynamic_shared_memory_bytes;
return GpuDriver::GetMaxOccupiedBlocksPerCore(
gpu_executor_->gpu_context(), rocm_function_, threads_per_block,
dynamic_shared_memory_bytes);
}
}
} | #include "xla/stream_executor/rocm/rocm_kernel.h"
#include <gtest/gtest.h>
#include "rocm/include/hip/hip_runtime.h"
#include "xla/stream_executor/gpu/gpu_executor.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/rocm/rocm_runtime.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace stream_executor::gpu {
namespace {
using testing::Ge;
using tsl::testing::IsOkAndHolds;
TEST(RocmKernelTest, GetMaxOccupiedBlocksPerCore) {
TF_ASSERT_OK_AND_ASSIGN(Platform * platform,
PlatformManager::PlatformWithName("ROCM"));
TF_ASSERT_OK_AND_ASSIGN(StreamExecutor * executor,
platform->ExecutorForDevice(0));
GpuExecutor* gpu_executor = ExtractGpuExecutor(executor);
RocmKernel rocm_kernel(gpu_executor);
rocm_kernel.set_arity(3);
TF_ASSERT_OK_AND_ASSIGN(
hipFunction_t function,
RocmRuntime::GetFuncBySymbol(internal::GetAddI32Kernel()));
rocm_kernel.set_gpu_function(function);
EXPECT_EQ(rocm_kernel.Arity(), 3);
EXPECT_EQ(rocm_kernel.gpu_function(), function);
EXPECT_THAT(rocm_kernel.GetMaxOccupiedBlocksPerCore(
ThreadDim(1, 1, 1), 0),
IsOkAndHolds(Ge(1)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0e3d9076-7c79-4ee2-b5df-2a986817bca7 | cpp | tensorflow/tensorflow | disable_prefetch_legacy_autotune | tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.cc | tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune_test.cc | #include "tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kLegacyAutotune[] = "legacy_autotune";
constexpr char kPrefetchDataset[] = "PrefetchDataset";
}
Status DisablePrefetchLegacyAutotune::OptimizeAndCollectStats(
Cluster* cluster, const GrapplerItem& item, GraphDef* output,
OptimizationStats* stats) {
*output = item.graph;
if (!autotune_) {
VLOG(1) << "The optimization disable_prefetch_legacy_autotune is not "
"applied if autotune is off.";
return absl::OkStatus();
}
MutableGraphView graph(output);
for (NodeDef& node : *output->mutable_node()) {
if (node.op() == kPrefetchDataset) {
if (node.attr().find(kLegacyAutotune) == node.attr().end() ||
node.attr().at(kLegacyAutotune).b()) {
(*node.mutable_attr())[kLegacyAutotune].set_b(false);
stats->num_changes++;
}
}
}
return absl::OkStatus();
}
REGISTER_GRAPH_OPTIMIZER_AS(DisablePrefetchLegacyAutotune,
"disable_prefetch_legacy_autotune");
}
} | #include "tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
Status OptimizeWithDisablePrefetchLegacyAutotune(const GrapplerItem &item,
GraphDef *output,
bool autotune) {
DisablePrefetchLegacyAutotune optimizer;
RewriterConfig_CustomGraphOptimizer config;
if (autotune) {
(*config.mutable_parameter_map())["autotune"].set_s("true");
} else {
(*config.mutable_parameter_map())["autotune"].set_s("false");
}
TF_RETURN_IF_ERROR(optimizer.Init(&config));
return optimizer.Optimize(nullptr, item, output);
}
class RewriteTest : public ::testing::TestWithParam<bool> {};
TEST_P(RewriteTest, DisablePrefetchLegacyAutotune) {
const bool autotune = GetParam();
GrapplerItem item;
item.graph = test::function::GDef({
NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("prefetch1", "PrefetchDataset", {"range"},
{{"legacy_autotune", true}}),
NDef("prefetch2", "PrefetchDataset", {"prefetch1"},
{{"legacy_autotune", false}}),
NDef("prefetch3", "PrefetchDataset", {"prefetch2"}, {}),
});
GraphDef output;
TF_ASSERT_OK(
OptimizeWithDisablePrefetchLegacyAutotune(item, &output, autotune));
NodeDef prefetch_node1 =
output.node(graph_utils::FindGraphNodeWithName("prefetch1", output));
EXPECT_EQ(prefetch_node1.attr().at("legacy_autotune").b(), !autotune);
NodeDef prefetch_node2 =
output.node(graph_utils::FindGraphNodeWithName("prefetch2", output));
EXPECT_FALSE(prefetch_node2.attr().at("legacy_autotune").b());
NodeDef prefetch_node3 =
output.node(graph_utils::FindGraphNodeWithName("prefetch3", output));
if (autotune) {
EXPECT_FALSE(prefetch_node3.attr().at("legacy_autotune").b());
} else {
EXPECT_TRUE(prefetch_node3.attr().find("legacy_autotune") ==
prefetch_node3.attr().end());
}
}
INSTANTIATE_TEST_SUITE_P(Test, RewriteTest, ::testing::Values(false, true));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/disable_prefetch_legacy_autotune_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7aa386ba-d412-45b1-a72a-31fec0ee84bf | cpp | google/quiche | quic_packet_number | quiche/quic/core/quic_packet_number.cc | quiche/quic/core/quic_packet_number_test.cc | #include "quiche/quic/core/quic_packet_number.h"
#include <algorithm>
#include <limits>
#include <ostream>
#include <string>
#include "absl/strings/str_cat.h"
namespace quic {
void QuicPacketNumber::Clear() { packet_number_ = UninitializedPacketNumber(); }
void QuicPacketNumber::UpdateMax(QuicPacketNumber new_value) {
if (!new_value.IsInitialized()) {
return;
}
if (!IsInitialized()) {
packet_number_ = new_value.ToUint64();
} else {
packet_number_ = std::max(packet_number_, new_value.ToUint64());
}
}
uint64_t QuicPacketNumber::Hash() const {
QUICHE_DCHECK(IsInitialized());
return packet_number_;
}
uint64_t QuicPacketNumber::ToUint64() const {
QUICHE_DCHECK(IsInitialized());
return packet_number_;
}
bool QuicPacketNumber::IsInitialized() const {
return packet_number_ != UninitializedPacketNumber();
}
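// The bounds checks in the arithmetic operators below live inside
// #ifndef NDEBUG blocks, so they are compiled out of release builds.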
QuicPacketNumber& QuicPacketNumber::operator++() {
#ifndef NDEBUG
QUICHE_DCHECK(IsInitialized());
QUICHE_DCHECK_LT(ToUint64(), std::numeric_limits<uint64_t>::max() - 1);
#endif
packet_number_++;
return *this;
}
QuicPacketNumber QuicPacketNumber::operator++(int) {
#ifndef NDEBUG
QUICHE_DCHECK(IsInitialized());
QUICHE_DCHECK_LT(ToUint64(), std::numeric_limits<uint64_t>::max() - 1);
#endif
QuicPacketNumber previous(*this);
packet_number_++;
return previous;
}
QuicPacketNumber& QuicPacketNumber::operator--() {
#ifndef NDEBUG
QUICHE_DCHECK(IsInitialized());
QUICHE_DCHECK_GE(ToUint64(), 1UL);
#endif
packet_number_--;
return *this;
}
QuicPacketNumber QuicPacketNumber::operator--(int) {
#ifndef NDEBUG
QUICHE_DCHECK(IsInitialized());
QUICHE_DCHECK_GE(ToUint64(), 1UL);
#endif
QuicPacketNumber previous(*this);
packet_number_--;
return previous;
}
QuicPacketNumber& QuicPacketNumber::operator+=(uint64_t delta) {
#ifndef NDEBUG
QUICHE_DCHECK(IsInitialized());
QUICHE_DCHECK_GT(std::numeric_limits<uint64_t>::max() - ToUint64(), delta);
#endif
packet_number_ += delta;
return *this;
}
QuicPacketNumber& QuicPacketNumber::operator-=(uint64_t delta) {
#ifndef NDEBUG
QUICHE_DCHECK(IsInitialized());
QUICHE_DCHECK_GE(ToUint64(), delta);
#endif
packet_number_ -= delta;
return *this;
}
std::string QuicPacketNumber::ToString() const {
if (!IsInitialized()) {
return "uninitialized";
}
return absl::StrCat(ToUint64());
}
std::ostream& operator<<(std::ostream& os, const QuicPacketNumber& p) {
os << p.ToString();
return os;
}
} | #include "quiche/quic/core/quic_packet_number.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
TEST(QuicPacketNumberTest, BasicTest) {
QuicPacketNumber num;
EXPECT_FALSE(num.IsInitialized());
QuicPacketNumber num2(10);
EXPECT_TRUE(num2.IsInitialized());
EXPECT_EQ(10u, num2.ToUint64());
EXPECT_EQ(10u, num2.Hash());
num2.UpdateMax(num);
EXPECT_EQ(10u, num2.ToUint64());
num2.UpdateMax(QuicPacketNumber(9));
EXPECT_EQ(10u, num2.ToUint64());
num2.UpdateMax(QuicPacketNumber(11));
EXPECT_EQ(11u, num2.ToUint64());
num2.Clear();
EXPECT_FALSE(num2.IsInitialized());
num2.UpdateMax(QuicPacketNumber(9));
EXPECT_EQ(9u, num2.ToUint64());
QuicPacketNumber num4(0);
EXPECT_TRUE(num4.IsInitialized());
EXPECT_EQ(0u, num4.ToUint64());
EXPECT_EQ(0u, num4.Hash());
num4.Clear();
EXPECT_FALSE(num4.IsInitialized());
}
TEST(QuicPacketNumberTest, Operators) {
QuicPacketNumber num(100);
EXPECT_EQ(QuicPacketNumber(100), num++);
EXPECT_EQ(QuicPacketNumber(101), num);
EXPECT_EQ(QuicPacketNumber(101), num--);
EXPECT_EQ(QuicPacketNumber(100), num);
EXPECT_EQ(QuicPacketNumber(101), ++num);
EXPECT_EQ(QuicPacketNumber(100), --num);
QuicPacketNumber num3(0);
EXPECT_EQ(QuicPacketNumber(0), num3++);
EXPECT_EQ(QuicPacketNumber(1), num3);
EXPECT_EQ(QuicPacketNumber(2), ++num3);
EXPECT_EQ(QuicPacketNumber(2), num3--);
EXPECT_EQ(QuicPacketNumber(1), num3);
EXPECT_EQ(QuicPacketNumber(0), --num3);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_packet_number.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_packet_number_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
4e6fafa5-5d2d-4a50-9c2d-5cb980bc6d2e | cpp | tensorflow/tensorflow | runtime | tensorflow/lite/delegates/gpu/gl/runtime.cc | tensorflow/core/tfrt/runtime/runtime_test.cc | #include "tensorflow/lite/delegates/gpu/gl/runtime.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management.h"
#include "tensorflow/lite/delegates/gpu/common/memory_management/types.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/types.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_call.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_errors.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
#include "tensorflow/lite/delegates/gpu/gl/gl_texture.h"
#include "tensorflow/lite/delegates/gpu/gl/object.h"
#include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h"
#include "tensorflow/lite/delegates/gpu/gl/variable.h"
namespace tflite {
namespace gpu {
namespace gl {
namespace {
struct TextureF16Maker {
absl::Status operator()(const uint3& size) const {
return CreateReadOnlyImageTextureF16(size, data, gl_texture);
}
absl::Status operator()(const uint2& size) const {
return CreateReadOnlyImageTextureF16(size, data, gl_texture);
}
absl::Status operator()(const size_t& size) const {
return CreateReadOnlyImageTextureF16(uint2(static_cast<uint32_t>(size), 1U),
data, gl_texture);
}
absl::Span<const uint16_t> data;
GlTexture* gl_texture;
};
struct TextureF32Maker {
absl::Status operator()(const uint3& size) const {
return CreateReadOnlyImageTexture(size, data, gl_texture);
}
absl::Status operator()(const uint2& size) const {
return CreateReadOnlyImageTexture(size, data, gl_texture);
}
absl::Status operator()(const size_t& size) const {
return CreateReadOnlyImageTexture(uint2(static_cast<uint32_t>(size), 1U),
data, gl_texture);
}
absl::Span<const float> data;
GlTexture* gl_texture;
};
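// Builds a read-only texture from constant object data, dispatching on the
// object's size variant; only FLOAT16 and FLOAT32 element types are accepted.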
absl::Status MakeGlTexture(const Object& object, const ObjectData& data,
GlTexture* gl_texture) {
if (object.access == AccessType::READ_WRITE ||
object.access == AccessType::WRITE) {
return absl::InvalidArgumentError("Read-write textures are not supported");
}
if (object.data_type != DataType::FLOAT16 &&
object.data_type != DataType::FLOAT32) {
return absl::InvalidArgumentError(
"Textures support float16 or float32 only.");
}
switch (object.data_type) {
case DataType::FLOAT16: {
if (data.size() % 2 != 0) {
return absl::InvalidArgumentError("Texture size is not aligned");
}
return std::visit(
TextureF16Maker{
.data = absl::MakeConstSpan(
reinterpret_cast<const uint16_t*>(data.data()),
data.size() / 2),
.gl_texture = gl_texture,
},
object.size);
}
case DataType::FLOAT32: {
if (data.size() % sizeof(float) != 0) {
return absl::InvalidArgumentError("Texture size is not aligned");
}
return std::visit(
TextureF32Maker{
.data = absl::MakeConstSpan(
reinterpret_cast<const float*>(data.data()),
data.size() / sizeof(float)),
.gl_texture = gl_texture,
},
object.size);
}
default:
return absl::InvalidArgumentError("Unsupported textures data type.");
}
}
struct TextureRefMaker {
absl::Status operator()(const uint3& size) const {
return CreateReadWriteRgbaImageTexture(type, size, gl_texture);
}
absl::Status operator()(const uint2& size) const {
return CreateReadWriteRgbaImageTexture(type, size, gl_texture);
}
absl::Status operator()(const size_t& size) const {
return CreateReadWriteRgbaImageTexture(
type, uint2(static_cast<uint32_t>(size), 1U), gl_texture);
}
DataType type;
GlTexture* gl_texture;
};
absl::Status MakeGlTextureRef(const Object& object, GlTexture* gl_texture) {
return std::visit(TextureRefMaker{object.data_type, gl_texture}, object.size);
}
absl::Status MakeGlBuffer(const Object& object, const ObjectData& data,
GlBuffer* gl_buffer) {
if (data.size() % SizeOf(object.data_type) != 0) {
return absl::InvalidArgumentError("Buffer size is not aligned");
}
return CreateReadOnlyShaderStorageBuffer(absl::MakeConstSpan(data),
gl_buffer);
}
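// Returns a callable that binds the buffer or texture registered under `id`
// to the binding point recorded in `object`; existence (and, for buffers,
// minimum byte size) is validated up front.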
absl::Status MakeBindingFunc(const Object& object, uint32_t id,
const ObjectManager* objects,
std::function<absl::Status()>* binding_func) {
const uint32_t binding = object.binding;
switch (object.object_type) {
case ObjectType::BUFFER: {
auto ptr = objects->FindBuffer(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Buffer ", id, " is not found"));
}
size_t size_in_bytes = ByteSizeOf(object);
if (ptr->bytes_size() < size_in_bytes) {
return absl::FailedPreconditionError(
absl::StrCat("Buffer ", id, " size in bytes ", ptr->bytes_size(),
" < requested size_in_bytes ", size_in_bytes));
}
*binding_func = [=]() { return ptr->BindToIndex(binding); };
break;
}
case ObjectType::TEXTURE: {
auto ptr = objects->FindTexture(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Texture ", id, " is not found"));
}
*binding_func = [=]() { return ptr->BindAsReadWriteImage(binding); };
break;
}
case ObjectType::UNKNOWN:
return absl::InvalidArgumentError("Unknown object type");
}
return absl::OkStatus();
}
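// Like MakeBindingFunc, but the returned callable re-resolves the object by
// id and re-validates it on every invocation, so an externally owned buffer
// or texture may be replaced between executions.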
absl::Status MakeLateBindingFunc(const Object& object, uint32_t id,
const ObjectManager* objects,
std::function<absl::Status()>* binding_func) {
const uint32_t binding = object.binding;
switch (object.object_type) {
case ObjectType::BUFFER: {
auto ptr = objects->FindBuffer(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Buffer ", id, " is not found"));
}
*binding_func = [=]() {
auto ptr = objects->FindBuffer(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Buffer ", id, " is not found"));
}
if (!ptr->is_valid()) {
return absl::InvalidArgumentError("Buffer is not initialized.");
}
size_t size_in_bytes = ByteSizeOf(object);
if (ptr->bytes_size() < size_in_bytes) {
return absl::FailedPreconditionError(
absl::StrCat("Buffer ", id, " size in bytes ", ptr->bytes_size(),
" < requested size_in_bytes ", size_in_bytes));
}
return ptr->BindToIndex(binding);
};
break;
}
case ObjectType::TEXTURE: {
auto ptr = objects->FindTexture(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Texture ", id, " is not found"));
}
*binding_func = [=]() {
auto ptr = objects->FindTexture(id);
if (!ptr) {
return absl::NotFoundError(
absl::StrCat("Texture ", id, " is not found"));
}
if (!ptr->is_valid()) {
return absl::InvalidArgumentError("Texture is not initialized.");
}
return ptr->BindAsReadWriteImage(binding);
};
break;
}
case ObjectType::UNKNOWN:
return absl::InvalidArgumentError("Unknown object type");
}
return absl::OkStatus();
}
}
Runtime::Runtime(const RuntimeOptions& options, const GpuInfo& gpu_info,
CommandQueue* command_queue,
const ObjectManager* external_objects)
: options_(options),
gpu_info_(gpu_info),
external_objects_(external_objects),
command_queue_(command_queue) {
programs_.reserve(256);
if (options_.bundle_readonly_objects) {
shared_readonly_buffer_ = std::make_unique<SharedBufferData>();
}
}
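// Compiles the shader, applies the uniform parameters, and records binding
// functions; references to objects not yet present among the external
// objects are kept in `refs` and resolved later in PrepareForExecution.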
absl::Status Runtime::AddProgram(const GlShader& shader,
const std::vector<Variable>& parameters,
const std::vector<Object>& objects,
const uint3& num_workgroups) {
GlProgram program;
RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program));
for (auto& parameter : parameters) {
RETURN_IF_ERROR(program.SetParameter(parameter));
}
programs_.emplace_back(
CompiledProgramDescriptor{std::move(program), num_workgroups, {}});
for (auto& object : objects) {
auto& program = programs_.back();
BindFunc binding_func;
if (IsRef(object)) {
absl::Status status = MakeLateBindingFunc(
object, GetRef(object), external_objects_, &binding_func);
if (!status.ok()) {
if (absl::IsNotFound(status)) {
program.refs.push_back(object);
continue;
}
return status;
}
} else {
uint32_t id;
RETURN_IF_ERROR(AllocateConstObject(object, &id));
RETURN_IF_ERROR(
MakeBindingFunc(object, id, &const_objects_, &binding_func));
}
program.bindings.push_back(std::move(binding_func));
}
return absl::OkStatus();
}
absl::Status Runtime::AllocateInternalObject(const Object& object) {
const ObjectRef ref = GetRef(object);
switch (object.object_type) {
case ObjectType::BUFFER: {
GlBuffer gl_buffer;
RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<uint8_t>(
ByteSizeOf(object), &gl_buffer));
RETURN_IF_ERROR(
internal_objects_.RegisterBuffer(ref, std::move(gl_buffer)));
break;
}
case ObjectType::TEXTURE: {
GlTexture gl_texture;
RETURN_IF_ERROR(MakeGlTextureRef(object, &gl_texture));
RETURN_IF_ERROR(
internal_objects_.RegisterTexture(ref, std::move(gl_texture)));
break;
}
default:
return absl::InternalError("Unexpected internal object type");
}
return absl::OkStatus();
}
absl::Status Runtime::AllocateConstObject(const Object& object, uint32_t* id) {
const ObjectData* data = GetData(object);
if (data == nullptr) {
return absl::InternalError(
"Unable to allocate reference as a const object");
}
*id = next_const_id_++;
switch (object.object_type) {
case ObjectType::BUFFER: {
GlBuffer gl_buffer;
if (!shared_readonly_buffer_ ||
!shared_readonly_buffer_->Add(*data, &gl_buffer)) {
RETURN_IF_ERROR(MakeGlBuffer(object, *data, &gl_buffer));
}
RETURN_IF_ERROR(const_objects_.RegisterBuffer(*id, std::move(gl_buffer)));
break;
}
case ObjectType::TEXTURE: {
GlTexture gl_texture;
RETURN_IF_ERROR(MakeGlTexture(object, *data, &gl_texture));
RETURN_IF_ERROR(
const_objects_.RegisterTexture(*id, std::move(gl_texture)));
break;
}
case ObjectType::UNKNOWN:
return absl::InternalError("Unknown object type");
}
return absl::OkStatus();
}
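// Finalizes the shared read-only buffer (if constant data was bundled),
// optionally assigns reusable internal objects, and converts every remaining
// object reference into a binding function, allocating internal objects on
// demand.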
absl::Status Runtime::PrepareForExecution() {
if (shared_readonly_buffer_ && !shared_readonly_buffer_->empty()) {
GlBuffer shared_buffer;
RETURN_IF_ERROR(
shared_readonly_buffer_->CreateSharedGlBuffer(&shared_buffer));
shared_readonly_buffer_.reset(nullptr);
RETURN_IF_ERROR(const_objects_.RegisterBuffer(next_const_id_++,
std::move(shared_buffer)));
}
if (options_.reuse_internal_objects) {
std::vector<Object> shared_objects;
RETURN_IF_ERROR(AssignInternalObjects(&shared_objects));
for (const Object& object : shared_objects) {
RETURN_IF_ERROR(AllocateInternalObject(object));
}
}
for (auto& program : programs_) {
for (auto& object : program.refs) {
BindFunc binding;
ObjectRef ref = GetRef(object);
absl::Status status =
MakeBindingFunc(object, ref, &internal_objects_, &binding);
if (!status.ok()) {
if (absl::IsNotFound(status)) {
RETURN_IF_ERROR(AllocateInternalObject(object));
RETURN_IF_ERROR(
MakeBindingFunc(object, ref, &internal_objects_, &binding));
} else {
return status;
}
}
program.bindings.push_back(std::move(binding));
}
program.refs.clear();
}
return absl::OkStatus();
}
namespace {
const size_t kNotAssigned = std::numeric_limits<size_t>::max();
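// Usage records for internal objects, grouped by storage kind; usage_refs
// maps a global object ref to the index of its record in the matching vector
// (kNotAssigned if the ref has not been seen yet).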
struct CombinedUsageRecords {
std::vector<TensorUsageRecord<size_t>> buffers;
std::vector<TensorUsageRecord<size_t>> textures_1d;
std::vector<TensorUsageRecord<uint2>> textures_2d;
std::vector<TensorUsageRecord<uint3>> textures_3d;
std::vector<size_t> usage_refs;
};
template <typename TensorSizeT>
void UpdateUsageRecord(TensorUsageRecord<TensorSizeT>* usage_rec,
size_t task_id) {
usage_rec->first_task = std::min(usage_rec->first_task, task_id);
usage_rec->last_task = std::max(usage_rec->last_task, task_id);
}
struct AddUsageRecordForTextureFunc {
void operator()(const uint3& size) const {
auto& usage_ref = usage_records->usage_refs[object_ref];
if (usage_ref == kNotAssigned) {
usage_ref = usage_records->textures_3d.size();
usage_records->textures_3d.emplace_back(size,
program_id,
program_id);
} else {
UpdateUsageRecord(&usage_records->textures_3d[usage_ref], program_id);
}
}
void operator()(const uint2& size) const {
auto& usage_ref = usage_records->usage_refs[object_ref];
if (usage_ref == kNotAssigned) {
usage_ref = usage_records->textures_2d.size();
usage_records->textures_2d.emplace_back(size,
program_id,
program_id);
} else {
UpdateUsageRecord(&usage_records->textures_2d[usage_ref], program_id);
}
}
void operator()(size_t size) const {
auto& usage_ref = usage_records->usage_refs[object_ref];
if (usage_ref == kNotAssigned) {
usage_ref = usage_records->textures_1d.size();
usage_records->textures_1d.emplace_back(size,
program_id,
program_id);
} else {
UpdateUsageRecord(&usage_records->textures_1d[usage_ref], program_id);
}
}
CombinedUsageRecords* usage_records;
const ObjectRef& object_ref;
const size_t program_id;
};
absl::Status AddUsageRecord(CombinedUsageRecords* usage_records,
const Object& object, const size_t program_id) {
auto ref = GetRef(object);
if (ref >= usage_records->usage_refs.size()) {
usage_records->usage_refs.resize(ref + 1, kNotAssigned);
}
auto& usage_ref = usage_records->usage_refs[ref];
if (object.object_type == ObjectType::BUFFER) {
if (usage_ref == kNotAssigned) {
usage_ref = usage_records->buffers.size();
usage_records->buffers.emplace_back(
NumElements(object.size),
program_id,
program_id);
} else {
UpdateUsageRecord(&usage_records->buffers[usage_ref], program_id);
}
return absl::OkStatus();
}
if (object.object_type == ObjectType::TEXTURE) {
std::visit(AddUsageRecordForTextureFunc{usage_records, ref, program_id},
object.size);
return absl::OkStatus();
}
return absl::InternalError("Unexpected object type");
}
absl::Status ApplyBuffersAssignment(
const ObjectsAssignment<size_t>& assignment,
const std::vector<size_t>& global_ref_to_usage_rec,
const std::vector<Object*>& global_ref_to_object_ptr,
std::vector<ObjectRef>* global_ref_to_shared_ref,
std::vector<Object>* shared_objects) {
std::vector<ObjectRef> assigned_id_to_shared_ref(
assignment.object_sizes.size(), kInvalidObjectRef);
for (size_t global_ref = 0; global_ref < global_ref_to_usage_rec.size();
++global_ref) {
const auto& usage_rec_id = global_ref_to_usage_rec[global_ref];
Object* object = global_ref_to_object_ptr[global_ref];
if (usage_rec_id == kNotAssigned || object == nullptr ||
object->object_type != ObjectType::BUFFER) {
continue;
}
size_t assigned_id = assignment.object_ids[usage_rec_id];
ObjectRef shared_ref = assigned_id_to_shared_ref[assigned_id];
if (shared_ref == kInvalidObjectRef) {
shared_ref = shared_objects->size();
Object shared_object = *object;
shared_object.access = AccessType::READ_WRITE;
shared_object.object = shared_ref;
shared_object.size = assignment.object_sizes[assigned_id];
shared_objects->push_back(std::move(shared_object));
assigned_id_to_shared_ref[assigned_id] = shared_ref;
}
(*global_ref_to_shared_ref)[global_ref] = shared_ref;
}
return absl::OkStatus();
}
template <typename ObjectSizeT>
absl::Status ApplyTexturesAssignment(
const ObjectsAssignment<ObjectSizeT>& assignment,
const std::vector<size_t>& global_ref_to_usage_rec,
const std::vector<Object*>& global_ref_to_object_ptr,
std::vector<ObjectRef>* global_ref_to_shared_ref,
std::vector<Object>* shared_objects) {
std::vector<ObjectRef> assigned_id_to_shared_ref(
assignment.object_sizes.size(), kInvalidObjectRef);
for (size_t global_ref = 0; global_ref < global_ref_to_usage_rec.size();
++global_ref) {
const auto& usage_rec_id = global_ref_to_usage_rec[global_ref];
Object* object = global_ref_to_object_ptr[global_ref];
if (usage_rec_id == kNotAssigned || object == nullptr ||
object->object_type != ObjectType::TEXTURE ||
!std::holds_alternative<ObjectSizeT>(object->size)) {
continue;
}
size_t assigned_id = assignment.object_ids[usage_rec_id];
ObjectRef shared_ref = assigned_id_to_shared_ref[assigned_id];
if (shared_ref == kInvalidObjectRef) {
shared_ref = shared_objects->size();
Object shared_object = *object;
shared_object.access = AccessType::READ_WRITE;
shared_object.object = shared_ref;
shared_object.size = assignment.object_sizes[assigned_id];
shared_objects->push_back(std::move(shared_object));
assigned_id_to_shared_ref[assigned_id] = shared_ref;
}
(*global_ref_to_shared_ref)[global_ref] = shared_ref;
}
return absl::OkStatus();
}
}
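// Collects first/last-use records for all internal objects, runs the memory
// assignment strategies per data type and storage kind, and rewrites each
// program's refs to point at the resulting shared objects.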
absl::Status Runtime::AssignInternalObjects(
std::vector<Object>* shared_objects) {
std::map<DataType, CombinedUsageRecords> usage_records_by_data_type;
std::vector<Object*> global_ref_to_object_ptr;
for (size_t i = 0; i < programs_.size(); ++i) {
for (auto& object : programs_[i].refs) {
auto ref = GetRef(object);
if (ref >= global_ref_to_object_ptr.size()) {
global_ref_to_object_ptr.resize(ref + 1, nullptr);
}
if (global_ref_to_object_ptr[ref] == nullptr) {
global_ref_to_object_ptr[ref] = &object;
}
RETURN_IF_ERROR(AddUsageRecord(
&usage_records_by_data_type[object.data_type], object, i));
}
}
std::vector<ObjectRef> global_ref_to_shared_ref(
global_ref_to_object_ptr.size(), kInvalidObjectRef);
for (const auto& it : usage_records_by_data_type) {
const CombinedUsageRecords& usage_records = it.second;
if (!usage_records.buffers.empty()) {
ObjectsAssignment<size_t> buffer_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(usage_records.buffers,
MemoryStrategy::GREEDY_BEST,
&buffer_assignment));
RETURN_IF_ERROR(ApplyBuffersAssignment(
buffer_assignment, usage_records.usage_refs, global_ref_to_object_ptr,
&global_ref_to_shared_ref, shared_objects));
}
if (!usage_records.textures_1d.empty()) {
ObjectsAssignment<size_t> texture_1d_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(usage_records.textures_1d,
MemoryStrategy::GREEDY_BEST,
&texture_1d_assignment));
RETURN_IF_ERROR(ApplyTexturesAssignment(
texture_1d_assignment, usage_records.usage_refs,
global_ref_to_object_ptr, &global_ref_to_shared_ref, shared_objects));
}
if (!usage_records.textures_2d.empty()) {
ObjectsAssignment<uint2> texture_2d_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(usage_records.textures_2d,
MemoryStrategy::GREEDY_IN_ORDER,
&texture_2d_assignment));
RETURN_IF_ERROR(ApplyTexturesAssignment(
texture_2d_assignment, usage_records.usage_refs,
global_ref_to_object_ptr, &global_ref_to_shared_ref, shared_objects));
}
if (!usage_records.textures_3d.empty()) {
ObjectsAssignment<uint3> texture_3d_assignment;
RETURN_IF_ERROR(AssignObjectsToTensors(usage_records.textures_3d,
MemoryStrategy::GREEDY_IN_ORDER,
&texture_3d_assignment));
RETURN_IF_ERROR(ApplyTexturesAssignment(
texture_3d_assignment, usage_records.usage_refs,
global_ref_to_object_ptr, &global_ref_to_shared_ref, shared_objects));
}
}
for (size_t i = 0; i < programs_.size(); ++i) {
for (auto& object : programs_[i].refs) {
object.object = global_ref_to_shared_ref[GetRef(object)];
}
}
return absl::OkStatus();
}
absl::Status Runtime::Execute() {
for (const auto& descriptor : programs_) {
for (auto& b : descriptor.bindings) {
RETURN_IF_ERROR(b());
}
RETURN_IF_ERROR(command_queue_->Dispatch(descriptor.program,
descriptor.num_workgroups));
}
return absl::OkStatus();
}
}
}
} | #include "tensorflow/core/tfrt/runtime/runtime.h"
#include <gtest/gtest.h>
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(RuntimeTest, GlobalRuntimeWorks) {
EXPECT_EQ(GetGlobalRuntime(), nullptr);
SetGlobalRuntime(Runtime::Create(4));
EXPECT_NE(GetGlobalRuntime(), nullptr);
EXPECT_EQ(GetGlobalRuntime(), GetGlobalRuntime());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/gl/runtime.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/runtime_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dcc6598e-7586-4f85-ac2b-7dd32be7be0e | cpp | google/quiche | http_frames | quiche/quic/core/http/http_frames.h | quiche/quic/core/http/http_frames_test.cc | #ifndef QUICHE_QUIC_CORE_HTTP_HTTP_FRAMES_H_
#define QUICHE_QUIC_CORE_HTTP_HTTP_FRAMES_H_
#include <algorithm>
#include <cstdint>
#include <map>
#include <ostream>
#include <sstream>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/core/quic_types.h"
namespace quic {
enum class HttpFrameType {
DATA = 0x0,
HEADERS = 0x1,
CANCEL_PUSH = 0x3,
SETTINGS = 0x4,
PUSH_PROMISE = 0x5,
GOAWAY = 0x7,
ORIGIN = 0xC,
MAX_PUSH_ID = 0xD,
ACCEPT_CH = 0x89,
PRIORITY_UPDATE_REQUEST_STREAM = 0xF0700,
WEBTRANSPORT_STREAM = 0x41,
METADATA = 0x4d,
};
struct QUICHE_EXPORT DataFrame {
absl::string_view data;
};
struct QUICHE_EXPORT HeadersFrame {
absl::string_view headers;
};
using SettingsMap = absl::flat_hash_map<uint64_t, uint64_t>;
struct QUICHE_EXPORT SettingsFrame {
SettingsMap values;
bool operator==(const SettingsFrame& rhs) const {
return values == rhs.values;
}
std::string ToString() const {
std::string s;
for (auto it : values) {
std::string setting = absl::StrCat(
H3SettingsToString(
static_cast<Http3AndQpackSettingsIdentifiers>(it.first)),
" = ", it.second, "; ");
absl::StrAppend(&s, setting);
}
return s;
}
friend QUICHE_EXPORT std::ostream& operator<<(std::ostream& os,
const SettingsFrame& s) {
os << s.ToString();
return os;
}
};
struct QUICHE_EXPORT GoAwayFrame {
uint64_t id;
bool operator==(const GoAwayFrame& rhs) const { return id == rhs.id; }
};
struct QUICHE_EXPORT OriginFrame {
std::vector<std::string> origins;
bool operator==(const OriginFrame& rhs) const {
return origins == rhs.origins;
}
std::string ToString() const {
std::string result = "Origin Frame: {origins: ";
for (const std::string& origin : origins) {
absl::StrAppend(&result, "\n", origin);
}
result += "}";
return result;
}
friend QUICHE_EXPORT std::ostream& operator<<(std::ostream& os,
const OriginFrame& s) {
os << s.ToString();
return os;
}
};
inline constexpr QuicByteCount kPriorityFirstByteLength = 1;
struct QUICHE_EXPORT PriorityUpdateFrame {
uint64_t prioritized_element_id = 0;
std::string priority_field_value;
bool operator==(const PriorityUpdateFrame& rhs) const {
return std::tie(prioritized_element_id, priority_field_value) ==
std::tie(rhs.prioritized_element_id, rhs.priority_field_value);
}
std::string ToString() const {
return absl::StrCat(
"Priority Frame : {prioritized_element_id: ", prioritized_element_id,
", priority_field_value: ", priority_field_value, "}");
}
friend QUICHE_EXPORT std::ostream& operator<<(std::ostream& os,
const PriorityUpdateFrame& s) {
os << s.ToString();
return os;
}
};
struct QUICHE_EXPORT AcceptChFrame {
std::vector<spdy::AcceptChOriginValuePair> entries;
bool operator==(const AcceptChFrame& rhs) const {
return entries.size() == rhs.entries.size() &&
std::equal(entries.begin(), entries.end(), rhs.entries.begin());
}
std::string ToString() const {
std::stringstream s;
s << *this;
return s.str();
}
friend QUICHE_EXPORT std::ostream& operator<<(std::ostream& os,
const AcceptChFrame& frame) {
os << "ACCEPT_CH frame with " << frame.entries.size() << " entries: ";
for (auto& entry : frame.entries) {
os << "origin: " << entry.origin << "; value: " << entry.value;
}
return os;
}
};
}
#endif | #include "quiche/quic/core/http/http_frames.h"
#include <sstream>
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
TEST(HttpFramesTest, SettingsFrame) {
SettingsFrame a;
EXPECT_TRUE(a == a);
EXPECT_EQ("", a.ToString());
SettingsFrame b;
b.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = 1;
EXPECT_FALSE(a == b);
EXPECT_TRUE(b == b);
a.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = 2;
EXPECT_FALSE(a == b);
a.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = 1;
EXPECT_TRUE(a == b);
EXPECT_EQ("SETTINGS_QPACK_MAX_TABLE_CAPACITY = 1; ", b.ToString());
std::stringstream s;
s << b;
EXPECT_EQ("SETTINGS_QPACK_MAX_TABLE_CAPACITY = 1; ", s.str());
}
TEST(HttpFramesTest, GoAwayFrame) {
GoAwayFrame a{1};
EXPECT_TRUE(a == a);
GoAwayFrame b{2};
EXPECT_FALSE(a == b);
b.id = 1;
EXPECT_TRUE(a == b);
}
TEST(HttpFramesTest, PriorityUpdateFrame) {
PriorityUpdateFrame a{0, ""};
EXPECT_TRUE(a == a);
PriorityUpdateFrame b{4, ""};
EXPECT_FALSE(a == b);
a.prioritized_element_id = 4;
EXPECT_TRUE(a == b);
a.priority_field_value = "foo";
EXPECT_FALSE(a == b);
EXPECT_EQ(
"Priority Frame : {prioritized_element_id: 4, priority_field_value: foo}",
a.ToString());
std::stringstream s;
s << a;
EXPECT_EQ(
"Priority Frame : {prioritized_element_id: 4, priority_field_value: foo}",
s.str());
}
TEST(HttpFramesTest, AcceptChFrame) {
AcceptChFrame a;
EXPECT_TRUE(a == a);
EXPECT_EQ("ACCEPT_CH frame with 0 entries: ", a.ToString());
AcceptChFrame b{{{"foo", "bar"}}};
EXPECT_FALSE(a == b);
a.entries.push_back({"foo", "bar"});
EXPECT_TRUE(a == b);
EXPECT_EQ("ACCEPT_CH frame with 1 entries: origin: foo; value: bar",
a.ToString());
std::stringstream s;
s << a;
EXPECT_EQ("ACCEPT_CH frame with 1 entries: origin: foo; value: bar", s.str());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/http_frames.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/http_frames_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
42ba027f-473e-4e70-a1e2-6fa80dbda591 | cpp | tensorflow/tensorflow | cpu_instruction_fusion | third_party/xla/xla/service/cpu/cpu_instruction_fusion.cc | third_party/xla/xla/service/cpu/cpu_instruction_fusion_test.cc | #include "xla/service/cpu/cpu_instruction_fusion.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/fusion_node_indexing_evaluation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
namespace xla {
namespace cpu {
namespace {
bool CanBeLoopFused(const HloInstruction& hlo) {
return hlo.IsElementwise() ||
hlo.opcode() == HloOpcode::kBitcast ||
hlo.opcode() == HloOpcode::kBroadcast ||
hlo.opcode() == HloOpcode::kConcatenate ||
hlo.opcode() == HloOpcode::kDynamicSlice ||
hlo.opcode() == HloOpcode::kDynamicUpdateSlice ||
hlo.opcode() == HloOpcode::kGather ||
hlo.opcode() == HloOpcode::kIota || hlo.opcode() == HloOpcode::kPad ||
hlo.opcode() == HloOpcode::kReduce ||
hlo.opcode() == HloOpcode::kReshape ||
hlo.opcode() == HloOpcode::kReverse ||
hlo.opcode() == HloOpcode::kSlice ||
hlo.opcode() == HloOpcode::kTranspose;
}
bool IsNonComplexNonBatchedMatrixVectorDot(const HloInstruction* hlo) {
const Shape& hlo_shape = hlo->shape();
return !ShapeUtil::ElementIsComplex(hlo_shape) &&
hlo->opcode() == HloOpcode::kDot && hlo_shape.dimensions_size() <= 1 &&
hlo->dot_dimension_numbers().lhs_batch_dimensions_size() == 0;
}
bool HasExactlyOneUse(const HloInstruction& hlo_instr) {
return hlo_instr.user_count() == 1 &&
absl::c_count(hlo_instr.users().front()->operands(), &hlo_instr) == 1;
}
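// Output fusion here means fusing a matrix-vector dot into the add that
// consumes it; it is only allowed when the dot is non-complex, non-batched,
// at most rank 1, and has exactly one use.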
bool CanBeOutputFused(const HloInstruction* producer,
const HloInstruction* consumer) {
return consumer->opcode() == HloOpcode::kAdd &&
IsNonComplexNonBatchedMatrixVectorDot(producer) &&
         HasExactlyOneUse(*producer);
}
bool CanBeOutputFusedIntoSomeOperand(const HloInstruction* consumer) {
return consumer->opcode() == HloOpcode::kAdd &&
(CanBeOutputFused(consumer->operand(0), consumer) ||
CanBeOutputFused(consumer->operand(1), consumer));
}
}
FusionDecision CpuInstructionFusion::ShouldFuse(HloInstruction* consumer,
int64_t operand_index) {
HloInstruction* producer = consumer->mutable_operand(operand_index);
VLOG(2) << "Considering for fusion: operand " << operand_index << " of "
<< consumer->ToString();
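  // Upper bound, in bytes, on the rank-1 operand of a dot when deciding
  // whether to fuse the producer of the other operand into a small
  // matrix-vector product.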
constexpr int kFusionThresholdBytes = 16 * 1024;
if (CanBeOutputFused(producer, consumer)) {
VLOG(2) << "Fusion OK: Can create output fusion.";
return FusionDecision::Allow();
}
if (CanBeOutputFusedIntoSomeOperand(producer)) {
return FusionDecision::Forbid(
"Bailing because producer can be output-fused into some operand.");
}
if (!CanBeLoopFused(*producer)) {
return FusionDecision::Forbid("Producer is not loop-fusible.");
}
if (producer->opcode() != HloOpcode::kFusion && is_expensive(*producer) &&
ReusesOperandElements(consumer, operand_index)) {
return FusionDecision::Forbid("Fusion is not profitable.");
}
RETURN_IF_NOT_FUSIBLE(InstructionFusion::ShouldFuse(consumer, operand_index));
if (producer->opcode() == HloOpcode::kConstant &&
consumer->opcode() != HloOpcode::kFusion) {
return FusionDecision::Forbid(
"Not fusing: insufficient non-constant nodes.");
}
if (producer->opcode() == HloOpcode::kFusion) {
return FusionDecision::Forbid(
"Not fusing: producer is itself a fusion node.");
}
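  // For an existing fusion consumer, consult a cached
  // FusionNodeIndexingEvaluation to reject producers whose fusion would
  // duplicate too much code.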
if (consumer->opcode() == HloOpcode::kFusion) {
if (fusion_node_evaluations_.find(consumer) ==
fusion_node_evaluations_.end()) {
fusion_node_evaluations_.emplace(consumer,
FusionNodeIndexingEvaluation(consumer));
}
if (fusion_node_evaluations_.at(consumer).CodeDuplicationTooHigh(
producer)) {
return FusionDecision::Forbid("Code duplication too high");
}
}
if (consumer->opcode() == HloOpcode::kDot) {
const Shape& output_shape = consumer->shape();
if (output_shape.dimensions_size() <= 1) {
if (consumer->operand(0)->shape().rank() == 1 && operand_index == 1 &&
ShapeUtil::ByteSizeOfElements(consumer->operand(0)->shape()) <
kFusionThresholdBytes) {
VLOG(2) << "Fusing small matrix-vector product.";
return FusionDecision::Allow();
} else if (consumer->operand(1)->shape().rank() == 1 &&
operand_index == 0 &&
ShapeUtil::ByteSizeOfElements(consumer->operand(1)->shape()) <
kFusionThresholdBytes) {
VLOG(2) << "Fusing small matrix-vector product.";
return FusionDecision::Allow();
}
}
}
if (consumer->opcode() == HloOpcode::kReduce &&
!absl::c_linear_search(
consumer->dimensions(),
LayoutUtil::Minor(consumer->operand(0)->shape().layout(), 0))) {
return FusionDecision::Forbid(
"Not fusing reductions over major dimensions");
}
if (producer->opcode() == HloOpcode::kReduce &&
!absl::c_linear_search(
producer->dimensions(),
LayoutUtil::Minor(producer->operand(0)->shape().layout(), 0))) {
return FusionDecision::Forbid(
"Not fusing reductions over major dimensions");
}
if (consumer->IsLoopFusion()) {
VLOG(2) << "Fusing: consumer is a fusion node.";
return FusionDecision::Allow();
}
if (CanBeLoopFused(*consumer)) {
VLOG(2) << "Fusing: consumer is elementwise or fusible.";
return FusionDecision::Allow();
}
return FusionDecision::Forbid("Not fusing: not found a fusible case");
}
HloInstruction::FusionKind CpuInstructionFusion::ChooseKind(
const HloInstruction* producer, const HloInstruction* consumer) {
return CanBeOutputFused(producer, consumer)
? HloInstruction::FusionKind::kOutput
: HloInstruction::FusionKind::kLoop;
}
HloInstruction* CpuInstructionFusion::FuseInstruction(
HloInstruction* fusion_instruction, HloInstruction* producer) {
auto evaluation = fusion_node_evaluations_.find(fusion_instruction);
if (evaluation == fusion_node_evaluations_.end()) {
evaluation = fusion_node_evaluations_
.emplace(fusion_instruction,
FusionNodeIndexingEvaluation(fusion_instruction))
.first;
}
auto indexing_users = evaluation->second.RemoveFusionOperand(producer);
HloInstruction* new_producer =
InstructionFusion::FuseInstruction(fusion_instruction, producer);
evaluation->second.UpdateEvaluationCache(new_producer, indexing_users);
return new_producer;
}
}
} | #include "xla/service/cpu/cpu_instruction_fusion.h"
#include <algorithm>
#include <memory>
#include <set>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/transpose_folding.h"
#include "xla/shape.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla::cpu {
namespace {
using InstructionFusionTest = HloTestBase;
std::unique_ptr<HloInstruction> MakeDot(const Shape& shape, HloInstruction* lhs,
HloInstruction* rhs) {
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(lhs->shape().rank() - 1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
return HloInstruction::CreateDot(shape, lhs, rhs, dot_dnums,
precision_config);
}
TEST_F(InstructionFusionTest, DotOperationFusion_Basic_0) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1024, 256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1024, 256}), HloOpcode::kExp, arg0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), exp0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Basic_1) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256, 1024}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {256, 1024}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Bitcast) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), HloOpcode::kExp, arg0));
HloInstruction* bitcast0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {1024, 256}), HloOpcode::kBitcast, exp0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), bitcast0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_Reshape) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256}), "arg1"));
HloInstruction* exp0 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2, 512, 2, 128}), HloOpcode::kExp, arg0));
HloInstruction* reshape0 =
builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(F32, {1024, 256}), exp0));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {1024}), reshape0, arg1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_TRUE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_THAT(computation->root_instruction(), op::Fusion());
}
TEST_F(InstructionFusionTest, DotOperationFusion_TooLarge) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {32 * 1024}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {32 * 1024, 256}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {32 * 1024, 256}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {256}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_FALSE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_EQ(dot, computation->root_instruction());
}
TEST_F(InstructionFusionTest, DotOperationFusion_ElementReuse) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {2, 256}), "arg0"));
HloInstruction* arg1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {256, 1024}), "arg1"));
HloInstruction* exp1 = builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {256, 1024}), HloOpcode::kExp, arg1));
HloInstruction* dot = builder.AddInstruction(
MakeDot(ShapeUtil::MakeShape(F32, {2, 1024}), arg0, exp1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(dot, computation->root_instruction());
EXPECT_FALSE(CpuInstructionFusion().Run(module.get()).value());
EXPECT_EQ(dot, computation->root_instruction());
}
TEST_F(InstructionFusionTest, DotOperationFusion_TransposeFusion_RHS) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[1024,256] parameter(1)
exponential = f32[1024,256] exponential(arg1)
transpose = f32[256,1024] transpose(exponential), dimensions={1,0}
ROOT dot = f32[1,1024] dot(arg0, transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
1, 1));
}
TEST_F(InstructionFusionTest, DotOperationFusion_TransposeFusion_LHS) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[256,1] parameter(0)
arg1 = f32[256,1024] parameter(1)
transpose = f32[1,256] transpose(arg0), dimensions={1,0}
exponential = f32[256,1024] exponential(arg1)
ROOT dot = f32[1,1024] dot(transpose, exponential), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
0, 0));
}
TEST_F(InstructionFusionTest,
DotOperationFusion_TransposeFusion_LHS_NonDefault) {
std::string hlo_string = R"(
HloModule DotOperationFusion_TransposeFusion
ENTRY DotOperationFusion_TransposeFusion {
arg0 = f32[1,256] parameter(0)
arg1 = f32[256,1024] parameter(1)
transpose = f32[256,1] transpose(arg0), dimensions={1,0}
exponential = f32[256,1024] exponential(arg1)
ROOT dot = f32[1,1024] dot(transpose, exponential), lhs_contracting_dims={0}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation = module->entry_computation();
TF_ASSERT_OK_AND_ASSIGN(bool changed, TransposeFolding().Run(module.get()));
ASSERT_TRUE(changed);
ASSERT_THAT(computation->root_instruction(),
op::Dot(op::Parameter(0), op::Exp(op::Parameter(1)),
1, 0));
}
class OpcodeFusionTest : public InstructionFusionTest {
protected:
void RunFusionAndCheckOpcodesWereFused(
HloModule* module, const std::multiset<HloOpcode>& expected_opcodes,
HloInstruction::FusionKind fusion_kind =
HloInstruction::FusionKind::kLoop) {
auto computation = module->entry_computation();
auto did_fusion = CpuInstructionFusion().Run(module);
ASSERT_TRUE(did_fusion.ok());
EXPECT_TRUE(did_fusion.value());
HloInstruction* root = computation->root_instruction();
ASSERT_THAT(root, op::Fusion());
EXPECT_EQ(root->fusion_kind(), fusion_kind);
std::vector<HloOpcode> fused_opcodes(root->fused_instruction_count());
std::transform(root->fused_instructions().begin(),
root->fused_instructions().end(), fused_opcodes.begin(),
[](const HloInstruction* hlo) { return hlo->opcode(); });
EXPECT_EQ(
std::multiset<HloOpcode>(fused_opcodes.begin(), fused_opcodes.end()),
expected_opcodes);
}
HloComputation* CreateAdderToOne(HloModule* module) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* one = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, arg0, one));
return module->AddEmbeddedComputation(builder.Build());
}
HloComputation* CreateMax(HloModule* module) {
HloComputation::Builder builder(TestName());
HloInstruction* arg0 =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "arg0"));
HloInstruction* arg1 =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "arg1"));
builder.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMaximum, arg0, arg1));
return module->AddEmbeddedComputation(builder.Build());
}
};
TEST_F(OpcodeFusionTest, Exponential_Reshape_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {1, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
HloInstruction* reshape2 =
builder.AddInstruction(HloInstruction::CreateReshape(result_shape, exp1));
builder.AddInstruction(
HloInstruction::CreateUnary(result_shape, HloOpcode::kNegate, reshape2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kReshape, HloOpcode::kExp,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Broadcast_Reshape_DynamicSlice_Tanh) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
Shape starts_shape = ShapeUtil::MakeShape(S32, {});
Shape broadcast_shape = ShapeUtil::MakeShape(F32, {1, 8, 8});
Shape reshape_shape = ShapeUtil::MakeShape(F32, {8, 8});
Shape dynamic_slice_shape = ShapeUtil::MakeShape(F32, {4, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, starts_shape, "starts"));
HloInstruction* param2 = builder.AddInstruction(
HloInstruction::CreateParameter(2, starts_shape, "starts"));
HloInstruction* broadcast2 = builder.AddInstruction(
HloInstruction::CreateBroadcast(broadcast_shape, param0, {1}));
HloInstruction* reshape3 = builder.AddInstruction(
HloInstruction::CreateReshape(reshape_shape, broadcast2));
HloInstruction* dynamic_slice4 =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
dynamic_slice_shape, reshape3, {param1, param2}, {4, 4}));
builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_slice_shape, HloOpcode::kTanh, dynamic_slice4));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kTanh, HloOpcode::kDynamicSlice, HloOpcode::kReshape,
HloOpcode::kBroadcast, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Broadcast_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
Shape result_shape = ShapeUtil::MakeShape(F32, {8, 8});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* broadcast1 = builder.AddInstruction(
HloInstruction::CreateBroadcast(result_shape, param0, {1}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, broadcast1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kBroadcast, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, DynamicSlice_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
Shape slice_shape = ShapeUtil::MakeShape(S32, {});
Shape result_shape = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, slice_shape, "starts"));
HloInstruction* dynamic_slice2 = builder.AddInstruction(
HloInstruction::CreateDynamicSlice(result_shape, param0, {param1}, {2}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, dynamic_slice2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kDynamicSlice,
HloOpcode::kParameter, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Exponential_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kNegate, exp1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kExp, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Reshape_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {16});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* reshape1 = builder.AddInstruction(
HloInstruction::CreateReshape(result_shape, param0));
builder.AddInstruction(
HloInstruction::CreateUnary(result_shape, HloOpcode::kNegate, reshape1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kReshape, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Reverse_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {8});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* reverse1 = builder.AddInstruction(
HloInstruction::CreateReverse(param_shape, param0, {0}));
builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kNegate, reverse1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kReverse, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Slice_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {4});
Shape slice_shape = ShapeUtil::MakeShape(F32, {2});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
  HloInstruction* slice1 = builder.AddInstruction(HloInstruction::CreateSlice(
      slice_shape, param0, /*start_indices=*/{0}, /*limit_indices=*/{4},
      /*strides=*/{2}));
builder.AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeShape(F32, {2}), HloOpcode::kNegate, slice1));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kNegate, HloOpcode::kSlice, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, Exponential_Transpose_Negate) {
HloComputation::Builder builder(TestName());
Shape param_shape = ShapeUtil::MakeShape(F32, {3, 4});
Shape result_shape = ShapeUtil::MakeShape(F32, {4, 3});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, param_shape, "param"));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(param_shape, HloOpcode::kExp, param0));
HloInstruction* transpose2 = builder.AddInstruction(
HloInstruction::CreateTranspose(result_shape, exp1, {1, 0}));
builder.AddInstruction(HloInstruction::CreateUnary(
result_shape, HloOpcode::kNegate, transpose2));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kNegate, HloOpcode::kTranspose, HloOpcode::kExp,
HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, UnaryMapOfExp) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {3, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* exp = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
builder.AddInstruction(
HloInstruction::CreateMap(shape, {exp}, CreateAdderToOne(module.get())));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kParameter, HloOpcode::kExp, HloOpcode::kMap});
}
TEST_F(OpcodeFusionTest, BinaryMapOfExps) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape shape = ShapeUtil::MakeShape(F32, {3, 4});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "param"));
HloInstruction* exp0 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param0));
HloInstruction* exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kExp, param1));
builder.AddInstruction(
HloInstruction::CreateMap(shape, {exp0, exp1}, CreateMax(module.get())));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(), {HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kExp, HloOpcode::kExp, HloOpcode::kMap});
}
TEST_F(OpcodeFusionTest, DynamicSliceWithDynamicUpdateSlice) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape full_shape = ShapeUtil::MakeShape(F32, {10, 100, 1000});
Shape slice_shape = ShapeUtil::MakeShape(F32, {10, 1, 1000});
std::vector<HloInstruction*> slice_indices, update_indices;
for (int i = 0; i < 3; ++i) {
slice_indices.push_back(
builder.AddInstruction(HloInstruction::CreateParameter(
1 + i, ShapeUtil::MakeShape(U32, {}), "slice_indices")));
update_indices.push_back(
builder.AddInstruction(HloInstruction::CreateParameter(
5 + i, ShapeUtil::MakeShape(U32, {}), "update_indices")));
}
HloInstruction* slice =
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
slice_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(0, full_shape, "slice_from")),
slice_indices,
{10, 1, 1000}));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(4, full_shape, "to_update")),
slice, update_indices));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDynamicSlice, HloOpcode::kDynamicUpdateSlice,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter});
}
TEST_F(OpcodeFusionTest, MessOfFusibleNodes) {
auto module = CreateNewVerifiedModule();
HloComputation::Builder builder(TestName());
Shape full_shape = ShapeUtil::MakeShape(F32, {4, 100, 10, 100, 50});
auto loop_idx = builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "param0"));
auto param1 = builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(S32, {}), "param1"));
auto idx_choice = builder.AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(S32, {}),
builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(S32, {1}),
builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeShape(S32, {4}), "param2")),
{loop_idx},
{1}))));
auto zero = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0(0)));
auto slice = builder.AddInstruction(HloInstruction::CreateDynamicSlice(
ShapeUtil::MakeShape(F32, {1, 100, 10, 100, 50}),
builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeShape(F32, {100, 100, 10, 100, 50}), "param3")),
{idx_choice, zero, zero, zero, zero},
{1, 100, 10, 100, 50}));
builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
full_shape,
builder.AddInstruction(
HloInstruction::CreateParameter(4, full_shape, "param4")),
slice, {loop_idx, param1, param1, param1, param1}));
module->AddEntryComputation(builder.Build());
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDynamicSlice, HloOpcode::kDynamicSlice,
HloOpcode::kDynamicUpdateSlice, HloOpcode::kReshape,
HloOpcode::kConstant, HloOpcode::kParameter, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter, HloOpcode::kParameter});
}
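// Builds an entry computation of the form add(dot(param0, param1), param2),
// where the dot is a canonical [m,k] x [k,n] product (collapsed to vector
// shapes when m or n is 1). When `add_extra_use_for_dot` is true, the dot
// result is additionally outfed, giving it a second user.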
void CreateComputationForDotAddOutputFusionTest(const std::string& test_name,
HloModule* module, int m, int k,
int n,
bool add_extra_use_for_dot) {
HloComputation::Builder builder(test_name);
Shape dot_lhs_shape = ShapeUtil::MakeShape(F32, {m, k});
Shape dot_rhs_shape = ShapeUtil::MakeShape(F32, {k, n});
Shape dot_shape = ShapeUtil::MakeShape(F32, {m, n});
if (m == 1) {
dot_lhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {n});
} else if (n == 1) {
dot_rhs_shape = ShapeUtil::MakeShape(F32, {k});
dot_shape = ShapeUtil::MakeShape(F32, {m});
}
auto* dot_lhs = builder.AddInstruction(
HloInstruction::CreateParameter(0, dot_lhs_shape, "param0"));
auto* dot_rhs = builder.AddInstruction(
HloInstruction::CreateParameter(1, dot_rhs_shape, "param1"));
auto* addend = builder.AddInstruction(
HloInstruction::CreateParameter(2, dot_shape, "param2"));
auto* dot =
builder.AddInstruction(CreateCanonicalDot(dot_shape, dot_lhs, dot_rhs));
builder.AddInstruction(
HloInstruction::CreateBinary(dot_shape, HloOpcode::kAdd, dot, addend));
if (add_extra_use_for_dot) {
auto* token = builder.AddInstruction(HloInstruction::CreateToken());
builder.AddInstruction(
HloInstruction::CreateOutfeed(dot_shape, dot, token, "no_config"));
}
module->AddEntryComputation(builder.Build());
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_1x50x19) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(), /*m=*/1,
                                             /*k=*/50, /*n=*/19,
                                             /*add_extra_use_for_dot=*/false);
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDot, HloOpcode::kAdd, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter},
HloInstruction::FusionKind::kOutput);
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x1) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/1,
                                             /*add_extra_use_for_dot=*/false);
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kDot, HloOpcode::kAdd, HloOpcode::kParameter,
HloOpcode::kParameter, HloOpcode::kParameter},
HloInstruction::FusionKind::kOutput);
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x19) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/19,
                                             /*add_extra_use_for_dot=*/false);
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(OpcodeFusionTest, DotAddOutputFusion_19x50x1_multi_use) {
auto module = CreateNewVerifiedModule();
  CreateComputationForDotAddOutputFusionTest(TestName(), module.get(),
                                             /*m=*/19, /*k=*/50, /*n=*/1,
                                             /*add_extra_use_for_dot=*/true);
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(InstructionFusionTest,
DotOperationFusion_DontOutputFuseDuplicateOperands) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
a = f32[50,60]{1,0} parameter(0)
b = f32[60,1]{1,0} parameter(1)
c = f32[50,1]{1,0} dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT d = f32[50,1]{1,0} add(c, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
struct GatherLoopFusionTestSpec {
std::string test_name;
std::string hlo_computation_text;
static std::string Name(
const ::testing::TestParamInfo<GatherLoopFusionTestSpec>& info) {
return info.param.test_name;
}
};
class GatherLoopFusionTest
: public OpcodeFusionTest,
public ::testing::WithParamInterface<GatherLoopFusionTestSpec> {};
TEST_P(GatherLoopFusionTest, GatherLoopFusion) {
const GatherLoopFusionTestSpec& spec = GetParam();
std::string hlo_string = absl::StrCat("HloModule ", spec.test_name, "\n\n",
spec.hlo_computation_text);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
RunFusionAndCheckOpcodesWereFused(
module.get(),
{HloOpcode::kGather, HloOpcode::kAdd, HloOpcode::kBroadcast,
HloOpcode::kConstant, HloOpcode::kParameter, HloOpcode::kParameter});
}
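// Each spec below pairs a gather with add(gather, broadcast(1)); the fusion
// pass is expected to fuse the gather, constant, broadcast and add together.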
std::vector<GatherLoopFusionTestSpec> GetGatherLoopFusionTestSpecs() {
std::vector<GatherLoopFusionTestSpec> result;
result.push_back({"FusedTensorFlowGatherV2", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
gather = s32[3,2] gather(operand, indices),
offset_dims={0},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=1,
slice_sizes={3, 1}
one = s32[] constant(1)
one_broadcasted = s32[3,2] broadcast(one), dimensions={}
ROOT result = s32[3,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherMultipleBatchDims", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,3,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={1},
start_index_map={1},
index_vector_dim=2,
slice_sizes={3, 1}
one = s32[] constant(1)
one_broadcasted = s32[2,3,2] broadcast(one), dimensions={}
ROOT result = s32[2,3,2]{2,1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherNdMultipleBatchDims", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2,2] parameter(1)
gather = s32[2,2] gather(operand, indices),
offset_dims={},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=2,
slice_sizes={1, 1}
one = s32[] constant(1)
one_broadcasted = s32[2,2] broadcast(one), dimensions={}
ROOT result = s32[2,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherNd_0", R"(
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=1,
slice_sizes={1,1,2}
one = s32[] constant(1)
one_broadcasted = s32[2,2] broadcast(one), dimensions={}
ROOT result = s32[2,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedTensorFlowGatherNd_1", R"(
ENTRY main {
operand = s32[3,3,2] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,2] gather(operand, indices),
offset_dims={1},
collapsed_slice_dims={0,1},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1,2}
one = s32[] constant(1)
one_broadcasted = s32[2,2] broadcast(one), dimensions={}
ROOT result = s32[2,2]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedDynamicSlice", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2] parameter(1)
gather = s32[1,1] gather(operand, indices),
offset_dims={0,1},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1}
one = s32[] constant(1)
one_broadcasted = s32[1,1] broadcast(one), dimensions={}
ROOT result = s32[1,1]{1,0} add(gather, one_broadcasted)
}
)"});
result.push_back({"FusedBatchDynamicSlice", R"(
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[2,2] parameter(1)
gather = s32[2,1,1] gather(operand, indices),
offset_dims={1,2},
collapsed_slice_dims={},
start_index_map={0,1},
index_vector_dim=0,
slice_sizes={1,1}
one = s32[] constant(1)
one_broadcasted = s32[2,1,1] broadcast(one), dimensions={}
ROOT result = s32[2,1,1]{2,1,0} add(gather, one_broadcasted)
}
)"});
return result;
}
INSTANTIATE_TEST_SUITE_P(GatherLoopFusionTestInstantiation,
GatherLoopFusionTest,
::testing::ValuesIn(GetGatherLoopFusionTestSpecs()),
GatherLoopFusionTestSpec::Name);
TEST_F(InstructionFusionTest, NoFuseReduceMajor) {
absl::string_view module_string = R"(
HloModule module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
a = f32[50,60]{1,0} parameter(0)
b = f32[50,60]{1,0} parameter(1)
c = f32[50,60]{1,0} add(a, b)
init = f32[] constant(0)
ROOT r = f32[60]{0} reduce(c, init), dimensions={0}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_FALSE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(),
Not(op::Fusion()));
}
TEST_F(InstructionFusionTest, FuseReduceMinor) {
absl::string_view module_string = R"(
HloModule module
add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
a = f32[50,60]{1,0} parameter(0)
b = f32[50,60]{1,0} parameter(1)
c = f32[50,60]{1,0} add(a, b)
init = f32[] constant(0)
ROOT r = f32[] reduce(c, init), dimensions={0,1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool fused_something,
CpuInstructionFusion().Run(module.get()));
EXPECT_TRUE(fused_something);
EXPECT_THAT(module->entry_computation()->root_instruction(), op::Fusion());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_instruction_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/cpu_instruction_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e71d581e-3184-4873-831f-aba1c7d47594 | cpp | tensorflow/tensorflow | hlo_rematerialization_test_utils | third_party/xla/xla/service/hlo_rematerialization_test_utils.h | third_party/xla/xla/service/hlo_rematerialization_test_utils_test.cc | #ifndef XLA_SERVICE_HLO_REMATERIALIZATION_TEST_UTILS_H_
#define XLA_SERVICE_HLO_REMATERIALIZATION_TEST_UTILS_H_
#include <cstdint>
#include <memory>
#include <string>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
class RematerializationTestBase : public HloTestBase {
protected:
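  // Builds: param[1] -> reshape -> broadcast[1024] -> negate, followed by
  // concatenate/slice pairs so that the large broadcast has more than one use
  // and the computation ends in a [1]-shaped result.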
std::unique_ptr<HloComputation> MakeRematerializableComputation(
const std::string& suffix = "") {
auto builder = HloComputation::Builder(TestName() + suffix);
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
auto reshape = builder.AddInstruction(
HloInstruction::CreateReshape(scalar_shape_, param));
auto bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(vec1024_shape_, reshape, {}));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(vec1024_shape_, HloOpcode::kNegate, bcast));
    auto concat_1 = builder.AddInstruction(HloInstruction::CreateConcatenate(
        ShapeUtil::MakeShape(xla::F32, {2048}), {negate, negate},
        /*dimension=*/0));
    auto slice_1 = builder.AddInstruction(HloInstruction::CreateSlice(
        vec1_shape_, concat_1, /*start_indices=*/{0},
        /*limit_indices=*/{1},
        /*strides=*/{1}));
    auto concat_2 = builder.AddInstruction(HloInstruction::CreateConcatenate(
        ShapeUtil::MakeShape(xla::F32, {1025}), {bcast, slice_1},
        /*dimension=*/0));
    builder.AddInstruction(HloInstruction::CreateSlice(vec1_shape_, concat_2,
                                                       /*start_indices=*/{0},
                                                       /*limit_indices=*/{1},
                                                       /*strides=*/{1}));
return builder.Build();
}
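  // Like MakeRematerializableComputation, but the sliced broadcast feeds a
  // while loop (built from `while_cond` and `while_body`) before the final
  // concatenate/slice.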
std::unique_ptr<HloComputation> MakeRematerializableWhileComputation(
HloComputation* while_cond, HloComputation* while_body,
const std::string& suffix = "") {
auto builder = HloComputation::Builder(TestName() + suffix);
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
auto reshape = builder.AddInstruction(
HloInstruction::CreateReshape(scalar_shape_, param));
auto bcast = builder.AddInstruction(
HloInstruction::CreateBroadcast(vec1024_shape_, reshape, {}));
    auto slice_1 = builder.AddInstruction(
        HloInstruction::CreateSlice(vec1_shape_, bcast, /*start_indices=*/{0},
                                    /*limit_indices=*/{1},
                                    /*strides=*/{1}));
    auto while_inst = builder.AddInstruction(HloInstruction::CreateWhile(
        vec1_shape_, while_cond, while_body, slice_1));
    auto concat = builder.AddInstruction(HloInstruction::CreateConcatenate(
        ShapeUtil::MakeShape(xla::F32, {1025}), {bcast, while_inst},
        /*dimension=*/0));
    builder.AddInstruction(HloInstruction::CreateSlice(vec1_shape_, concat,
                                                       /*start_indices=*/{0},
                                                       /*limit_indices=*/{1},
                                                       /*strides=*/{1}));
return builder.Build();
}
std::unique_ptr<HloComputation> MakeConditionComputation() {
auto builder = HloComputation::Builder(TestName() + ".cond");
builder.AddInstruction(
HloInstruction::CreateParameter(0, vec1_shape_, "param"));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
return builder.Build();
}
static int64_t ByteSizeOf(const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape, sizeof(void*));
}
protected:
const Shape scalar_shape_ = ShapeUtil::MakeShape(xla::F32, {});
const Shape vec1_shape_ = ShapeUtil::MakeShape(xla::F32, {1});
const Shape vec1024_shape_ = ShapeUtil::MakeShape(xla::F32, {1024});
};
}
#endif | #include "xla/service/hlo_rematerialization_test_utils.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
class HloRematerializationTestUtilsTest : public RematerializationTestBase {};
TEST_F(HloRematerializationTestUtilsTest, MakeRematerializableComputation) {
auto computation = MakeRematerializableComputation();
std::vector<HloInstruction*> instructions(computation->instructions().begin(),
computation->instructions().end());
EXPECT_EQ(instructions[0]->name(), "param");
EXPECT_EQ(instructions[1]->name(), "reshape");
EXPECT_THAT(instructions[1]->operands(),
UnorderedElementsAre(instructions[0]));
EXPECT_EQ(instructions[2]->name(), "broadcast");
EXPECT_THAT(instructions[2]->operands(),
UnorderedElementsAre(instructions[1]));
EXPECT_EQ(instructions[3]->name(), "negate");
EXPECT_THAT(instructions[3]->operands(),
UnorderedElementsAre(instructions[2]));
EXPECT_EQ(instructions[4]->name(), "concatenate");
EXPECT_THAT(instructions[4]->operands(),
UnorderedElementsAre(instructions[3], instructions[3]));
EXPECT_EQ(instructions[5]->name(), "slice");
EXPECT_THAT(instructions[5]->operands(),
UnorderedElementsAre(instructions[4]));
EXPECT_EQ(instructions[6]->name(), "concatenate");
EXPECT_THAT(instructions[6]->operands(),
UnorderedElementsAre(instructions[2], instructions[5]));
EXPECT_EQ(instructions[7]->name(), "slice");
EXPECT_THAT(instructions[7]->operands(),
UnorderedElementsAre(instructions[6]));
}
TEST_F(HloRematerializationTestUtilsTest,
MakeRematerializableWhileComputation) {
auto while_condition = MakeConditionComputation();
auto body_computation = MakeRematerializableComputation();
auto computation = MakeRematerializableWhileComputation(
while_condition.get(), body_computation.get());
std::vector<HloInstruction*> instructions(computation->instructions().begin(),
computation->instructions().end());
EXPECT_EQ(instructions[0]->name(), "param");
EXPECT_EQ(instructions[1]->name(), "reshape");
EXPECT_THAT(instructions[1]->operands(),
UnorderedElementsAre(instructions[0]));
EXPECT_EQ(instructions[2]->name(), "broadcast");
EXPECT_THAT(instructions[2]->operands(),
UnorderedElementsAre(instructions[1]));
EXPECT_EQ(instructions[3]->name(), "slice");
EXPECT_THAT(instructions[3]->operands(),
UnorderedElementsAre(instructions[2]));
EXPECT_EQ(instructions[4]->name(), "while");
EXPECT_THAT(instructions[4]->operands(),
UnorderedElementsAre(instructions[3]));
EXPECT_EQ(instructions[4]->while_condition()->name(),
"MakeRematerializableWhileComputation.cond");
EXPECT_EQ(instructions[4]->while_body()->name(),
"MakeRematerializableWhileComputation");
EXPECT_EQ(instructions[5]->name(), "concatenate");
EXPECT_THAT(instructions[5]->operands(),
UnorderedElementsAre(instructions[2], instructions[4]));
EXPECT_EQ(instructions[6]->name(), "slice");
EXPECT_THAT(instructions[6]->operands(),
UnorderedElementsAre(instructions[5]));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization_test_utils.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_rematerialization_test_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd13b244-2252-4f7c-8e58-0f8f18afb81d | cpp | tensorflow/tensorflow | work_queue_interface | tensorflow/core/tfrt/runtime/work_queue_interface.cc | tensorflow/core/tfrt/runtime/work_queue_interface_test.cc | #include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "tfrt/host_context/execution_context.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
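// Wraps a tfrt::ConcurrentWorkQueue (optionally owned) behind
// WorkQueueInterface: inter-op and blocking tasks are tagged with the request
// id via WrapWork, and everything else is forwarded to the underlying queue.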
class DefaultWorkQueueWrapper : public WorkQueueInterface {
public:
explicit DefaultWorkQueueWrapper(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue)
: WorkQueueInterface(0),
work_queue_owner_(std::move(work_queue)),
work_queue_(work_queue_owner_.get()) {}
DefaultWorkQueueWrapper(std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue,
thread::ThreadPoolInterface* intra_thread_pool)
: WorkQueueInterface(0, intra_thread_pool),
work_queue_owner_(std::move(work_queue)),
work_queue_(work_queue_owner_.get()) {}
DefaultWorkQueueWrapper(int64_t request_id,
tfrt::ConcurrentWorkQueue* work_queue,
thread::ThreadPoolInterface* intra_thread_pool)
: WorkQueueInterface(request_id, intra_thread_pool),
work_queue_(work_queue) {}
~DefaultWorkQueueWrapper() override = default;
private:
std::string name() const override { return work_queue_->name(); }
void AddTask(tfrt::TaskFunction work) override {
work_queue_->AddTask(WrapWork(id(), "inter", std::move(work)));
}
std::optional<tfrt::TaskFunction> AddBlockingTask(
tfrt::TaskFunction work, bool allow_queuing) override {
return work_queue_->AddBlockingTask(
WrapWork(id(), "blocking", std::move(work)), allow_queuing);
}
void Await(
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> values) override {
work_queue_->Await(values);
}
void Quiesce() override { work_queue_->Quiesce(); }
int GetParallelismLevel() const override {
return work_queue_->GetParallelismLevel();
}
bool IsInWorkerThread() const override {
return work_queue_->IsInWorkerThread();
}
absl::StatusOr<std::unique_ptr<WorkQueueInterface>> InitializeRequest(
int64_t request_id) const override {
return {std::make_unique<DefaultWorkQueueWrapper>(request_id, work_queue_,
GetIntraOpThreadPool())};
}
private:
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue_owner_;
tfrt::ConcurrentWorkQueue* work_queue_ = nullptr;
};
}
std::unique_ptr<WorkQueueInterface> WrapDefaultWorkQueue(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue) {
return std::make_unique<DefaultWorkQueueWrapper>(std::move(work_queue));
}
std::unique_ptr<WorkQueueInterface> WrapDefaultWorkQueue(
std::unique_ptr<tfrt::ConcurrentWorkQueue> work_queue,
thread::ThreadPoolInterface* intra_thread_pool) {
return std::make_unique<DefaultWorkQueueWrapper>(std::move(work_queue),
intra_thread_pool);
}
}
} | #include "tensorflow/core/tfrt/runtime/work_queue_interface.h"
#include <thread>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/tfrt/utils/thread_pool.h"
#include "tfrt/cpp_tests/test_util.h"
#include "tfrt/host_context/execution_context.h"
#include "tfrt/host_context/task_function.h"
namespace tensorflow {
namespace tfrt_stub {
namespace {
TEST(DefaultWorkQueueWrapperTest, Name) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->name(), work_queue_ptr->name());
}
TEST(DefaultWorkQueueWrapperTest, AddTask_OnlyTask) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
auto av = tfrt::MakeUnconstructedAsyncValueRef<int>().ReleaseRCRef();
work_queue_wrapper->AddTask(
tfrt::TaskFunction([av] { av->emplace<int>(0); }));
work_queue_wrapper->Await(std::move(av));
}
TEST(DefaultWorkQueueWrapperTest, AddBlockingTask_TaskAndAllowQueueing) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
auto av = tfrt::MakeUnconstructedAsyncValueRef<int>().ReleaseRCRef();
std::thread thread{[&] {
    auto work = work_queue_wrapper->AddBlockingTask(
        tfrt::TaskFunction([&] { av->emplace<int>(0); }),
        /*allow_queuing=*/true);
}};
work_queue_wrapper->Await(std::move(av));
thread.join();
}
TEST(DefaultWorkQueueWrapperTest, GetParallelismLevel) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->GetParallelismLevel(),
work_queue_ptr->GetParallelismLevel());
}
TEST(DefaultWorkQueueWrapperTest, IsInWorkerThread) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
auto work_queue_ptr = work_queue.get();
auto work_queue_wrapper = WrapDefaultWorkQueue(std::move(work_queue));
EXPECT_EQ(work_queue_wrapper->IsInWorkerThread(),
work_queue_ptr->IsInWorkerThread());
}
TEST(DefaultWorkQueueWrapperTest, IntraOpThreadPool) {
auto work_queue = tfrt::CreateSingleThreadedWorkQueue();
TfThreadPool intra_op_thread_pool("tf_intra",
                                    /*num_threads=*/1);
auto work_queue_wrapper =
WrapDefaultWorkQueue(std::move(work_queue), &intra_op_thread_pool);
TF_ASSERT_OK_AND_ASSIGN(auto queue, work_queue_wrapper->InitializeRequest(
                                          /*request_id=*/0));
EXPECT_NE(queue, nullptr);
EXPECT_EQ(queue->GetIntraOpThreadPool(), &intra_op_thread_pool);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/work_queue_interface.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/runtime/work_queue_interface_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3c6f7a0c-58b2-4b13-a19e-66849e246b7a | cpp | google/arolla | edge | arolla/dense_array/edge.cc | arolla/dense_array/edge_test.cc | #include "arolla/dense_array/edge.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
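// Composes two or more edges into a single MAPPING edge: each edge is first
// converted to mapping form, then child ids are traced from the last edge
// back through the first to build the combined mapping.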
absl::StatusOr<DenseArrayEdge> ComposeMappingEdge(
absl::Span<const DenseArrayEdge> edges, RawBufferFactory& buf_factory) {
DCHECK_GE(edges.size(), 2);
DenseArray<int64_t> mapping =
edges.back().ToMappingEdge(buf_factory).edge_values();
for (int i = edges.size() - 2; i >= 0; --i) {
const auto mapping_edge = edges[i].ToMappingEdge(buf_factory);
DenseArrayBuilder<int64_t> bldr(edges.back().child_size(), &buf_factory);
mapping.ForEachPresent([&bldr, &mapping_edge](int64_t id, int64_t value) {
bldr.Set(id, mapping_edge.edge_values()[value]);
});
mapping = std::move(bldr).Build();
}
return DenseArrayEdge::UnsafeFromMapping(std::move(mapping),
edges.front().parent_size());
}
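// Composes two or more SPLIT_POINTS edges into one by chaining split-point
// lookups; the result keeps the first edge's parent size and the last edge's
// child size.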
absl::StatusOr<DenseArrayEdge> ComposeSplitPointsEdge(
absl::Span<const DenseArrayEdge> edges, RawBufferFactory& buf_factory) {
DCHECK_GE(edges.size(), 2);
Buffer<int64_t>::Builder bldr(edges.front().edge_values().size(),
&buf_factory);
auto mut_bldr_span = bldr.GetMutableSpan();
auto previous_split_points = edges.front().edge_values().values.span();
for (size_t i = 1; i < edges.size(); ++i) {
auto split_points = edges[i].edge_values().values.span();
for (size_t j = 0; j < mut_bldr_span.size(); ++j) {
mut_bldr_span[j] = split_points[previous_split_points[j]];
}
previous_split_points = mut_bldr_span;
}
return DenseArrayEdge::UnsafeFromSplitPoints({std::move(bldr).Build()});
}
}
absl::StatusOr<DenseArrayEdge> DenseArrayEdge::FromSplitPoints(
DenseArray<int64_t> split_points) {
if (!split_points.IsFull()) {
return absl::InvalidArgumentError("split points must be full");
}
if (split_points.empty()) {
return absl::InvalidArgumentError(
"split points array must have at least 1 element");
}
if (split_points.values[0] != 0) {
return absl::InvalidArgumentError(
"split points array must have first element equal to 0");
}
int64_t parent_size = split_points.size() - 1;
int64_t child_size = split_points.values.back();
if (!std::is_sorted(split_points.values.begin(), split_points.values.end())) {
return absl::InvalidArgumentError("split points must be sorted");
}
return DenseArrayEdge(DenseArrayEdge::SPLIT_POINTS, parent_size, child_size,
std::move(split_points));
}
absl::StatusOr<DenseArrayEdge> DenseArrayEdge::FromMapping(
DenseArray<int64_t> mapping, int64_t parent_size) {
if (parent_size < 0) {
return absl::InvalidArgumentError("parent_size can not be negative");
}
int64_t max_value = -1;
bool negative = false;
mapping.ForEach([&max_value, &negative](int64_t, bool present, int64_t v) {
if (present) {
max_value = std::max(max_value, v);
if (v < 0) negative = true;
}
});
if (negative) {
return absl::InvalidArgumentError("mapping can't contain negative values");
}
if (max_value >= parent_size) {
return absl::InvalidArgumentError(absl::StrFormat(
"parent_size=%d, but parent id %d is used", parent_size, max_value));
}
return UnsafeFromMapping(std::move(mapping), parent_size);
}
absl::StatusOr<DenseArrayEdge> DenseArrayEdge::FromUniformGroups(
int64_t parent_size, int64_t group_size, RawBufferFactory& buf_factory) {
if (parent_size < 0 || group_size < 0) {
return absl::InvalidArgumentError(
"parent_size and group_size cannot be negative");
}
Buffer<int64_t>::Builder split_points_builder(parent_size + 1, &buf_factory);
auto inserter = split_points_builder.GetInserter();
for (int64_t i = 0; i <= parent_size; ++i) inserter.Add(i * group_size);
return UnsafeFromSplitPoints({std::move(split_points_builder).Build()});
}
DenseArrayEdge DenseArrayEdge::UnsafeFromMapping(DenseArray<int64_t> mapping,
int64_t parent_size) {
int64_t child_size = mapping.size();
return DenseArrayEdge(DenseArrayEdge::MAPPING, parent_size, child_size,
std::move(mapping));
}
DenseArrayEdge DenseArrayEdge::UnsafeFromSplitPoints(
DenseArray<int64_t> split_points) {
int64_t parent_size = split_points.size() - 1;
int64_t child_size = split_points.values.back();
return DenseArrayEdge(DenseArrayEdge::SPLIT_POINTS, parent_size, child_size,
std::move(split_points));
}
DenseArrayEdge DenseArrayEdge::ToMappingEdge(
RawBufferFactory& buf_factory) const {
switch (edge_type()) {
case DenseArrayEdge::MAPPING:
return *this;
case DenseArrayEdge::SPLIT_POINTS: {
Buffer<int64_t>::Builder bldr(child_size(), &buf_factory);
int64_t* mapping = bldr.GetMutableSpan().begin();
const int64_t* splits = edge_values().values.begin();
for (int64_t parent_id = 0; parent_id < parent_size(); ++parent_id) {
std::fill(mapping + splits[parent_id], mapping + splits[parent_id + 1],
parent_id);
}
return DenseArrayEdge::UnsafeFromMapping({std::move(bldr).Build()},
parent_size());
}
}
ABSL_UNREACHABLE();
}
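// Converts a MAPPING edge to an equivalent SPLIT_POINTS edge. The mapping must
// be full and sorted; a SPLIT_POINTS edge is returned unchanged.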
absl::StatusOr<DenseArrayEdge> DenseArrayEdge::ToSplitPointsEdge(
RawBufferFactory& buf_factory) const {
if (edge_type() == DenseArrayEdge::SPLIT_POINTS) return *this;
if (!edge_values().IsFull()) {
return absl::InvalidArgumentError("expected a full mapping");
}
Buffer<int64_t>::Builder split_points_builder(parent_size() + 1,
&buf_factory);
auto inserter = split_points_builder.GetInserter();
inserter.Add(0);
int64_t current_bin = 0;
auto values = edge_values().values.span();
for (size_t i = 0; i < values.size(); ++i) {
DCHECK_LE(values[i], parent_size());
if (values[i] < current_bin) {
return absl::InvalidArgumentError("expected a sorted mapping");
}
for (; current_bin < values[i]; ++current_bin) inserter.Add(i);
}
for (; current_bin < parent_size(); ++current_bin) {
inserter.Add(edge_values().size());
}
return DenseArrayEdge::UnsafeFromSplitPoints(
DenseArray<int64_t>{std::move(split_points_builder).Build()});
}
bool DenseArrayEdge::IsEquivalentTo(const DenseArrayEdge& other) const {
if (parent_size() != other.parent_size() ||
child_size() != other.child_size()) {
return false;
}
if (edge_type() == other.edge_type()) {
return ArraysAreEquivalent(edge_values(), other.edge_values());
}
ASSIGN_OR_RETURN(auto this_edge, ToSplitPointsEdge(),
_.With([](const auto&) { return false; }));
ASSIGN_OR_RETURN(auto other_edge, other.ToSplitPointsEdge(),
_.With([](const auto&) { return false; }));
return ArraysAreEquivalent(this_edge.edge_values(), other_edge.edge_values());
}
absl::StatusOr<DenseArrayEdge> DenseArrayEdge::ComposeEdges(
absl::Span<const DenseArrayEdge> edges, RawBufferFactory& buf_factory) {
if (edges.empty()) {
return absl::InvalidArgumentError("at least one edge must be present");
}
if (edges.size() == 1) return edges[0];
int64_t prior_child_size = edges[0].parent_size();
for (size_t i = 0; i < edges.size(); ++i) {
if (edges[i].parent_size() != prior_child_size) {
return absl::InvalidArgumentError(
absl::StrFormat("incompatible edges: edges[%d].child_size (%d) != "
"edges[%d].parent_size (%d)",
i - 1, prior_child_size, i, edges[i].parent_size()));
}
prior_child_size = edges[i].child_size();
}
std::vector<DenseArrayEdge> transformed_edges;
transformed_edges.reserve(edges.size());
size_t i = 0;
while (i < edges.size()) {
size_t split_points_end = i;
while (split_points_end < edges.size() &&
edges[split_points_end].edge_type() ==
DenseArrayEdge::SPLIT_POINTS) {
split_points_end++;
}
if (split_points_end - i >= 2) {
ASSIGN_OR_RETURN(auto composed_edge,
ComposeSplitPointsEdge(
absl::MakeSpan(edges.begin() + i,
edges.begin() + split_points_end),
buf_factory));
transformed_edges.push_back(std::move(composed_edge));
i = split_points_end;
} else {
transformed_edges.push_back(edges[i]);
i++;
}
}
if (transformed_edges.size() == 1) {
return std::move(transformed_edges[0]);
} else {
return ComposeMappingEdge(transformed_edges, buf_factory);
}
}
void FingerprintHasherTraits<DenseArrayEdge>::operator()(
FingerprintHasher* hasher, const DenseArrayEdge& value) const {
hasher->Combine(value.edge_type(), value.parent_size(), value.child_size(),
value.edge_values());
}
void FingerprintHasherTraits<DenseArrayGroupScalarEdge>::operator()(
FingerprintHasher* hasher, const DenseArrayGroupScalarEdge& value) const {
hasher->Combine(value.child_size());
}
} | #include "arolla/dense_array/edge.h"
#include <cstdint>
#include <optional>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/types/span.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/util/fingerprint.h"
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::Eq;
namespace arolla {
namespace {
TEST(DenseArrayEdgeTest, FromSplitPoints) {
DenseArray<int64_t> split_points{CreateBuffer<int64_t>({0, 10, 20})};
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromSplitPoints(split_points));
EXPECT_THAT(edge.edge_type(), Eq(DenseArrayEdge::SPLIT_POINTS));
EXPECT_THAT(edge.edge_values().values, ElementsAre(0, 10, 20));
EXPECT_EQ(edge.parent_size(), 2);
EXPECT_EQ(edge.child_size(), 20);
EXPECT_EQ(edge.split_size(0), 10);
EXPECT_EQ(edge.split_size(1), 10);
}
TEST(DenseArrayEdgeTest, FromSplitPointsEmptyGroup) {
DenseArray<int64_t> split_points{CreateBuffer<int64_t>({0})};
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromSplitPoints(split_points));
EXPECT_THAT(edge.edge_type(), Eq(DenseArrayEdge::SPLIT_POINTS));
EXPECT_THAT(edge.edge_values().values, ElementsAre(0));
EXPECT_EQ(edge.parent_size(), 0);
EXPECT_EQ(edge.child_size(), 0);
}
TEST(DenseArrayEdgeTest, FromSplitPointsNotFull) {
auto split_points = CreateDenseArray<int64_t>({0, 3, std::nullopt, 10});
EXPECT_THAT(DenseArrayEdge::FromSplitPoints(split_points),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("split points must be full")));
}
TEST(DenseArrayEdgeTest, FromSplitPointsTooFew) {
EXPECT_THAT(DenseArrayEdge::FromSplitPoints(DenseArray<int64_t>()),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"split points array must have at least 1 element")));
}
TEST(DenseArrayEdgeTest, FromSplitPointsInBadOrder) {
EXPECT_THAT(
DenseArrayEdge::FromSplitPoints({CreateBuffer<int64_t>({10, 20, 30})}),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr(
"split points array must have first element equal to 0")));
EXPECT_THAT(
DenseArrayEdge::FromSplitPoints({CreateBuffer<int64_t>({0, 40, 10})}),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("split points must be sorted")));
}
TEST(DenseArrayEdgeTest, UnsafeFromSplitPoints) {
DenseArray<int64_t> split_points(CreateDenseArray<int64_t>({0, 10, 20}));
auto edge = DenseArrayEdge::UnsafeFromSplitPoints(split_points);
EXPECT_THAT(edge.edge_type(), Eq(DenseArrayEdge::SPLIT_POINTS));
EXPECT_THAT(edge.edge_values().values, ElementsAre(0, 10, 20));
EXPECT_EQ(edge.parent_size(), 2);
EXPECT_EQ(edge.child_size(), 20);
EXPECT_EQ(edge.split_size(0), 10);
EXPECT_EQ(edge.split_size(1), 10);
}
TEST(ArrayEdgeTest, UnsafeFromSplitPointsEmptyGroup) {
DenseArray<int64_t> split_points(CreateDenseArray<int64_t>({0}));
auto edge = DenseArrayEdge::UnsafeFromSplitPoints(split_points);
EXPECT_THAT(edge.edge_type(), Eq(DenseArrayEdge::SPLIT_POINTS));
EXPECT_THAT(edge.edge_values(), ElementsAre(0));
EXPECT_EQ(edge.parent_size(), 0);
EXPECT_EQ(edge.child_size(), 0);
}
TEST(DenseArrayEdgeTest, FromMapping) {
DenseArray<int64_t> mapping{
CreateBuffer<int64_t>({0, 1, 2, 0, 1, 2, 0, 1, 2})};
DenseArray<int64_t> bad_mapping{
CreateBuffer<int64_t>({0, -1, 2, 0, 1, 2, 0, 1, 2})};
EXPECT_THAT(
DenseArrayEdge::FromMapping(mapping, -1),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("parent_size can not be negative")));
EXPECT_THAT(
DenseArrayEdge::FromMapping(mapping, 2),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("parent_size=2, but parent id 2 is used")));
EXPECT_THAT(
DenseArrayEdge::FromMapping(bad_mapping, 3),
StatusIs(absl::StatusCode::kInvalidArgument,
::testing::HasSubstr("mapping can't contain negative values")));
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromMapping(mapping, 3));
EXPECT_THAT(edge.edge_type(), Eq(DenseArrayEdge::MAPPING));
EXPECT_THAT(edge.edge_values().values, Eq(mapping.values));
EXPECT_EQ(edge.parent_size(), 3);
EXPECT_EQ(edge.child_size(), 9);
}
TEST(DenseArrayEdgeTest, FromUniformGroups) {
{
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromUniformGroups(0, 5));
EXPECT_THAT(edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_EQ(edge.parent_size(), 0);
EXPECT_EQ(edge.child_size(), 0);
EXPECT_THAT(edge.edge_values(), ElementsAre(0));
}
{
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromUniformGroups(3, 0));
EXPECT_THAT(edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_EQ(edge.parent_size(), 3);
EXPECT_EQ(edge.child_size(), 0);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 0, 0, 0));
}
{
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromUniformGroups(3, 4));
EXPECT_THAT(edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_EQ(edge.parent_size(), 3);
EXPECT_EQ(edge.child_size(), 12);
EXPECT_THAT(edge.edge_values(), ElementsAre(0, 4, 8, 12));
}
{
EXPECT_THAT(DenseArrayEdge::FromUniformGroups(-1, 3),
StatusIs(absl::StatusCode::kInvalidArgument,
"parent_size and group_size cannot be negative"));
EXPECT_THAT(DenseArrayEdge::FromUniformGroups(3, -1),
StatusIs(absl::StatusCode::kInvalidArgument,
"parent_size and group_size cannot be negative"));
}
{
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromUniformGroups(1, 1));
EXPECT_TRUE(edge.edge_values().values.is_owner());
}
{
UnsafeArenaBufferFactory arena{128};
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromUniformGroups(1, 1, arena));
EXPECT_FALSE(edge.edge_values().values.is_owner());
}
}
TEST(DenseArrayEdgeTest, DefaultEdge) {
DenseArrayEdge edge;
EXPECT_THAT(edge.edge_type(), Eq(DenseArrayEdge::MAPPING));
EXPECT_THAT(edge.edge_values().values, ElementsAre());
EXPECT_EQ(edge.parent_size(), 0);
EXPECT_EQ(edge.child_size(), 0);
}
TEST(DenseArrayEdgeTest, Fingerprint) {
const auto mapping = CreateDenseArray<int64_t>({0, 0, 0, 1, 1});
const auto split_points = CreateDenseArray<int64_t>({0, 3, 5});
ASSERT_OK_AND_ASSIGN(auto edge_from_mapping_1,
DenseArrayEdge::FromMapping(mapping, 2));
ASSERT_OK_AND_ASSIGN(auto edge_from_mapping_2,
DenseArrayEdge::FromMapping(mapping, 2));
ASSERT_OK_AND_ASSIGN(auto edge_from_split_points,
DenseArrayEdge::FromSplitPoints(split_points));
EXPECT_EQ(FingerprintHasher("salt").Combine(edge_from_mapping_1).Finish(),
FingerprintHasher("salt").Combine(edge_from_mapping_2).Finish());
EXPECT_NE(FingerprintHasher("salt").Combine(edge_from_mapping_1).Finish(),
FingerprintHasher("salt").Combine(edge_from_split_points).Finish());
}
TEST(DenseArrayEdgeTest, ToSplitPointsEdge_Success) {
{
const auto split_points = CreateDenseArray<int64_t>({0, 3, 5});
ASSERT_OK_AND_ASSIGN(auto edge,
DenseArrayEdge::FromSplitPoints(split_points));
ASSERT_OK_AND_ASSIGN(auto edge2, edge.ToSplitPointsEdge());
EXPECT_EQ(edge.parent_size(), edge2.parent_size());
EXPECT_EQ(edge.child_size(), edge2.child_size());
EXPECT_EQ(edge2.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_THAT(edge2.edge_values().values, ElementsAre(0, 3, 5));
}
{
const auto mapping = CreateDenseArray<int64_t>({0, 0, 1, 1, 3, 5});
ASSERT_OK_AND_ASSIGN(auto mapping_edge,
DenseArrayEdge::FromMapping(mapping, 8));
ASSERT_OK_AND_ASSIGN(auto split_point_edge,
mapping_edge.ToSplitPointsEdge());
EXPECT_EQ(mapping_edge.parent_size(), split_point_edge.parent_size());
EXPECT_EQ(mapping_edge.child_size(), split_point_edge.child_size());
EXPECT_EQ(split_point_edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_THAT(split_point_edge.edge_values().values,
ElementsAre(0, 2, 4, 4, 5, 5, 6, 6, 6));
}
}
TEST(DenseArrayEdgeTest, ToSplitPointsEdge_Errors) {
{
const auto mapping = CreateDenseArray<int64_t>({0, std::nullopt});
ASSERT_OK_AND_ASSIGN(auto mapping_edge,
DenseArrayEdge::FromMapping(mapping, 2));
EXPECT_THAT(mapping_edge.ToSplitPointsEdge(),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected a full mapping"));
}
{
const auto mapping = CreateDenseArray<int64_t>({1, 0});
ASSERT_OK_AND_ASSIGN(auto mapping_edge,
DenseArrayEdge::FromMapping(mapping, 2));
EXPECT_THAT(mapping_edge.ToSplitPointsEdge(),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected a sorted mapping"));
}
}
TEST(DenseArrayEdgeTest, ToSplitPointsEdge_BufferFactory) {
{
const auto mapping = CreateDenseArray<int64_t>({0, 0, 1, 1, 3, 5});
ASSERT_OK_AND_ASSIGN(auto mapping_edge,
DenseArrayEdge::FromMapping(mapping, 8));
ASSERT_OK_AND_ASSIGN(auto split_point_edge,
mapping_edge.ToSplitPointsEdge());
EXPECT_TRUE(split_point_edge.edge_values().values.is_owner());
}
{
const auto mapping = CreateDenseArray<int64_t>({0, 0, 1, 1, 3, 5});
ASSERT_OK_AND_ASSIGN(auto mapping_edge,
DenseArrayEdge::FromMapping(mapping, 8));
UnsafeArenaBufferFactory arena{128};
ASSERT_OK_AND_ASSIGN(auto split_point_edge,
mapping_edge.ToSplitPointsEdge(arena));
EXPECT_FALSE(split_point_edge.edge_values().values.is_owner());
}
}
TEST(DenseArrayEdgeTest, ToMappingEdge) {
{
const auto mapping =
CreateDenseArray<int64_t>({0, 1, 0, std::nullopt, 3, 5});
ASSERT_OK_AND_ASSIGN(auto edge, DenseArrayEdge::FromMapping(mapping, 8));
auto edge2 = edge.ToMappingEdge();
EXPECT_EQ(edge.parent_size(), edge2.parent_size());
EXPECT_EQ(edge.child_size(), edge2.child_size());
EXPECT_EQ(edge2.edge_type(), DenseArrayEdge::MAPPING);
EXPECT_THAT(edge2.edge_values(), ElementsAre(0, 1, 0, std::nullopt, 3, 5));
}
{
const auto split_points = CreateDenseArray<int64_t>({0, 3, 5});
ASSERT_OK_AND_ASSIGN(auto split_points_edge,
DenseArrayEdge::FromSplitPoints(split_points));
auto mapping_edge = split_points_edge.ToMappingEdge();
EXPECT_EQ(split_points_edge.parent_size(), mapping_edge.parent_size());
EXPECT_EQ(split_points_edge.child_size(), mapping_edge.child_size());
EXPECT_EQ(mapping_edge.edge_type(), DenseArrayEdge::MAPPING);
EXPECT_THAT(mapping_edge.edge_values(), ElementsAre(0, 0, 0, 1, 1));
}
{
const auto split_points = CreateDenseArray<int64_t>({0, 3, 5});
ASSERT_OK_AND_ASSIGN(auto split_points_edge,
DenseArrayEdge::FromSplitPoints(split_points));
auto mapping_edge = split_points_edge.ToMappingEdge();
EXPECT_TRUE(mapping_edge.edge_values().values.is_owner());
UnsafeArenaBufferFactory arena{128};
mapping_edge = split_points_edge.ToMappingEdge(arena);
EXPECT_FALSE(mapping_edge.edge_values().values.is_owner());
}
}
TEST(DenseArrayEdgeTest, IsEquivalentTo) {
{
const auto split_points = CreateDenseArray<int64_t>({0, 3, 5});
ASSERT_OK_AND_ASSIGN(auto edge1,
DenseArrayEdge::FromSplitPoints(split_points));
ASSERT_OK_AND_ASSIGN(auto edge2,
DenseArrayEdge::FromSplitPoints(split_points));
EXPECT_TRUE(edge1.IsEquivalentTo(edge2));
}
{
const auto mapping =
CreateDenseArray<int64_t>({0, 0, std::nullopt, 1, 3, 5});
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromMapping(mapping, 8));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromMapping(mapping, 8));
EXPECT_TRUE(edge1.IsEquivalentTo(edge2));
}
{
const auto mapping = CreateDenseArray<int64_t>({0, 0, 0, 1, 1});
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromMapping(mapping, 2));
const auto split_points = CreateDenseArray<int64_t>({0, 3, 5});
ASSERT_OK_AND_ASSIGN(auto edge2,
DenseArrayEdge::FromSplitPoints(split_points));
EXPECT_TRUE(edge1.IsEquivalentTo(edge2));
}
{
const auto mapping = CreateDenseArray<int64_t>({0, 0, 1, 0, 1});
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromMapping(mapping, 2));
const auto split_points = CreateDenseArray<int64_t>({0, 3, 5});
ASSERT_OK_AND_ASSIGN(auto edge2,
DenseArrayEdge::FromSplitPoints(split_points));
EXPECT_FALSE(edge1.IsEquivalentTo(edge2));
}
{
const auto mapping = CreateDenseArray<int64_t>({0, 0, 0, 1, 1});
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromMapping(mapping, 2));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromMapping(mapping, 3));
EXPECT_FALSE(edge1.IsEquivalentTo(edge2));
}
{
const auto mapping1 = CreateDenseArray<int64_t>({0, 0, 0, 1, 1});
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromMapping(mapping1, 2));
const auto mapping2 = CreateDenseArray<int64_t>({0, 1, 1});
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromMapping(mapping2, 2));
EXPECT_FALSE(edge1.IsEquivalentTo(edge2));
}
{
const auto mapping1 = CreateDenseArray<int64_t>({0, 0, 0, 1, 1});
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromMapping(mapping1, 2));
const auto mapping2 = CreateDenseArray<int64_t>({0, 0, 1, 1, 1});
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromMapping(mapping2, 2));
EXPECT_FALSE(edge1.IsEquivalentTo(edge2));
}
}
TEST(DenseArrayEdgeTest, ComposeEdges_SplitPoint) {
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 3})));
ASSERT_OK_AND_ASSIGN(
auto edge3,
DenseArrayEdge::FromSplitPoints(CreateDenseArray<int64_t>({0, 1, 2, 4})));
ASSERT_OK_AND_ASSIGN(auto edge4,
DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 3, 4, 11, 12})));
{
ASSERT_OK_AND_ASSIGN(auto composed_edge, DenseArrayEdge::ComposeEdges(
{edge1, edge2, edge3, edge4}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_THAT(composed_edge.edge_values(), ElementsAre(0, 12));
}
{
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge2, edge3, edge4}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_THAT(composed_edge.edge_values(), ElementsAre(0, 3, 12));
}
{
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge3, edge4}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_THAT(composed_edge.edge_values(), ElementsAre(0, 3, 4, 12));
}
{
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge4}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::SPLIT_POINTS);
EXPECT_THAT(composed_edge.edge_values(), ElementsAre(0, 3, 4, 11, 12));
}
{
EXPECT_THAT(DenseArrayEdge::ComposeEdges({}),
StatusIs(absl::StatusCode::kInvalidArgument,
"at least one edge must be present"));
}
{
EXPECT_THAT(DenseArrayEdge::ComposeEdges({edge1, edge3}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incompatible edges: edges[0].child_size (2) != "
"edges[1].parent_size (3)"));
}
}
TEST(DenseArrayEdgeTest, ComposeEdges_Mapping) {
ASSERT_OK_AND_ASSIGN(auto edge1,
DenseArrayEdge::FromMapping(
CreateDenseArray<int64_t>({0, std::nullopt}), 5));
ASSERT_OK_AND_ASSIGN(auto edge2,
DenseArrayEdge::FromMapping(
CreateDenseArray<int64_t>({0, 1, std::nullopt}), 2));
ASSERT_OK_AND_ASSIGN(
auto edge3,
DenseArrayEdge::FromMapping(CreateDenseArray<int64_t>({1, 2, 0, 2}), 3));
ASSERT_OK_AND_ASSIGN(auto edge4,
DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 3, 4, 11, 12})));
ASSERT_OK_AND_ASSIGN(
auto edge5, DenseArrayEdge::FromSplitPoints(CreateDenseArray<int64_t>(
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12})));
{
ASSERT_OK_AND_ASSIGN(
auto composed_edge,
DenseArrayEdge::ComposeEdges({edge1, edge2, edge3, edge4, edge5}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::MAPPING);
EXPECT_EQ(composed_edge.parent_size(), 5);
EXPECT_THAT(composed_edge.edge_values(),
ElementsAre(std::nullopt, std::nullopt, std::nullopt,
std::nullopt, 0, 0, 0, 0, 0, 0, 0, std::nullopt));
}
{
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge2, edge3, edge4}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::MAPPING);
EXPECT_EQ(composed_edge.parent_size(), 2);
EXPECT_THAT(
composed_edge.edge_values(),
ElementsAre(1, 1, 1, std::nullopt, 0, 0, 0, 0, 0, 0, 0, std::nullopt));
}
{
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge3, edge4}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::MAPPING);
EXPECT_EQ(composed_edge.parent_size(), 3);
EXPECT_THAT(composed_edge.edge_values(),
ElementsAre(1, 1, 1, 2, 0, 0, 0, 0, 0, 0, 0, 2));
}
{
ASSERT_OK_AND_ASSIGN(
auto composed_edge,
DenseArrayEdge::ComposeEdges({edge4, edge5.ToMappingEdge()}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::MAPPING);
EXPECT_EQ(composed_edge.parent_size(), 4);
EXPECT_THAT(composed_edge.edge_values(),
ElementsAre(0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 3));
}
{
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge2, edge3}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::MAPPING);
EXPECT_EQ(composed_edge.parent_size(), 2);
EXPECT_THAT(composed_edge.edge_values(),
ElementsAre(1, std::nullopt, 0, std::nullopt));
}
{
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge4.ToMappingEdge()}));
EXPECT_EQ(composed_edge.edge_type(), DenseArrayEdge::MAPPING);
EXPECT_EQ(composed_edge.parent_size(), 4);
EXPECT_THAT(composed_edge.edge_values(),
ElementsAre(0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 3));
}
{
EXPECT_THAT(DenseArrayEdge::ComposeEdges({}),
StatusIs(absl::StatusCode::kInvalidArgument,
"at least one edge must be present"));
}
{
EXPECT_THAT(DenseArrayEdge::ComposeEdges({edge1, edge3}),
StatusIs(absl::StatusCode::kInvalidArgument,
"incompatible edges: edges[0].child_size (2) != "
"edges[1].parent_size (3)"));
}
}
TEST(DenseArrayEdgeTest, ComposeEdges_BufferFactory) {
ASSERT_OK_AND_ASSIGN(auto edge1, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 2})));
ASSERT_OK_AND_ASSIGN(auto edge2, DenseArrayEdge::FromSplitPoints(
CreateDenseArray<int64_t>({0, 1, 3})));
{
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge1, edge2}));
EXPECT_TRUE(composed_edge.edge_values().values.is_owner());
}
{
UnsafeArenaBufferFactory arena{128};
ASSERT_OK_AND_ASSIGN(auto composed_edge,
DenseArrayEdge::ComposeEdges({edge1, edge2}, arena));
EXPECT_FALSE(composed_edge.edge_values().values.is_owner());
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/edge.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/dense_array/edge_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
bc07f90e-c5ca-4425-9d00-f2c2cd42fb47 | cpp | tensorflow/tensorflow | tmpl_tflite_op | tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.cc | tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op_test.cc | #include "tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/shim/op_kernel.h"
#include "tensorflow/lite/kernels/shim/test_op/tmpl_op.h"
#include "tensorflow/lite/kernels/shim/tflite_op_shim.h"
#include "tensorflow/lite/kernels/shim/tflite_op_wrapper.h"
#include "tensorflow/lite/mutable_op_resolver.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
const char a_type[]("AType"), b_type[]("BType");
}
using ::tflite::shim::op_wrapper::Attr;
using ::tflite::shim::op_wrapper::AttrName;
using ::tflite::shim::op_wrapper::OpWrapper;
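// OpWrapper instantiates shim::TmplOp for every combination of the "AType"
// attribute (int32_t or float) and the "BType" attribute (int32_t, int64_t or
// bool); the concrete specialization appears to be selected at runtime from
// the op's custom options.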
template <shim::Runtime Rt>
using Op = OpWrapper<Rt, shim::TmplOp, Attr<AttrName<a_type>, int32_t, float>,
Attr<AttrName<b_type>, int32_t, int64_t, bool>>;
using OpKernel = ::tflite::shim::TfLiteOpKernel<Op>;
void AddTmplOp(MutableOpResolver* resolver) { OpKernel::Add(resolver); }
TfLiteRegistration* Register_TMPL_OP() {
return OpKernel::GetTfLiteRegistration();
}
const char* OpName_TMPL_OP() { return OpKernel::OpName(); }
}
}
} | #include "tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.h"
#include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace shim {
namespace {
template <typename AType, typename BType>
class TmplOpModel : public SingleOpModel {
public:
TmplOpModel(const std::vector<uint8_t>& op_options,
const std::vector<tflite::TensorType>& input_types,
const std::vector<std::vector<int>>& input_shapes,
const std::vector<AType>& input0,
const std::vector<BType>& input1,
const std::vector<tflite::TensorType>& output_types) {
std::vector<int> input_idx;
for (const auto input_type : input_types) {
input_idx.push_back(AddInput(input_type));
}
for (const auto output_type : output_types) {
output_idx_.push_back(AddOutput(output_type));
}
SetCustomOp(ops::custom::OpName_TMPL_OP(), op_options,
ops::custom::Register_TMPL_OP);
BuildInterpreter(input_shapes);
PopulateTensor(input_idx[0], input0);
PopulateTensor(input_idx[1], input1);
}
template <typename T>
std::vector<T> GetOutput(const int i) {
return ExtractVector<T>(output_idx_[i]);
}
std::vector<int> GetOutputShape(const int i) {
return GetTensorShape(output_idx_[i]);
}
protected:
std::vector<int> output_idx_;
};
TEST(TmplOpModel, float_int32) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteFloat32);
builder.Int("BType", kTfLiteInt32);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_FLOAT32,
tflite::TensorType_INT32};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<float> input0 = {5.6f};
const std::vector<int32_t> input1 = {3};
TmplOpModel<float, int32_t> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(8.6f));
}
TEST(TmplOpModel, int32_int64) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteInt32);
builder.Int("BType", kTfLiteInt64);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_INT32,
tflite::TensorType_INT64};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<int32_t> input0 = {12};
const std::vector<int64_t> input1 = {33l};
TmplOpModel<int32_t, int64_t> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(45.0f));
}
TEST(TmplOpModel, int32_bool) {
flexbuffers::Builder builder;
builder.Map([&]() {
builder.Int("AType", kTfLiteInt32);
builder.Int("BType", kTfLiteBool);
});
builder.Finish();
std::vector<std::vector<int>> input_shapes = {{}, {}};
std::vector<tflite::TensorType> input_types = {tflite::TensorType_INT32,
tflite::TensorType_BOOL};
std::vector<tflite::TensorType> output_types = {tflite::TensorType_FLOAT32};
const std::vector<int32_t> input0 = {12};
const std::vector<bool> input1 = {true};
TmplOpModel<int32_t, bool> m(
builder.GetBuffer(), input_types, input_shapes, input0,
input1, output_types);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetOutput<float>(0), testing::ElementsAre(13.0f));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/shim/test_op/tmpl_tflite_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c1b32f96-8037-495e-84ce-029851f2e85d | cpp | google/quiche | quiche_stack_trace | quiche/common/platform/api/quiche_stack_trace.h | quiche/common/platform/api/quiche_stack_trace_test.cc | #ifndef QUICHE_COMMON_PLATFORM_API_QUICHE_STACK_TRACE_H_
#define QUICHE_COMMON_PLATFORM_API_QUICHE_STACK_TRACE_H_
#include <string>
#include <vector>
#include "absl/types/span.h"
#include "quiche_platform_impl/quiche_stack_trace_impl.h"
namespace quiche {
inline std::vector<void*> CurrentStackTrace() {
return CurrentStackTraceImpl();
}
inline std::string SymbolizeStackTrace(absl::Span<void* const> stacktrace) {
return SymbolizeStackTraceImpl(stacktrace);
}
inline std::string QuicheStackTrace() { return QuicheStackTraceImpl(); }
inline bool QuicheShouldRunStackTraceTest() {
return QuicheShouldRunStackTraceTestImpl();
}
}
#endif | #include "quiche/common/platform/api/quiche_stack_trace.h"
#include <cstdint>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/optimization.h"
#include "absl/strings/str_cat.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace test {
namespace {
bool ShouldRunTest() {
#if defined(ABSL_HAVE_ATTRIBUTE_NOINLINE)
return QuicheShouldRunStackTraceTest();
#else
return false;
#endif
}
ABSL_ATTRIBUTE_NOINLINE std::string QuicheDesignatedStackTraceTestFunction() {
std::string result = QuicheStackTrace();
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
return result;
}
ABSL_ATTRIBUTE_NOINLINE std::string
QuicheDesignatedTwoStepStackTraceTestFunction() {
std::string result = SymbolizeStackTrace(CurrentStackTrace());
ABSL_BLOCK_TAIL_CALL_OPTIMIZATION();
return result;
}
TEST(QuicheStackTraceTest, GetStackTrace) {
if (!ShouldRunTest()) {
return;
}
std::string stacktrace = QuicheDesignatedStackTraceTestFunction();
EXPECT_THAT(stacktrace,
testing::HasSubstr("QuicheDesignatedStackTraceTestFunction"));
}
TEST(QuicheStackTraceTest, GetStackTraceInTwoSteps) {
if (!ShouldRunTest()) {
return;
}
std::string stacktrace = QuicheDesignatedTwoStepStackTraceTestFunction();
EXPECT_THAT(stacktrace, testing::HasSubstr(
"QuicheDesignatedTwoStepStackTraceTestFunction"));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_stack_trace.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/platform/api/quiche_stack_trace_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
a18745e0-7850-49be-a0f1-f376124d5507 | cpp | google/quiche | quic_spdy_stream_body_manager | quiche/quic/core/http/quic_spdy_stream_body_manager.cc | quiche/quic/core/http/quic_spdy_stream_body_manager_test.cc | #include "quiche/quic/core/http/quic_spdy_stream_body_manager.h"
#include <algorithm>
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_logging.h"
namespace quic {
QuicSpdyStreamBodyManager::QuicSpdyStreamBodyManager()
: total_body_bytes_received_(0) {}
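// Called when non-body bytes (e.g. frame headers) arrive. If no body is
// buffered they can be marked consumed immediately, so their length is
// returned; otherwise they are recorded against the last fragment, to be
// consumed together with it later, and 0 is returned.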
size_t QuicSpdyStreamBodyManager::OnNonBody(QuicByteCount length) {
QUICHE_DCHECK_NE(0u, length);
if (fragments_.empty()) {
return length;
}
fragments_.back().trailing_non_body_byte_count += length;
return 0;
}
void QuicSpdyStreamBodyManager::OnBody(absl::string_view body) {
QUICHE_DCHECK(!body.empty());
fragments_.push_back({body, 0});
total_body_bytes_received_ += body.length();
}
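// Marks |num_bytes| of buffered body as read. Fully consumed fragments also
// release their trailing non-body bytes, so the returned value (the number of
// stream bytes the caller should mark consumed) can exceed |num_bytes|.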
size_t QuicSpdyStreamBodyManager::OnBodyConsumed(size_t num_bytes) {
QuicByteCount bytes_to_consume = 0;
size_t remaining_bytes = num_bytes;
while (remaining_bytes > 0) {
if (fragments_.empty()) {
QUIC_BUG(quic_bug_10394_1) << "Not enough available body to consume.";
return 0;
}
Fragment& fragment = fragments_.front();
const absl::string_view body = fragment.body;
if (body.length() > remaining_bytes) {
bytes_to_consume += remaining_bytes;
fragment.body = body.substr(remaining_bytes);
return bytes_to_consume;
}
remaining_bytes -= body.length();
bytes_to_consume += body.length() + fragment.trailing_non_body_byte_count;
fragments_.pop_front();
}
return bytes_to_consume;
}
int QuicSpdyStreamBodyManager::PeekBody(iovec* iov, size_t iov_len) const {
QUICHE_DCHECK(iov);
QUICHE_DCHECK_GT(iov_len, 0u);
if (fragments_.empty()) {
iov[0].iov_base = nullptr;
iov[0].iov_len = 0;
return 0;
}
size_t iov_filled = 0;
while (iov_filled < fragments_.size() && iov_filled < iov_len) {
absl::string_view body = fragments_[iov_filled].body;
iov[iov_filled].iov_base = const_cast<char*>(body.data());
iov[iov_filled].iov_len = body.size();
iov_filled++;
}
return iov_filled;
}
size_t QuicSpdyStreamBodyManager::ReadableBytes() const {
size_t count = 0;
for (auto const& fragment : fragments_) {
count += fragment.body.length();
}
return count;
}
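// Copies body bytes into the caller-provided iovec array. |*total_bytes_read|
// is set to the number of body bytes copied, while the return value also
// includes trailing non-body bytes of fully drained fragments, i.e. the
// amount to mark consumed on the stream.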
size_t QuicSpdyStreamBodyManager::ReadBody(const struct iovec* iov,
size_t iov_len,
size_t* total_bytes_read) {
*total_bytes_read = 0;
QuicByteCount bytes_to_consume = 0;
size_t index = 0;
char* dest = reinterpret_cast<char*>(iov[index].iov_base);
size_t dest_remaining = iov[index].iov_len;
while (!fragments_.empty()) {
Fragment& fragment = fragments_.front();
const absl::string_view body = fragment.body;
const size_t bytes_to_copy =
std::min<size_t>(body.length(), dest_remaining);
if (bytes_to_copy > 0) {
memcpy(dest, body.data(), bytes_to_copy);
}
bytes_to_consume += bytes_to_copy;
*total_bytes_read += bytes_to_copy;
if (bytes_to_copy == body.length()) {
bytes_to_consume += fragment.trailing_non_body_byte_count;
fragments_.pop_front();
} else {
fragment.body = body.substr(bytes_to_copy);
}
if (bytes_to_copy == dest_remaining) {
++index;
if (index == iov_len) {
break;
}
dest = reinterpret_cast<char*>(iov[index].iov_base);
dest_remaining = iov[index].iov_len;
} else {
dest += bytes_to_copy;
dest_remaining -= bytes_to_copy;
}
}
return bytes_to_consume;
}
} | #include "quiche/quic/core/http/quic_spdy_stream_body_manager.h"
#include <algorithm>
#include <numeric>
#include <string>
#include <vector>
#include "absl/base/macros.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_test.h"
namespace quic {
namespace test {
namespace {
class QuicSpdyStreamBodyManagerTest : public QuicTest {
protected:
QuicSpdyStreamBodyManager body_manager_;
};
TEST_F(QuicSpdyStreamBodyManagerTest, HasBytesToRead) {
EXPECT_FALSE(body_manager_.HasBytesToRead());
EXPECT_EQ(body_manager_.ReadableBytes(), 0u);
EXPECT_EQ(0u, body_manager_.total_body_bytes_received());
const QuicByteCount header_length = 3;
EXPECT_EQ(header_length, body_manager_.OnNonBody(header_length));
EXPECT_FALSE(body_manager_.HasBytesToRead());
EXPECT_EQ(body_manager_.ReadableBytes(), 0u);
EXPECT_EQ(0u, body_manager_.total_body_bytes_received());
std::string body(1024, 'a');
body_manager_.OnBody(body);
EXPECT_TRUE(body_manager_.HasBytesToRead());
EXPECT_EQ(body_manager_.ReadableBytes(), 1024u);
EXPECT_EQ(1024u, body_manager_.total_body_bytes_received());
}
TEST_F(QuicSpdyStreamBodyManagerTest, ConsumeMoreThanAvailable) {
std::string body(1024, 'a');
body_manager_.OnBody(body);
size_t bytes_to_consume = 0;
EXPECT_QUIC_BUG(bytes_to_consume = body_manager_.OnBodyConsumed(2048),
"Not enough available body to consume.");
EXPECT_EQ(0u, bytes_to_consume);
}
TEST_F(QuicSpdyStreamBodyManagerTest, OnBodyConsumed) {
struct {
std::vector<QuicByteCount> frame_header_lengths;
std::vector<const char*> frame_payloads;
std::vector<QuicByteCount> body_bytes_to_read;
std::vector<QuicByteCount> expected_return_values;
} const kOnBodyConsumedTestData[] = {
{{2}, {"foobar"}, {6}, {6}},
{{3, 5}, {"foobar", "baz"}, {9}, {14}},
{{2}, {"foobar"}, {4, 2}, {4, 2}},
{{3, 5}, {"foobar", "baz"}, {6, 3}, {11, 3}},
{{3, 5}, {"foobar", "baz"}, {5, 4}, {5, 9}},
{{3, 5}, {"foobar", "baz"}, {7, 2}, {12, 2}},
};
for (size_t test_case_index = 0;
test_case_index < ABSL_ARRAYSIZE(kOnBodyConsumedTestData);
++test_case_index) {
const std::vector<QuicByteCount>& frame_header_lengths =
kOnBodyConsumedTestData[test_case_index].frame_header_lengths;
const std::vector<const char*>& frame_payloads =
kOnBodyConsumedTestData[test_case_index].frame_payloads;
const std::vector<QuicByteCount>& body_bytes_to_read =
kOnBodyConsumedTestData[test_case_index].body_bytes_to_read;
const std::vector<QuicByteCount>& expected_return_values =
kOnBodyConsumedTestData[test_case_index].expected_return_values;
for (size_t frame_index = 0; frame_index < frame_header_lengths.size();
++frame_index) {
EXPECT_EQ(frame_index == 0 ? frame_header_lengths[frame_index] : 0u,
body_manager_.OnNonBody(frame_header_lengths[frame_index]));
body_manager_.OnBody(frame_payloads[frame_index]);
}
for (size_t call_index = 0; call_index < body_bytes_to_read.size();
++call_index) {
EXPECT_EQ(expected_return_values[call_index],
body_manager_.OnBodyConsumed(body_bytes_to_read[call_index]));
}
EXPECT_FALSE(body_manager_.HasBytesToRead());
EXPECT_EQ(body_manager_.ReadableBytes(), 0u);
}
}
TEST_F(QuicSpdyStreamBodyManagerTest, PeekBody) {
struct {
std::vector<QuicByteCount> frame_header_lengths;
std::vector<const char*> frame_payloads;
size_t iov_len;
} const kPeekBodyTestData[] = {
{{}, {}, 1},
{{3}, {"foobar"}, 1},
{{3}, {"foobar"}, 2},
{{3, 5}, {"foobar", "baz"}, 1},
{{3, 5}, {"foobar", "baz"}, 2},
{{3, 5}, {"foobar", "baz"}, 3},
};
for (size_t test_case_index = 0;
test_case_index < ABSL_ARRAYSIZE(kPeekBodyTestData); ++test_case_index) {
const std::vector<QuicByteCount>& frame_header_lengths =
kPeekBodyTestData[test_case_index].frame_header_lengths;
const std::vector<const char*>& frame_payloads =
kPeekBodyTestData[test_case_index].frame_payloads;
size_t iov_len = kPeekBodyTestData[test_case_index].iov_len;
QuicSpdyStreamBodyManager body_manager;
for (size_t frame_index = 0; frame_index < frame_header_lengths.size();
++frame_index) {
EXPECT_EQ(frame_index == 0 ? frame_header_lengths[frame_index] : 0u,
body_manager.OnNonBody(frame_header_lengths[frame_index]));
body_manager.OnBody(frame_payloads[frame_index]);
}
std::vector<iovec> iovecs;
iovecs.resize(iov_len);
size_t iovs_filled = std::min(frame_payloads.size(), iov_len);
ASSERT_EQ(iovs_filled,
static_cast<size_t>(body_manager.PeekBody(&iovecs[0], iov_len)));
for (size_t iovec_index = 0; iovec_index < iovs_filled; ++iovec_index) {
EXPECT_EQ(frame_payloads[iovec_index],
absl::string_view(
static_cast<const char*>(iovecs[iovec_index].iov_base),
iovecs[iovec_index].iov_len));
}
}
}
TEST_F(QuicSpdyStreamBodyManagerTest, ReadBody) {
struct {
std::vector<QuicByteCount> frame_header_lengths;
std::vector<const char*> frame_payloads;
std::vector<std::vector<QuicByteCount>> iov_lengths;
std::vector<QuicByteCount> expected_total_bytes_read;
std::vector<QuicByteCount> expected_return_values;
} const kReadBodyTestData[] = {
{{4}, {"foo"}, {{2}}, {2}, {2}},
{{4}, {"foo"}, {{3}}, {3}, {3}},
{{4}, {"foo"}, {{5}}, {3}, {3}},
{{4}, {"foobar"}, {{2, 3}}, {5}, {5}},
{{4}, {"foobar"}, {{2, 4}}, {6}, {6}},
{{4}, {"foobar"}, {{2, 6}}, {6}, {6}},
{{4}, {"foobar"}, {{2, 4, 4, 3}}, {6}, {6}},
{{4}, {"foobar"}, {{2, 7, 4, 3}}, {6}, {6}},
{{4}, {"foobarbaz"}, {{2, 1}, {3, 2}}, {3, 5}, {3, 5}},
{{4}, {"foobarbaz"}, {{2, 1}, {4, 2}}, {3, 6}, {3, 6}},
{{4}, {"foobarbaz"}, {{2, 1}, {4, 10}}, {3, 6}, {3, 6}},
{{4, 3}, {"foobar", "baz"}, {{8}}, {8}, {11}},
{{4, 3}, {"foobar", "baz"}, {{9}}, {9}, {12}},
{{4, 3}, {"foobar", "baz"}, {{10}}, {9}, {12}},
{{4, 3}, {"foobar", "baz"}, {{4, 3}}, {7}, {10}},
{{4, 3}, {"foobar", "baz"}, {{4, 5}}, {9}, {12}},
{{4, 3}, {"foobar", "baz"}, {{4, 6}}, {9}, {12}},
{{4, 3}, {"foobar", "baz"}, {{4, 6, 4, 3}}, {9}, {12}},
{{4, 3}, {"foobar", "baz"}, {{4, 7, 4, 3}}, {9}, {12}},
{{4, 3}, {"foobar", "baz"}, {{2, 4}, {2, 1}}, {6, 3}, {9, 3}},
{{4, 3, 6},
{"foobar", "bazquux", "qux"},
{{4, 3}, {2, 3}, {5, 3}},
{7, 5, 4},
{10, 5, 10}},
};
for (size_t test_case_index = 0;
test_case_index < ABSL_ARRAYSIZE(kReadBodyTestData); ++test_case_index) {
const std::vector<QuicByteCount>& frame_header_lengths =
kReadBodyTestData[test_case_index].frame_header_lengths;
const std::vector<const char*>& frame_payloads =
kReadBodyTestData[test_case_index].frame_payloads;
const std::vector<std::vector<QuicByteCount>>& iov_lengths =
kReadBodyTestData[test_case_index].iov_lengths;
const std::vector<QuicByteCount>& expected_total_bytes_read =
kReadBodyTestData[test_case_index].expected_total_bytes_read;
const std::vector<QuicByteCount>& expected_return_values =
kReadBodyTestData[test_case_index].expected_return_values;
QuicSpdyStreamBodyManager body_manager;
std::string received_body;
for (size_t frame_index = 0; frame_index < frame_header_lengths.size();
++frame_index) {
EXPECT_EQ(frame_index == 0 ? frame_header_lengths[frame_index] : 0u,
body_manager.OnNonBody(frame_header_lengths[frame_index]));
body_manager.OnBody(frame_payloads[frame_index]);
received_body.append(frame_payloads[frame_index]);
}
std::string read_body;
for (size_t call_index = 0; call_index < iov_lengths.size(); ++call_index) {
size_t total_iov_length = std::accumulate(iov_lengths[call_index].begin(),
iov_lengths[call_index].end(),
static_cast<size_t>(0));
std::string buffer(total_iov_length, 'z');
std::vector<iovec> iovecs;
size_t offset = 0;
for (size_t iov_length : iov_lengths[call_index]) {
QUICHE_CHECK(offset + iov_length <= buffer.size());
iovecs.push_back({&buffer[offset], iov_length});
offset += iov_length;
}
size_t total_bytes_read = expected_total_bytes_read[call_index] + 12;
EXPECT_EQ(
expected_return_values[call_index],
body_manager.ReadBody(&iovecs[0], iovecs.size(), &total_bytes_read));
read_body.append(buffer.substr(0, total_bytes_read));
}
EXPECT_EQ(received_body.substr(0, read_body.size()), read_body);
EXPECT_EQ(read_body.size() < received_body.size(),
body_manager.HasBytesToRead());
}
}
TEST_F(QuicSpdyStreamBodyManagerTest, Clear) {
const QuicByteCount header_length = 3;
EXPECT_EQ(header_length, body_manager_.OnNonBody(header_length));
std::string body("foo");
body_manager_.OnBody(body);
EXPECT_TRUE(body_manager_.HasBytesToRead());
body_manager_.Clear();
EXPECT_FALSE(body_manager_.HasBytesToRead());
iovec iov;
size_t total_bytes_read = 5;
EXPECT_EQ(0, body_manager_.PeekBody(&iov, 1));
EXPECT_EQ(0u, body_manager_.ReadBody(&iov, 1, &total_bytes_read));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_spdy_stream_body_manager.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_spdy_stream_body_manager_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
b0342d3b-45bb-49f1-8468-d8b5280b1d89 | cpp | tensorflow/tensorflow | rfft2d | tensorflow/lite/kernels/rfft2d.cc | tensorflow/lite/kernels/rfft2d_test.cc | #include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <complex>
#include "third_party/fft2d/fft2d.h"
#include "ruy/profiler/instrumentation.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace rfft2d {
using std::complex;
constexpr int kInputTensor = 0;
constexpr int kFftLengthTensor = 1;
constexpr int kOutputTensor = 0;
constexpr int kFftIntegerWorkingAreaTensor = 0;
constexpr int kFftDoubleWorkingAreaTensor = 1;
constexpr int kTensorNotAllocated = -1;
struct OpData {
int fft_integer_working_area_id = kTensorNotAllocated;
int fft_double_working_area_id = kTensorNotAllocated;
};
bool IsPowerOfTwo(uint32_t v) { return v && !(v & (v - 1)); }
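// Creates the two temporary tensors required by fft2d's rdft2d, if they do
// not exist yet: an int32 working area and a second buffer declared as int64
// but used as raw double storage.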
static TfLiteStatus InitTemporaryTensors(TfLiteContext* context,
TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
if (data->fft_integer_working_area_id != kTensorNotAllocated &&
data->fft_double_working_area_id != kTensorNotAllocated) {
return kTfLiteOk;
}
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(2);
int first_new_index;
TF_LITE_ENSURE_STATUS(context->AddTensors(context, 2, &first_new_index));
node->temporaries->data[kFftIntegerWorkingAreaTensor] = first_new_index;
data->fft_integer_working_area_id = first_new_index;
node->temporaries->data[kFftDoubleWorkingAreaTensor] = first_new_index + 1;
data->fft_double_working_area_id = first_new_index + 1;
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
fft_integer_working_area->type = kTfLiteInt32;
fft_integer_working_area->allocation_type = kTfLiteArenaRw;
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
fft_double_working_area->type = kTfLiteInt64;
fft_double_working_area->allocation_type = kTfLiteArenaRw;
return kTfLiteOk;
}
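// Checks that both fft_length values are powers of two, resizes the output to
// [..., fft_height, fft_width / 2 + 1] complex values, and sizes the working
// areas as rdft2d requires: 2 + sqrt(n) integers and n / 2 + fft_width / 4
// doubles, with n = max(fft_height, fft_width / 2).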
TfLiteStatus ResizeOutputandTemporaryTensors(TfLiteContext* context,
TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const int num_dims = NumDimensions(input);
TF_LITE_ENSURE(context, num_dims >= 2);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[0]));
TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[1]));
int fft_height, fft_width;
fft_height = fft_length_data[0];
fft_width = fft_length_data[1];
int fft_working_length = std::max(fft_height, fft_width / 2);
int half_fft_working_length = fft_working_length / 2;
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TfLiteIntArray* output_shape = TfLiteIntArrayCopy(input->dims);
output_shape->data[num_dims - 2] = fft_length_data[0];
output_shape->data[num_dims - 1] = fft_length_data[1] / 2 + 1;
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_shape));
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
TfLiteIntArray* fft_integer_working_area_shape = TfLiteIntArrayCreate(1);
fft_integer_working_area_shape->data[0] =
2 + static_cast<int>(sqrt(fft_working_length));
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_integer_working_area,
fft_integer_working_area_shape));
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
TfLiteIntArray* fft_double_working_area_shape = TfLiteIntArrayCreate(1);
fft_double_working_area_shape->data[0] =
half_fft_working_length + fft_width / 4;
TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, fft_double_working_area,
fft_double_working_area_shape));
return kTfLiteOk;
}
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TF_LITE_ENSURE(context, NumDimensions(input) >= 2);
if (input->type != kTfLiteFloat32) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for input is not supported by rfft2d.",
TfLiteTypeGetName(input->type));
return kTfLiteError;
}
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const RuntimeShape fft_length_shape = GetTensorShape(fft_length);
TF_LITE_ENSURE_EQ(context, NumDimensions(fft_length), 1);
TF_LITE_ENSURE_EQ(context, fft_length_shape.Dims(0), 2);
if (fft_length->type != kTfLiteInt32) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for fft_length is not supported by rfft2d.",
TfLiteTypeGetName(fft_length->type));
return kTfLiteError;
}
TF_LITE_ENSURE_STATUS(InitTemporaryTensors(context, node));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
output->type = kTfLiteComplex64;
if (!IsConstantOrPersistentTensor(fft_length)) {
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
SetTensorToDynamic(fft_integer_working_area);
SetTensorToDynamic(fft_double_working_area);
SetTensorToDynamic(output);
return kTfLiteOk;
}
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
return kTfLiteOk;
}
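// Rearranges the in-place rdft2d output: the packed last (Nyquist) frequency
// column appears to be moved into the two extra trailing columns of the
// buffer, the conjugate-symmetric rows are filled in, and the imaginary parts
// are negated, presumably to match TensorFlow's RFFT2D sign convention.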
void Rfft2dReorder(int fft_height, int fft_width, double** fft_input_output) {
int fft_height_half;
ruy::profiler::ScopeLabel label("Rfft2dReorder");
double real, img;
fft_height_half = fft_height >> 1;
for (int i = fft_height_half + 1; i < fft_height; ++i) {
real = fft_input_output[i][0];
img = fft_input_output[i][1];
fft_input_output[i][fft_width] = img;
fft_input_output[i][fft_width + 1] = real;
fft_input_output[fft_height - i][fft_width] = img;
fft_input_output[fft_height - i][fft_width + 1] = -real;
fft_input_output[i][0] = fft_input_output[fft_height - i][0];
fft_input_output[i][1] = -fft_input_output[fft_height - i][1];
}
double temp = fft_input_output[0][1];
fft_input_output[0][fft_width + 1] = 0;
fft_input_output[0][1] = 0;
fft_input_output[fft_height_half][fft_width] =
fft_input_output[fft_height_half][1];
fft_input_output[fft_height_half][fft_width + 1] = 0;
fft_input_output[fft_height_half][1] = 0;
fft_input_output[0][fft_width] = temp;
for (int i = 0; i < fft_height; ++i) {
for (int j = 1; j < fft_width + 2; j += 2) {
fft_input_output[i][j] = -fft_input_output[i][j];
}
}
}
void Rfft2dImpl(int fft_height, int fft_width, double** fft_input_output,
int* fft_integer_working_area_data,
double* fft_double_working_area_data) {
ruy::profiler::ScopeLabel label("Rfft2dImpl");
double* fft_dynamic_working_area = nullptr;
const int kForwardFft = 1;
rdft2d(fft_height, fft_width, kForwardFft, fft_input_output,
fft_dynamic_working_area, fft_integer_working_area_data,
fft_double_working_area_data);
Rfft2dReorder(fft_height, fft_width, fft_input_output);
}
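// Copies one input slice into the double-precision FFT buffer, cropping rows
// and columns beyond fft_length and zero-padding when the input is smaller
// than fft_length.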
void PrepareInputBuffer(const float* input_data, int input_height,
int input_width, int fft_height, int fft_width,
double** fft_input_output) {
int valid_input_height = std::min(input_height, fft_height);
int valid_input_width = std::min(input_width, fft_width);
for (int i = 0; i < valid_input_height; ++i) {
int in_pos = i * input_width;
for (int j = 0; j < valid_input_width; ++j) {
fft_input_output[i][j] = input_data[in_pos++];
}
for (int j = valid_input_width; j < fft_width + 2; ++j) {
fft_input_output[i][j] = 0;
}
}
for (int i = valid_input_height; i < fft_height; ++i) {
for (int j = 0; j < fft_width + 2; ++j) {
fft_input_output[i][j] = 0;
}
}
}
void PrepareOutputBuffer(complex<float>* output_data, int fft_height,
int fft_width, double** fft_input_output) {
int cnt = 0;
for (int i = 0; i < fft_height; ++i) {
for (int j = 0; j < fft_width / 2 + 1; ++j) {
output_data[cnt++] = complex<float>(fft_input_output[i][j * 2],
fft_input_output[i][j * 2 + 1]);
}
}
}
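// Runs the 2-D real FFT slice by slice over the leading (batch) dimensions:
// each slice is staged into a temporary double buffer, transformed in place
// by rdft2d, and the fft_height x (fft_width / 2 + 1) complex results are
// written to the output tensor.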
TfLiteStatus Rfft2dHelper(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const float* input_data = GetTensorData<float>(input);
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
complex<float>* output_data = GetTensorData<complex<float>>(output);
int fft_height, fft_width;
fft_height = fft_length_data[0];
fft_width = fft_length_data[1];
const RuntimeShape input_shape = GetTensorShape(input);
const int input_dims_count = input_shape.DimensionsCount();
const auto* input_dims_data = input_shape.DimsData();
int num_slices = 1;
for (int i = 0; i < input_dims_count - 2; ++i) {
num_slices *= input_dims_data[i];
}
int input_height = input_dims_data[input_dims_count - 2];
int input_width = input_dims_data[input_dims_count - 1];
int input_slice_size = input_height * input_width;
int output_slice_size = fft_height * (fft_width / 2 + 1);
double** fft_input_output = new double*[fft_height];
for (int i = 0; i < fft_height; ++i) {
fft_input_output[i] = new double[fft_width + 2];
}
TfLiteTensor* fft_integer_working_area;
TF_LITE_ENSURE_OK(
context, GetTemporarySafe(context, node, kFftIntegerWorkingAreaTensor,
&fft_integer_working_area));
int* fft_integer_working_area_data =
GetTensorData<int>(fft_integer_working_area);
TfLiteTensor* fft_double_working_area;
TF_LITE_ENSURE_OK(context,
GetTemporarySafe(context, node, kFftDoubleWorkingAreaTensor,
&fft_double_working_area));
double* fft_double_working_area_data = reinterpret_cast<double*>(
GetTensorData<int64_t>(fft_double_working_area));
for (int i = 0; i < num_slices; ++i) {
PrepareInputBuffer(input_data, input_height, input_width, fft_height,
fft_width, fft_input_output);
memset(fft_integer_working_area_data, 0, fft_integer_working_area->bytes);
memset(fft_double_working_area_data, 0, fft_double_working_area->bytes);
Rfft2dImpl(fft_height, fft_width, fft_input_output,
fft_integer_working_area_data, fft_double_working_area_data);
PrepareOutputBuffer(output_data, fft_height, fft_width, fft_input_output);
input_data += input_slice_size;
output_data += output_slice_size;
}
for (int i = 0; i < fft_height; ++i) {
delete[] fft_input_output[i];
}
delete[] fft_input_output;
return kTfLiteOk;
}
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
const TfLiteTensor* fft_length;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kFftLengthTensor, &fft_length));
const int32_t* fft_length_data = GetTensorData<int32_t>(fft_length);
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
if (output->type != kTfLiteComplex64) {
TF_LITE_KERNEL_LOG(context,
"Type '%s' for output is not supported by rfft2d.",
TfLiteTypeGetName(output->type));
return kTfLiteError;
}
if (!IsConstantTensor(fft_length)) {
TF_LITE_ENSURE_STATUS(ResizeOutputandTemporaryTensors(context, node));
} else {
int num_dims_output = NumDimensions(output);
const RuntimeShape output_shape = GetTensorShape(output);
TF_LITE_ENSURE_EQ(context, num_dims_output, NumDimensions(input));
TF_LITE_ENSURE(context, num_dims_output >= 2);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 2),
fft_length_data[0]);
TF_LITE_ENSURE_EQ(context, output_shape.Dims(num_dims_output - 1),
fft_length_data[1] / 2 + 1);
}
return Rfft2dHelper(context, node);
}
}
TfLiteRegistration* Register_RFFT2D() {
static TfLiteRegistration r = {rfft2d::Init, rfft2d::Free, rfft2d::Prepare,
rfft2d::Eval};
return &r;
}
}
}
} | #include <stdint.h>
#include <complex>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
using std::complex;
using ::testing::ElementsAreArray;
class Rfft2dOpModel : public SingleOpModel {
public:
Rfft2dOpModel(const TensorData& input, const TensorData& fft_lengths) {
input_ = AddInput(input);
fft_lengths_ = AddInput(fft_lengths);
TensorType output_type = TensorType_COMPLEX64;
output_ = AddOutput({output_type, {}});
SetBuiltinOp(BuiltinOperator_RFFT2D, BuiltinOptions_Rfft2dOptions,
CreateRfft2dOptions(builder_).Union());
BuildInterpreter({GetShape(input_)});
}
int input() { return input_; }
int fft_lengths() { return fft_lengths_; }
std::vector<complex<float>> GetOutput() {
return ExtractVector<complex<float>>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
private:
int input_;
int fft_lengths_;
int output_;
};
TEST(Rfft2dOpTest, FftLengthMatchesInputSize) {
Rfft2dOpModel model({TensorType_FLOAT32, {4, 4}}, {TensorType_INT32, {2}});
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4,
3, 8, 6, 3,
5, 2, 7, 6,
9, 5, 8, 3});
model.PopulateTensor<int32_t>(model.fft_lengths(), {4, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::complex<float> expected_result[12] = {
{75, 0}, {-6, -1}, {9, 0}, {-10, 5}, {-3, 2}, {-6, 11},
{-15, 0}, {-2, 13}, {-5, 0}, {-10, -5}, {3, -6}, {-6, -11}};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
TEST(Rfft2dOpTest, FftLengthSmallerThanInputSize) {
Rfft2dOpModel model({TensorType_FLOAT32, {4, 5}}, {TensorType_INT32, {2}});
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4, 0,
3, 8, 6, 3, 0,
5, 2, 7, 6, 0,
9, 5, 8, 3, 0});
model.PopulateTensor<int32_t>(model.fft_lengths(), {4, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::complex<float> expected_result[12] = {
{75, 0}, {-6, -1}, {9, 0}, {-10, 5}, {-3, 2}, {-6, 11},
{-15, 0}, {-2, 13}, {-5, 0}, {-10, -5}, {3, -6}, {-6, -11}};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
TEST(Rfft2dOpTest, FftLengthGreaterThanInputSize) {
Rfft2dOpModel model({TensorType_FLOAT32, {3, 4}}, {TensorType_INT32, {2}});
model.PopulateTensor<float>(model.input(),
{1, 2, 3, 4,
3, 8, 6, 3,
5, 2, 7, 6});
model.PopulateTensor<int32_t>(model.fft_lengths(), {4, 8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::complex<float> expected_result[20] = {
{50, 0}, {8.29289341, -33.6776695}, {-7, 1}, {9.70710659, -1.67766953},
{0, 0},
{-10, -20}, {-16.3639603, -1.12132037}, {-5, 1}, {-7.19238806, -2.05025244},
{-6, 2},
{10, 0}, {-4.7781744, -6.12132025}, {-1, 11}, {10.7781744, 1.87867963},
{4, 0},
{-10, 20}, {11.1923885, 11.9497471}, {5, -5}, {-3.63603902, -3.12132025},
{-6, -2}};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
TEST(Rfft2dOpTest, InputDimsGreaterThan2) {
Rfft2dOpModel model({TensorType_FLOAT32, {2, 2, 4}}, {TensorType_INT32, {2}});
model.PopulateTensor<float>(model.input(),
{1., 2., 3., 4.,
3., 8., 6., 3.,
5., 2., 7., 6.,
7., 3., 23., 5.});
model.PopulateTensor<int32_t>(model.fft_lengths(), {2, 4});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
std::complex<float> expected_result[12] = {
{30., 0.}, {-5, -3.}, { -4., 0.},
{-10., 0.}, {1., 7.}, { 0., 0.},
{58., 0.}, {-18., 6.}, { 26., 0.},
{-18., 0.}, { 14., 2.}, {-18., 0.}};
EXPECT_THAT(model.GetOutput(), ElementsAreArray(expected_result));
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rfft2d.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/rfft2d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
411d864e-9c80-468d-8190-fd508633bd99 | cpp | tensorflow/tensorflow | quantization_utils | tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc | tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantization_utils_test.cc | #include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Quant/IR/QuantTypes.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantizeUtils.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/portable_tensor_utils.h"
#include "tensorflow/compiler/mlir/quantization/common/ir/FakeQuantSupport.h"
#include "tensorflow/compiler/mlir/quantization/common/ir/UniformSupport.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_traits.h"
#include "tensorflow/compiler/mlir/tools/optimize/quantization_utils.h"
namespace mlir {
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_interface.cc.inc"
namespace quant {
namespace {
constexpr double kSmallestHalfRange = kNearZeroTolerance / 2;
using QType = quant::QuantizedType;
template <typename T>
bool BroadcastVector(int target_size, SmallVectorImpl<T>& data) {
const int size = data.size();
if (size != target_size) {
if (target_size % size != 0) return true;
data.reserve(target_size);
for (int i = 1; i < target_size / size; ++i) {
data.insert(data.end(), data.begin(), data.begin() + size);
}
}
return false;
}
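// Ranges narrower than kNearZeroTolerance would produce near-zero scales, so
// such ranges are widened to contain at least
// [-kSmallestHalfRange, kSmallestHalfRange] before quantization parameters
// are derived.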
void ExpandVerySmallRange(const ArrayRef<double> mins,
const ArrayRef<double> maxs,
SmallVectorImpl<double>& effective_mins,
SmallVectorImpl<double>& effective_maxs) {
for (const auto [min, max] : llvm::zip(mins, maxs)) {
if (max - min > kNearZeroTolerance) {
effective_mins.push_back(min);
effective_maxs.push_back(max);
} else {
effective_mins.push_back(std::min(min, -kSmallestHalfRange));
effective_maxs.push_back(std::max(max, kSmallestHalfRange));
}
}
}
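// For storage widths below 8 bits, remaps quantization parameters that were
// derived for the wider existing storage range onto the narrower [qmin, qmax]
// range implied by num_bits (and narrow_range), scaling the step size and
// shifting the zero point proportionally.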
QuantizedType ResetMinMaxFromNumBits(const QuantizedType type,
const int num_bits,
const bool narrow_range,
const bool is_signed) {
if (num_bits >= 8) {
return type;
}
int64_t qmin = QType::getDefaultMinimumForInteger(is_signed, num_bits);
int64_t qmax = QType::getDefaultMaximumForInteger(is_signed, num_bits);
if (narrow_range) {
qmin += 1;
}
const int64_t storage_type_min = type.getStorageTypeMin();
const int64_t storage_type_max = type.getStorageTypeMax();
const double rate =
static_cast<double>(storage_type_max - storage_type_min) / (qmax - qmin);
const auto& recalculate_scale = [&](double scale) -> double {
return scale * rate;
};
const auto& recalculate_zero_point = [&](int64_t zero_point) -> int64_t {
return qmax - std::round((storage_type_max - zero_point) / rate);
};
if (auto q_type = dyn_cast<UniformQuantizedType>(type)) {
const double scale = recalculate_scale(q_type.getScale());
const double zero_point = recalculate_zero_point(q_type.getZeroPoint());
return UniformQuantizedType::get(q_type.getFlags(), q_type.getStorageType(),
q_type.getExpressedType(), scale,
zero_point, qmin, qmax);
} else if (auto q_type = dyn_cast<quant::UniformQuantizedPerAxisType>(type)) {
const int size = q_type.getScales().size();
SmallVector<double, 4> scales(size);
SmallVector<int64_t, 4> zero_points(size);
for (int i = 0; i < size; ++i) {
scales[i] = recalculate_scale(q_type.getScales()[i]);
zero_points[i] = recalculate_zero_point(q_type.getZeroPoints()[i]);
}
return quant::UniformQuantizedPerAxisType::get(
q_type.getFlags(), q_type.getStorageType(), q_type.getExpressedType(),
scales, zero_points, q_type.getQuantizedDimension(), qmin, qmax);
} else {
llvm_unreachable("Unsupported QuantizedType in ResetMinMaxFromNumBits");
}
return type;
}
quant::UniformQuantizedPerAxisType ResetAxisAndBroadcast(
const ArrayRef<int64_t> shape,
const quant::UniformQuantizedPerAxisType qtype, const Type target,
const int quant_dim) {
const auto shaped = dyn_cast<RankedTensorType>(target);
if (!shaped) return {};
const ArrayRef<int64_t> new_shape = shaped.getShape();
SmallVector<double, 4> scales(qtype.getScales().begin(),
qtype.getScales().end());
SmallVector<int64_t, 4> zero_points(qtype.getZeroPoints().begin(),
qtype.getZeroPoints().end());
if (new_shape.size() == shape.size()) {
if (BroadcastVector<double>(shaped.getDimSize(quant_dim), scales) ||
BroadcastVector<int64_t>(shaped.getDimSize(quant_dim), zero_points)) {
return {};
}
} else if ((new_shape.size() == shape.size() + 1) && new_shape.front() == 1) {
if (!(std::equal(shape.begin(), shape.end(), new_shape.begin() + 1) &&
quant_dim == new_shape.size() - 1)) {
return {};
}
} else {
return {};
}
return quant::UniformQuantizedPerAxisType::get(
qtype.getFlags(), qtype.getStorageType(), qtype.getExpressedType(),
scales, zero_points, quant_dim, qtype.getStorageTypeMin(),
qtype.getStorageTypeMax());
}
}
bool IsOpQuantizable(Operation* op) {
if (isa<func::ConstantOp, arith::ConstantOp, quantfork::StatisticsOp>(op)) {
return true;
} else if (op->hasTrait<OpTrait::IsTerminator>() ||
isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(op)) {
return false;
}
const bool attr_enforced_quantizable =
op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) &&
op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue().str() ==
QuantTraitValues[QuantizationTrait::FullyQuantizable];
const bool trait_enforced_quantizable =
op->hasTrait<OpTrait::quant::QuantizableResult>();
return attr_enforced_quantizable || trait_enforced_quantizable;
}
Type GetQuantizedType(Builder builder, const Type input_type,
const ArrayRef<double> min, const ArrayRef<double> max,
const int quant_dim, const int storage_type_width,
const bool narrow_range, const bool is_signed,
const bool legacy_float_scale,
const bool use_fake_quant_num_bits) {
auto converter =
quantfork::ExpressedToQuantizedConverter::forInputType(input_type);
SmallVector<double, 4> effective_mins, effective_maxs;
ExpandVerySmallRange(min, max, effective_mins, effective_maxs);
quant::QuantizedType quantized_element_type;
if (min.size() == 1 && max.size() == 1 && quant_dim == -1) {
quantized_element_type = quantfork::fakeQuantAttrsToType(
builder.getUnknownLoc(), storage_type_width, effective_mins[0],
effective_maxs[0], narrow_range, converter.expressed_type, is_signed);
if (legacy_float_scale) {
quantized_element_type =
DownCastScale(quantized_element_type, effective_mins[0],
effective_maxs[0], builder.getUnknownLoc());
}
} else if (min.size() == max.size()) {
auto shape = dyn_cast<ShapedType>(input_type);
if (!shape || shape.getRank() <= quant_dim ||
static_cast<int64_t>(min.size()) != shape.getDimSize(quant_dim)) {
return {};
}
quantized_element_type = quantfork::fakeQuantAttrsToType(
builder.getUnknownLoc(), storage_type_width, quant_dim, effective_mins,
effective_maxs, narrow_range, converter.expressed_type, is_signed);
if (legacy_float_scale) {
quantized_element_type =
DownCastScale(quantized_element_type, effective_mins, effective_maxs,
builder.getUnknownLoc());
}
}
if (!quantized_element_type) return {};
if (use_fake_quant_num_bits && storage_type_width > 1 &&
storage_type_width < 8 &&
quantized_element_type.getStorageTypeMax() >
QType::getDefaultMinimumForInteger(is_signed, storage_type_width)) {
const auto resetEleType = ResetMinMaxFromNumBits(
quantized_element_type, storage_type_width, narrow_range, is_signed);
return converter.convert(resetEleType);
}
return converter.convert(quantized_element_type);
}
TypeAttr RescaleQuantizedType(const Type input, const Attribute factor) {
const auto factor_values = dyn_cast_or_null<DenseFPElementsAttr>(factor);
if (!factor_values) return {};
const auto element_type =
quant::QuantizedType::getQuantizedElementType(input);
if (!element_type) return {};
if (auto qtype = dyn_cast<quant::UniformQuantizedPerAxisType>(element_type)) {
const ArrayRef<double> scales = qtype.getScales();
if (static_cast<int64_t>(scales.size()) != factor_values.getNumElements())
return {};
SmallVector<double, 4> new_scales;
new_scales.reserve(scales.size());
auto scales_iter = scales.begin();
for (const auto& f : factor_values) {
new_scales.push_back(*scales_iter *
std::fabs(FloatAttr::getValueAsDouble(f)));
++scales_iter;
}
auto new_ele_type = quant::UniformQuantizedPerAxisType::get(
qtype.getFlags(), qtype.getStorageType(), qtype.getExpressedType(),
new_scales, qtype.getZeroPoints(), qtype.getQuantizedDimension(),
qtype.getStorageTypeMin(), qtype.getStorageTypeMax());
if (const auto new_type = new_ele_type.castFromExpressedType(
quant::QuantizedType::castToExpressedType(input))) {
return TypeAttr::get(new_type);
}
}
return {};
}
TypeAttr GetQuantizedTypeAttr(const Builder builder, const Type input_type,
const Attribute min, const Attribute max,
const int quant_dim, const IntegerAttr num_bits,
const BoolAttr narrow_range, const bool is_signed,
const bool legacy_float_scale,
const bool use_fake_quant_num_bits) {
SmallVector<double, 4> min_value, max_value;
const auto mins = dyn_cast<DenseFPElementsAttr>(min);
const auto maxs = dyn_cast<DenseFPElementsAttr>(max);
if (mins && maxs) {
min_value.reserve(mins.getNumElements());
max_value.reserve(maxs.getNumElements());
for (auto it = mins.begin(); it != mins.end(); ++it) {
min_value.push_back(FloatAttr::getValueAsDouble(*it));
}
for (auto it = maxs.begin(); it != maxs.end(); ++it) {
max_value.push_back(FloatAttr::getValueAsDouble(*it));
}
} else {
const auto fmin = dyn_cast<FloatAttr>(min);
const auto fmax = dyn_cast<FloatAttr>(max);
if (fmin && fmax) {
min_value.push_back(fmin.getValueAsDouble());
max_value.push_back(fmax.getValueAsDouble());
} else {
return {};
}
}
const Type final_type =
GetQuantizedType(builder, input_type, min_value, max_value, quant_dim,
num_bits.getInt(), narrow_range.getValue(), is_signed,
legacy_float_scale, use_fake_quant_num_bits);
if (!final_type) return {};
return TypeAttr::get(final_type);
}
TypeAttr CastQuantizedTypeAttrFromExpressedType(const Builder builder,
const TypeAttr source,
const Type target,
const int axis) {
const auto source_type = dyn_cast_or_null<ShapedType>(source.getValue());
if (!source_type) return {};
const auto src_ele_type = source_type.getElementType();
auto qtype = dyn_cast<quant::QuantizedType>(src_ele_type);
if (const auto per_axis =
dyn_cast_or_null<quant::UniformQuantizedPerAxisType>(qtype)) {
if (axis == -1) return {};
qtype =
ResetAxisAndBroadcast(source_type.getShape(), per_axis, target, axis);
}
if (!qtype) return {};
const Type final_type = qtype.castFromExpressedType(target);
if (!final_type) return {};
return TypeAttr::get(final_type);
}
void ExtractMinMaxFromAttr(const DenseFPElementsAttr values, const int dim_size,
const int slice_size, bool symmetric,
SmallVectorImpl<double>& mins,
SmallVectorImpl<double>& maxs) {
if (values.isSplat()) {
const double single_value =
FloatAttr::getValueAsDouble(values.getSplatValue<llvm::APFloat>());
if (single_value < 0.0) {
mins[0] = single_value;
maxs[0] = symmetric ? -single_value : 0.0;
} else if (single_value > 0.0) {
mins[0] = symmetric ? -single_value : 0.0;
maxs[0] = single_value;
} else {
mins[0] = maxs[0] = single_value;
}
for (int i = 1; i < dim_size; ++i) {
mins[i] = mins[0];
maxs[i] = maxs[0];
}
} else {
int64_t flatten_index = 0;
auto begin = values.begin();
auto end = values.end();
for (auto it = begin; it != end; ++it, ++flatten_index) {
const double ele_value = FloatAttr::getValueAsDouble(*it);
const int slice_index = flatten_index / slice_size;
const int channel_index = slice_index % dim_size;
mins[channel_index] = std::min(mins[channel_index], ele_value);
maxs[channel_index] = std::max(maxs[channel_index], ele_value);
}
for (int i = 0; i < dim_size; ++i) {
maxs[i] = std::max(maxs[i], 0.0);
mins[i] = std::min(mins[i], 0.0);
}
if (symmetric) {
for (int i = 0; i < dim_size; ++i) {
maxs[i] = std::max(std::abs(mins[i]), std::abs(maxs[i]));
mins[i] = -maxs[i];
}
}
}
}
Type GetUniformQuantizedTypeForWeight(
const ElementsAttr attr, const bool symmetric, const unsigned num_bits,
const bool is_signed, const bool narrow_range,
const bool legacy_float_scale, const bool use_fake_quant_num_bits) {
const Builder builder(attr.getContext());
if (symmetric && (!is_signed || !narrow_range)) return {};
SmallVector<double, 4> mins(1, std::numeric_limits<double>::max());
SmallVector<double, 4> maxs(1, std::numeric_limits<double>::min());
const auto fp = dyn_cast<DenseFPElementsAttr>(attr);
if (!fp) return {};
ExtractMinMaxFromAttr(fp, 1, 1, symmetric, mins,
maxs);
const auto type =
GetQuantizedType(builder, attr.getType(), mins[0], maxs[0],
-1, num_bits, narrow_range, is_signed,
legacy_float_scale, use_fake_quant_num_bits);
if (const auto ele_type = dyn_cast_or_null<TensorType>(type))
return ele_type.getElementType();
return {};
}
Type GetUniformQuantizedPerAxisTypeForWeight(
const ElementsAttr attr, const int quant_dim, const bool symmetric,
const unsigned num_bits, const bool is_signed, const bool narrow_range,
const bool legacy_float_scale, const bool use_fake_quant_num_bits) {
const Builder builder(attr.getContext());
const auto shape = cast<ShapedType>(attr.getType()).getShape();
if (static_cast<int>(shape.size()) <= quant_dim) return {};
if (symmetric && (!is_signed || !narrow_range)) return {};
const int dim_size = shape[quant_dim];
const int slice_size =
std::accumulate(std::next(shape.begin(), quant_dim + 1), shape.end(), 1,
std::multiplies<int64_t>());
SmallVector<double, 4> mins(dim_size, std::numeric_limits<double>::max());
SmallVector<double, 4> maxs(dim_size, std::numeric_limits<double>::min());
const auto fp = dyn_cast<DenseFPElementsAttr>(attr);
if (!fp) return {};
ExtractMinMaxFromAttr(fp, dim_size, slice_size, symmetric, mins, maxs);
const auto type = GetQuantizedType(
builder, attr.getType(), mins, maxs, quant_dim, num_bits, narrow_range,
is_signed, legacy_float_scale, use_fake_quant_num_bits);
if (auto ele_type = dyn_cast_or_null<TensorType>(type))
return ele_type.getElementType();
return {};
}
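// The bias scale is the product of the operand scales (typically the input
// activation and the weight). If any operand is per-axis quantized, the
// result is per-axis as well; all zero points are 0 and the storage type is a
// signed 32-bit integer.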
quant::QuantizedType GetUniformQuantizedTypeForBias(
const std::vector<quant::QuantizedType>& op_types,
const int adjusted_quant_dim, const bool legacy_float_scale) {
if (op_types.empty()) return {};
size_t axis_size = 1;
int32_t quant_dim = -1;
Type expressed_type;
for (const auto op_type : op_types) {
if (!op_type) return {};
if (expressed_type && expressed_type != op_type.getExpressedType()) {
return {};
}
expressed_type = op_type.getExpressedType();
if (const auto type =
dyn_cast<quant::UniformQuantizedPerAxisType>(op_type)) {
if (axis_size != 1 && axis_size != type.getScales().size()) return {};
if (quant_dim != -1 && quant_dim != type.getQuantizedDimension())
return {};
axis_size = type.getScales().size();
quant_dim = type.getQuantizedDimension();
} else if (!isa<quant::UniformQuantizedType>(op_type)) {
return {};
}
}
SmallVector<double, 4> scales(axis_size, 1.0);
for (const auto op_type : op_types) {
if (const auto type =
dyn_cast<quant::UniformQuantizedPerAxisType>(op_type)) {
for (const auto& index_scale : llvm::enumerate(type.getScales())) {
scales[index_scale.index()] *= index_scale.value();
}
} else if (const auto type =
dyn_cast<quant::UniformQuantizedType>(op_type)) {
for (int index = 0; index < axis_size; ++index) {
scales[index] *= type.getScale();
}
}
}
if (legacy_float_scale) {
for (int i = 0; i < scales.size(); ++i) {
scales[i] = static_cast<float>(scales[i]);
}
}
Builder builder(expressed_type.getContext());
const IntegerType storage_type = builder.getIntegerType(32);
const int64_t storage_type_min =
quant::QuantizedType::getDefaultMinimumForInteger(true, 32);
const int64_t storage_type_max =
quant::QuantizedType::getDefaultMaximumForInteger(true, 32);
if (axis_size == 1) {
return quant::UniformQuantizedType::getChecked(
builder.getUnknownLoc(),
true, storage_type, expressed_type, scales[0],
0, storage_type_min, storage_type_max);
} else {
SmallVector<int64_t, 4> zero_points(axis_size, 0);
return quant::UniformQuantizedPerAxisType::getChecked(
builder.getUnknownLoc(),
true, storage_type, expressed_type, scales, zero_points,
std::max(adjusted_quant_dim, 0),
storage_type_min, storage_type_max);
}
}
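// Quantizes a dense float constant using the legacy TOCO/TFLite-optimize
// routines: symmetric int8 (per-tensor or per-channel), int16, and int32 bias
// quantization, presumably so the produced values stay consistent with the
// older tooling. If the legacy int8 per-tensor scale disagrees noticeably
// with the annotated type, it falls back to the regular Quantize() path.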
ElementsAttr QuantizeLegacy(const Attribute real_value,
const Type tensor_type) {
if (!isa<DenseFPElementsAttr>(real_value) ||
!quant::QuantizedType::getQuantizedElementType(tensor_type)) {
return {};
}
const auto real_values_attr = cast<DenseFPElementsAttr>(real_value);
auto q_type = quant::QuantizedType::getQuantizedElementType(tensor_type);
std::vector<float> real_values;
SmallVector<APInt, 8> quantized_attr;
real_values.reserve(real_values_attr.getNumElements());
quantized_attr.reserve(real_values_attr.getNumElements());
std::transform(real_values_attr.begin(), real_values_attr.end(),
std::back_inserter(real_values), [&](APFloat value) -> float {
return value.convertToFloat();
});
const ShapedType new_dense_type = dyn_cast_or_null<ShapedType>(
q_type.castExpressedToStorageType(real_values_attr.getType()));
const int width = dyn_cast<IntegerType>(q_type.getStorageType()).getWidth();
if (width == 8 && q_type.getStorageTypeMax() == 127 &&
q_type.getStorageTypeMin() == -127) {
std::vector<int8_t> quantized_values(real_values_attr.getNumElements());
if (auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) {
float min, max, scale;
mlir::lite::toco_legacy::PortableSymmetricQuantizeFloats(
real_values.data(), real_values.size(), quantized_values.data(), &min,
&max, &scale);
if (std::abs(scale - uniform_type.getScale()) > 1e-3) {
return Quantize(real_value, tensor_type);
}
} else if (auto uniform_type =
dyn_cast<quant::UniformQuantizedPerAxisType>(q_type)) {
std::vector<float> scales_inv;
std::vector<int32_t> dimension;
dimension.insert(dimension.end(), new_dense_type.getShape().begin(),
new_dense_type.getShape().end());
std::transform(uniform_type.getScales().begin(),
uniform_type.getScales().end(),
std::back_inserter(scales_inv),
[](float scale) { return 1.0 / scale; });
tflite_migration::optimize::utils::SymmetricPerChannelQuantizeValues(
real_values.data(), scales_inv, dimension,
uniform_type.getQuantizedDimension(), &quantized_values);
} else {
return {};
}
std::transform(quantized_values.begin(), quantized_values.end(),
std::back_inserter(quantized_attr),
[&](int8_t value) -> APInt {
return APInt(8, value, true);
});
return DenseElementsAttr::get(new_dense_type, quantized_attr);
} else if (width == 8) {
return Quantize(real_value, tensor_type);
} else if (width == 16) {
if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) {
const auto quantized_values =
tflite_migration::optimize::utils::SymmetricQuantizeFloatsToInt16(
real_values.data(), real_values.size(), uniform_type.getScale());
std::transform(quantized_values.begin(), quantized_values.end(),
std::back_inserter(quantized_attr),
[&](int16_t value) -> APInt {
return APInt(16, value, true);
});
return DenseElementsAttr::get(new_dense_type, quantized_attr);
}
} else if (width == 32) {
std::vector<float> scales;
if (const auto uniform_type = dyn_cast<UniformQuantizedType>(q_type)) {
scales.push_back(uniform_type.getScale());
} else if (const auto uniform_type =
dyn_cast<quant::UniformQuantizedPerAxisType>(q_type)) {
scales.insert(scales.end(), uniform_type.getScales().begin(),
uniform_type.getScales().end());
} else {
return {};
}
const auto quantized_bias =
tflite_migration::optimize::utils::SymmetricBiasQuantize<std::int32_t>(
real_values.data(), real_values.size(), scales);
std::transform(quantized_bias.begin(), quantized_bias.end(),
std::back_inserter(quantized_attr),
[&](int32_t value) -> APInt {
return APInt(32, value, true);
});
return DenseElementsAttr::get(new_dense_type, quantized_attr);
}
return {};
}
ElementsAttr Quantize(const Attribute real_value, const Type tensor_type) {
if (const auto q_type =
quant::QuantizedType::getQuantizedElementType(tensor_type)) {
Type converted_type;
return dyn_cast_or_null<ElementsAttr>(
quantfork::quantizeAttr(real_value, q_type, converted_type));
}
return {};
}
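// Recomputes the scale (and, for asymmetric storage ranges, the zero point)
// from the given min/max using single-precision arithmetic; this is invoked
// from GetQuantizedType when legacy_float_scale is set, presumably to mirror
// the float32 math of the legacy quantizer.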
quant::QuantizedType DownCastScale(QuantizedType type, double min, double max,
Location loc) {
const SmallVector<double, 1> mins = {min};
const SmallVector<double, 1> maxs = {max};
return DownCastScale(type, mins, maxs, loc);
}
quant::QuantizedType DownCastScale(QuantizedType type,
const SmallVectorImpl<double>& mins,
const SmallVectorImpl<double>& maxs,
Location loc) {
if (!type) return type;
SmallVector<double, 4> scales(mins.size());
SmallVector<int64_t, 4> zero_points(mins.size());
if (auto q_type = dyn_cast<UniformQuantizedType>(type)) {
zero_points.push_back(q_type.getZeroPoint());
} else if (auto q_type = dyn_cast<quant::UniformQuantizedPerAxisType>(type)) {
zero_points = {q_type.getZeroPoints().begin(),
q_type.getZeroPoints().end()};
}
for (int i = 0; i < mins.size(); ++i) {
scales[i] = (static_cast<float>(maxs[i]) - static_cast<float>(mins[i])) /
(type.getStorageTypeMax() - type.getStorageTypeMin());
if (type.getStorageTypeMax() != -type.getStorageTypeMin()) {
const float zero_point_from_min =
type.getStorageTypeMin() - mins[i] / scales[i];
if (zero_point_from_min < type.getStorageTypeMin()) {
zero_points[i] = static_cast<int64_t>(type.getStorageTypeMin());
} else if (zero_point_from_min > type.getStorageTypeMax()) {
zero_points[i] = static_cast<int64_t>(type.getStorageTypeMax());
} else {
zero_points[i] = static_cast<int64_t>(std::round(zero_point_from_min));
}
}
}
if (auto q_type = dyn_cast<UniformQuantizedType>(type)) {
return UniformQuantizedType::get(q_type.getFlags(), q_type.getStorageType(),
q_type.getExpressedType(), scales[0],
zero_points[0], q_type.getStorageTypeMin(),
q_type.getStorageTypeMax());
} else if (auto q_type = dyn_cast<quant::UniformQuantizedPerAxisType>(type)) {
return quant::UniformQuantizedPerAxisType::get(
q_type.getFlags(), q_type.getStorageType(), q_type.getExpressedType(),
scales, zero_points, q_type.getQuantizedDimension(),
q_type.getStorageTypeMin(), q_type.getStorageTypeMax());
}
return type;
}
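// Returns true if `op` has more than one floating-point operand, in which
// case using the result scale is preferred over an operand scale.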
static bool PreferResultScale(Operation* op) {
int float_operands = 0;
for (auto operand : op->getOperands()) {
if (auto operand_type = dyn_cast<ShapedType>(operand.getType())) {
if (isa<FloatType>(operand_type.getElementType())) {
if (++float_operands > 1) return true;
}
}
}
return false;
}
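// Builds the default OpQuantScaleSpec for `op` from the SameScalesOpInterface
// and FixedOutputRangeInterface implementations it provides, if any.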
std::unique_ptr<OpQuantScaleSpec> GetDefaultQuantScaleSpec(Operation* op) {
auto spec = std::make_unique<OpQuantScaleSpec>();
if (isa<SameScalesOpInterface>(op)) {
spec->has_same_scale_requirement = true;
spec->required_same_scale_func = [op](const bool sign,
const int bit_width) {
return cast<SameScalesOpInterface>(op)
.RequiredSameOperandsAndResultsScale(sign, bit_width);
};
spec->required_same_quantized_axes_func = [op]() {
return cast<SameScalesOpInterface>(op).RequiredSameQuantizedAxes();
};
}
if (isa<FixedOutputRangeInterface>(op)) {
spec->has_fixed_output_range = true;
spec->fixed_output_range_func = [op](bool sign, int bit_width) {
return cast<FixedOutputRangeInterface>(op).GetFixedOutputRange(sign,
bit_width);
};
}
return spec;
}
static bool IsStatsRedundant(
Operation* op, const OpQuantSpecGetter op_quant_spec_getter,
const OpQuantScaleSpecGetter op_quant_scale_spec_getter) {
return isa<FixedOutputRangeInterface>(op) ||
op_quant_scale_spec_getter(op)->has_fixed_output_range;
}
static bool IsSameScaleOp(
Operation* op, const OpQuantScaleSpecGetter op_quant_scale_spec_getter) {
return dyn_cast<SameScalesOpInterface>(op) ||
op_quant_scale_spec_getter(op)->has_same_scale_requirement;
}
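// Removes quantfork::StatisticsOp ops whose ranges are redundant: stats that
// directly feed quantize casts, stats on results of fixed-output-range ops,
// and stats reachable through chains of same-scale ops. Returns true only if
// a collected op is unexpectedly not a stats op (i.e. on failure).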
bool RemoveRedundantStatsOps(
func::FuncOp func, const OpQuantSpecGetter op_quant_spec_getter,
const OpQuantScaleSpecGetter op_quant_scale_spec_getter) {
SmallVector<quantfork::StatisticsOp, 16> all_stats_ops;
llvm::DenseSet<Operation*> redundant_stats_ops;
func.walk([&](quantfork::QuantizeCastOp q) {
auto input_op = q.getArg().getDefiningOp();
if (auto stats = dyn_cast_or_null<quantfork::StatisticsOp>(input_op)) {
q.setOperand(stats.getArg());
if (stats.use_empty()) stats.erase();
}
});
func.walk([&](quantfork::StatisticsOp stats_op) {
all_stats_ops.push_back(stats_op);
});
while (!all_stats_ops.empty()) {
quantfork::StatisticsOp stats_op = all_stats_ops.back();
all_stats_ops.pop_back();
if (auto def = stats_op.getArg().getDefiningOp()) {
if (IsStatsRedundant(def, op_quant_spec_getter,
op_quant_scale_spec_getter)) {
redundant_stats_ops.insert(stats_op);
}
}
for (Operation* user : stats_op.getResult().getUsers()) {
if (!IsSameScaleOp(user, op_quant_scale_spec_getter) ||
PreferResultScale(user)) {
continue;
}
for (Value res : user->getResults()) {
if (!res.hasOneUse()) {
continue;
}
if (auto next_stats =
dyn_cast<quantfork::StatisticsOp>(*res.getUsers().begin())) {
redundant_stats_ops.insert(next_stats);
all_stats_ops.push_back(next_stats);
}
}
}
}
func.walk([&](quantfork::StatisticsOp stats_op) {
if (redundant_stats_ops.find(stats_op) == redundant_stats_ops.end()) {
all_stats_ops.push_back(stats_op);
}
});
while (!all_stats_ops.empty()) {
quantfork::StatisticsOp stats_op = all_stats_ops.back();
all_stats_ops.pop_back();
if (Operation* def = stats_op.getArg().getDefiningOp()) {
if (!IsSameScaleOp(def, op_quant_scale_spec_getter)) {
continue;
}
for (Value input : def->getOperands()) {
if (auto next_stats = dyn_cast_or_null<quantfork::StatisticsOp>(
input.getDefiningOp())) {
redundant_stats_ops.insert(next_stats);
all_stats_ops.push_back(next_stats);
}
}
}
}
for (Operation* it : redundant_stats_ops) {
if (!isa<quantfork::StatisticsOp>(it)) return true;
auto stats_op = cast<quantfork::StatisticsOp>(it);
stats_op.getResult().replaceAllUsesWith(stats_op.getArg());
stats_op.erase();
}
return false;
}
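// Verifies that all quantized operands and results of a same-scale op carry
// compatible quantization parameters; emits an op error when they differ.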
LogicalResult VerifySameScales(Operation* op) {
auto same_scale_op = cast<SameScalesOpInterface>(op);
SmallVector<QuantizedType, 4> collected_quant_params;
for (Value input : op->getOperands()) {
QuantizedType quant_params =
QuantizedType::getQuantizedElementType(input.getType());
if (quant_params) {
collected_quant_params.push_back(quant_params);
}
}
for (Value output : op->getResults()) {
const QuantizedType quant_params =
QuantizedType::getQuantizedElementType(output.getType());
if (quant_params) {
collected_quant_params.push_back(quant_params);
}
}
if (collected_quant_params.size() <= 1) return success();
const auto& expected_params = collected_quant_params[0];
for (int i = 1; i < collected_quant_params.size(); ++i) {
const auto& compared_params = collected_quant_params[i];
if (!same_scale_op.RequiredSameQuantizedAxes()) {
const auto expected_per_axis_qtype =
dyn_cast<quant::UniformQuantizedPerAxisType>(expected_params);
const auto compared_per_axis_qtype =
dyn_cast<quant::UniformQuantizedPerAxisType>(compared_params);
if (expected_per_axis_qtype && compared_per_axis_qtype &&
llvm::equal(expected_per_axis_qtype.getScales(),
compared_per_axis_qtype.getScales()) &&
llvm::equal(expected_per_axis_qtype.getZeroPoints(),
compared_per_axis_qtype.getZeroPoints()) &&
expected_params.getStorageType() ==
compared_params.getStorageType() &&
expected_params.getExpressedType() ==
compared_params.getExpressedType()) {
continue;
}
}
if (expected_params == compared_params) continue;
if (expected_params.isSigned() == compared_params.isSigned() &&
expected_params.getStorageTypeIntegralWidth() ==
compared_params.getStorageTypeIntegralWidth() &&
!same_scale_op.RequiredSameOperandsAndResultsScale(
expected_params.isSigned(),
expected_params.getStorageTypeIntegralWidth()))
continue;
std::string err_msg =
"quantization parameters violate the same scale constraint: ";
llvm::raw_string_ostream os(err_msg);
expected_params.print(os);
os << " vs. ";
compared_params.print(os);
os.flush();
return op->emitOpError(err_msg);
}
return success();
}
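// Builds the uniform quantized type for ops with a fixed output range. Only
// 8- and 16-bit storage types are supported; for unsigned 8-bit storage the
// zero point and storage bounds are shifted by 128.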
quant::UniformQuantizedType GetFixedOutputRange(
const bool is_signed, const int bit_width, const Type tensor_type,
const double scale, int64_t zero_point, int64_t storage_min,
int64_t storage_max) {
const auto result_type = cast<ShapedType>(tensor_type);
if (!isa<FloatType>(result_type.getElementType())) return {};
Builder builder(result_type.getContext());
if (bit_width != 8 && bit_width != 16) return {};
const IntegerType storage_type = builder.getIntegerType(bit_width);
if (!is_signed && bit_width == 8) {
zero_point += 128;
storage_min += 128;
storage_max += 128;
}
return quant::UniformQuantizedType::getChecked(
builder.getUnknownLoc(), is_signed, storage_type,
result_type.getElementType(), scale, zero_point, storage_min,
storage_max);
}
quant::UniformQuantizedType GetFixedOutputRange(const bool is_signed,
const int bit_width,
const Type tensor_type,
const double scale,
const int64_t zero_point) {
return GetFixedOutputRange(is_signed, bit_width, tensor_type, scale,
zero_point,
-(1 << (bit_width - 1)),
(1 << (bit_width - 1)) - 1);
}
Type ConvertSignedQuantizedToUnsigned(const Type signed_tensor_type,
const Location loc) {
const auto qtype = QType::getQuantizedElementType(signed_tensor_type);
if (!qtype || !qtype.isSigned()) return {};
const int num_bits = qtype.getStorageTypeIntegralWidth();
const int64_t offset =
QType::getDefaultMinimumForInteger(true, num_bits) -
QType::getDefaultMinimumForInteger(false, num_bits);
const auto flags = !quant::QuantizationFlags::Signed;
QType new_qtype;
if (auto uqtype = dyn_cast<quant::UniformQuantizedType>(qtype)) {
new_qtype = quant::UniformQuantizedType::getChecked(
loc, flags, qtype.getStorageType(), qtype.getExpressedType(),
uqtype.getScale(), uqtype.getZeroPoint() - offset,
uqtype.getStorageTypeMin() - offset,
uqtype.getStorageTypeMax() - offset);
} else if (auto aqtype =
dyn_cast<quant::UniformQuantizedPerAxisType>(qtype)) {
const auto zero_points = aqtype.getZeroPoints();
SmallVector<int64_t, 4> new_zero_points(zero_points.begin(),
zero_points.end());
for (int i = 0; i < new_zero_points.size(); ++i) {
new_zero_points[i] -= offset;
}
new_qtype = quant::UniformQuantizedPerAxisType::getChecked(
loc, flags, qtype.getStorageType(), qtype.getExpressedType(),
aqtype.getScales(), new_zero_points, aqtype.getQuantizedDimension(),
aqtype.getStorageTypeMin() - offset,
aqtype.getStorageTypeMax() - offset);
}
return new_qtype.castFromExpressedType(
QType::castToExpressedType(signed_tensor_type));
}
LogicalResult RemoveDebugAttrPattern::matchAndRewrite(
Operation* op, PatternRewriter& rewriter) const {
return success(
op->removeAttr(kDebugModeOpQuantAttrName) ||
op->removeAttr(kDebugModeOpFloatAttrName));
}
}
} | #include "tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantization_utils.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorflow/compiler/mlir/lite/core/absl_error_model_builder.h"
#include "tensorflow/compiler/mlir/lite/quantization/lite/test_util.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_generated.h"
#include "tensorflow/compiler/mlir/lite/schema/schema_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/tsl/util/command_line_flags.h"
#include "tsl/platform/init_main.h"
#include "tsl/platform/path.h"
namespace {
std::string* g_test_model_dir = nullptr;
}
namespace mlir {
namespace lite {
namespace toco_legacy {
namespace {
using mlir::TFL::FlatBufferModelAbslError;
using tflite::BuiltinOperator_CONV_2D;
using tflite::QuantizationParametersT;
using tflite::SubGraphT;
using tflite::TensorT;
using tflite::TensorType_FLOAT16;
using tflite::TensorType_FLOAT32;
using tflite::TensorType_INT8;
std::unique_ptr<FlatBufferModelAbslError> ReadModel(const char* model) {
auto model_path = tsl::io::JoinPath(*g_test_model_dir, model);
return FlatBufferModelAbslError::BuildFromFile(model_path.c_str());
}
std::unique_ptr<FlatBufferModelAbslError> ReadConvModel() {
return ReadModel(mlir::lite::internal::kConvModelWith0Plus10Weights);
}
using ::testing::ElementsAreArray;
class QuantizationUtilsTest : public testing::Test {};
TEST_F(QuantizationUtilsTest, NumElements) {
TensorT tensor;
tensor.shape = {1, 2, 3, 4};
uint64_t num_elements;
TF_EXPECT_OK(NumElements(tensor, &num_elements));
EXPECT_EQ(num_elements, 1 * 2 * 3 * 4);
tensor.shape = {5};
TF_EXPECT_OK(NumElements(tensor, &num_elements));
EXPECT_EQ(num_elements, 5);
tensor.shape = {};
TF_EXPECT_OK(NumElements(tensor, &num_elements));
EXPECT_EQ(num_elements, 1);
tensor.shape = {1, 2, 3, -1};
EXPECT_EQ(NumElements(tensor, &num_elements).code(),
absl::StatusCode::kInternal);
}
TEST_F(QuantizationUtilsTest, SymmetricPerChannelQuantizationWithNullQParams) {
const std::vector<float> input = {
3.0, 2.0, 5.0, -2.0, 3.0, 2.0, 5.0, -2.0,
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
1.0, 0.0, -1.0, -2.0, -3.0, -4.0, -5.0, -6.0,
};
const int channel_index = 0;
std::vector<float> output_scales(3);
std::vector<int8_t> output_data(3 * 2 * 2 * 2);
TensorT tensor = TensorT();
tensor.quantization = nullptr;
tensor.shape = {3, 2, 2, 2};
TF_EXPECT_OK(mlir::lite::toco_legacy::SymmetricPerChannelQuantization(
&tensor, input.data(), channel_index, &output_scales, &output_data));
const std::vector<float> expected_output_scales = {0.0393700786, 0.0629921257,
0.0472440943};
const std::vector<int8_t> expected_output_data = {
76, 51, 127, -51, 76, 51, 127, -51,
16, 32, 48, 64, 79, 95, 111, 127,
21, 0, -21, -42, -64, -85, -106, -127,
};
EXPECT_THAT(output_scales, ElementsAreArray(expected_output_scales));
EXPECT_THAT(output_data, ElementsAreArray(expected_output_data));
}
TEST_F(QuantizationUtilsTest, SymmetricPerChannelQuantization) {
const std::vector<float> input = {
3.0, 2.0, 5.0, -2.0, 3.0, 2.0, 5.0, -2.0,
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
1.0, 0.0, -1.0, -2.0, -3.0, -4.0, -5.0, -6.0,
};
const int32_t channel_index = 0;
std::vector<float> output_scales(3);
std::vector<int8_t> output_data(3 * 2 * 2 * 2);
TensorT tensor = TensorT();
tensor.quantization = std::make_unique<QuantizationParametersT>();
tensor.shape = {3, 2, 2, 2};
TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax(
input.data(), tensor.shape, channel_index, tensor.quantization.get()));
const std::vector<float> expected_mins = {-2.0, 1.0, -6.0};
const std::vector<float> expected_maxs = {5.0, 8.0, 1.0};
EXPECT_THAT(tensor.quantization->min, ElementsAreArray(expected_mins));
EXPECT_THAT(tensor.quantization->max, ElementsAreArray(expected_maxs));
TF_EXPECT_OK(mlir::lite::toco_legacy::SymmetricPerChannelQuantization(
&tensor, input.data(), channel_index, &output_scales, &output_data));
const std::vector<float> expected_output_scales = {0.0393700786, 0.0629921257,
0.0472440943};
const std::vector<int8_t> expected_output_data = {
76, 51, 127, -51, 76, 51, 127, -51,
16, 32, 48, 64, 79, 95, 111, 127,
21, 0, -21, -42, -64, -85, -106, -127,
};
EXPECT_THAT(output_scales, ElementsAreArray(expected_output_scales));
EXPECT_THAT(output_data, ElementsAreArray(expected_output_data));
}
TEST_F(QuantizationUtilsTest, SymmetricPerChannelQuantization2DTensor) {
const std::vector<float> input = {
3.0, 2.0, 5.0, -2.0, 3.0, 2.0, 5.0, -2.0,
1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0,
1.0, 0.0, -1.0, -2.0, -3.0, -4.0, -5.0, -6.0,
};
const int32_t channel_index = 1;
std::vector<float> output_scales(8);
std::vector<int8_t> output_data(3 * 8);
TensorT tensor = TensorT();
tensor.quantization = std::make_unique<QuantizationParametersT>();
tensor.shape = {3, 8};
TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax(
input.data(), tensor.shape, channel_index, tensor.quantization.get()));
const std::vector<float> expected_mins = {1.0, 0.0, -1.0, -2.0,
-3.0, -4.0, -5.0, -6.0};
const std::vector<float> expected_maxs = {3.0, 2.0, 5.0, 4.0,
5.0, 6.0, 7.0, 8.0};
EXPECT_THAT(tensor.quantization->min, ElementsAreArray(expected_mins));
EXPECT_THAT(tensor.quantization->max, ElementsAreArray(expected_maxs));
TF_EXPECT_OK(mlir::lite::toco_legacy::SymmetricPerChannelQuantization(
&tensor, input.data(), channel_index, &output_scales, &output_data));
const std::vector<float> expected_output_scales = {
0.02362204724, 0.01574803149, 0.03937007874, 0.03149606299,
0.03937007874, 0.04724409448, 0.05511811023, 0.06299212598};
const std::vector<int8_t> expected_output_data = {
127, 127, 127, -64, 76, 42, 91, -32,
42, 127, 76, 127, 127, 127, 127, 127,
42, 0, -25, -64, -76, -85, -91, -95,
};
EXPECT_THAT(output_scales, ElementsAreArray(expected_output_scales));
EXPECT_THAT(output_data, ElementsAreArray(expected_output_data));
}
TEST_F(QuantizationUtilsTest, SymmetricPerChannelQuantizeValues) {
const std::vector<float> input = {
13.0, 21.0,
21.0, 22.0,
31.0, 40.0,
};
const std::vector<float> scales_inv = {2, 0.5, 3};
const std::vector<int32_t> dimension = {3, 1, 1, 2};
const int channel_index = 0;
std::vector<int8_t> output_data(3 * 1 * 1 * 2);
SymmetricPerChannelQuantizeValues(input.data(), scales_inv, dimension,
channel_index, &output_data);
const std::vector<int8_t> expected_output_data = {
26, 42,
11, 11,
93, 120,
};
EXPECT_THAT(output_data, ElementsAreArray(expected_output_data));
}
TEST_F(QuantizationUtilsTest, FillPerChannelMinMax) {
const std::vector<float> input = {
13.0, 21.0,
21.0, 22.0,
31.0, 40.0,
};
QuantizationParametersT quantization_params = QuantizationParametersT();
std::vector<int> dimension = {3, 1, 1, 2};
int32_t channel_dim_idx = 0;
const std::vector<float> expected_mins = {13.0, 21.0, 31.0};
const std::vector<float> expected_maxs = {21.0, 22.0, 40.0};
TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax(
input.data(), dimension, channel_dim_idx, &quantization_params));
EXPECT_EQ(quantization_params.min, expected_mins);
EXPECT_EQ(quantization_params.max, expected_maxs);
EXPECT_EQ(quantization_params.quantized_dimension, channel_dim_idx);
}
TEST_F(QuantizationUtilsTest, FillPerChannelMinMaxFillDim3) {
const std::vector<float> input = {
13.0, 21.0, 21.0, 22.0, 31.0, 40.0,
};
QuantizationParametersT quantization_params = QuantizationParametersT();
std::vector<int> dimension = {3, 1, 1, 2};
int32_t channel_dim_idx = 3;
const std::vector<float> expected_mins = {13.0, 21.0};
const std::vector<float> expected_maxs = {31.0, 40.0};
TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax(
input.data(), dimension, channel_dim_idx, &quantization_params));
EXPECT_EQ(quantization_params.min, expected_mins);
EXPECT_EQ(quantization_params.max, expected_maxs);
EXPECT_EQ(quantization_params.quantized_dimension, channel_dim_idx);
}
TEST_F(QuantizationUtilsTest, FillPerChannelMinMax2DTensor) {
const std::vector<float> input = {
13.0, 21.0, 21.0, 22.0, 31.0, 40.0,
};
QuantizationParametersT quantization_params = QuantizationParametersT();
std::vector<int> dimension = {3, 2};
int32_t channel_dim_idx = 1;
const std::vector<float> expected_mins = {13.0, 21.0};
const std::vector<float> expected_maxs = {31.0, 40.0};
TF_EXPECT_OK(mlir::lite::toco_legacy::FillPerChannelMinMax(
input.data(), dimension, channel_dim_idx, &quantization_params));
EXPECT_EQ(quantization_params.min, expected_mins);
EXPECT_EQ(quantization_params.max, expected_maxs);
EXPECT_EQ(quantization_params.quantized_dimension, channel_dim_idx);
}
TEST_F(QuantizationUtilsTest, SymmetricQuantizeTensorNullInputs) {
EXPECT_EQ(SymmetricQuantizeTensor(nullptr, nullptr).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(QuantizationUtilsTest, SymmetricQuantizeTensorNullQuantParams) {
ASSERT_TRUE(g_test_model_dir);
ASSERT_FALSE(g_test_model_dir->empty());
auto test_model = ReadConvModel();
ASSERT_TRUE(test_model);
auto readonly_model = test_model->GetModel();
ASSERT_TRUE(readonly_model);
ASSERT_TRUE(readonly_model->subgraphs());
ASSERT_GE(readonly_model->subgraphs()->size(), 1);
tflite::ModelT model;
readonly_model->UnPackTo(&model);
auto subgraph = model.subgraphs[0].get();
auto conv_op = subgraph->operators.at(0).get();
ASSERT_EQ(
GetBuiltinCode(model.operator_codes.at(conv_op->opcode_index).get()),
BuiltinOperator_CONV_2D);
int32_t weights_tensor_idx = conv_op->inputs[1];
TensorT* weights_tensor = subgraph->tensors.at(weights_tensor_idx).get();
weights_tensor->quantization = std::make_unique<QuantizationParametersT>();
EXPECT_EQ(weights_tensor->type, TensorType_FLOAT32);
size_t float_buffer_size =
model.buffers.at(weights_tensor->buffer)->data.size();
TF_EXPECT_OK(SymmetricQuantizeTensor(&model, weights_tensor));
size_t quant_buffer_size =
model.buffers.at(weights_tensor->buffer)->data.size();
EXPECT_EQ(weights_tensor->type, TensorType_INT8);
EXPECT_EQ(quant_buffer_size * 4, float_buffer_size);
}
TEST_F(QuantizationUtilsTest, SymmetricQuantizeTensor) {
ASSERT_TRUE(g_test_model_dir);
ASSERT_FALSE(g_test_model_dir->empty());
auto test_model = ReadConvModel();
ASSERT_TRUE(test_model);
auto readonly_model = test_model->GetModel();
ASSERT_TRUE(readonly_model);
ASSERT_TRUE(readonly_model->subgraphs());
ASSERT_GE(readonly_model->subgraphs()->size(), 1);
tflite::ModelT model;
readonly_model->UnPackTo(&model);
auto subgraph = model.subgraphs[0].get();
auto conv_op = subgraph->operators.at(0).get();
ASSERT_EQ(
GetBuiltinCode(model.operator_codes.at(conv_op->opcode_index).get()),
BuiltinOperator_CONV_2D);
int32_t weights_tensor_idx = conv_op->inputs[1];
TensorT* weights_tensor = subgraph->tensors.at(weights_tensor_idx).get();
EXPECT_EQ(weights_tensor->type, TensorType_FLOAT32);
size_t float_buffer_size =
model.buffers.at(weights_tensor->buffer)->data.size();
TF_EXPECT_OK(SymmetricQuantizeTensor(&model, weights_tensor));
size_t quant_buffer_size =
model.buffers.at(weights_tensor->buffer)->data.size();
EXPECT_EQ(weights_tensor->type, TensorType_INT8);
EXPECT_EQ(quant_buffer_size * 4, float_buffer_size);
}
TEST_F(QuantizationUtilsTest, QuantizeFloat16Clamp) {
  auto model = std::make_unique<tflite::ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto tensor = std::make_unique<TensorT>();
auto buffer = std::make_unique<tflite::BufferT>();
constexpr int kNumElements = 6;
const std::vector<float> weights = {2.0, 1.0, 65504., 65505, -65504., -99999};
auto weights_reinterpreted_data =
reinterpret_cast<const unsigned char*>(weights.data());
buffer->data.assign(weights_reinterpreted_data,
weights_reinterpreted_data + weights.size() * 4);
tensor->buffer = 0;
tensor->shape = {1, kNumElements};
model->subgraphs.push_back(std::move(subgraph));
model->subgraphs[0]->tensors.push_back(std::move(tensor));
model->buffers.push_back(std::move(buffer));
TF_EXPECT_OK(QuantizeTensorFloat16(model.get(),
model->subgraphs[0]->tensors[0].get()));
auto weightsf16 = reinterpret_cast<Eigen::half*>(
model->buffers[model->subgraphs[0]->tensors[0]->buffer]->data.data());
std::vector<float> wf32(kNumElements);
std::transform(weightsf16, weightsf16 + 6, wf32.begin(),
[](Eigen::half a) { return static_cast<float>(a); });
EXPECT_THAT(wf32,
ElementsAreArray({2.0, 1.0, 65504., 65504., -65504., -65504.}));
EXPECT_EQ(model->subgraphs[0]->tensors[0]->type, TensorType_FLOAT16);
}
TEST_F(QuantizationUtilsTest, QuantizeFloat16) {
ASSERT_TRUE(g_test_model_dir != nullptr);
ASSERT_FALSE(g_test_model_dir->empty());
auto test_model = ReadConvModel();
ASSERT_TRUE(test_model);
auto readonly_model = test_model->GetModel();
ASSERT_TRUE(readonly_model);
ASSERT_TRUE(readonly_model->subgraphs());
ASSERT_GE(readonly_model->subgraphs()->size(), 1);
tflite::ModelT model;
readonly_model->UnPackTo(&model);
auto subgraph = model.subgraphs[0].get();
auto conv_op = subgraph->operators.at(0).get();
ASSERT_EQ(
GetBuiltinCode(model.operator_codes.at(conv_op->opcode_index).get()),
BuiltinOperator_CONV_2D);
int32_t weights_tensor_idx = conv_op->inputs[1];
TensorT* weights_tensor = subgraph->tensors.at(weights_tensor_idx).get();
EXPECT_EQ(weights_tensor->type, TensorType_FLOAT32);
size_t float_buffer_size =
model.buffers.at(weights_tensor->buffer)->data.size();
TF_EXPECT_OK(QuantizeTensorFloat16(&model, weights_tensor));
size_t quant_buffer_size =
model.buffers.at(weights_tensor->buffer)->data.size();
EXPECT_EQ(weights_tensor->type, TensorType_FLOAT16);
EXPECT_EQ(quant_buffer_size * 2, float_buffer_size);
}
TEST_F(QuantizationUtilsTest, AddQuantizationParams) {
  auto model = std::make_unique<tflite::ModelT>();
auto subgraph = std::make_unique<tflite::SubGraphT>();
auto tensor = std::make_unique<TensorT>();
auto buffer = std::make_unique<tflite::BufferT>();
const std::vector<float> scales = {0.5, 1.0, 1.5};
const std::vector<int64_t> zero_points = {5, 10, 15};
  const int32_t quantized_dimension = 3;
const std::vector<uint8_t> buffer_data = {1, 2, 3, 4};
const int32_t buffer_size = 4;
tensor->buffer = 0;
model->subgraphs.push_back(std::move(subgraph));
model->subgraphs[0]->tensors.push_back(std::move(tensor));
model->buffers.push_back(std::move(buffer));
  TF_EXPECT_OK(AddQuantizationParams(scales, zero_points, quantized_dimension,
buffer_data.data(), buffer_size,
TensorType_INT8, model.get(),
model->subgraphs[0]->tensors[0].get()));
EXPECT_THAT(model->subgraphs[0]->tensors[0]->quantization->scale,
ElementsAreArray(scales));
EXPECT_THAT(model->subgraphs[0]->tensors[0]->quantization->zero_point,
ElementsAreArray(zero_points));
EXPECT_THAT(model->buffers[model->subgraphs[0]->tensors[0]->buffer]->data,
ElementsAreArray(buffer_data));
EXPECT_EQ(model->subgraphs[0]->tensors[0]->type, TensorType_INT8);
}
}
}
}
}
int main(int argc, char** argv) {
std::string model_file;
const std::vector<tsl::Flag> flag_list = {
tsl::Flag("test_model_file", &model_file,
"Path to test tflite model file."),
};
const bool parse_result = tsl::Flags::Parse(&argc, argv, flag_list);
if (!parse_result) {
std::cerr << "Required test_model_file\n";
std::abort();
}
g_test_model_dir = new std::string(tsl::io::Dirname(model_file));
::tsl::port::InitMain(argv[0], &argc, &argv);
return RUN_ALL_TESTS();
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/quantization/lite/toco_legacy/quantization_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
425d073d-3f27-40e9-99b6-df84309bd750 | cpp | google/tensorstore | data_type | tensorstore/internal/json_binding/data_type.cc | tensorstore/internal/json_binding/data_type_test.cc | #include "tensorstore/internal/json_binding/data_type.h"
#include <string>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json/json.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_json_binding {
TENSORSTORE_DEFINE_JSON_BINDER(DataTypeJsonBinder, [](auto is_loading,
const auto& options,
auto* obj,
::nlohmann::json* j) {
if constexpr (is_loading) {
return internal_json_binding::Compose<std::string>(
[](auto is_loading, const auto& options, DataType* obj, auto* id) {
*obj = tensorstore::GetDataType(*id);
if (!obj->valid()) {
return absl::Status(
absl::StatusCode::kInvalidArgument,
tensorstore::StrCat("Unsupported data type: ",
tensorstore::QuoteString(*id)));
}
return absl::OkStatus();
})(is_loading, options, obj, j);
} else {
if (!obj->valid()) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
} else if (obj->id() == DataTypeId::custom) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"Data type has no canonical identifier");
} else {
*j = obj->name();
}
return absl::OkStatus();
}
})
TENSORSTORE_DEFINE_JSON_BINDER(OptionalDataTypeJsonBinder,
[](auto is_loading, const auto& options,
auto* obj, ::nlohmann::json* j) {
if constexpr (is_loading) {
if (j->is_discarded()) {
*obj = DataType{};
return absl::OkStatus();
}
}
return DataTypeJsonBinder(is_loading, options,
obj, j);
})
TENSORSTORE_DEFINE_JSON_BINDER(
ConstrainedDataTypeJsonBinder,
[](auto is_loading, const auto& options, auto* obj, ::nlohmann::json* j) {
return Validate(
[](const auto& options, DataType* d) {
if (options.dtype().valid() && d->valid() &&
options.dtype() != *d) {
return absl::InvalidArgumentError(
tensorstore::StrCat("Expected data type of ", options.dtype(),
" but received: ", *d));
}
return absl::OkStatus();
},
DefaultValue([dtype = options.dtype()](DataType* d) { *d = dtype; }))(
is_loading, options, obj, j);
})
}
} | #include "tensorstore/internal/json_binding/data_type.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/data_type.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::MatchesStatus;
namespace jb = tensorstore::internal_json_binding;
namespace {
struct X {};
TEST(DataTypeJsonBinderTest, ToJson) {
EXPECT_THAT(jb::ToJson(DataType(dtype_v<std::int32_t>)),
::testing::Optional(::nlohmann::json("int32")));
EXPECT_THAT(jb::ToJson(DataType(dtype_v<bool>)),
::testing::Optional(::nlohmann::json("bool")));
EXPECT_THAT(jb::ToJson(DataType(dtype_v<X>)),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Data type has no canonical identifier"));
EXPECT_THAT(jb::ToJson(DataType{}),
::testing::Optional(tensorstore::MatchesJson(
::nlohmann::json(::nlohmann::json::value_t::discarded))));
}
TEST(DataTypeJsonBinderTest, FromJson) {
EXPECT_THAT(jb::FromJson<DataType>(::nlohmann::json("int32")),
::testing::Optional(dtype_v<std::int32_t>));
EXPECT_THAT(jb::FromJson<DataType>(::nlohmann::json("bool")),
::testing::Optional(dtype_v<bool>));
EXPECT_THAT(jb::FromJson<DataType>(::nlohmann::json("invalid")),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Unsupported data type: \"invalid\""));
EXPECT_THAT(jb::FromJson<DataType>(
::nlohmann::json(::nlohmann::json::value_t::discarded)),
::testing::Optional(DataType{}));
EXPECT_THAT(jb::FromJson<DataType>(
::nlohmann::json(::nlohmann::json::value_t::discarded),
tensorstore::internal_json_binding::DataTypeJsonBinder),
MatchesStatus(absl::StatusCode::kInvalidArgument));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/data_type.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/json_binding/data_type_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
3aee648e-9f7e-4188-b1ab-86279bc91b57 | cpp | tensorflow/tensorflow | register_common_dialects | tensorflow/compiler/mlir/register_common_dialects.cc | tensorflow/compiler/mlir/register_common_dialects_test.cc | #include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "mlir/Dialect/Quant/IR/Quant.h"
#include "mlir/Dialect/Shape/IR/Shape.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Tosa/IR/TosaOps.h"
#include "mlir/InitAllDialects.h"
#include "mlir/InitAllExtensions.h"
#include "stablehlo/dialect/Register.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_dialect.h"
#include "tensorflow/compiler/mlir/tools/kernel_gen/ir/tf_framework_ops.h"
#include "xla/mlir/framework/ir/xla_framework.h"
#include "xla/mlir_hlo/mhlo/IR/register.h"
#include "tensorflow/core/ir/types/dialect.h"
namespace mlir {
void RegisterCommonToolingDialects(mlir::DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
mlir::mhlo::registerAllMhloDialects(registry);
mlir::registerAllDialects(registry);
mlir::registerAllExtensions(registry);
mlir::stablehlo::registerAllDialects(registry);
registry.insert<mlir::TFL::TensorFlowLiteDialect>();
registry.insert<mlir::kernel_gen::tf_framework::TFFrameworkDialect>();
registry.insert<mlir::quant::QuantDialect>();
registry.insert<mlir::quantfork::QuantizationForkDialect>();
registry.insert<mlir::shape::ShapeDialect>();
registry.insert<mlir::tensor::TensorDialect>();
registry.insert<mlir::tosa::TosaDialect>();
registry.insert<mlir::xla_framework::XLAFrameworkDialect,
mlir::TF::TensorFlowDialect, mlir::tf_type::TFTypeDialect>();
}
}; | #include "tensorflow/compiler/mlir/register_common_dialects.h"
#include <gtest/gtest.h>
#include "mlir/IR/DialectRegistry.h"
namespace mlir {
namespace {
TEST(RegisterCommonDialectsTest, DoesntCrash) {
mlir::DialectRegistry registry;
mlir::RegisterCommonToolingDialects(registry);
EXPECT_FALSE(registry.getDialectNames().empty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/register_common_dialects.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/register_common_dialects_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b0f85722-0479-4365-86f1-257131d04adf | cpp | tensorflow/tensorflow | deadness_analysis | tensorflow/compiler/jit/deadness_analysis.cc | tensorflow/compiler/jit/deadness_analysis_test.cc | #include "tensorflow/compiler/jit/deadness_analysis.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/deadness_analysis_internal.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/control_flow.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/hash/hash.h"
namespace tensorflow {
namespace {
using tsl::StatusOr;
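// Represents a logical predicate over the liveness of graph edges. Concrete
// kinds are AND, OR, NOT, and-recurrence, and (int) symbol predicates.
// Instances are interned by PredicateFactory, so pointer equality implies
// structural equality.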
class Predicate {
public:
enum class Kind { kAnd, kOr, kNot, kAndRecurrence, kSymbol, kIntSymbol };
virtual string ToString() const = 0;
int64_t id() const { return id_; }
virtual absl::Span<Predicate* const> GetOperands() const = 0;
virtual Kind kind() const = 0;
virtual ~Predicate() {}
template <typename FunctionTy>
static void Visit(Predicate* p, const FunctionTy& func);
protected:
explicit Predicate(int64_t id) : id_(id) {}
private:
const int64_t id_;
Predicate(const Predicate&) = delete;
void operator=(const Predicate&) = delete;
};
class AndPredicate : public Predicate {
public:
explicit AndPredicate(int64_t id, std::vector<Predicate*> operands)
: Predicate(id), operands_(std::move(operands)) {}
string ToString() const override {
if (operands().empty()) {
return "#true";
}
std::vector<string> operands_str;
std::transform(operands().begin(), operands().end(),
std::back_inserter(operands_str),
[](Predicate* pred) { return pred->ToString(); });
return absl::StrCat("(", absl::StrJoin(operands_str, " & "), ")");
}
Kind kind() const override { return Kind::kAnd; }
absl::Span<Predicate* const> GetOperands() const override {
return operands_;
}
absl::Span<Predicate* const> operands() const { return operands_; }
private:
std::vector<Predicate*> operands_;
};
class OrPredicate : public Predicate {
public:
explicit OrPredicate(int64_t id, std::vector<Predicate*> operands)
: Predicate(id), operands_(std::move(operands)) {}
string ToString() const override {
if (operands().empty()) {
return "#false";
}
std::vector<string> operands_str;
std::transform(operands().begin(), operands().end(),
std::back_inserter(operands_str),
[](Predicate* pred) { return pred->ToString(); });
return absl::StrCat("(", absl::StrJoin(operands_str, " | "), ")");
}
Kind kind() const override { return Kind::kOr; }
absl::Span<Predicate* const> GetOperands() const override {
return operands_;
}
absl::Span<Predicate* const> operands() const { return operands_; }
private:
std::vector<Predicate*> operands_;
};
class NotPredicate : public Predicate {
public:
explicit NotPredicate(int64_t id, Predicate* operand)
: Predicate(id), operands_({operand}) {}
string ToString() const override {
return absl::StrCat("~", operand()->ToString());
}
Kind kind() const override { return Kind::kNot; }
Predicate* operand() const { return operands_[0]; }
absl::Span<Predicate* const> GetOperands() const override {
return operands_;
}
private:
std::array<Predicate*, 1> operands_;
};
class AndRecurrencePredicate : public Predicate {
public:
explicit AndRecurrencePredicate(int64_t id, Predicate* start, Predicate* step,
std::vector<string> frame)
: Predicate(id), operands_({start, step}), frame_(std::move(frame)) {}
Predicate* start() const { return operands_[0]; }
Predicate* step() const { return operands_[1]; }
absl::Span<const string> frame() const { return frame_; }
string ToString() const override {
return absl::StrCat("{", start()->ToString(), ",&,", step()->ToString(),
"}<", absl::StrJoin(frame(), ";"), ">");
}
Kind kind() const override { return Kind::kAndRecurrence; }
absl::Span<Predicate* const> GetOperands() const override {
return operands_;
}
private:
std::array<Predicate*, 2> operands_;
std::vector<string> frame_;
};
class SymbolPredicate : public Predicate {
public:
explicit SymbolPredicate(int64_t id, TensorId tensor_id, bool must_be_true)
: Predicate(id),
tensor_id_(std::move(tensor_id)),
must_be_true_(must_be_true) {}
string ToString() const override {
return must_be_true() ? absl::StrCat("*", tensor_id_.ToString())
: tensor_id_.ToString();
}
Kind kind() const override { return Kind::kSymbol; }
absl::Span<Predicate* const> GetOperands() const override { return {}; }
TensorId tensor_id() const { return tensor_id_; }
bool must_be_true() const { return must_be_true_; }
private:
TensorId tensor_id_;
bool must_be_true_;
};
class IntSymbolPredicate : public Predicate {
public:
explicit IntSymbolPredicate(int64_t id, TensorId tensor_id,
std::optional<int> must_have_value)
: Predicate(id),
tensor_id_(std::move(tensor_id)),
must_have_value_(must_have_value) {}
string ToString() const override {
return must_have_value().has_value()
? absl::StrCat(tensor_id_.ToString(), "=", *must_have_value_)
: tensor_id_.ToString();
}
Kind kind() const override { return Kind::kIntSymbol; }
absl::Span<Predicate* const> GetOperands() const override { return {}; }
TensorId tensor_id() const { return tensor_id_; }
const std::optional<int>& must_have_value() const { return must_have_value_; }
private:
TensorId tensor_id_;
std::optional<int> must_have_value_;
};
template <typename FunctionTy>
void Predicate::Visit(Predicate* p, const FunctionTy& func) {
absl::flat_hash_set<Predicate*> visited;
std::vector<Predicate*> stack;
stack.push_back(p);
visited.insert(p);
while (!stack.empty()) {
Predicate* current = stack.back();
stack.pop_back();
bool done = func(current);
if (done) {
return;
}
for (Predicate* op : current->GetOperands()) {
if (visited.insert(op).second) {
stack.push_back(op);
}
}
}
}
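// Creates and owns Predicate instances, interning them so that structurally
// identical predicates are represented by the same object. Also performs
// basic simplifications (flattening, deduplication, De Morgan, x & ~x => F).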
class PredicateFactory {
public:
Predicate* MakeAndPredicate(absl::Span<Predicate* const> operands) {
return MakeAndOrImpl(operands, true);
}
Predicate* MakeOrPredicate(absl::Span<Predicate* const> operands) {
return MakeAndOrImpl(operands, false);
}
Predicate* MakeNotPredicate(Predicate* pred) {
auto it = make_not_predicate_cache_.find(pred);
if (it != make_not_predicate_cache_.end()) {
return it->second;
}
Predicate* result = MakeNotPredicateImpl(pred);
bool insert_successful =
make_not_predicate_cache_.insert({pred, result}).second;
(void)insert_successful;
DCHECK(insert_successful);
return result;
}
Predicate* MakeAndRecurrencePredicate(Predicate* start, Predicate* step,
std::vector<string> frame) {
SignatureForAndRec signature(start, step, std::move(frame));
auto it = interned_and_rec_instances_.find(signature);
if (it != interned_and_rec_instances_.end()) {
return it->second.get();
}
std::unique_ptr<Predicate> new_pred = Make<AndRecurrencePredicate>(
std::get<0>(signature), std::get<1>(signature), std::get<2>(signature));
Predicate* new_pred_ptr = new_pred.get();
bool inserted =
interned_and_rec_instances_.emplace(signature, std::move(new_pred))
.second;
(void)inserted;
DCHECK(inserted);
return new_pred_ptr;
}
Status MakeSymbolPredicate(Node* node, int output_idx, bool must_be_true,
Predicate** predicate) {
TensorId tensor_id(node->name(), output_idx);
bool is_boolean_tensor =
BaseType(node->output_type(tensor_id.index())) == DT_BOOL;
TF_RET_CHECK(!must_be_true || is_boolean_tensor);
if (node->type_string() == "Const" && must_be_true) {
const TensorProto* proto = nullptr;
TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "value", &proto));
Tensor tensor(proto->dtype());
TF_RET_CHECK(tensor.FromProto(*proto));
*predicate = tensor.scalar<bool>()() ? MakeTrue() : MakeFalse();
return absl::OkStatus();
}
SignatureForSymbol signature = {tensor_id, must_be_true};
auto it = interned_symbol_instances_.find(signature);
if (it == interned_symbol_instances_.end()) {
std::unique_ptr<Predicate> new_pred =
Make<SymbolPredicate>(tensor_id, must_be_true);
Predicate* new_pred_ptr = new_pred.get();
interned_symbol_instances_.emplace(std::move(signature),
std::move(new_pred));
*predicate = new_pred_ptr;
} else {
*predicate = it->second.get();
}
return absl::OkStatus();
}
Status MakeSymbolPredicate(Node* node, int output_idx,
std::optional<int> must_have_value,
Predicate** predicate) {
TensorId tensor_id(node->name(), output_idx);
TF_RET_CHECK(BaseType(node->output_type(tensor_id.index())) == DT_INT32);
if (must_have_value.has_value() && node->type_string() == "Const") {
const TensorProto* proto = nullptr;
TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "value", &proto));
Tensor tensor(proto->dtype());
TF_RET_CHECK(tensor.FromProto(*proto));
*predicate = tensor.scalar<int32>()() == *must_have_value ? MakeTrue()
: MakeFalse();
return absl::OkStatus();
}
SignatureForIntSymbol signature = {tensor_id, must_have_value};
auto it = interned_int_symbol_instances_.find(signature);
if (it == interned_int_symbol_instances_.end()) {
std::unique_ptr<Predicate> new_pred =
Make<IntSymbolPredicate>(tensor_id, must_have_value);
Predicate* new_pred_ptr = new_pred.get();
interned_int_symbol_instances_.emplace(std::move(signature),
std::move(new_pred));
*predicate = new_pred_ptr;
} else {
*predicate = it->second.get();
}
return absl::OkStatus();
}
Predicate* MakeTrue() { return MakeAndPredicate({}); }
Predicate* MakeFalse() { return MakeOrPredicate({}); }
~PredicateFactory() {
DCHECK_EQ(stack_depth_, 0) << "Unnested IncrementStackDepth?";
}
private:
Predicate* MakeNotPredicateImpl(Predicate* pred) {
IncrementStackDepth stack_frame(this);
if (!stack_frame.HasOverflowed()) {
if (Predicate* simplified = SimplifyUsingDeMorgan(pred)) {
return simplified;
}
if (auto* not_pred = dynamic_cast<NotPredicate*>(pred)) {
return not_pred->operand();
}
}
SignatureForNot signature = pred;
auto it = interned_not_instances_.find(signature);
if (it == interned_not_instances_.end()) {
std::unique_ptr<Predicate> new_pred = Make<NotPredicate>(pred);
Predicate* new_pred_ptr = new_pred.get();
interned_not_instances_.emplace(signature, std::move(new_pred));
return new_pred_ptr;
} else {
return it->second.get();
}
}
Predicate* SimplifyUsingDeMorgan(Predicate* pred) {
Predicate::Kind kind = pred->kind();
if (kind == Predicate::Kind::kAnd || kind == Predicate::Kind::kOr) {
std::vector<Predicate*> new_operands;
absl::c_transform(pred->GetOperands(), std::back_inserter(new_operands),
[&](Predicate* p) { return MakeNotPredicate(p); });
return kind == Predicate::Kind::kOr ? MakeAndPredicate(new_operands)
: MakeOrPredicate(new_operands);
}
return nullptr;
}
template <typename PredicateT, typename... Args>
std::unique_ptr<Predicate> Make(Args&&... args) {
return std::unique_ptr<PredicateT>(
new PredicateT(id_counter_++, std::forward<Args>(args)...));
}
Predicate* MakeAndOrImpl(absl::Span<Predicate* const> operands, bool is_and);
Predicate* MakeInternedAndOr(std::vector<Predicate*> simplified_ops,
Predicate::Kind pred_kind);
using SignatureForAndOr =
std::pair<Predicate::Kind, absl::Span<Predicate* const>>;
using SignatureForNot = Predicate*;
using SignatureForAndRec =
std::tuple<Predicate*, Predicate*, std::vector<string>>;
using SignatureForSymbol = std::pair<SafeTensorId, bool>;
using SignatureForIntSymbol = std::pair<SafeTensorId, std::optional<int32>>;
struct HashSignatureForAndOr {
size_t operator()(const SignatureForAndOr& signature) const {
size_t hash = ::tensorflow::hash<Predicate::Kind>()(signature.first);
for (Predicate* p : signature.second) {
hash = Hash64Combine(hash, ::tensorflow::hash<Predicate*>()(p));
}
return hash;
}
};
struct HashSignatureForSymbol {
size_t operator()(const SignatureForSymbol& signature) const {
return Hash64Combine(SafeTensorId::Hasher()(signature.first),
::tensorflow::hash<bool>()(signature.second));
}
};
struct HashSignatureForIntSymbol {
size_t operator()(const SignatureForIntSymbol& signature) const {
return Hash64Combine(
SafeTensorId::Hasher()(signature.first),
Hash64Combine(
::tensorflow::hash<bool>()(signature.second.has_value()),
::tensorflow::hash<int32>()(
signature.second.has_value() ? *signature.second : 0)));
}
};
class IncrementStackDepth {
public:
explicit IncrementStackDepth(PredicateFactory* parent) : parent_(parent) {
parent_->stack_depth_++;
}
bool HasOverflowed() const {
const int kMaxStackDepth = 8;
return parent_->stack_depth_ >= kMaxStackDepth;
}
~IncrementStackDepth() { parent_->stack_depth_--; }
private:
PredicateFactory* parent_;
};
absl::flat_hash_map<Predicate*, Predicate*> make_not_predicate_cache_;
absl::flat_hash_map<SignatureForAndOr, std::unique_ptr<Predicate>,
HashSignatureForAndOr>
interned_and_or_instances_;
absl::flat_hash_map<SignatureForNot, std::unique_ptr<Predicate>>
interned_not_instances_;
absl::flat_hash_map<SignatureForAndRec, std::unique_ptr<Predicate>>
interned_and_rec_instances_;
absl::flat_hash_map<SignatureForSymbol, std::unique_ptr<Predicate>,
HashSignatureForSymbol>
interned_symbol_instances_;
absl::flat_hash_map<SignatureForIntSymbol, std::unique_ptr<Predicate>,
HashSignatureForIntSymbol>
interned_int_symbol_instances_;
int64_t id_counter_ = 0;
int stack_depth_ = 0;
};
Predicate* PredicateFactory::MakeInternedAndOr(
std::vector<Predicate*> simplified_ops, Predicate::Kind pred_kind) {
std::stable_sort(
simplified_ops.begin(), simplified_ops.end(),
[](Predicate* a, Predicate* b) { return a->id() < b->id(); });
auto it = interned_and_or_instances_.find({pred_kind, simplified_ops});
if (it != interned_and_or_instances_.end()) {
return it->second.get();
}
simplified_ops.shrink_to_fit();
absl::Span<Predicate* const> operands_slice = simplified_ops;
std::unique_ptr<Predicate> new_pred =
pred_kind == Predicate::Kind::kAnd
? Make<AndPredicate>(std::move(simplified_ops))
: Make<OrPredicate>(std::move(simplified_ops));
Predicate* new_pred_ptr = new_pred.get();
interned_and_or_instances_.emplace(
SignatureForAndOr(pred_kind, operands_slice), std::move(new_pred));
return new_pred_ptr;
}
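// Builds an AND or OR predicate over `operands`, flattening nested ops of the
// same kind, removing duplicates, short-circuiting x & ~x (and x | ~x), and
// factoring out sub-expressions common to all operands.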
Predicate* PredicateFactory::MakeAndOrImpl(
absl::Span<Predicate* const> operands, bool is_and) {
Predicate::Kind pred_kind =
is_and ? Predicate::Kind::kAnd : Predicate::Kind::kOr;
IncrementStackDepth stack_frame(this);
if (stack_frame.HasOverflowed()) {
return MakeInternedAndOr(
std::vector<Predicate*>(operands.begin(), operands.end()), pred_kind);
}
Predicate::Kind other_pred_kind =
is_and ? Predicate::Kind::kOr : Predicate::Kind::kAnd;
absl::flat_hash_set<Predicate*> simplified_ops_set;
std::vector<Predicate*> simplified_ops;
for (Predicate* op : operands) {
if (!simplified_ops_set.insert(op).second) {
continue;
}
if (op->kind() == pred_kind) {
for (Predicate* subop : op->GetOperands()) {
if (simplified_ops_set.insert(subop).second) {
simplified_ops.push_back(subop);
}
}
} else {
simplified_ops.push_back(op);
}
}
if (simplified_ops.size() == 1) {
return simplified_ops[0];
}
absl::flat_hash_set<Predicate*> negated_ops;
for (Predicate* op : simplified_ops) {
if (negated_ops.count(op)) {
return is_and ? MakeFalse() : MakeTrue();
}
Predicate* negated_op = MakeNotPredicate(op);
if (negated_op->kind() == pred_kind) {
if (absl::c_all_of(negated_op->GetOperands(), [&](Predicate* p) {
return simplified_ops_set.contains(p);
})) {
return is_and ? MakeFalse() : MakeTrue();
}
}
negated_ops.insert(negated_op);
}
if (is_and) {
absl::flat_hash_set<Predicate*> to_remove;
std::vector<Predicate*> to_add;
for (Predicate* op : simplified_ops) {
if (op->kind() == Predicate::Kind::kAndRecurrence) {
auto* and_rec = static_cast<AndRecurrencePredicate*>(op);
if (negated_ops.contains(and_rec->step())) {
to_remove.insert(and_rec);
to_remove.insert(MakeNotPredicate(and_rec->step()));
to_add.push_back(and_rec->start());
}
}
}
auto it = simplified_ops.begin();
while (it != simplified_ops.end()) {
if (to_remove.contains(*it)) {
it = simplified_ops.erase(it);
} else {
++it;
}
}
simplified_ops.insert(simplified_ops.end(), to_add.begin(), to_add.end());
}
std::vector<Predicate*> common_inner_operands;
absl::flat_hash_set<Predicate*> common_inner_operands_set;
for (Predicate* op : simplified_ops) {
if (op->kind() != other_pred_kind) {
common_inner_operands.clear();
break;
}
if (common_inner_operands.empty()) {
common_inner_operands.insert(common_inner_operands.end(),
op->GetOperands().begin(),
op->GetOperands().end());
} else {
common_inner_operands.clear();
absl::c_copy_if(op->GetOperands(),
std::back_inserter(common_inner_operands),
[&](Predicate* sub_op) {
return common_inner_operands_set.count(sub_op) == 1;
});
}
if (common_inner_operands.empty()) break;
common_inner_operands_set.clear();
common_inner_operands_set.insert(common_inner_operands.begin(),
common_inner_operands.end());
}
if (common_inner_operands.empty()) {
return MakeInternedAndOr(std::move(simplified_ops), pred_kind);
}
std::vector<Predicate*> factored_ops;
for (Predicate* op : simplified_ops) {
std::vector<Predicate*> new_sub_op_ops;
absl::c_copy_if(op->GetOperands(), std::back_inserter(new_sub_op_ops),
[&](Predicate* sub_op) {
return std::find(common_inner_operands.begin(),
common_inner_operands.end(),
sub_op) == common_inner_operands.end();
});
factored_ops.push_back(MakeAndOrImpl(new_sub_op_ops, !is_and));
}
Predicate* new_inner_op = MakeAndOrImpl(factored_ops, is_and);
std::vector<Predicate*> outer_ops;
outer_ops.push_back(new_inner_op);
outer_ops.insert(outer_ops.end(), common_inner_operands.begin(),
common_inner_operands.end());
return MakeAndOrImpl(outer_ops, !is_and);
}
class DeadnessAnalysisImpl : public DeadnessAnalysis {
public:
explicit DeadnessAnalysisImpl(const Graph* graph)
: graph_(*graph), vlog_(VLOG_IS_ON(2)) {}
Status Populate(bool enable_optimistic);
Status PopulateFrame(absl::Span<Node* const> topo, bool use_optimistic_mode,
bool* success);
absl::StatusOr<DeadnessAnalysis::DeadnessPredicate> GetPredicateFor(
Node* n, int oidx) const override;
void Print() const override;
absl::flat_hash_map<TensorId, string, TensorId::Hasher> PredicateMapAsString()
const;
private:
enum class EdgeKind { kDataAndControl, kDataOnly, kControlOnly };
Status GetInputPreds(Node* n, EdgeKind edge_kind,
std::vector<Predicate*>* result);
void SetPredicate(Node* n, int output_idx, Predicate* pred,
std::vector<bool>* should_revisit) {
auto insert_result =
predicate_map_.insert({TensorId(n->name(), output_idx), pred});
if (!insert_result.second && insert_result.first->second != pred) {
VLOG(4) << "For " << n->name() << ":" << output_idx << " from "
<< insert_result.first->second->ToString() << " "
<< insert_result.first->second << " to " << pred->ToString()
<< " " << pred;
insert_result.first->second = pred;
if (should_revisit != nullptr) {
for (const Edge* e : n->out_edges()) {
(*should_revisit)[e->dst()->id()] = true;
}
}
}
}
void SetPredicate(Node* n, absl::Span<const int> output_idxs, Predicate* pred,
std::vector<bool>* should_revisit) {
for (int output_idx : output_idxs) {
SetPredicate(n, output_idx, pred, should_revisit);
}
}
Status HandleSwitch(Node* n, std::vector<bool>* should_revisit);
Status HandleMerge(Node* n, std::vector<bool>* should_revisit,
bool use_optimistic_mode);
Status HandleRecv(Node* n, std::vector<bool>* should_revisit);
Status HandleGeneric(Node* n, std::vector<bool>* should_revisit);
Status HandleNode(Node* n, std::vector<bool>* should_revisit,
bool use_optimistic_mode = false);
Status GetFrameBasedTopologicalOrder(std::vector<Node*>* order);
bool IsRootEnter(const Node* n) const {
return IsEnter(n) && control_flow_info_[n->id()].parent_frame->IsSource();
}
bool IsRootExit(const Node* n) const {
return IsExit(n) && control_flow_info_[n->id()].parent_frame->IsSource();
}
const Graph& graph_;
absl::flat_hash_map<TensorId, Predicate*, TensorId::Hasher> predicate_map_;
PredicateFactory predicate_factory_;
std::vector<ControlFlowInfo> control_flow_info_;
bool vlog_;
absl::flat_hash_map<absl::string_view, Node*> frame_to_merge_node_;
};
TensorId InputEdgeToTensorId(const Edge* e) {
return TensorId(e->src()->name(), e->src_output());
}
Status DeadnessAnalysisImpl::GetInputPreds(
Node* n, DeadnessAnalysisImpl::EdgeKind edge_kind,
std::vector<Predicate*>* result) {
result->clear();
for (const Edge* in_edge : n->in_edges()) {
bool should_process =
edge_kind == EdgeKind::kDataAndControl ||
(in_edge->IsControlEdge() && edge_kind == EdgeKind::kControlOnly) ||
(!in_edge->IsControlEdge() && edge_kind == EdgeKind::kDataOnly);
if (should_process) {
auto it = predicate_map_.find(InputEdgeToTensorId(in_edge));
if (it == predicate_map_.end()) {
xla::GraphCycles graph_cycles;
TF_RETURN_IF_ERROR(
CreateCycleDetectionGraph(&graph_, &graph_cycles).status());
return errors::Internal("Could not find input ", in_edge->DebugString(),
" to ", n->name(),
" when visiting the graph in post-order. Most "
"likely indicates a bug in deadness analysis.");
}
result->push_back(it->second);
}
}
return absl::OkStatus();
}
Status DeadnessAnalysisImpl::HandleSwitch(Node* n,
std::vector<bool>* should_revisit) {
std::vector<Predicate*> input_preds;
TF_RETURN_IF_ERROR(GetInputPreds(n, EdgeKind::kDataAndControl, &input_preds));
const Edge* pred_edge;
TF_RETURN_IF_ERROR(n->input_edge(1, &pred_edge));
if (n->type_string() != "_SwitchN") {
Predicate* true_switch;
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
pred_edge->src(), pred_edge->src_output(),
true, &true_switch));
Predicate* false_switch = predicate_factory_.MakeNotPredicate(true_switch);
input_preds.push_back(false_switch);
SetPredicate(n, 0, predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
input_preds.pop_back();
input_preds.push_back(true_switch);
SetPredicate(n, 1, predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
input_preds.pop_back();
} else {
Predicate* branch_pred;
for (int i = 0; i < n->num_outputs() - 1; i++) {
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
pred_edge->src(), pred_edge->src_output(),
std::optional<int32>(i), &branch_pred));
input_preds.push_back(branch_pred);
SetPredicate(n, i, predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
input_preds.pop_back();
input_preds.push_back(predicate_factory_.MakeNotPredicate(branch_pred));
}
SetPredicate(n, n->num_outputs() - 1,
predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
}
SetPredicate(n, Graph::kControlSlot,
predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
return absl::OkStatus();
}
namespace {
Status CreateMultipleNextIterationInputsError(Node* merge) {
std::vector<string> backedges;
for (const Edge* backedge : merge->in_edges()) {
if (backedge->src()->IsNextIteration()) {
backedges.push_back(absl::StrCat(" ", SummarizeNode(*backedge->src())));
}
}
return errors::InvalidArgument(
"Multiple NextIteration inputs to merge node ",
FormatNodeForError(*merge), ": \n", absl::StrJoin(backedges, "\n"),
"\nMerge nodes can have at most one incoming NextIteration edge.");
}
Status FindUniqueBackedge(Node* merge, const Edge** result) {
*result = nullptr;
CHECK(merge->IsMerge());
for (const Edge* e : merge->in_edges()) {
if (e->src()->IsNextIteration()) {
if (*result != nullptr) {
return CreateMultipleNextIterationInputsError(merge);
}
*result = e;
}
}
return absl::OkStatus();
}
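// If `backedge_predicate` is an AND that contains `symbolic_predicate` as a
// direct operand (and nowhere deeper), returns the conjunction of the other
// operands, i.e. the "step" of the loop recurrence. Returns nullptr otherwise.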
Predicate* DeduceStepPredicate(PredicateFactory* predicate_factory,
Predicate* symbolic_predicate,
Predicate* backedge_predicate) {
CHECK(dynamic_cast<SymbolPredicate*>(symbolic_predicate));
if (backedge_predicate->kind() != Predicate::Kind::kAnd) {
return nullptr;
}
std::vector<Predicate*> and_ops;
absl::Span<Predicate* const> recurrent_pred_ops =
backedge_predicate->GetOperands();
bool found_sym = false;
for (Predicate* and_op : recurrent_pred_ops) {
if (and_op == symbolic_predicate) {
found_sym = true;
continue;
}
bool found_sym_as_inner_operand = false;
auto has_self_as_inner_operand = [&](Predicate* p) {
if (p == symbolic_predicate) {
found_sym_as_inner_operand = true;
return true;
}
return false;
};
Predicate::Visit(and_op, has_self_as_inner_operand);
if (found_sym_as_inner_operand) {
return nullptr;
}
and_ops.push_back(and_op);
}
return found_sym ? predicate_factory->MakeAndPredicate(and_ops) : nullptr;
}
Status GetFullFrame(const Node* n, absl::Span<const ControlFlowInfo> cfi_infos,
std::vector<string>* frame) {
int depth = 0;
for (const ControlFlowInfo* cfi_iter = &cfi_infos[n->id()]; !n->IsSource();
n = cfi_iter->parent_frame, cfi_iter = &cfi_infos[n->id()]) {
frame->push_back(cfi_iter->frame_name);
if (depth++ > 5000) {
return errors::Internal(
"Frame of depth > 5000: Probably malformed graph or a bug in "
"BuildControlFlowInfo");
}
}
return absl::OkStatus();
}
Status GetRootFrame(const Node* n, absl::Span<const ControlFlowInfo> cfi_infos,
absl::string_view* frame) {
int depth = 0;
const ControlFlowInfo* cfi_iter = &cfi_infos[n->id()];
while (!cfi_iter->parent_frame->IsSource()) {
n = cfi_iter->parent_frame;
cfi_iter = &cfi_infos[n->id()];
if (depth++ > 5000) {
return errors::Internal(
"Frame of depth > 5000: Probably malformed graph or a bug in "
"BuildControlFlowInfo");
}
}
*frame = cfi_iter->frame_name;
return absl::OkStatus();
}
}
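// Computes the predicate for a Merge node.  On the first visit of a loop
// Merge (some backedge predicates still unknown) the output gets a fresh
// symbol; in optimistic mode that symbol is shared by all Merge nodes of the
// same frame through a per-frame representative node.  On a revisit, a Merge
// whose predicate is still that symbol is rewritten into an AndRecurrence
// ({start,&,step}<frame>) when its unique backedge predicate factors through
// DeduceStepPredicate.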
Status DeadnessAnalysisImpl::HandleMerge(Node* n,
std::vector<bool>* should_revisit,
bool use_optimistic_mode) {
bool has_unvisited_backedge = false;
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge() && e->src()->IsNextIteration()) {
has_unvisited_backedge |= !predicate_map_.count(InputEdgeToTensorId(e));
}
}
auto it = predicate_map_.find(TensorId(n->name(), 0));
if (it == predicate_map_.end()) {
if (has_unvisited_backedge) {
Predicate* input_data_pred;
if (use_optimistic_mode) {
absl::string_view frame_name = control_flow_info_[n->id()].frame_name;
auto insert_result = frame_to_merge_node_.insert({frame_name, n});
Node* representative = insert_result.first->second;
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
representative, 0, false,
&input_data_pred));
} else {
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
n, 0, false, &input_data_pred));
}
SetPredicate(n, {0, 1, Graph::kControlSlot}, input_data_pred,
should_revisit);
return absl::OkStatus();
}
std::vector<Predicate*> input_preds;
TF_RETURN_IF_ERROR(GetInputPreds(n, EdgeKind::kDataOnly, &input_preds));
Predicate* input_data_pred =
predicate_factory_.MakeOrPredicate(input_preds);
SetPredicate(n, {0, 1, Graph::kControlSlot}, input_data_pred,
should_revisit);
return absl::OkStatus();
}
if (it->second->kind() == Predicate::Kind::kSymbol) {
const Edge* unique_backedge;
TF_RETURN_IF_ERROR(FindUniqueBackedge(n, &unique_backedge));
if (unique_backedge) {
if (Predicate* step = DeduceStepPredicate(
&predicate_factory_, it->second,
predicate_map_[InputEdgeToTensorId(unique_backedge)])) {
std::vector<Predicate*> non_recurrent_inputs;
for (const Edge* e : n->in_edges()) {
if (e != unique_backedge) {
non_recurrent_inputs.push_back(
predicate_map_[InputEdgeToTensorId(e)]);
}
}
Predicate* start =
predicate_factory_.MakeOrPredicate(non_recurrent_inputs);
std::vector<string> frame;
TF_RETURN_IF_ERROR(GetFullFrame(n, control_flow_info_, &frame));
Predicate* and_rec = predicate_factory_.MakeAndRecurrencePredicate(
start, step, std::move(frame));
SetPredicate(n, {0, 1, Graph::kControlSlot}, and_rec, should_revisit);
return absl::OkStatus();
}
}
}
return absl::OkStatus();
}
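// A Recv output is live iff all of its inputs are live and a fresh symbol,
// standing for whether the corresponding Send executed, is true.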
Status DeadnessAnalysisImpl::HandleRecv(Node* n,
std::vector<bool>* should_revisit) {
std::vector<Predicate*> input_preds;
TF_RETURN_IF_ERROR(GetInputPreds(n, EdgeKind::kDataAndControl, &input_preds));
Predicate* signal_is_alive;
TF_RETURN_IF_ERROR(predicate_factory_.MakeSymbolPredicate(
n, 0, false, &signal_is_alive));
input_preds.push_back(signal_is_alive);
SetPredicate(n, {0, Graph::kControlSlot},
predicate_factory_.MakeAndPredicate(input_preds),
should_revisit);
return absl::OkStatus();
}
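// Default rule: every output of an ordinary node, including its control
// output, is live iff all of the node's data and control inputs are live.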
Status DeadnessAnalysisImpl::HandleGeneric(Node* n,
std::vector<bool>* should_revisit) {
std::vector<Predicate*> input_preds;
TF_RETURN_IF_ERROR(GetInputPreds(n, EdgeKind::kDataAndControl, &input_preds));
Predicate* pred = predicate_factory_.MakeAndPredicate(input_preds);
for (int output_idx = 0; output_idx < n->num_outputs(); output_idx++) {
SetPredicate(n, output_idx, pred, should_revisit);
}
SetPredicate(n, Graph::kControlSlot, pred, should_revisit);
return absl::OkStatus();
}
Status DeadnessAnalysisImpl::HandleNode(Node* n,
std::vector<bool>* should_revisit,
bool use_optimistic_mode) {
if (n->IsSwitch()) {
TF_RETURN_IF_ERROR(HandleSwitch(n, should_revisit));
} else if (n->IsMerge()) {
TF_RETURN_IF_ERROR(HandleMerge(n, should_revisit, use_optimistic_mode));
} else if (n->IsControlTrigger()) {
SetPredicate(n, Graph::kControlSlot, predicate_factory_.MakeTrue(),
nullptr);
} else if (n->IsRecv() || n->IsHostRecv()) {
TF_RETURN_IF_ERROR(HandleRecv(n, should_revisit));
} else if (n->IsNextIteration()) {
TF_RETURN_IF_ERROR(HandleGeneric(n, should_revisit));
} else {
TF_RETURN_IF_ERROR(HandleGeneric(n, should_revisit));
}
return absl::OkStatus();
}
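// Builds a topological order in which every frame (loop body) is visited
// contiguously: Enter nodes of a frame are released only once all of that
// frame's ready Enters are available, and its Exits are released together
// after the whole frame has been processed.  NextIteration->Merge backedges
// are ignored when counting ready inputs, which keeps the traversal acyclic.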
Status DeadnessAnalysisImpl::GetFrameBasedTopologicalOrder(
std::vector<Node*>* order) {
absl::flat_hash_map<absl::string_view, size_t> num_enters_for_frame;
absl::flat_hash_map<absl::string_view, size_t> num_exits_for_frame;
std::vector<size_t> num_ready_inputs(graph_.num_node_ids(), 0);
Node* src_node = graph_.source_node();
for (const auto* node : graph_.op_nodes()) {
const ControlFlowInfo& cf = control_flow_info_[node->id()];
if (IsRootEnter(node)) {
++num_enters_for_frame[cf.frame_name];
} else if (IsRootExit(node)) {
++num_exits_for_frame[cf.frame_name];
}
if (IsMerge(node)) {
for (const Edge* e : node->in_edges()) {
if (IsNextIteration(e->src())) {
++num_ready_inputs[node->id()];
}
}
}
}
std::deque<Node*> ready;
ready.push_back(src_node);
absl::flat_hash_map<absl::string_view, std::vector<Node*>>
ready_enters_per_frame;
std::vector<Node*> ready_exits;
while (!ready.empty()) {
Node* curr_node = ready.front();
ready.pop_front();
VLOG(4) << "Visiting " << curr_node->name();
order->push_back(curr_node);
for (const Edge* out_edge : curr_node->out_edges()) {
Node* out = out_edge->dst();
int out_id = out->id();
if (IsNextIteration(curr_node) && IsMerge(out)) {
continue;
}
++num_ready_inputs[out->id()];
if (!out->IsOp()) continue;
if (num_ready_inputs[out->id()] != out->in_edges().size()) continue;
absl::string_view frame_name = control_flow_info_[out_id].frame_name;
if (IsRootEnter(out)) {
ready_enters_per_frame[frame_name].push_back(out);
} else if (IsRootExit(out)) {
ready_exits.push_back(out);
} else {
ready.push_back(out);
}
}
if (ready.empty()) {
if (!ready_exits.empty()) {
absl::string_view frame_name =
control_flow_info_[ready_exits.front()->id()].frame_name;
CHECK_EQ(ready_exits.size(), num_exits_for_frame[frame_name]);
ready.insert(ready.end(), ready_exits.begin(), ready_exits.end());
ready_exits.clear();
} else {
for (auto iter = ready_enters_per_frame.begin();
iter != ready_enters_per_frame.end(); ++iter) {
absl::string_view frame_name = iter->first;
const std::vector<Node*>& ready_enters = iter->second;
if (ready_enters.size() == num_enters_for_frame[frame_name]) {
ready.insert(ready.end(), ready_enters.begin(), ready_enters.end());
ready_enters_per_frame.erase(iter);
break;
}
}
}
}
}
if (!ready_enters_per_frame.empty() || !ready_exits.empty()) {
return errors::InvalidArgument(
"Some enters/exits have never been visited in the traversal."
" Most probably the input graph is malformed.");
}
return absl::OkStatus();
}
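// Entry point of the analysis: builds control-flow info, computes the
// frame-based topological order, and populates predicates one frame at a
// time, trying the optimistic mode first and falling back to the pessimistic
// mode when it does not converge.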
Status DeadnessAnalysisImpl::Populate(bool enable_optimistic) {
std::vector<string> unreachable_nodes;
TF_RETURN_IF_ERROR(
BuildControlFlowInfo(&graph_, &control_flow_info_, &unreachable_nodes));
if (!unreachable_nodes.empty()) {
if (unreachable_nodes.size() > 5) {
unreachable_nodes.erase(unreachable_nodes.begin() + 5,
unreachable_nodes.end());
}
return errors::InvalidArgument(
"Found unreachable nodes, most likely source and sink nodes not "
"connected: ",
absl::StrJoin(unreachable_nodes, ", "));
}
std::vector<Node*> topo;
TF_RETURN_IF_ERROR(GetFrameBasedTopologicalOrder(&topo));
size_t frame_start = 0;
while (frame_start < topo.size()) {
absl::string_view cur_frame_name;
TF_RETURN_IF_ERROR(
GetRootFrame(topo[frame_start], control_flow_info_, &cur_frame_name));
size_t frame_end = frame_start;
for (size_t i = frame_start + 1; i < topo.size(); ++i) {
absl::string_view i_frame_name;
TF_RETURN_IF_ERROR(
GetRootFrame(topo[i], control_flow_info_, &i_frame_name));
if (i_frame_name == cur_frame_name) {
frame_end = i;
} else {
break;
}
}
absl::Span<Node*> sub_topo(topo.data() + frame_start,
frame_end - frame_start + 1);
frame_start = frame_end + 1;
bool success = false;
if (enable_optimistic && !cur_frame_name.empty()) {
TF_RETURN_IF_ERROR(
PopulateFrame(sub_topo, true, &success));
}
if (!success) {
TF_RETURN_IF_ERROR(
PopulateFrame(sub_topo, false, nullptr));
}
VLOG(2) << "Done populating frame " << cur_frame_name << " using the "
<< (success ? "optimistic" : "pessimistic") << " mode.";
}
return absl::OkStatus();
}
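// Populates predicates for a single frame.  Merge nodes fed by a
// NextIteration are revisited once so that predicates flowing around the
// backedge reach a fixpoint.  In optimistic mode the result is only accepted
// if every loop Merge converged to the same AndRecurrence within its frame;
// otherwise all predicates computed for this frame are erased and the caller
// retries pessimistically.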
Status DeadnessAnalysisImpl::PopulateFrame(absl::Span<Node* const> topo,
bool use_optimistic_mode,
bool* success) {
CHECK((use_optimistic_mode && success != nullptr) ||
      (!use_optimistic_mode && success == nullptr));
std::vector<bool> should_revisit;
should_revisit.resize(graph_.num_node_ids());
for (Node* n : topo) {
VLOG(4) << "Visiting " << n->name();
TF_RETURN_IF_ERROR(
HandleNode(n, nullptr, use_optimistic_mode));
if (n->IsNextIteration()) {
for (const Edge* e : n->out_edges()) {
if (e->dst()->IsMerge()) {
should_revisit[e->dst()->id()] = true;
}
}
}
}
for (Node* n : topo) {
if (should_revisit[n->id()]) {
VLOG(4) << "Revisiting " << n->name();
TF_RETURN_IF_ERROR(HandleNode(n, &should_revisit));
}
}
if (use_optimistic_mode) {
bool is_converged = true;
absl::flat_hash_map<absl::string_view, Predicate*> frame_to_pred;
for (Node* n : topo) {
if (!n->IsMerge()) {
continue;
}
const Edge* e;
TF_RETURN_IF_ERROR(FindUniqueBackedge(n, &e));
if (e == nullptr) {
continue;
}
Node* merge = n;
absl::string_view frame_name = control_flow_info_[merge->id()].frame_name;
auto it = predicate_map_.find(TensorId(merge->name(), 0));
Predicate* merge_pred = it->second;
if (merge_pred->kind() != Predicate::Kind::kAndRecurrence) {
is_converged = false;
VLOG(2) << "Running the optimistic mode on frame " << frame_name
<< " does not converge because node " << merge->name()
<< " cannot be mapped into the AndRecurrence form.";
break;
}
auto insert_result = frame_to_pred.insert({frame_name, merge_pred});
if (!insert_result.second) {
Predicate* curr_andrec = merge_pred;
Predicate* prev_andrec = insert_result.first->second;
if (curr_andrec != prev_andrec) {
is_converged = false;
VLOG(2) << "Running the optimistic mode on frame " << frame_name
<< " does not converge. Seeing different Merge predicates: \n"
<< curr_andrec->ToString() << " and \n"
<< prev_andrec->ToString();
break;
}
}
}
if (!is_converged) {
for (Node* n : topo) {
for (int oid = 0; oid < n->num_outputs(); ++oid) {
predicate_map_.erase(TensorId(n->name(), oid));
}
predicate_map_.erase(TensorId(n->name(), Graph::kControlSlot));
}
}
if (success != nullptr) {
*success = is_converged;
}
}
return absl::OkStatus();
}
absl::StatusOr<DeadnessAnalysis::DeadnessPredicate>
DeadnessAnalysisImpl::GetPredicateFor(Node* n, int oidx) const {
auto it = predicate_map_.find(TensorId(n->name(), oidx));
TF_RET_CHECK(it != predicate_map_.end())
<< "could not find " << TensorId(n->name(), oidx).ToString()
<< " in predicate map";
return MakeDeadnessPredicate(it->second);
}
void DeadnessAnalysisImpl::Print() const {
std::vector<TensorId> tensor_ids;
tensor_ids.reserve(predicate_map_.size());
for (const auto& kv_pair : predicate_map_) {
tensor_ids.push_back(kv_pair.first);
}
std::sort(tensor_ids.begin(), tensor_ids.end());
for (TensorId tensor_id : tensor_ids) {
auto it = predicate_map_.find(tensor_id);
CHECK(it != predicate_map_.end()) << tensor_id.ToString();
VLOG(2) << tensor_id.ToString() << " -> " << it->second->ToString();
}
}
}
DeadnessAnalysis::~DeadnessAnalysis() {}
Status DeadnessAnalysis::Run(
const Graph& graph, std::unique_ptr<DeadnessAnalysis>* result) {
std::unique_ptr<DeadnessAnalysisImpl> analysis(
new DeadnessAnalysisImpl(&graph));
TF_RETURN_IF_ERROR(analysis->Populate(true));
if (VLOG_IS_ON(2)) {
analysis->Print();
}
*result = std::move(analysis);
return absl::OkStatus();
}
absl::flat_hash_map<TensorId, string, TensorId::Hasher>
DeadnessAnalysisImpl::PredicateMapAsString() const {
absl::flat_hash_map<TensorId, string, TensorId::Hasher> result;
for (const auto& kv_pair : predicate_map_) {
CHECK(result.insert({kv_pair.first, kv_pair.second->ToString()}).second);
}
return result;
}
namespace deadness_analysis_internal {
Status ComputePredicates(const Graph& graph, PredicateMapTy* out_predicate_map,
bool enable_optimistic) {
DeadnessAnalysisImpl impl(&graph);
TF_RETURN_IF_ERROR(impl.Populate(enable_optimistic));
*out_predicate_map = impl.PredicateMapAsString();
return absl::OkStatus();
}
}
string DeadnessAnalysis::DebugString(DeadnessPredicate predicate) const {
return static_cast<Predicate*>(predicate.pred_)->ToString();
}
} | #include "tensorflow/compiler/jit/deadness_analysis.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/control_flow_ops_internal.h"
#include "tensorflow/cc/ops/function_ops.h"
#include "tensorflow/cc/ops/sendrecv_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/deadness_analysis_internal.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/tf2xla/xla_op_kernel.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph_def_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
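// Returns true if the inputs of `n` carry at least two distinct deadness
// predicates.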
absl::StatusOr<bool> HasInputsWithMismatchingDeadness(
const DeadnessAnalysis& deadness_analysis, const Node& n) {
std::optional<DeadnessAnalysis::DeadnessPredicate> pred;
for (const Edge* edge : n.in_edges()) {
TF_ASSIGN_OR_RETURN(
DeadnessAnalysis::DeadnessPredicate this_pred,
deadness_analysis.GetPredicateFor(edge->src(), edge->src_output()));
if (pred && *pred != this_pred) {
return true;
}
pred = this_pred;
}
return false;
}
using deadness_analysis_internal::ComputePredicates;
using deadness_analysis_internal::PredicateMapTy;
Status AnalyzeDeadness(Graph* graph,
std::unique_ptr<DeadnessAnalysis>* result) {
FixupSourceAndSinkEdges(graph);
return DeadnessAnalysis::Run(*graph, result);
}
ops::Switch CreateSwitch(const Scope& root, const string& prefix) {
Output value = ops::Placeholder(root.WithOpName(prefix + "/value"), DT_FLOAT);
Output predicate =
ops::Placeholder(root.WithOpName(prefix + "/pred"), DT_BOOL);
return ops::Switch(root.WithOpName(prefix + "/switch"), value, predicate);
}
TensorId ControlOutputFor(const Output& o) {
return {o.node()->name(), Graph::kControlSlot};
}
void VLogGraphIfAsked(const Graph& graph) {
if (VLOG_IS_ON(3)) {
GraphDef graph_def;
graph.ToGraphDef(&graph_def);
string serialized;
::tensorflow::protobuf::TextFormat::PrintToString(graph_def, &serialized);
LOG(INFO) << serialized;
}
}
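// A counted-loop induction variable built by CreateInductionVariable below:
// Enter -> Merge -> Less -> Switch, where the true branch is incremented and
// fed back through NextIteration into the Merge, and the false branch exits
// the frame.  Returns the Merge output and the loop condition.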
struct InductionVarInfo {
Output induction_var;
Output loop_cond;
};
InductionVarInfo CreateInductionVariable(const Scope& root,
const string& prefix,
const string& frame_name,
const Output& initial_value) {
Output enter_initial_value = ops::internal::Enter(
root.WithOpName(prefix + "/enter"), initial_value, frame_name);
ops::Merge iv(root.WithOpName(prefix + "/iv"),
{enter_initial_value, enter_initial_value});
Output increment_by = ops::Const(root.WithOpName(prefix + "/incr"), 1);
Output final_value = ops::Const(root.WithOpName(prefix + "/final"), 10);
Output loop_cond_expr =
ops::Less(root.WithOpName(prefix + "/cond"), iv.output, final_value);
ops::Switch latch(root.WithOpName(prefix + "/latch"), iv.output,
loop_cond_expr);
ops::internal::Exit exit(root.WithOpName(prefix + "/exit"),
latch.output_false);
Output iv_next = ops::Add(root.WithOpName(prefix + "/ivnext"),
latch.output_true, increment_by);
Output next_iteration =
ops::NextIteration(root.WithOpName(prefix + "/next_iteration"), iv_next);
CHECK(root.graph()
->UpdateEdge(next_iteration.node(), 0, iv.output.node(), 1)
.ok());
root.graph()->AddControlEdge(iv.output.node(), increment_by.node());
root.graph()->AddControlEdge(iv.output.node(), final_value.node());
return {iv.output, loop_cond_expr};
}
InductionVarInfo CreateInductionVariable(const Scope& root,
const string& prefix,
const string& frame_name,
int32_t init) {
return CreateInductionVariable(
root, prefix, frame_name,
ops::Const(root.WithOpName(prefix + "/init"), init));
}
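// A loop-invariant value threaded through an existing frame, built by
// CreateDependentLoopInvariantValue below: Enter -> Merge -> Switch on the
// caller-supplied loop condition, with the true branch fed back via
// NextIteration and the false branch exiting.  Returns the Merge output and
// the Switch latch.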
struct DependentInductionVar {
Output induction_var;
ops::Switch latch;
};
DependentInductionVar CreateDependentLoopInvariantValue(
const Scope& root, const string& prefix, const string& frame_name,
const Output& loop_cond, const Output& value) {
Output enter_value = ops::internal::Enter(root.WithOpName(prefix + "/enter"),
value, frame_name);
ops::Merge iv(root.WithOpName(prefix + "/iv"), {enter_value, enter_value});
ops::Switch latch(root.WithOpName(prefix + "/latch"), iv.output, loop_cond);
ops::internal::Exit exit(root.WithOpName(prefix + "/exit"),
latch.output_false);
Output next_iteration = ops::NextIteration(
root.WithOpName(prefix + "/next_iteration"), latch.output_true);
CHECK(root.graph()
->UpdateEdge(next_iteration.node(), 0, iv.output.node(), 1)
.ok());
return {iv.output, latch};
}
DependentInductionVar CreateDependentLoopInvariantValue(
const Scope& root, const string& prefix, const string& frame_name,
const Output& loop_cond, int32_t value) {
return CreateDependentLoopInvariantValue(
root, prefix, frame_name, loop_cond,
ops::Const(root.WithOpName(prefix + "/init"), value));
}
TEST(DeadnessAnalysisTest, BasicPositive) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw = CreateSwitch(root, "0");
Output add =
ops::Add(root.WithOpName("add"), sw.output_true, sw.output_false);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, BasicNegative) {
Scope root = Scope::NewRootScope().ExitOnError();
Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT);
Output b = ops::Placeholder(root.WithOpName("b"), DT_FLOAT);
Output add = ops::Add(root.WithOpName("add"), a, b);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, AndIsCommutative) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
Output a0 =
ops::Add(root.WithOpName("a0"), sw_0.output_false, sw_1.output_false);
Output a1 =
ops::Add(root.WithOpName("a1"), sw_1.output_false, sw_0.output_false);
Output b0 =
ops::Add(root.WithOpName("b0"), sw_0.output_false, sw_1.output_true);
Output b1 =
ops::Add(root.WithOpName("b1"), sw_1.output_true, sw_0.output_false);
Output live0 = ops::Add(root.WithOpName("live0"), a0, a1);
Output live1 = ops::Add(root.WithOpName("live1"), b0, b1);
Output halfdead0 = ops::Add(root.WithOpName("halfdead0"), a0, b0);
Output halfdead1 = ops::Add(root.WithOpName("halfdead1"), a1, b1);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
bool has_inputs_with_mismatching_deadness;
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *live0.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *live1.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *halfdead0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *halfdead1.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, AndIsAssociative) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
Output a0 =
ops::Add(root.WithOpName("a0"), sw_0.output_false, sw_1.output_false);
Output a1 = ops::Add(root.WithOpName("a1"), a0, sw_2.output_false);
Output b0 =
ops::Add(root.WithOpName("b0"), sw_1.output_false, sw_2.output_false);
Output b1 = ops::Add(root.WithOpName("b1"), sw_0.output_false, b0);
Output add = ops::Add(root.WithOpName("add"), a1, b1);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, OrIsCommutative) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
ops::Merge m1(root.WithOpName("m1"), {sw_1.output_false, sw_0.output_false});
ops::Merge m2(root.WithOpName("m2"), {sw_0.output_false, sw_1.output_true});
ops::Merge m3(root.WithOpName("m3"), {sw_1.output_true, sw_0.output_false});
Output live0 = ops::Add(root.WithOpName("live0"), m0.output, m1.output);
Output live1 = ops::Add(root.WithOpName("live1"), m2.output, m3.output);
Output halfdead0 =
ops::Add(root.WithOpName("halfdead0"), m0.output, m2.output);
Output halfdead1 =
ops::Add(root.WithOpName("halfdead1"), m1.output, m3.output);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
bool has_inputs_with_mismatching_deadness;
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *live0.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *live1.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *halfdead0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *halfdead1.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, OrIsAssociative) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
ops::Merge m1(root.WithOpName("m1"), {m0.output, sw_2.output_false});
ops::Merge m2(root.WithOpName("m2"), {sw_1.output_false, sw_2.output_false});
ops::Merge m3(root.WithOpName("m3"), {sw_0.output_false, m2.output});
Output add = ops::Add(root.WithOpName("add"), m1.output, m3.output);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, AndOfOr) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
ops::Switch sw_3 = CreateSwitch(root, "3");
ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
ops::Merge m1(root.WithOpName("m1"), {sw_2.output_false, sw_3.output_false});
Output add0 = ops::Add(root.WithOpName("add0"), m0.output, m1.output);
Output add1 = ops::Add(root.WithOpName("add1"), m0.output, m1.output);
Output add2 = ops::Add(root.WithOpName("add2"), add0, add1);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add2.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, OrOfAnd) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
ops::Switch sw_3 = CreateSwitch(root, "3");
Output add0 =
ops::Add(root.WithOpName("add0"), sw_0.output_false, sw_1.output_false);
Output add1 =
ops::Add(root.WithOpName("add1"), sw_2.output_false, sw_3.output_false);
ops::Merge m0(root.WithOpName("m0"), {add0, add1});
ops::Merge m1(root.WithOpName("m1"), {add0, add1});
Output add2 = ops::Add(root.WithOpName("add2"), m0.output, m1.output);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add2.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, AndOrDistributiveSimplified) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "A");
ops::Switch sw_1 = CreateSwitch(root, "B");
Output add0 =
ops::Add(root.WithOpName("and0"), sw_0.output_false, sw_1.output_true);
Output add1 =
ops::Add(root.WithOpName("and1"), sw_0.output_false, sw_1.output_false);
ops::Merge or2(root.WithOpName("or2"), {add0, add1});
Output add3 =
ops::Add(root.WithOpName("and3"), or2.output, sw_0.output_false);
ops::Merge or4(root.WithOpName("or4"), {add3, sw_0.output_true});
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(or4.output)], "#true");
}
TEST(DeadnessAnalysisTest, AndOrDistributive) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw_0 = CreateSwitch(root, "0");
ops::Switch sw_1 = CreateSwitch(root, "1");
ops::Switch sw_2 = CreateSwitch(root, "2");
ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false});
Output add0 = ops::Add(root.WithOpName("add0"), m0.output, sw_2.output_false);
Output add1 =
ops::Add(root.WithOpName("add1"), sw_0.output_false, sw_2.output_false);
Output add2 =
ops::Add(root.WithOpName("add2"), sw_1.output_false, sw_2.output_false);
ops::Merge m1(root.WithOpName("m1"), {add1, add2});
Output add3 = ops::Add(root.WithOpName("add3"), add0, m1.output);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add3.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, Ternary) {
Scope root = Scope::NewRootScope().ExitOnError();
Output predicate = ops::Placeholder(root.WithOpName("predicate"), DT_BOOL);
Output true_value = ops::Placeholder(root.WithOpName("true_value"), DT_FLOAT);
Output false_value =
ops::Placeholder(root.WithOpName("false_value"), DT_FLOAT);
ops::Switch predicated_true(root.WithOpName("predicated_true"), true_value,
predicate);
ops::Switch predicated_false(root.WithOpName("predicated_false"), true_value,
predicate);
ops::Merge merge(root.WithOpName("ternary"), {predicated_true.output_true,
predicated_false.output_false});
Output addend = ops::Placeholder(root.WithOpName("addend"), DT_FLOAT);
Output add = ops::Add(root.WithOpName("add"), merge.output, addend);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, Recv) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv_a = ops::_Recv(root.WithOpName("recv_a"), DT_FLOAT, "tensor_a",
"sender", 0, "receiver");
Output recv_b = ops::_Recv(root.WithOpName("recv_b"), DT_FLOAT, "tensor_b",
"sender", 0, "receiver");
Output add = ops::Add(root.WithOpName("add"), recv_a, recv_b);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, HostRecv) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv_a = ops::_HostRecv(root.WithOpName("recv_a"), DT_FLOAT,
"tensor_a", "sender", 0, "receiver");
Output recv_b = ops::_HostRecv(root.WithOpName("recv_b"), DT_FLOAT,
"tensor_b", "sender", 0, "receiver");
Output add = ops::Add(root.WithOpName("add"), recv_a, recv_b);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, Loop) {
Scope root = Scope::NewRootScope().ExitOnError();
Output iv0 = CreateInductionVariable(root, "iv0", "fr0", 0).induction_var;
Output iv1 = CreateInductionVariable(root, "iv1", "fr0", 0).induction_var;
Output iv2 = CreateInductionVariable(root, "iv2", "fr0", 1).induction_var;
Output add0 = ops::Add(root.WithOpName("add0"), iv0, iv1);
Output add1 = ops::Add(root.WithOpName("add1"), iv1, iv2);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
bool has_inputs_with_mismatching_deadness;
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
TF_ASSERT_OK_AND_ASSIGN(
has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add1.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(iv0)],
"{#true,&,*iv0/cond:0}<fr0>");
EXPECT_EQ(predicate_map[ControlOutputFor(iv1)],
"{#true,&,*iv1/cond:0}<fr0>");
EXPECT_EQ(predicate_map[ControlOutputFor(iv2)],
"{#true,&,*iv2/cond:0}<fr0>");
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
"({#true,&,*iv0/cond:0}<fr0> & {#true,&,*iv1/cond:0}<fr0>)");
EXPECT_EQ(predicate_map[ControlOutputFor(add1)],
"({#true,&,*iv1/cond:0}<fr0> & {#true,&,*iv2/cond:0}<fr0>)");
}
}
TEST(DeadnessAnalysisTest, ControlEquivalentLoopBodies) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv = CreateInductionVariable(root, "iv0", "loop", 0);
Output dependent_iv0 =
CreateDependentLoopInvariantValue(root, "div0", "loop", iv.loop_cond, 0)
.induction_var;
Output dependent_iv1 =
CreateDependentLoopInvariantValue(root, "div1", "loop", iv.loop_cond, 0)
.induction_var;
Output add0 = ops::Add(root.WithOpName("add0"), dependent_iv0, dependent_iv1);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
true));
EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv0)],
predicate_map[ControlOutputFor(iv.induction_var)]);
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv1)],
predicate_map[ControlOutputFor(iv.induction_var)]);
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
predicate_map[ControlOutputFor(iv.induction_var)]);
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
false));
EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv0)],
"{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv1)],
"{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
"{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>");
}
}
TEST(DeadnessAnalysisTest, LoopInvariantPredicateOnBackedge) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv = CreateInductionVariable(root, "iv0", "frame", 0);
DependentInductionVar dependent_iv =
CreateDependentLoopInvariantValue(root, "div0", "frame", iv.loop_cond, 0);
FixupSourceAndSinkEdges(root.graph());
TF_ASSERT_OK(root.graph()->UpdateEdge(
iv.induction_var.node(), 0, dependent_iv.latch.output_true.node(), 0));
VLogGraphIfAsked(*root.graph());
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
true));
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv.induction_var)],
"{#true,&,*iv0/cond:0}<frame>");
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
false));
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv.induction_var)],
"div0/iv:0");
}
}
TEST(DeadnessAnalysisTest, ControlEquivalentNestedLoopBodies) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv_outer =
CreateInductionVariable(root, "iv_outer", "outer_loop", 0);
Output enter_constant_outer_loop = ops::internal::Enter(
root.WithOpName("constant_enter_outer_loop"),
ops::Const(root.WithOpName("constant"), 5), "outer_loop",
ops::internal::Enter::Attrs().IsConstant(true));
ops::Switch inner_value(root.WithOpName("outer_is_live"),
enter_constant_outer_loop, iv_outer.loop_cond);
InductionVarInfo iv_inner = CreateInductionVariable(
root, "iv_inner", "inner_loop", inner_value.output_true);
Output dependent_outer_iv0 =
CreateDependentLoopInvariantValue(root, "dependent_outer_iv0",
"outer_loop", iv_outer.loop_cond, 0)
.induction_var;
Output dependent_outer_iv1 =
CreateDependentLoopInvariantValue(root, "dependent_outer_iv1",
"outer_loop", iv_outer.loop_cond, 0)
.induction_var;
Output dependent_inner_iv0 = CreateDependentLoopInvariantValue(
root, "dependent_inner_iv0", "inner_loop",
iv_inner.loop_cond, dependent_outer_iv0)
.induction_var;
Output dependent_inner_iv1 = CreateDependentLoopInvariantValue(
root, "dependent_inner_iv1", "inner_loop",
iv_inner.loop_cond, dependent_outer_iv1)
.induction_var;
Output add0 = ops::Add(root.WithOpName("add0"), dependent_inner_iv0,
dependent_inner_iv1);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
true));
EXPECT_EQ(predicate_map[ControlOutputFor(iv_outer.induction_var)],
"{#true,&,*iv_outer/cond:0}<outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(iv_inner.induction_var)],
"{(*iv_outer/cond:0 & "
"{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/"
"cond:0}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv0)],
"{{#true,&,(iv_outer/iv:0 & "
"*iv_outer/cond:0)}<outer_loop>,&,(*iv_inner/cond:0 & "
"iv_inner/iv:0)}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv1)],
predicate_map[ControlOutputFor(dependent_inner_iv0)]);
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
predicate_map[ControlOutputFor(dependent_inner_iv0)]);
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
false));
EXPECT_EQ(predicate_map[ControlOutputFor(iv_outer.induction_var)],
"{#true,&,*iv_outer/cond:0}<outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(iv_inner.induction_var)],
"{(*iv_outer/cond:0 & "
"{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/"
"cond:0}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv0)],
"{{#true,&,(iv_outer/iv:0 & "
"*iv_outer/cond:0)}<outer_loop>,&,(iv_inner/iv:0 & "
"*iv_inner/cond:0)}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv1)],
predicate_map[ControlOutputFor(dependent_inner_iv0)]);
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
predicate_map[ControlOutputFor(dependent_inner_iv0)]);
}
}
TEST(DeadnessAnalysisTest, ControlNonEquivalentNestedLoopBodies) {
Scope root = Scope::NewRootScope().ExitOnError();
std::array<Output, 2> outer_iv;
std::array<Output, 2> inner_iv;
for (int i : {0, 1}) {
InductionVarInfo iv_outer =
CreateInductionVariable(root, "iv_outer", "outer_loop", 0);
Output enter_constant_outer_loop = ops::internal::Enter(
root.WithOpName("constant_enter_outer_loop"),
ops::Const(root.WithOpName("constant"), 5), "outer_loop",
ops::internal::Enter::Attrs().IsConstant(true));
ops::Switch inner_value(root.WithOpName("outer_is_live"),
enter_constant_outer_loop, iv_outer.loop_cond);
InductionVarInfo iv_inner = CreateInductionVariable(
root, "iv_inner", "inner_loop", inner_value.output_true);
outer_iv[i] = iv_outer.induction_var;
inner_iv[i] = iv_inner.induction_var;
}
Output add0 = ops::Add(root.WithOpName("add0"), inner_iv[0], inner_iv[1]);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(outer_iv[0])],
"{#true,&,*iv_outer/cond:0}<outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(inner_iv[0])],
"{(*iv_outer/cond:0 & "
"{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/"
"cond:0}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(outer_iv[1])],
"{#true,&,*iv_outer/cond_1:0}<outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(inner_iv[1])],
"{(*iv_outer/cond_1:0 & "
"{#true,&,*iv_outer/cond_1:0}<outer_loop>),&,*iv_inner/"
"cond_1:0}<inner_loop;outer_loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(add0)],
"({(*iv_outer/cond:0 & "
"{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/"
"cond:0}<inner_loop;outer_loop> & {(*iv_outer/cond_1:0 & "
"{#true,&,*iv_outer/cond_1:0}<outer_loop>),&,*iv_inner/"
"cond_1:0}<inner_loop;outer_loop>)");
}
}
TEST(DeadnessAnalysisTest, NestedLoopBodiesWithACapture) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv_outer =
CreateInductionVariable(root, "iv_outer", "outer_loop", 0);
Output enter_constant_outer_loop = ops::internal::Enter(
root.WithOpName("constant_enter_outer_loop"),
ops::Const(root.WithOpName("constant"), 5), "outer_loop",
ops::internal::Enter::Attrs().IsConstant(true));
ops::Switch inner_value(root.WithOpName("outer_is_live"),
enter_constant_outer_loop, iv_outer.loop_cond);
InductionVarInfo iv_inner = CreateInductionVariable(
root, "iv_inner", "inner_loop", inner_value.output_true);
DependentInductionVar div0_outer = CreateDependentLoopInvariantValue(
root, "div0_outer", "outer_loop", iv_outer.loop_cond, 0);
DependentInductionVar div1_outer = CreateDependentLoopInvariantValue(
root, "div1_outer", "outer_loop", iv_outer.loop_cond, 0);
DependentInductionVar div0_inner = CreateDependentLoopInvariantValue(
root, "div0_inner", "inner_loop", iv_inner.loop_cond,
div0_outer.induction_var);
DependentInductionVar div1_inner = CreateDependentLoopInvariantValue(
root, "div1_inner", "inner_loop", iv_inner.loop_cond,
div1_outer.induction_var);
Output captured = ops::_Recv(root.WithOpName("captured"), DT_INT32,
"tensor_a", "sender", 0, "receiver");
Output capture_enter_outer = ops::internal::Enter(
root.WithOpName("capture_enter_outer"), captured, "outer_loop",
ops::internal::Enter::Attrs().IsConstant(true));
Output capture_enter_inner = ops::internal::Enter(
root.WithOpName("capture_enter_inner"), capture_enter_outer, "inner_loop",
ops::internal::Enter::Attrs().IsConstant(true));
Output mul0 = ops::Mul(root.WithOpName("mul0"), div1_inner.induction_var,
capture_enter_inner);
TF_ASSERT_OK(root.graph()->UpdateEdge(
mul0.node(), 0, div1_inner.latch.output_true.node(), 0));
Output add0 = ops::Add(root.WithOpName("add0"), div0_inner.induction_var,
div1_inner.induction_var);
VLogGraphIfAsked(*root.graph());
{
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add0.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
}
TEST(DeadnessAnalysisTest, CyclicRecurrence) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv = CreateInductionVariable(root, "iv0", "loop", 0);
DependentInductionVar div0 =
CreateDependentLoopInvariantValue(root, "div0", "loop", iv.loop_cond, 0);
DependentInductionVar div1 =
CreateDependentLoopInvariantValue(root, "div1", "loop", iv.loop_cond, 0);
FixupSourceAndSinkEdges(root.graph());
TF_ASSERT_OK(root.graph()->UpdateEdge(div1.induction_var.node(), 0,
div0.latch.output_true.node(), 0));
TF_ASSERT_OK(root.graph()->UpdateEdge(div0.induction_var.node(), 0,
div1.latch.output_true.node(), 0));
VLogGraphIfAsked(*root.graph());
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
true));
EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(div0.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(div1.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
TensorId switch_false_out = {div1.latch.output_false.node()->name(),
div1.latch.output_false.index()};
EXPECT_EQ(predicate_map[switch_false_out], "(#true)");
}
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map,
false));
EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)],
"{#true,&,*iv0/cond:0}<loop>");
EXPECT_EQ(predicate_map[ControlOutputFor(div0.induction_var)], "div0/iv:0");
EXPECT_EQ(predicate_map[ControlOutputFor(div1.induction_var)], "div1/iv:0");
}
}
TEST(DeadnessAnalysisTest, AndRecurrenceNeedsFrameName) {
Scope root = Scope::NewRootScope().ExitOnError();
InductionVarInfo iv_0 = CreateInductionVariable(root, "iv_0", "frame_0", 10);
InductionVarInfo iv_1 = CreateInductionVariable(root, "iv_1", "frame_1", 9);
Output init = CreateSwitch(root, "init").output_true;
Output step = CreateSwitch(root, "step").output_true;
std::array<Output, 2> exits;
std::array<Output, 2> next_iterations;
for (int i : {0, 1}) {
Output init_enter = ops::internal::Enter(
root.WithOpName(absl::StrCat("init_enter_frame_", i)), init,
absl::StrCat("frame_", i),
ops::internal::Enter::Attrs().IsConstant(true));
Output step_enter = ops::internal::Enter(
root.WithOpName(absl::StrCat("step_enter_frame_", i)), step,
absl::StrCat("frame_", i),
ops::internal::Enter::Attrs().IsConstant(true));
ops::Merge iv(root.WithOpName(absl::StrCat("expr_", i)),
{init_enter, init_enter});
Output add = ops::Add(root.WithOpName(absl::StrCat("add_", i)), iv.output,
step_enter);
next_iterations[i] = ops::NextIteration(
root.WithOpName(absl::StrCat("expr_", i, "_next_iteration")), add);
EXPECT_TRUE(
root.graph()
->UpdateEdge(next_iterations[i].node(), 0, iv.output.node(), 1)
.ok());
exits[i] = ops::internal::Exit(root.WithOpName(absl::StrCat("exit_", i)),
iv.output);
}
FixupSourceAndSinkEdges(root.graph());
{
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_NE(predicate_map[ControlOutputFor(exits[0])],
predicate_map[ControlOutputFor(exits[1])]);
EXPECT_NE(predicate_map[ControlOutputFor(exits[0])], "");
EXPECT_NE(predicate_map[ControlOutputFor(exits[1])], "");
EXPECT_NE(predicate_map[ControlOutputFor(next_iterations[0])],
predicate_map[ControlOutputFor(next_iterations[1])]);
EXPECT_NE(predicate_map[ControlOutputFor(next_iterations[0])], "");
EXPECT_NE(predicate_map[ControlOutputFor(next_iterations[1])], "");
}
}
TEST(DeadnessAnalysisTest, ControlInputs) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw = CreateSwitch(root, "0");
Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
Output const0 = ops::Const(root.WithOpName("const0"), 1);
Output const1 = ops::Const(root.WithOpName("const1"), 2);
Output add = ops::Add(root.WithOpName("add"), const0, const1);
root.graph()->AddControlEdge(id0.node(), const0.node());
root.graph()->AddControlEdge(id1.node(), const1.node());
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, ControlTrigger) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw = CreateSwitch(root, "0");
Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
ops::ControlTrigger ctrl_trigger0(root.WithOpName("ctrl_trigger0"));
ops::ControlTrigger ctrl_trigger1(root.WithOpName("ctrl_trigger1"));
Output const0 = ops::Const(root.WithOpName("const0"), 1);
Output const1 = ops::Const(root.WithOpName("const1"), 2);
Output add = ops::Add(root.WithOpName("add"), const0, const1);
root.graph()->AddControlEdge(id0.node(), ctrl_trigger0.operation.node());
root.graph()->AddControlEdge(ctrl_trigger0.operation.node(), const0.node());
root.graph()->AddControlEdge(id1.node(), ctrl_trigger1.operation.node());
root.graph()->AddControlEdge(ctrl_trigger1.operation.node(), const1.node());
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, ControlInputsToMerge) {
Scope root = Scope::NewRootScope().ExitOnError();
ops::Switch sw = CreateSwitch(root, "0");
Output id0 = ops::Identity(root.WithOpName("id0"), sw.output_false);
Output id1 = ops::Identity(root.WithOpName("id1"), sw.output_true);
Output constant = ops::Const(root.WithOpName("constant"), 5);
ops::Merge m0(root.WithOpName("m0"), {constant});
ops::Merge m1(root.WithOpName("m1"), {constant});
Output add = ops::Add(root.WithOpName("add"), m0.output, m1.output);
root.graph()->AddControlEdge(id0.node(), m0.output.node());
root.graph()->AddControlEdge(id1.node(), m1.output.node());
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *add.node()));
EXPECT_FALSE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, RecvVsSwitch) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv = ops::_Recv(root.WithOpName("recv"), DT_BOOL, "tensor", "sender",
0, "receiver");
Output value = ops::Placeholder(root.WithOpName("value"), DT_BOOL);
ops::Switch sw(root.WithOpName("switch"), value, recv);
Output logical_and =
ops::LogicalAnd(root.WithOpName("and"), recv, sw.output_true);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
TF_ASSERT_OK_AND_ASSIGN(
bool has_inputs_with_mismatching_deadness,
HasInputsWithMismatchingDeadness(*result, *logical_and.node()));
EXPECT_TRUE(has_inputs_with_mismatching_deadness);
}
TEST(DeadnessAnalysisTest, RecvVsSwitchText) {
Scope root = Scope::NewRootScope().ExitOnError();
Output recv = ops::_Recv(root.WithOpName("recv"), DT_BOOL, "tensor", "sender",
0, "receiver");
Output value = ops::Placeholder(root.WithOpName("value"), DT_BOOL);
ops::Switch sw(root.WithOpName("switch"), value, recv);
Output logical_and =
ops::LogicalAnd(root.WithOpName("and"), recv, sw.output_true);
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
TensorId logical_and_output_0 = {logical_and.node()->name(),
Graph::kControlSlot};
EXPECT_EQ(predicate_map[logical_and_output_0], "(recv:0 & *recv:0)");
}
TEST(DeadnessAnalysisTest, DeMorgan) {
Scope root = Scope::NewRootScope().ExitOnError();
Output cond_0 = ops::Placeholder(root.WithOpName("cond_0"), DT_BOOL);
Output cond_1 = ops::Placeholder(root.WithOpName("cond_1"), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch sw_0(root.WithOpName("switch_0"), value, cond_0);
ops::Switch sw_1(root.WithOpName("switch_1"), value, cond_1);
Output and_0_1 =
ops::Add(root.WithOpName("and_0_1"), sw_0.output_true, sw_1.output_true);
Output or_not0_not1 = ops::Merge(root.WithOpName("or_not0_not1"),
{sw_0.output_false, sw_1.output_false})
.output;
Output should_always_be_dead =
ops::Add(root.WithOpName("should_always_be_dead"), and_0_1, or_not0_not1);
Output should_always_be_alive =
ops::Merge(root.WithOpName("should_always_be_alive"),
{and_0_1, or_not0_not1})
.output;
std::unique_ptr<DeadnessAnalysis> result;
TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result));
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(should_always_be_dead)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(should_always_be_alive)], "#true");
}
TEST(DeadnessAnalysisTest, ConstantTrueSwitchCondition) {
Scope root = Scope::NewRootScope().ExitOnError();
Output constant_true = ops::Const(root.WithOpName("const_true"), true);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch sw(root.WithOpName("switch"), value, constant_true);
Output id_false = ops::Identity(root.WithOpName("id_false"), sw.output_false);
Output id_true = ops::Identity(root.WithOpName("id_true"), sw.output_true);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_false)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(id_true)], "#true");
}
TEST(DeadnessAnalysisTest, ConstantFalseSwitchCondition) {
Scope root = Scope::NewRootScope().ExitOnError();
Output constant_false = ops::Const(root.WithOpName("const_false"), false);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch sw(root.WithOpName("switch"), value, constant_false);
Output id_false = ops::Identity(root.WithOpName("id_false"), sw.output_false);
Output id_true = ops::Identity(root.WithOpName("id_true"), sw.output_true);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_false)], "#true");
EXPECT_EQ(predicate_map[ControlOutputFor(id_true)], "#false");
}
TEST(DeadnessAnalysisTest, RefBoolSwitchCondition) {
Scope root = Scope::NewRootScope().ExitOnError();
Output condition_ref_var =
ops::Variable(root.WithOpName("cond_ref"), TensorShape({}), DT_BOOL);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
ops::Switch sw(root.WithOpName("switch"), value, condition_ref_var);
Output id_false = ops::Identity(root.WithOpName("id_false"), sw.output_false);
Output id_true = ops::Identity(root.WithOpName("id_true"), sw.output_true);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_false)], "~*cond_ref:0");
EXPECT_EQ(predicate_map[ControlOutputFor(id_true)], "*cond_ref:0");
}
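// Builds a _SwitchN node directly with NodeBuilder, mirroring what a
// generated op wrapper would do.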
void CreateSwitchN(const Scope& scope, Input data, Input output_index,
int64_t num_outs, OutputList* outputs) {
if (!scope.ok()) return;
auto _data = ops::AsNodeOut(scope, data);
if (!scope.ok()) return;
auto _output_index = ops::AsNodeOut(scope, output_index);
if (!scope.ok()) return;
Node* ret;
const auto unique_name = scope.GetUniqueNameForOp("_SwitchN");
auto builder = NodeBuilder(unique_name, "_SwitchN")
.Input(_data)
.Input(_output_index)
.Attr("num_outs", num_outs);
scope.UpdateBuilder(&builder);
scope.UpdateStatus(builder.Finalize(scope.graph(), &ret));
if (!scope.ok()) return;
scope.UpdateStatus(scope.DoShapeInference(ret));
for (int32_t i = 0; i < ret->num_outputs(); ++i) {
outputs->push_back(Output(ret, i));
}
}
TEST(DeadnessAnalysisTest, Constant1_SwitchN_2Branches_DoesNotFail) {
Scope root = Scope::NewRootScope().ExitOnError();
Output constant_1 = ops::Const(root.WithOpName("const_1"), 1);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
OutputList outputs;
CreateSwitchN(root.WithOpName("switchn"), value, constant_1, 2, &outputs);
Output id_0 = ops::Identity(root.WithOpName("id_0"), outputs[0]);
Output id_1 = ops::Identity(root.WithOpName("id_1"), outputs[1]);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_0)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(id_1)], "#true");
}
TEST(DeadnessAnalysisTest, Constant7_SwitchN_3Branches) {
Scope root = Scope::NewRootScope().ExitOnError();
Output constant_7 = ops::Const(root.WithOpName("const_7"), 7);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
OutputList outputs;
CreateSwitchN(root.WithOpName("switchn"), value, constant_7, 3, &outputs);
Output id_0 = ops::Identity(root.WithOpName("id_0"), outputs[0]);
Output id_1 = ops::Identity(root.WithOpName("id_1"), outputs[1]);
Output id_2 = ops::Identity(root.WithOpName("id_2"), outputs[2]);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_0)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(id_1)], "#false");
EXPECT_EQ(predicate_map[ControlOutputFor(id_2)], "#true");
}
TEST(DeadnessAnalysisTest, RefInt_SwitchN_3Branches) {
Scope root = Scope::NewRootScope().ExitOnError();
Output condition_ref_var =
ops::Variable(root.WithOpName("bidx"), TensorShape({}), DT_INT32);
Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT);
OutputList outputs;
CreateSwitchN(root.WithOpName("switchn"), value, condition_ref_var, 3,
&outputs);
Output id_0 = ops::Identity(root.WithOpName("id_0"), outputs[0]);
Output id_1 = ops::Identity(root.WithOpName("id_1"), outputs[1]);
Output id_2 = ops::Identity(root.WithOpName("id_2"), outputs[2]);
FixupSourceAndSinkEdges(root.graph());
PredicateMapTy predicate_map;
TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
EXPECT_EQ(predicate_map[ControlOutputFor(id_0)], "bidx:0=0");
EXPECT_EQ(predicate_map[ControlOutputFor(id_1)], "(~bidx:0=0 & bidx:0=1)");
EXPECT_EQ(predicate_map[ControlOutputFor(id_2)], "(~bidx:0=0 & ~bidx:0=1)");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/deadness_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/deadness_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f88f8392-47f6-4a36-91b7-14325d137a41 | cpp | tensorflow/tensorflow | repeat_dataset_op | tensorflow/core/kernels/data/repeat_dataset_op.cc | tensorflow/core/kernels/data/repeat_dataset_op_test.cc | #include "tensorflow/core/kernels/data/repeat_dataset_op.h"
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/data/global_shuffle_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const RepeatDatasetOp::kDatasetType;
constexpr const char* const RepeatDatasetOp::kInputDataset;
constexpr const char* const RepeatDatasetOp::kCount;
constexpr const char* const RepeatDatasetOp::kOutputTypes;
constexpr const char* const RepeatDatasetOp::kOutputShapes;
namespace {
constexpr char kForeverRepeat[] = "ForeverRepeat";
constexpr char kEmptyRepeat[] = "EmptyRepeat";
constexpr char kFiniteRepeat[] = "FiniteRepeat";
constexpr char kCurIteration[] = "i";
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kUninitialized[] = "uninitialized";
constexpr int64_t kKnownRatio = 1;
std::string nested_prefix(const std::string& prefix, int64_t epoch) {
return strings::StrCat(prefix, "[", epoch, "]");
}
bool HasDataServiceInput(const DatasetBase* dataset) {
DCHECK(dataset != nullptr);
if (absl::StartsWith(dataset->type_string(), "DataServiceDataset")) {
return true;
}
std::vector<const DatasetBase*> inputs;
Status s = dataset->InputDatasets(&inputs);
if (!s.ok()) {
return false;
}
for (const DatasetBase* input : inputs) {
if (HasDataServiceInput(input)) {
return true;
}
}
return false;
}
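// Wraps the input dataset's split provider so the reported cardinality
// reflects the repeat count; GetNext/Reset/Save/Restore simply forward to the
// wrapped provider.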
class RepeatedSplitProvider : public SplitProvider {
public:
explicit RepeatedSplitProvider(std::unique_ptr<SplitProvider> split_provider,
int64_t count)
: split_provider_(std::move(split_provider)), count_(count) {}
int64_t Cardinality() const override {
if (split_provider_->Cardinality() == 0 || count_ == 0) {
return 0;
}
if (count_ < 0) {
return kInfiniteCardinality;
}
if (split_provider_->Cardinality() < 0) {
return split_provider_->Cardinality();
}
return split_provider_->Cardinality() * count_;
}
absl::Status GetNext(Tensor* split, bool* end_of_splits) override {
return split_provider_->GetNext(split, end_of_splits);
}
absl::Status Reset() override { return split_provider_->Reset(); }
absl::Status Save(std::function<std::string(std::string)> full_name,
IteratorStateWriter* writer) override {
return split_provider_->Save(full_name, writer);
}
absl::Status Restore(std::function<std::string(std::string)> full_name,
IteratorStateReader* reader) override {
return split_provider_->Restore(full_name, reader);
}
void Cancel() override { split_provider_->Cancel(); }
private:
const std::unique_ptr<SplitProvider> split_provider_;
const int64_t count_;
};
}
class RepeatDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, int64_t count, const DatasetBase* input)
: DatasetBase(DatasetContext(ctx)), count_(count), input_(input) {
input_->Ref();
if (input_ != nullptr && !input_->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = input_->RandomIndexingCompatible();
} else if (count <= 0) {
random_indexing_compatible_ = absl::FailedPreconditionError(
absl::StrCat("`repeat(", count,
")` does not support random access of tf.data "
"datasets."));
} else {
random_indexing_compatible_ = absl::OkStatus();
}
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
if (count_ < 0) {
return std::make_unique<ForeverIterator>(ForeverIterator::Params{
this, name_utils::IteratorPrefix(kForeverRepeat, prefix)});
} else if (count_ == 0) {
return std::make_unique<EmptyIterator>(EmptyIterator::Params{
this, name_utils::IteratorPrefix(kEmptyRepeat, prefix)});
} else {
return std::make_unique<FiniteIterator>(FiniteIterator::Params{
this, name_utils::IteratorPrefix(kFiniteRepeat, prefix)});
}
}
absl::Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
std::vector<std::unique_ptr<SplitProvider>> input_split_providers;
TF_RETURN_IF_ERROR(input_->MakeSplitProviders(&input_split_providers));
split_providers->clear();
for (auto& split_provider : input_split_providers) {
split_providers->push_back(std::make_unique<RepeatedSplitProvider>(
std::move(split_provider), count_));
}
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(RepeatDatasetOp::kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t n = input_->Cardinality(options);
if (count_ < 0) {
if (n == 0) {
return 0;
}
return kInfiniteCardinality;
}
if (count_ == 0) {
return 0;
}
if (n == kInfiniteCardinality || n == kUnknownCardinality) {
return n;
}
return count_ * n;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
return input_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
return input_->Get(ctx, index % input_->Cardinality(), out_tensors);
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
Node* count = nullptr;
TF_RETURN_IF_ERROR(b->AddScalar(count_, &count));
TF_RETURN_IF_ERROR(b->AddDataset(this, {input_graph_node, count}, output));
return absl::OkStatus();
}
private:
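  // Iterator used when count == 0: it immediately reports end of sequence.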
class EmptyIterator : public DatasetIterator<Dataset> {
public:
explicit EmptyIterator(const Params& params)
: DatasetIterator<Dataset>(params) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
*end_of_sequence = true;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
return absl::OkStatus();
}
};
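  // Iterator used for a finite positive count: each time the input iterator
  // is exhausted it is re-created (and any split providers are reset) until
  // `count` epochs have been produced.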
class FiniteIterator : public DatasetIterator<Dataset> {
public:
explicit FiniteIterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
while (i_ < dataset()->count_) {
IteratorContextWithIndexMapper ctx_with_index_mapper(ctx, this);
TF_RETURN_IF_ERROR(input_impl_->GetNext(ctx_with_index_mapper.Get(),
out_tensors, end_of_sequence));
ctx_with_index_mapper.MergeCheckpoint();
if (!*end_of_sequence) {
return absl::OkStatus();
}
ctx->PurgeCheckpoint(nested_prefix(prefix(), i_));
++i_;
input_impl_.reset();
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
}
*end_of_sequence = true;
input_impl_.reset();
return absl::OkStatus();
}
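    // Maps a position within the current epoch to a global position across
    // repeated epochs, applies the parent mapper (e.g. a global shuffle), and
    // folds the result back into the input dataset's index range.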
IndexMapperFn GetIndexMapper(IndexMapperFn parent_index_mapper)
const override TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
int64_t input_cardinality = dataset()->input_->Cardinality();
int64_t repeat_count = i_;
return [parent_index_mapper, input_cardinality,
repeat_count](size_t element_position) -> absl::StatusOr<size_t> {
if (element_position >= input_cardinality) {
return absl::OutOfRangeError("Finite repeat is out of range");
}
size_t repeated_element_position =
repeat_count * input_cardinality + element_position;
TF_ASSIGN_OR_RETURN(size_t shuffled_element_position,
parent_index_mapper(repeated_element_position));
return shuffled_element_position % input_cardinality;
};
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIteration, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIteration, &i_));
if (ctx->restored_element_count().has_value()) {
CardinalityOptions options;
options.set_compute_level(
CardinalityOptions::CARDINALITY_COMPUTE_MODERATE);
const int64_t input_cardinality =
dataset()->input_->Cardinality(std::move(options));
IteratorContext::Params params(ctx);
params.restored_element_count =
*ctx->restored_element_count() % (input_cardinality);
params.index_mapper = GetIndexMapper(ctx->index_mapper());
IteratorContext ctx_with_restored_element_count(params);
if (!input_empty) {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(&ctx_with_restored_element_count,
reader, input_impl_));
ctx->MergeCheckpoint(ctx_with_restored_element_count.checkpoint());
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
if (static_cast<bool>(!input_empty)) {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
} else {
input_impl_.reset();
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
};
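  // Iterator used when count < 0 (repeat indefinitely). If the first GetNext
  // of an epoch immediately hits end of sequence, with no split providers and
  // no tf.data service input, iteration stops instead of spinning forever
  // over an empty input.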
class ForeverIterator : public DatasetIterator<Dataset> {
public:
explicit ForeverIterator(const Params& params)
: DatasetIterator<Dataset>(params),
has_data_service_input_(HasDataServiceInput(dataset())),
input_impl_(nullptr),
i_(0),
first_call_(true) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
return dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_);
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
do {
if (!input_impl_) {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
DCHECK(!*end_of_sequence || out_tensors->empty());
if (first_call_ && *end_of_sequence && ctx->split_providers().empty()) {
if (!has_data_service_input_) {
input_impl_.reset();
return absl::OkStatus();
}
}
first_call_ = false;
if (!*end_of_sequence) {
return absl::OkStatus();
}
ctx->PurgeCheckpoint(nested_prefix(prefix(), i_));
++i_;
for (const auto& provider : ctx->split_providers()) {
TF_RETURN_IF_ERROR(provider->Reset());
}
input_impl_.reset();
first_call_ = true;
} while (true);
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
kKnownRatio);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kCurIteration, i_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kCurIteration, &i_));
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (static_cast<bool>(input_empty)) {
input_impl_.reset();
first_call_ = true;
} else {
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
ctx, this, nested_prefix(prefix(), i_), &input_impl_));
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
first_call_ = false;
}
return absl::OkStatus();
}
private:
const bool has_data_service_input_;
mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t i_ TF_GUARDED_BY(mu_);
bool first_call_ TF_GUARDED_BY(mu_);
};
const int64_t count_;
const DatasetBase* const input_;
absl::Status random_indexing_compatible_;
};
RepeatDatasetOp::RepeatDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {}
void RepeatDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
int64_t count;
OP_REQUIRES_OK(ctx, ParseScalarArgument<int64_t>(ctx, kCount, &count));
*output = new Dataset(ctx, count, input);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("RepeatDataset").Device(DEVICE_CPU),
RepeatDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/repeat_dataset_op.h"
#include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "repeat_dataset";
class RepeatDatasetParams : public DatasetParams {
public:
template <typename T>
RepeatDatasetParams(T input_dataset_params, int64_t count,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
count_(count) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return {CreateTensor<int64_t>(TensorShape({}), {count_})};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back(RepeatDatasetOp::kInputDataset);
input_names->emplace_back(RepeatDatasetOp::kCount);
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("output_types", output_dtypes_);
attr_vector->emplace_back("output_shapes", output_shapes_);
attr_vector->emplace_back("metadata", "");
return absl::OkStatus();
}
string dataset_type() const override { return RepeatDatasetOp::kDatasetType; }
private:
int64_t count_;
};
class RepeatDatasetOpTest : public DatasetOpsTestBase {};
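// Repeats a two-component tensor-slice dataset with count = 2.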
RepeatDatasetParams FiniteRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {1, 2, 3, 4}),
CreateTensor<tstring>(TensorShape{2, 1}, {"a", "b"})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
2,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
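// Repeats with count = 0, yielding an empty dataset.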
RepeatDatasetParams EmptyRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {1, 2, 3, 4}),
CreateTensor<tstring>(TensorShape{2, 1}, {"a", "b"})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
0,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
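// Repeats with count = -1, yielding an infinite dataset.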
RepeatDatasetParams ForeverRepeatDatasetParams() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 1}, {1, 2})},
"tensor_slice");
return RepeatDatasetParams(
std::move(tensor_slice_dataset_params),
-1,
{DT_INT64, DT_STRING},
{PartialTensorShape({2}), PartialTensorShape({1})},
kNodeName);
}
std::vector<GetNextTestCase<RepeatDatasetParams>> GetNextTestCases() {
return {{FiniteRepeatDatasetParams(),
{CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"}),
CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"})}},
{EmptyRepeatDatasetParams(),
{}},
{
ForeverRepeatDatasetParams(),
{CreateTensor<int64_t>(TensorShape{1}, {1}),
CreateTensor<int64_t>(TensorShape{1}, {2})}}};
}
class ParameterizedIteratorGetNextOpTest
: public RepeatDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<RepeatDatasetParams>> {};
TEST_P(ParameterizedIteratorGetNextOpTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
auto expected_outputs_it = test_case.expected_outputs.begin();
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
if (dataset_->Cardinality() == kInfiniteCardinality) {
for (int i = 0; i < 100; ++i) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
for (const auto& tensor : out_tensors) {
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
if (expected_outputs_it == test_case.expected_outputs.end()) {
expected_outputs_it = test_case.expected_outputs.begin();
}
}
}
EXPECT_FALSE(end_of_sequence);
} else {
while (!end_of_sequence) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (const auto& tensor : out_tensors) {
EXPECT_NE(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
}
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
}
INSTANTIATE_TEST_SUITE_P(RepeatDatasetOpTest,
ParameterizedIteratorGetNextOpTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(RepeatDatasetOpTest, DatasetNodeName) {
auto dataset_params = FiniteRepeatDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(RepeatDatasetOpTest, DatasetTypeString) {
auto dataset_params = FiniteRepeatDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(RepeatDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<RepeatDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{FiniteRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{EmptyRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{ForeverRepeatDatasetParams(),
{DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<RepeatDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FiniteRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{EmptyRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{ForeverRepeatDatasetParams(),
{PartialTensorShape({1})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<RepeatDatasetParams>>
DatasetCardinalityTestCases() {
return {{FiniteRepeatDatasetParams(), 4},
{EmptyRepeatDatasetParams(), 0},
{ForeverRepeatDatasetParams(),
kInfiniteCardinality}};
}
DATASET_CARDINALITY_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
DatasetCardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<RepeatDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{FiniteRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{EmptyRepeatDatasetParams(),
{DT_INT64, DT_STRING}},
{ForeverRepeatDatasetParams(),
{DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<RepeatDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FiniteRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{EmptyRepeatDatasetParams(),
{PartialTensorShape({2}),
PartialTensorShape({1})}},
{ForeverRepeatDatasetParams(),
{PartialTensorShape({1})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorOutputShapesTestCases())
std::vector<IteratorPrefixTestCase<RepeatDatasetParams>>
IteratorPrefixTestCases() {
return {
{FiniteRepeatDatasetParams(),
name_utils::IteratorPrefix(
"FiniteRepeat", FiniteRepeatDatasetParams().iterator_prefix())},
{EmptyRepeatDatasetParams(),
name_utils::IteratorPrefix(
"EmptyRepeat", EmptyRepeatDatasetParams().iterator_prefix())},
{ForeverRepeatDatasetParams(),
name_utils::IteratorPrefix(
"ForeverRepeat", ForeverRepeatDatasetParams().iterator_prefix())}};
}
ITERATOR_PREFIX_TEST_P(RepeatDatasetOpTest, RepeatDatasetParams,
IteratorPrefixTestCases())
std::vector<IteratorSaveAndRestoreTestCase<RepeatDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{FiniteRepeatDatasetParams(),
{0, 1, 3},
{CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"}),
CreateTensor<int64_t>(TensorShape{2}, {1, 2}),
CreateTensor<tstring>(TensorShape{1}, {"a"}),
CreateTensor<int64_t>(TensorShape{2}, {3, 4}),
CreateTensor<tstring>(TensorShape{1}, {"b"})}},
{EmptyRepeatDatasetParams(),
{0, 1, 3},
{}},
{
ForeverRepeatDatasetParams(),
{0, 1, 3},
{CreateTensor<int64_t>(TensorShape{1}, {1}),
CreateTensor<int64_t>(TensorShape{1}, {2})}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public RepeatDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<RepeatDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, Roundtrip) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
auto expected_outputs_it = test_case.expected_outputs.begin();
bool end_of_sequence = dataset_->Cardinality() == 0;
std::vector<Tensor> out_tensors;
int cur_iteration = 0;
std::vector<int> breakpoints = GetParam().breakpoints;
for (int breakpoint : breakpoints) {
VariantTensorDataWriter writer;
TF_EXPECT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
while (cur_iteration < breakpoint) {
out_tensors.clear();
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
for (auto& tensor : out_tensors) {
EXPECT_NE(expected_outputs_it, test_case.expected_outputs.end());
TF_EXPECT_OK(ExpectEqual(tensor, *expected_outputs_it));
expected_outputs_it++;
}
}
cur_iteration++;
if (dataset_->Cardinality() == kInfiniteCardinality &&
expected_outputs_it == test_case.expected_outputs.end()) {
expected_outputs_it = test_case.expected_outputs.begin();
}
}
if (breakpoint >= dataset_->Cardinality()) {
if (dataset_->Cardinality() == kInfiniteCardinality) {
EXPECT_FALSE(end_of_sequence);
} else {
EXPECT_TRUE(end_of_sequence);
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
} else {
EXPECT_FALSE(end_of_sequence);
}
}
}
INSTANTIATE_TEST_SUITE_P(
RepeatDatasetOpTest, ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/repeat_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/repeat_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd8402fb-b272-4b3d-8727-5a2fb5c803c3 | cpp | tensorflow/tensorflow | stablehlo_and | tensorflow/lite/kernels/stablehlo_and.cc | tensorflow/lite/kernels/stablehlo_and_test.cc | #include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_elementwise.h"
namespace tflite::ops::builtin {
TfLiteRegistration* Register_STABLEHLO_AND() {
static TfLiteRegistration r = {nullptr, nullptr, ElementwisePrepare,
ElementwiseEval<ComputationType::kAnd>};
return &r;
}
} | #include <cstdint>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAre;
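// Minimal single-op model wrapping STABLEHLO_AND with two inputs and one
// output, used to exercise the element-wise kernel for each tested type.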
class AndOpModel : public SingleOpModel {
public:
AndOpModel(const TensorData& input1, const TensorData& input2,
const TensorData& output) {
input1_ = AddInput(input1);
input2_ = AddInput(input2);
output_ = AddOutput(output);
SetBuiltinOp(BuiltinOperator_STABLEHLO_AND, BuiltinOptions_NONE, 0);
SetBypassDefaultDelegates();
BuildInterpreter({GetShape(input1_), GetShape(input2_)});
}
int input1() { return input1_; }
int input2() { return input2_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
protected:
int input1_;
int input2_;
int output_;
};
TEST(StablehloElementwise, AndInt32) {
AndOpModel model({TensorType_INT32, {1, 2, 2, 1}},
{TensorType_INT32, {1, 2, 2, 1}}, {TensorType_INT32, {}});
model.PopulateTensor<int32_t>(model.input1(), {2, 3, 7, 8});
model.PopulateTensor<int32_t>(model.input2(), {4, 5, 7, 1});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAre(0, 1, 7, 0));
}
TEST(StablehloElementwise, AndInt8) {
AndOpModel model({TensorType_INT8, {1, 3, 1}}, {TensorType_INT8, {1, 3, 1}},
{TensorType_INT8, {}});
model.PopulateTensor<int8_t>(model.input1(), {7, -8, -8});
model.PopulateTensor<int8_t>(model.input2(), {0, 7, -8});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int8_t>(), ElementsAre(0, 0, -8));
}
TEST(StablehloElementwise, AndInt16) {
AndOpModel model({TensorType_INT16, {1, 1, 3}}, {TensorType_INT16, {1, 1, 3}},
{TensorType_INT16, {}});
model.PopulateTensor<int16_t>(model.input1(), {32767, -32768, -32768});
model.PopulateTensor<int16_t>(model.input2(), {32767, -32768, -32768});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAre(32767, -32768, -32768));
}
TEST(StablehloElementwise, AndBool) {
AndOpModel model({TensorType_BOOL, {2, 1, 2, 1}},
{TensorType_BOOL, {2, 1, 2, 1}}, {TensorType_BOOL, {}});
model.PopulateTensor<bool>(model.input1(), {false, false, true, true});
model.PopulateTensor<bool>(model.input2(), {false, true, false, true});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<bool>(), ElementsAre(false, false, false, true));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_and.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_and_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
0dfe0675-3405-4e60-9aaa-06befc7c3acc | cpp | tensorflow/tensorflow | interpreter_utils | tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc | tensorflow/lite/delegates/interpreter_utils_test.cc | #include "tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.h"
#include <cstring>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/interpreter_builder.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace gpu {
namespace testing {
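// Builds an interpreter for `model`, optionally applies `delegate`, copies
// `inputs` into the float32 input tensors, invokes, and converts each output
// tensor into a BHWC TensorFloat32 appended to `outputs`.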
absl::Status InterpreterInvokeWithOpResolver(
const ::tflite::Model* model, TfLiteDelegate* delegate,
const OpResolver& op_resolver, const std::vector<TensorFloat32>& inputs,
std::vector<TensorFloat32>* outputs) {
auto interpreter = std::make_unique<Interpreter>();
if (InterpreterBuilder(model, op_resolver)(&interpreter) != kTfLiteOk) {
return absl::InternalError("Unable to create TfLite InterpreterBuilder");
}
if (delegate && interpreter->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
return absl::InternalError(
"Unable to modify TfLite graph with the delegate");
}
interpreter->SetNumThreads(1);
if (interpreter->AllocateTensors() != kTfLiteOk) {
return absl::InternalError("Unable to allocate TfLite tensors");
}
for (int i = 0; i < inputs.size(); ++i) {
if (interpreter->tensor(interpreter->inputs()[i])->type != kTfLiteFloat32) {
return absl::InternalError("input data_type is not float32");
}
float* tflite_data =
interpreter->typed_tensor<float>(interpreter->inputs()[i]);
if (inputs[i].data.size() * sizeof(float) >
interpreter->tensor(interpreter->inputs()[i])->bytes) {
return absl::InternalError("too big input data");
}
std::memcpy(tflite_data, inputs[i].data.data(),
inputs[i].data.size() * sizeof(float));
}
if (interpreter->Invoke() != kTfLiteOk) {
return absl::InternalError("Unable to invoke TfLite interpreter");
}
if (!outputs || !outputs->empty()) {
return absl::InternalError("Invalid outputs pointer");
}
outputs->reserve(interpreter->outputs().size());
for (auto t : interpreter->outputs()) {
const TfLiteTensor* out_tensor = interpreter->tensor(t);
TensorFloat32 bhwc;
bhwc.id = t;
if (out_tensor->dims->data[0] != 1) {
return absl::InternalError("Batch dimension is expected to be 1");
}
bhwc.shape.b = out_tensor->dims->data[0];
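    // Map the output tensor's rank onto BHWC: rank 2 is (batch, channels),
    // rank 3 is (batch, width, channels), rank 4 is (batch, height, width,
    // channels); other ranks are rejected.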
switch (out_tensor->dims->size) {
case 2:
bhwc.shape.h = 1;
bhwc.shape.w = 1;
bhwc.shape.c = out_tensor->dims->data[1];
break;
case 3:
bhwc.shape.h = 1;
bhwc.shape.w = out_tensor->dims->data[1];
bhwc.shape.c = out_tensor->dims->data[2];
break;
case 4:
bhwc.shape.h = out_tensor->dims->data[1];
bhwc.shape.w = out_tensor->dims->data[2];
bhwc.shape.c = out_tensor->dims->data[3];
break;
default:
return absl::InternalError("Unsupported dimensions size " +
std::to_string(out_tensor->dims->size));
}
bhwc.data = std::vector<float>(
out_tensor->data.f,
out_tensor->data.f + out_tensor->bytes / sizeof(float));
outputs->push_back(bhwc);
}
return absl::OkStatus();
}
absl::Status InterpreterInvoke(const ::tflite::Model* model,
TfLiteDelegate* delegate,
const std::vector<TensorFloat32>& inputs,
std::vector<TensorFloat32>* outputs) {
ops::builtin::BuiltinOpResolver builtin_op_resolver;
return InterpreterInvokeWithOpResolver(model, delegate, builtin_op_resolver,
inputs, outputs);
}
}
}
} | #include "tensorflow/lite/delegates/interpreter_utils.h"
#include <string.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/delegate_test_util.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace delegates {
using test_utils::SimpleDelegate;
using test_utils::TestDelegate;
using test_utils::TestFP16Delegation;
namespace {
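// The delegate below claims nodes {0, 1, 2} but fails at Invoke(), so
// InvokeWithCPUFallback() must report kTfLiteDelegateError and fall back to
// the original three-node execution plan on the CPU.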
TEST_F(TestDelegate, DelegateNodeInvokeFailureFallback) {
delegate_ = std::unique_ptr<SimpleDelegate>(new SimpleDelegate(
      {0, 1, 2}, kTfLiteDelegateFlagsNone, false,
      0, true));
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 1);
std::vector<float> input = {1.0f, 2.0f, 3.0f};
std::vector<float> expected_output = {2.0f, 4.0f, 6.0f};
constexpr int kOutputTensorIndex = 3;
memcpy(interpreter_->typed_tensor<float>(0), input.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), 3 * sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
TfLiteTensor* tensor = interpreter_->tensor(kOutputTensorIndex);
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(tensor->data.f[i], expected_output[i]) << i;
}
}
TEST_F(TestDelegate, TestFallbackWithMultipleDelegates) {
delegate_ = std::unique_ptr<SimpleDelegate>(
new SimpleDelegate({0}, kTfLiteDelegateFlagsAllowDynamicTensors));
delegate2_ = std::unique_ptr<SimpleDelegate>(new SimpleDelegate(
      {1, 2}, kTfLiteDelegateFlagsNone, false,
      0, true));
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate2_->get_tf_lite_delegate()),
kTfLiteOk);
ASSERT_EQ(interpreter_->execution_plan().size(), 2);
std::vector<float> input = {1.0f, 2.0f, 3.0f};
std::vector<float> expected_output = {2.0f, 4.0f, 6.0f};
constexpr int kOutputTensorIndex = 2;
TfLiteTensor* tensor = interpreter_->tensor(kOutputTensorIndex);
memcpy(interpreter_->typed_tensor<float>(0), input.data(), 3 * sizeof(float));
memcpy(interpreter_->typed_tensor<float>(1), input.data(), 3 * sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
EXPECT_EQ(interpreter_->execution_plan().size(), 3);
for (int i = 0; i < 3; ++i) {
EXPECT_EQ(tensor->data.f[i], expected_output[i]) << i;
}
}
TEST_P(TestFP16Delegation, DelegateInvokeWithCPUFallback) {
delegate_ = std::make_unique<FP16Delegate>(
GetParam(), false,
true);
ASSERT_EQ(
interpreter_->ModifyGraphWithDelegate(delegate_->get_tf_lite_delegate()),
kTfLiteOk);
std::vector<float> input = {3.0f};
std::vector<float> expected_output = {16.0f};
const int input_tensor_idx = interpreter_->inputs()[0];
const int output_tensor_idx = interpreter_->outputs()[0];
memcpy(interpreter_->typed_tensor<float>(input_tensor_idx), input.data(),
sizeof(float));
EXPECT_EQ(
delegates::InterpreterUtils::InvokeWithCPUFallback(interpreter_.get()),
kTfLiteDelegateError);
TfLiteTensor* output_tensor = interpreter_->tensor(output_tensor_idx);
for (int i = 0; i < 1; ++i) {
EXPECT_EQ(output_tensor->data.f[i], expected_output[i]) << i;
}
ASSERT_EQ(interpreter_->execution_plan().size(), 8);
VerifyInvoke();
}
INSTANTIATE_TEST_SUITE_P(TestFP16Delegation, TestFP16Delegation,
::testing::Values(1, 2));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/testing/interpreter_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/interpreter_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ecbc942e-789a-4c27-8270-279e04a643e2 | cpp | tensorflow/tensorflow | bcast | tensorflow/core/util/bcast.cc | tensorflow/core/util/bcast_test.cc | #include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/platform/logging.h"
namespace tensorflow {
BCast::Vec BCast::FromShape(const TensorShape& shape) {
const int N = shape.dims();
BCastList::Vec ret(N);
for (int i = 0; i < N; ++i) {
ret[i] = shape.dim_size(i);
}
return ret;
}
TensorShape BCast::ToShape(const BCastList::Vec& vec) {
TensorShape shape(vec);
return shape;
}
} | #include "tensorflow/core/util/bcast.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
namespace {
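// Encodes a broadcast as a compact string: the reshape and bcast vectors for
// x and y, the result shape, the output shape, and the gradient reduction
// indices for x and y, in that order, or "invalid" for incompatible shapes.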
string BCast(const tensorflow::BCast::Vec& x, const tensorflow::BCast::Vec& y,
const bool fewer_dims_optimization = true) {
tensorflow::BCast b(x, y, fewer_dims_optimization);
if (!b.IsValid()) {
return "invalid";
}
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_reshape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_bcast(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_reshape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_bcast(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.result_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.output_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_x_reduce_idx(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_y_reduce_idx(), ","), "]");
return ret;
}
string BCastBatchIndices(const tensorflow::BCast::Vec& x,
const tensorflow::BCast::Vec& y,
const bool fewer_dims_optimization = true) {
tensorflow::BCast b(x, y, fewer_dims_optimization,
true);
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.x_batch_indices(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.y_batch_indices(), ","), "]");
return ret;
}
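// Same string encoding as BCast() above, but for broadcasting three shapes.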
string BCastList3(const tensorflow::BCast::Vec& x,
const tensorflow::BCast::Vec& y,
const tensorflow::BCast::Vec& z,
const bool fewer_dims_optimization = true) {
tensorflow::BCastList<3> b({x, y, z}, fewer_dims_optimization);
if (!b.IsValid()) {
return "invalid";
}
string ret;
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.reshape(2), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.bcast(2), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.result_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.output_shape(), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(0), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(1), ","), "]");
strings::StrAppend(&ret, "[", absl::StrJoin(b.grad_reduce_idx(2), ","), "]");
return ret;
}
TEST(BCastTest, Invalid) {
for (const bool use_optimization : {true, false}) {
EXPECT_EQ("invalid", BCast({5, 3, 2}, {3}, use_optimization));
EXPECT_EQ("invalid", BCast({5, 3, 2}, {2, 2}, use_optimization));
EXPECT_EQ("invalid", BCast({5, 3, 2}, {10, 1, 1}, use_optimization));
EXPECT_EQ("invalid",
BCast({1, 2, 1, 2, 1, 2}, {2, 4, 2, 1, 2, 1}, use_optimization));
}
}
TEST(BCastListTest, Invalid) {
for (const bool use_optimization : {true, false}) {
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {3}, {1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {2, 2}, {1}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({5, 3, 2}, {10, 1, 1}, {1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1, 2, 1, 2, 1, 2}, {2, 4, 2, 1, 2, 1}, {1},
use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {1}, {3}, use_optimization));
EXPECT_EQ("invalid", BCastList3({5, 3, 2}, {1}, {2, 2}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({5, 3, 2}, {1}, {10, 1, 1}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1}, {5, 3, 2}, {3}, use_optimization));
EXPECT_EQ("invalid", BCastList3({1}, {5, 3, 2}, {2, 2}, use_optimization));
EXPECT_EQ("invalid",
BCastList3({1}, {5, 3, 2}, {10, 1, 1}, use_optimization));
}
}
TEST(BCastTest, Basic_SameShape) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}),
"[2310][1][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[][]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][]");
}
TEST(BCastListTest, Basic_SameShape) {
EXPECT_EQ(BCastList3({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}),
"[2310][1][2310][1][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[][][]");
EXPECT_EQ(
BCastList3({11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, {11, 7, 5, 3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][][]");
}
TEST(BCastTest, Basic_SameShapeWithZeroDim) {
EXPECT_EQ(BCast({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}),
"[0][1][0][1]"
"[0]"
"[11,7,0,3,2]"
"[][]");
EXPECT_EQ(BCast({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, false),
"[11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1]"
"[11,7,0,3,2]"
"[11,7,0,3,2]"
"[][]");
}
TEST(BCastListTest, Basic_SameShapeWithZeroDim) {
EXPECT_EQ(BCastList3({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}),
"[0][1][0][1][0][1]"
"[0]"
"[11,7,0,3,2]"
"[][][]");
EXPECT_EQ(
BCastList3({11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, {11, 7, 0, 3, 2}, false),
"[11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1][11,7,0,3,2][1,1,1,1,1]"
"[11,7,0,3,2]"
"[11,7,0,3,2]"
"[][][]");
}
TEST(BCastTest, Basic_Scalar_Scalar) {
EXPECT_EQ(BCast({1, 1}, {}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {1, 1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {1, 1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
}
TEST(BCastTest, Basic_TrueScalar_Scalar) {
EXPECT_EQ(BCast({}, {}),
"[1][1][1][1]"
"[1]"
"[]"
"[][]");
EXPECT_EQ(BCast({}, {1}),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({}, {1}, false),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({}, {1, 1}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({}, {1, 1}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1}, {}),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({1}, {}, false),
"[1][1][1][1]"
"[1]"
"[1]"
"[0][0]");
EXPECT_EQ(BCast({1, 1}, {}),
"[1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1]");
EXPECT_EQ(BCast({1, 1}, {}, false),
"[1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1]");
}
TEST(BCastListTest, Basic_Scalar_Scalar_Scalar) {
EXPECT_EQ(BCastList3({1, 1}, {1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1, 1}, {1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1, 1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1}, {1, 1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {1}, {1, 1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
}
TEST(BCastListTest, Basic_TrueScalar_Scalar_Scalar) {
EXPECT_EQ(BCastList3({1, 1}, {1}, {}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1, 1}, {1}, {}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({}, {1, 1}, {1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({}, {1, 1}, {1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {}, {1, 1}),
"[1][1][1][1][1][1]"
"[1]"
"[1,1]"
"[0,1][0,1][0,1]");
EXPECT_EQ(BCastList3({1}, {}, {1, 1}, false),
"[1,1][1,1][1,1][1,1][1,1][1,1]"
"[1,1]"
"[1,1]"
"[0,1][0,1][0,1]");
}
TEST(BCastTest, Basic_Tensor_Scalar) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,3,2]"
"[][0,1,2,3,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,1,1][11,7,5,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,3,4]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,3,2]"
"[0,1,2,3,4][]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2}, false),
"[1,1,1,1,1][11,7,5,3,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,3,4][]");
EXPECT_EQ(BCast({1, 2147483648}, {1}),
"[2147483648][1][1][2147483648]"
"[2147483648]"
"[1,2147483648]"
"[0][0,1]");
}
TEST(BCastTest, Basic_Tensor_With_DimSize_1_Scalar) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2, 1}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,3,2,1]"
"[5][0,1,2,3,4,5]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2, 1}, {1}, false),
"[11,7,5,3,2,1][1,1,1,1,1,1][1,1,1,1,1,1][11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[5][0,1,2,3,4,5]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2, 1}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,3,2,1]"
"[0,1,2,3,4,5][5]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 3, 2, 1}, false),
"[1,1,1,1,1,1][11,7,5,3,2,1][11,7,5,3,2,1][1,1,1,1,1,1]"
"[11,7,5,3,2,1]"
"[11,7,5,3,2,1]"
"[0,1,2,3,4,5][5]");
EXPECT_EQ(BCast({11, 7, 5, 1, 1, 3, 2, 1, 1}, {1}),
"[2310][1][1][2310]"
"[2310]"
"[11,7,5,1,1,3,2,1,1]"
"[3,4,7,8][0,1,2,3,4,5,6,7,8]");
EXPECT_EQ(BCast({11, 7, 5, 1, 1, 3, 2, 1, 1}, {1}, false),
"[11,7,5,1,1,3,2,1,1][1,1,1,1,1,1,1,1,1]"
"[1,1,1,1,1,1,1,1,1][11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[3,4,7,8][0,1,2,3,4,5,6,7,8]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 1, 1, 3, 2, 1, 1}),
"[1][2310][2310][1]"
"[2310]"
"[11,7,5,1,1,3,2,1,1]"
"[0,1,2,3,4,5,6,7,8][3,4,7,8]");
EXPECT_EQ(BCast({1}, {11, 7, 5, 1, 1, 3, 2, 1, 1}, false),
"[1,1,1,1,1,1,1,1,1][11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1][1,1,1,1,1,1,1,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[11,7,5,1,1,3,2,1,1]"
"[0,1,2,3,4,5,6,7,8][3,4,7,8]");
}
TEST(BCastTest, Basic_Tensor_Vector) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {2}),
"[1155,2][1,1][1,2][1155,1]"
"[1155,2]"
"[11,7,5,3,2]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {2}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,1,2][11,7,5,3,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({2}, {11, 7, 5, 3, 2}),
"[1,2][1155,1][1155,2][1,1]"
"[1155,2]"
"[11,7,5,3,2]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2}, {11, 7, 5, 3, 2}, false),
"[1,1,1,1,2][11,7,5,3,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,3][]");
}
TEST(BCastTest, Basic_Tensor_Matrix) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 2}),
"[385,6][1,1][1,6][385,1]"
"[385,6]"
"[11,7,5,3,2]"
"[][0,1,2]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 2}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,3,2][11,7,5,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2]");
EXPECT_EQ(BCast({3, 2}, {11, 7, 5, 3, 2}),
"[1,6][385,1][385,6][1,1]"
"[385,6]"
"[11,7,5,3,2]"
"[0,1,2][]");
EXPECT_EQ(BCast({3, 2}, {11, 7, 5, 3, 2}, false),
"[1,1,1,3,2][11,7,5,1,1][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2][]");
}
TEST(BCastTest, Basic_Tensor_Matrix_Column) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 1}),
"[385,3,2][1,1,1][1,3,1][385,1,2]"
"[385,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {3, 1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,1,1,3,1][11,7,5,1,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,1,2,4]");
EXPECT_EQ(BCast({3, 1}, {11, 7, 5, 3, 2}),
"[1,3,1][385,1,2][385,3,2][1,1,1]"
"[385,3,2]"
"[11,7,5,3,2]"
"[0,1,2,4][]");
EXPECT_EQ(BCast({3, 1}, {11, 7, 5, 3, 2}, false),
"[1,1,1,3,1][11,7,5,1,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[0,1,2,4][]");
}
TEST(BCastTest, Basic_Tensor_Matrix_As_Tensor) {
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {7, 5, 1, 1}),
"[11,35,6][1,1,1][1,35,1][11,1,6]"
"[11,35,6]"
"[11,7,5,3,2]"
"[][0,3,4]");
EXPECT_EQ(BCast({11, 7, 5, 3, 2}, {7, 5, 1, 1}, false),
"[11,7,5,3,2][1,1,1,1,1][1,7,5,1,1][11,1,1,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[][0,3,4]");
EXPECT_EQ(BCast({7, 5, 1, 1}, {11, 7, 5, 3, 2}),
"[1,35,1][11,1,6][11,35,6][1,1,1]"
"[11,35,6]"
"[11,7,5,3,2]"
"[0,3,4][]");
EXPECT_EQ(BCast({7, 5, 1, 1}, {11, 7, 5, 3, 2}, false),
"[1,7,5,1,1][11,1,1,3,2][11,7,5,3,2][1,1,1,1,1]"
"[11,7,5,3,2][11,7,5,3,2]"
"[0,3,4][]");
}
TEST(BCastTest, Basic_SymbolicShape) {
constexpr int64_t kSymDim1 = -10'000'000'000;
constexpr int64_t kSymDim2 = -10'000'000'001;
const tensorflow::BCast bcast({10, kSymDim1, kSymDim2}, {10, 1, 1}, false);
EXPECT_TRUE(bcast.IsValid());
EXPECT_EQ(bcast.output_batch_size(), -1);
}
TEST(BCastTest, Complex_BCast_To_Each_Other) {
string truth =
"[11,1,5,1,2][1,7,1,3,1][1,7,1,3,1][11,1,5,1,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[1,3][0,2,4]";
EXPECT_EQ(BCast({11, 1, 5, 1, 2}, {7, 1, 3, 1}), truth);
EXPECT_EQ(BCast({11, 1, 5, 1, 2}, {7, 1, 3, 1}, false), truth);
}
TEST(BCastListTest, Complex_BCast_To_Each_Other) {
string truth =
"[11,1,1,1,2][1,7,5,3,1]"
"[1,7,1,3,1][11,1,5,1,2]"
"[1,1,5,1,1][11,7,1,3,2]"
"[11,7,5,3,2]"
"[11,7,5,3,2]"
"[1,2,3][0,2,4][0,1,3,4]";
EXPECT_EQ(BCastList3({11, 1, 1, 1, 2}, {7, 1, 3, 1}, {5, 1, 1}), truth);
EXPECT_EQ(BCastList3({11, 1, 1, 1, 2}, {7, 1, 3, 1}, {5, 1, 1}, false),
truth);
}
TEST(BCastTest, TestZeroDimensionShape) {
EXPECT_EQ(BCast({2, 0, 5}, {5}),
"[0,5][1,1][1,5][0,1]"
"[0,5]"
"[2,0,5]"
"[][0,1]");
EXPECT_EQ(BCast({5}, {2, 0, 5}),
"[1,5][0,1][0,5][1,1]"
"[0,5]"
"[2,0,5]"
"[0,1][]");
EXPECT_EQ(BCast({2, 0, 5}, {5}, false),
"[2,0,5][1,1,1][1,1,5][2,0,1]"
"[2,0,5]"
"[2,0,5]"
"[][0,1]");
EXPECT_EQ(BCast({5}, {2, 0, 5}, false),
"[1,1,5][2,0,1][2,0,5][1,1,1]"
"[2,0,5]"
"[2,0,5]"
"[0,1][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {5}),
"[0,5][1,1][1,5][0,1]"
"[0,5]"
"[2,0,3,0,5]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({5}, {2, 0, 3, 0, 5}),
"[1,5][0,1][0,5][1,1]"
"[0,5]"
"[2,0,3,0,5]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {5}, false),
"[2,0,3,0,5][1,1,1,1,1][1,1,1,1,5][2,0,3,0,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,2,3]");
EXPECT_EQ(BCast({5}, {2, 0, 3, 0, 5}, false),
"[1,1,1,1,5][2,0,3,0,1][2,0,3,0,5][1,1,1,1,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,2,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {3, 1, 5}),
"[0,3,0,5][1,1,1,1][1,3,1,5][0,1,0,1]"
"[0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,3]");
EXPECT_EQ(BCast({3, 1, 5}, {2, 0, 3, 0, 5}),
"[1,3,1,5][0,1,0,1][0,3,0,5][1,1,1,1]"
"[0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,3][]");
EXPECT_EQ(BCast({2, 0, 3, 0, 5}, {3, 1, 5}, false),
"[2,0,3,0,5][1,1,1,1,1][1,1,3,1,5][2,0,1,0,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[][0,1,3]");
EXPECT_EQ(BCast({3, 1, 5}, {2, 0, 3, 0, 5}, false),
"[1,1,3,1,5][2,0,1,0,1][2,0,3,0,5][1,1,1,1,1]"
"[2,0,3,0,5]"
"[2,0,3,0,5]"
"[0,1,3][]");
}
TEST(BCastTest, BatchIndices) {
EXPECT_EQ("[0,0,0,0][0,1,2,3]", BCastBatchIndices({1}, {4}));
EXPECT_EQ("[][]", BCastBatchIndices({5}, {7}));
EXPECT_EQ("[][]", BCastBatchIndices({2, 4, 6}, {2, 4, 6}));
EXPECT_EQ("[0,0,0,0,1,1,1,1,2,2,2,2][0,1,2,3,0,1,2,3,0,1,2,3]",
BCastBatchIndices({3, 1}, {1, 4}));
EXPECT_EQ("[0,0,1,1,2,2,0,0,1,1,2,2][0,1,0,1,0,1,2,3,2,3,2,3]",
BCastBatchIndices({3, 1}, {2, 1, 2}));
}
void BM_BCastSetup(::testing::benchmark::State& state) {
const int same_shape = state.range(0);
if (same_shape) {
state.SetLabel("same_shapes");
for (auto s : state) {
class BCast b({1000, 100}, {1000, 100});
}
} else {
state.SetLabel("different_shapes");
for (auto s : state) {
class BCast b({3, 1, 5}, {2, 0, 3, 0, 5});
}
}
}
BENCHMARK(BM_BCastSetup)->Arg(0)->Arg(1);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/bcast.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/bcast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2dc24273-ac8a-4cd0-ae3e-f318b528f463 | cpp | tensorflow/tensorflow | mkl_fused_batch_norm_op | tensorflow/core/kernels/mkl/mkl_fused_batch_norm_op.cc | tensorflow/core/kernels/mkl/mkl_fused_batch_norm_op_test.cc | #ifdef INTEL_MKL
#include "unsupported/Eigen/CXX11/Tensor"
#include "dnnl.hpp"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/kernels/fused_batch_norm_op.h"
#include "tensorflow/core/kernels/no_op.h"
#include "tensorflow/core/util/mkl_util.h"
#include "tensorflow/core/util/tensor_format.h"
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
#include "tensorflow/core/platform/mutex.h"
#endif
#define GET_FLAG(bn_flag) static_cast<int>(dnnl::normalization_flags::bn_flag)
#define IS_SET(cflag) (context_.flags & GET_FLAG(cflag))
using dnnl::batch_normalization_backward;
using dnnl::batch_normalization_forward;
using dnnl::prop_kind;
using dnnl::stream;
using BatchNormFwdPd = dnnl::batch_normalization_forward::primitive_desc;
using BatchNormBwdPd = dnnl::batch_normalization_backward::primitive_desc;
namespace tensorflow {
#ifndef ENABLE_ONEDNN_V3
#define FORWARD_INFERENCE prop_kind::forward_scoring
#define GET_DIFF_SCALE_DATA_BUFFER diff_scale_shift_data
#define GET_DIFF_SCALE_SHIFT_DATA_BUFFERS diff_scale_shift_data
#define GET_DIFF_SHIFT_DATA_BUFFER diff_scale_shift_data + depth_
#define GET_SCALE_AND_SHIFT_FLAGS GET_FLAG(use_scale_shift)
#define GET_SCALE_DATA_BUFFER scale_shift_data
#define IS_SCALE_AND_SHIFT_FLAG_SET IS_SET(use_scale_shift)
#define SCALE_SHIFT_NET_ARGS \
{ DNNL_ARG_SCALE_SHIFT, *context_.scale_shift_mem }
#define SET_MKL_LAYOUT(md) SetMklLayout(&md)
#else
#define FORWARD_INFERENCE prop_kind::forward_inference
#define GET_DIFF_SCALE_DATA_BUFFER diff_scale_data
#define GET_DIFF_SCALE_SHIFT_DATA_BUFFERS diff_scale_data, diff_shift_data
#define GET_DIFF_SHIFT_DATA_BUFFER diff_shift_data
#define GET_SCALE_AND_SHIFT_FLAGS GET_FLAG(use_scale) | GET_FLAG(use_shift)
#define GET_SCALE_DATA_BUFFER scale_data
#define IS_SCALE_AND_SHIFT_FLAG_SET IS_SET(use_scale) && IS_SET(use_shift)
#define SCALE_SHIFT_NET_ARGS \
{DNNL_ARG_SCALE, *context_.scale_mem}, { DNNL_ARG_SHIFT, *context_.shift_mem }
#define SET_MKL_LAYOUT(md) SetMklLayout(md)
#endif
using CPUDevice = Eigen::ThreadPoolDevice;
using FusedBNActivationMode = functor::FusedBatchNormActivationMode;
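// Parameters that describe a fused batch-norm forward primitive: input dims,
// channel depth, epsilon, training vs. inference mode, data format, fused
// activation, and the oneDNN memory descriptor of the input (plus, with
// oneDNN v3, the output).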
struct MklBatchNormFwdParams {
memory::dims src_dims;
int depth;
float eps;
bool training;
TensorFormat data_format;
FusedBNActivationMode activation_mode;
memory::desc src_md;
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md;
#endif
MklBatchNormFwdParams(const memory::dims& src_dims, int depth, float eps,
bool training, TensorFormat data_format,
memory::desc src_md,
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md,
#endif
FusedBNActivationMode activation_mode)
: src_dims(src_dims),
depth(depth),
eps(eps),
training(training),
data_format(data_format),
activation_mode(activation_mode),
#ifndef ENABLE_ONEDNN_V3
src_md(src_md) {
}
#else
src_md(src_md),
dst_md(dst_md) {
}
#endif
};
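// Wraps a oneDNN batch-normalization forward primitive. Execute() binds the
// caller's buffers to the primitive's memory objects, runs the primitive on
// the given stream, and then resets the handles to dummy data.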
template <typename T, typename U>
class MklFusedBatchNormFwdPrimitive : public MklPrimitive {
public:
explicit MklFusedBatchNormFwdPrimitive(const MklBatchNormFwdParams& fwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.bn_fwd == nullptr) Setup(fwdParams);
}
~MklFusedBatchNormFwdPrimitive() {}
#ifndef ENABLE_ONEDNN_V3
void Execute(const T* src_data, const U* scale_shift_data, T* dst_data,
#else
void Execute(const T* src_data, const U* scale_data, const U* shift_data,
T* dst_data,
#endif
U* mean_data, U* variance_data,
std::shared_ptr<stream> fwd_stream, U* workspace_data) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)), *fwd_stream);
context_.dst_mem->set_data_handle(static_cast<void*>(dst_data),
*fwd_stream);
if (IS_SET(use_scale_shift))
context_.scale_shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_shift_data)), *fwd_stream);
if ((context_.pkind == prop_kind::forward_training) ||
(IS_SET(use_global_stats))) {
context_.mean_mem->set_data_handle(static_cast<void*>(mean_data),
*fwd_stream);
context_.variance_mem->set_data_handle(static_cast<void*>(variance_data),
*fwd_stream);
}
if (workspace_data != nullptr) {
context_.ws_mem->set_data_handle(workspace_data, *fwd_stream);
}
#else
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.dst_mem->set_data_handle(static_cast<void*>(dst_data));
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_shift_data)));
#else
context_.scale_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_data)));
context_.shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(shift_data)));
#endif
}
if ((context_.pkind == prop_kind::forward_training) ||
(IS_SET(use_global_stats))) {
context_.mean_mem->set_data_handle(static_cast<void*>(mean_data));
context_.variance_mem->set_data_handle(static_cast<void*>(variance_data));
}
if (workspace_data != nullptr) {
context_.ws_mem->set_data_handle(workspace_data);
}
#endif
execute_primitives(context_.fwd_primitives, fwd_stream, context_.net_args);
context_.src_mem->set_data_handle(DummyData);
context_.dst_mem->set_data_handle(DummyData);
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem->set_data_handle(DummyData);
#else
context_.scale_mem->set_data_handle(DummyData);
context_.shift_mem->set_data_handle(DummyData);
#endif
}
if ((context_.pkind == prop_kind::forward_training) ||
(IS_SET(use_global_stats))) {
context_.mean_mem->set_data_handle(DummyData);
context_.variance_mem->set_data_handle(DummyData);
}
if (workspace_data != nullptr) {
context_.ws_mem->set_data_handle(DummyData);
}
}
memory::desc GetDstPd() const { return context_.dst_mem->get_desc(); }
std::shared_ptr<BatchNormFwdPd> GetBatchNormFwdPd() const {
return context_.fwd_pd;
}
private:
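  // Holds the oneDNN memory objects, primitive descriptor, primitive and
  // execution-argument maps built once in Setup() and reused on every
  // Execute() call.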
struct BatchNormFwdContext {
int64 flags;
dnnl::prop_kind pkind;
std::shared_ptr<dnnl::memory> src_mem;
#ifndef ENABLE_ONEDNN_V3
std::shared_ptr<dnnl::memory> scale_shift_mem;
#else
std::shared_ptr<dnnl::memory> scale_mem;
std::shared_ptr<dnnl::memory> shift_mem;
#endif
std::shared_ptr<dnnl::memory> dst_mem;
std::shared_ptr<dnnl::memory> mean_mem;
std::shared_ptr<dnnl::memory> variance_mem;
std::shared_ptr<dnnl::memory> ws_mem;
std::shared_ptr<BatchNormFwdPd> fwd_pd;
std::shared_ptr<dnnl::primitive> bn_fwd;
std::vector<dnnl::primitive> fwd_primitives;
std::vector<std::unordered_map<int, memory>> net_args;
BatchNormFwdContext()
: flags(0),
pkind(prop_kind::forward_training),
src_mem(nullptr),
#ifndef ENABLE_ONEDNN_V3
scale_shift_mem(nullptr),
#else
scale_mem(nullptr),
shift_mem(nullptr),
#endif
dst_mem(nullptr),
mean_mem(nullptr),
variance_mem(nullptr),
ws_mem(nullptr),
bn_fwd(nullptr) {
}
};
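  // Creates the forward primitive descriptor, the placeholder memory objects
  // and the execution-argument map according to training/inference mode,
  // scale-shift usage and fused ReLU.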
void Setup(const MklBatchNormFwdParams& fwdParams) {
context_.flags = GET_SCALE_AND_SHIFT_FLAGS |
(fwdParams.training ? false : GET_FLAG(use_global_stats));
context_.pkind =
fwdParams.training ? prop_kind::forward_training : FORWARD_INFERENCE;
if (fwdParams.activation_mode == FusedBNActivationMode::kRelu) {
context_.flags |= GET_FLAG(fuse_norm_relu);
}
auto src_md = fwdParams.src_md;
#ifndef ENABLE_ONEDNN_V3
auto fwd_desc = batch_normalization_forward::desc(
context_.pkind, src_md, fwdParams.eps,
static_cast<dnnl::normalization_flags>(context_.flags));
context_.fwd_pd.reset(new BatchNormFwdPd(fwd_desc, cpu_engine_));
#else
auto dst_md = fwdParams.dst_md;
context_.fwd_pd.reset(new BatchNormFwdPd(
cpu_engine_, context_.pkind, src_md, dst_md, fwdParams.eps,
static_cast<dnnl::normalization_flags>(context_.flags)));
#endif
context_.src_mem.reset(
new memory(context_.fwd_pd->src_desc(), cpu_engine_, DummyData));
context_.dst_mem.reset(
new memory(context_.fwd_pd->dst_desc(), cpu_engine_, DummyData));
memory::dims m_dims = {1, fwdParams.depth};
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
memory::dims s_dims = {2, fwdParams.depth};
context_.scale_shift_mem.reset(
new memory({{s_dims}, MklDnnType<U>(), memory::format_tag::nc},
cpu_engine_, DummyData));
#else
memory::dims s_dims = {fwdParams.depth};
context_.scale_mem.reset(
new memory({{s_dims}, MklDnnType<U>(), memory::format_tag::x},
cpu_engine_, DummyData));
context_.shift_mem.reset(
new memory({{s_dims}, MklDnnType<U>(), memory::format_tag::x},
cpu_engine_, DummyData));
#endif
}
if (fwdParams.training || (IS_SET(use_global_stats))) {
context_.mean_mem.reset(
new memory({{m_dims}, MklDnnType<U>(), memory::format_tag::nc},
cpu_engine_, DummyData));
context_.variance_mem.reset(
new memory({{m_dims}, MklDnnType<U>(), memory::format_tag::nc},
cpu_engine_, DummyData));
}
if (IS_SET(fuse_norm_relu)) {
context_.ws_mem.reset(new memory(context_.fwd_pd->workspace_desc(),
cpu_engine_, DummyData));
}
if (!fwdParams.training && !(IS_SET(use_global_stats))) {
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
context_.net_args.push_back({{DNNL_ARG_SRC, *context_.src_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem}});
} else {
context_.net_args.push_back({{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_DST, *context_.dst_mem}});
}
context_.bn_fwd.reset(new batch_normalization_forward(*context_.fwd_pd));
} else if (IS_SET(use_global_stats)) {
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
if (IS_SET(fuse_norm_relu)) {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_WORKSPACE, *context_.ws_mem}});
} else {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem}});
}
} else {
if (IS_SET(fuse_norm_relu)) {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_WORKSPACE, *context_.ws_mem}});
} else {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_DST, *context_.dst_mem}});
}
}
context_.bn_fwd.reset(new batch_normalization_forward(*context_.fwd_pd));
} else {
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
if (IS_SET(fuse_norm_relu)) {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_WORKSPACE, *context_.ws_mem}});
} else {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
SCALE_SHIFT_NET_ARGS,
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem}});
}
} else {
if (IS_SET(fuse_norm_relu)) {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_WORKSPACE, *context_.ws_mem}});
} else {
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_DST, *context_.dst_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem}});
}
}
context_.bn_fwd.reset(new batch_normalization_forward(*context_.fwd_pd));
}
context_.fwd_primitives.push_back(*context_.bn_fwd);
}
struct BatchNormFwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
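// Factory that caches forward primitives keyed on input shape, depth, epsilon,
// training mode, data format, activation mode and data types, so ops with
// identical parameters reuse the same oneDNN primitive.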
template <typename T, typename U>
class MklFusedBatchNormFwdPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklFusedBatchNormFwdPrimitive<T, U>* Get(
const MklBatchNormFwdParams& fwdParams) {
auto bn_fwd = static_cast<MklFusedBatchNormFwdPrimitive<T, U>*>(
MklFusedBatchNormFwdPrimitiveFactory<T, U>::GetInstance()
.GetBatchNormFwd(fwdParams));
if (bn_fwd == nullptr) {
bn_fwd = new MklFusedBatchNormFwdPrimitive<T, U>(fwdParams);
MklFusedBatchNormFwdPrimitiveFactory<T, U>::GetInstance().SetBatchNormFwd(
fwdParams, bn_fwd);
}
return bn_fwd;
}
static MklFusedBatchNormFwdPrimitiveFactory& GetInstance() {
static MklFusedBatchNormFwdPrimitiveFactory instance_;
return instance_;
}
private:
MklFusedBatchNormFwdPrimitiveFactory() {}
~MklFusedBatchNormFwdPrimitiveFactory() {}
static string CreateKey(const MklBatchNormFwdParams& fwdParams) {
string prefix = "bn_fwd";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(fwdParams.src_dims);
key_creator.AddAsKey<int>(fwdParams.depth);
key_creator.AddAsKey<float>(fwdParams.eps);
key_creator.AddAsKey<bool>(fwdParams.training);
key_creator.AddAsKey<TensorFormat>(fwdParams.data_format);
key_creator.AddAsKey<FusedBNActivationMode>(fwdParams.activation_mode);
key_creator.AddAsKey(typeid(T).name());
key_creator.AddAsKey(typeid(U).name());
return key_creator.GetKey();
}
MklPrimitive* GetBatchNormFwd(const MklBatchNormFwdParams& fwdParams) {
string key = CreateKey(fwdParams);
return this->GetOp(key);
}
void SetBatchNormFwd(const MklBatchNormFwdParams& fwdParams,
MklPrimitive* op) {
string key = CreateKey(fwdParams);
this->SetOp(key, op);
}
};
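// Parameters used to build and look up a cached backward batch-norm primitive.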
struct MklBatchNormBwdParams {
memory::dims src_dims;
memory::dims diff_dst_dims;
int depth;
float eps;
bool training;
TensorFormat data_format;
memory::desc src_md;
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md;
memory::desc diff_src_md;
#endif
memory::desc diff_dst_md;
MklBatchNormBwdParams(memory::dims src_dims, memory::dims diff_dst_dims,
int depth, float eps, bool training,
TensorFormat data_format, memory::desc src_md,
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md, memory::desc diff_src_md,
#endif
memory::desc diff_dst_md)
: src_dims(src_dims),
diff_dst_dims(diff_dst_dims),
depth(depth),
eps(eps),
training(training),
data_format(data_format),
src_md(src_md),
#ifdef ENABLE_ONEDNN_V3
dst_md(dst_md),
diff_src_md(diff_src_md),
#endif
diff_dst_md(diff_dst_md) {
}
};
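// Wraps a oneDNN batch_normalization_backward primitive that computes
// gradients with respect to the input, scale and shift.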
template <typename T, typename U>
class MklFusedBatchNormBwdPrimitive : public MklPrimitive {
public:
explicit MklFusedBatchNormBwdPrimitive(const MklBatchNormBwdParams& bwdParams)
: MklPrimitive(engine(engine::kind::cpu, 0)) {
if (context_.bn_bwd == nullptr) Setup(bwdParams);
}
~MklFusedBatchNormBwdPrimitive() {}
void Execute(const T* src_data, const U* mean_data, const U* variance_data,
#ifndef ENABLE_ONEDNN_V3
const T* diff_dst_data, const U* scale_shift_data,
T* diff_src_data, U* diff_scale_shift_data, U* res_space_data,
#else
const T* diff_dst_data, const U* scale_data, T* diff_src_data,
U* diff_scale_data, U* diff_shift_data, U* res_space_data,
#endif
std::shared_ptr<stream> bwd_stream) {
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex_lock lock(primitive_execution_mu_);
#endif
#if !defined(ENABLE_ONEDNN_OPENMP) && !defined(ENABLE_ONEDNN_V3)
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)), *bwd_stream);
context_.mean_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(mean_data)), *bwd_stream);
context_.variance_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(variance_data)), *bwd_stream);
context_.diff_dst_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(diff_dst_data)), *bwd_stream);
if (IS_SET(use_scale_shift)) {
context_.scale_shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_shift_data)), *bwd_stream);
context_.diff_scale_shift_mem->set_data_handle(
static_cast<void*>(diff_scale_shift_data), *bwd_stream);
}
context_.diff_src_mem->set_data_handle(static_cast<void*>(diff_src_data),
*bwd_stream);
#else
context_.src_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(src_data)));
context_.mean_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(mean_data)));
context_.variance_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(variance_data)));
context_.diff_dst_mem->set_data_handle(
static_cast<void*>(const_cast<T*>(diff_dst_data)));
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_shift_data)));
context_.diff_scale_shift_mem->set_data_handle(
static_cast<void*>(diff_scale_shift_data));
#else
context_.scale_mem->set_data_handle(
static_cast<void*>(const_cast<U*>(scale_data)));
context_.diff_scale_mem->set_data_handle(
static_cast<void*>(diff_scale_data));
context_.diff_shift_mem->set_data_handle(
static_cast<void*>(diff_shift_data));
#endif
}
context_.diff_src_mem->set_data_handle(static_cast<void*>(diff_src_data));
#endif
DCHECK_EQ(context_.bwd_primitives.size(), context_.net_args.size());
execute_primitives(context_.bwd_primitives, bwd_stream, context_.net_args);
context_.src_mem->set_data_handle(DummyData);
context_.mean_mem->set_data_handle(DummyData);
context_.variance_mem->set_data_handle(DummyData);
context_.diff_dst_mem->set_data_handle(DummyData);
if (IS_SCALE_AND_SHIFT_FLAG_SET) {
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem->set_data_handle(DummyData);
context_.diff_scale_shift_mem->set_data_handle(DummyData);
#else
context_.scale_mem->set_data_handle(DummyData);
context_.diff_scale_mem->set_data_handle(DummyData);
context_.diff_shift_mem->set_data_handle(DummyData);
#endif
}
context_.diff_src_mem->set_data_handle(DummyData);
}
std::shared_ptr<BatchNormBwdPd> GetBatchNormBwdPd() const {
return context_.bwd_pd;
}
memory::desc GetDiffSrcPd() { return context_.diff_src_mem->get_desc(); }
private:
struct BatchNormBwdContext {
int64 flags;
std::shared_ptr<dnnl::memory> src_mem;
std::shared_ptr<dnnl::memory> mean_mem;
std::shared_ptr<dnnl::memory> variance_mem;
std::shared_ptr<dnnl::memory> diff_dst_mem;
#ifndef ENABLE_ONEDNN_V3
std::shared_ptr<dnnl::memory> scale_shift_mem;
std::shared_ptr<dnnl::memory> diff_scale_shift_mem;
#else
std::shared_ptr<dnnl::memory> scale_mem;
std::shared_ptr<dnnl::memory> diff_scale_mem;
std::shared_ptr<dnnl::memory> diff_shift_mem;
#endif
std::shared_ptr<dnnl::memory> diff_src_mem;
std::shared_ptr<BatchNormBwdPd> bwd_pd;
std::shared_ptr<dnnl::primitive> bn_bwd;
std::vector<dnnl::primitive> bwd_primitives;
std::vector<std::unordered_map<int, memory>> net_args;
BatchNormBwdContext()
: flags(0),
src_mem(nullptr),
mean_mem(nullptr),
variance_mem(nullptr),
diff_dst_mem(nullptr),
#ifndef ENABLE_ONEDNN_V3
scale_shift_mem(nullptr),
diff_scale_shift_mem(nullptr),
#else
scale_mem(nullptr),
diff_scale_mem(nullptr),
diff_shift_mem(nullptr),
#endif
diff_src_mem(nullptr) {
}
};
inline int64 GetBatchNormFlags(const MklBatchNormBwdParams& bwdParams) const {
return GET_SCALE_AND_SHIFT_FLAGS |
(bwdParams.training ? false : GET_FLAG(use_global_stats));
}
void Setup(const MklBatchNormBwdParams& bwdParams) {
context_.flags = GetBatchNormFlags(bwdParams);
auto src_md = bwdParams.src_md;
auto diff_dst_md = bwdParams.diff_dst_md;
auto variance_desc = memory::desc({1, bwdParams.depth}, MklDnnType<U>(),
memory::format_tag::nc);
auto mean_desc = memory::desc({1, bwdParams.depth}, MklDnnType<U>(),
memory::format_tag::nc);
#ifndef ENABLE_ONEDNN_V3
auto scale_shift_desc = memory::desc({2, bwdParams.depth}, MklDnnType<U>(),
memory::format_tag::nc);
#else
auto scale_shift_desc =
memory::desc({bwdParams.depth}, MklDnnType<U>(), memory::format_tag::x);
#endif
auto diff_scale_shift_desc = scale_shift_desc;
auto bn_flags = GetBatchNormFlags(bwdParams);
#ifndef ENABLE_ONEDNN_V3
auto fwd_desc = batch_normalization_forward::desc(
prop_kind::forward_training, src_md, bwdParams.eps,
static_cast<dnnl::normalization_flags>(bn_flags));
auto fwd_pd = BatchNormFwdPd(fwd_desc, cpu_engine_);
auto bwd_desc = batch_normalization_backward::desc(
prop_kind::backward, diff_dst_md, src_md, bwdParams.eps,
static_cast<dnnl::normalization_flags>(bn_flags));
context_.bwd_pd.reset(new BatchNormBwdPd(bwd_desc, cpu_engine_, fwd_pd));
#else
auto dst_md = bwdParams.dst_md;
auto diff_src_md = bwdParams.diff_src_md;
auto fwd_pd = BatchNormFwdPd(
cpu_engine_, prop_kind::forward_training, src_md, dst_md, bwdParams.eps,
static_cast<dnnl::normalization_flags>(bn_flags));
context_.bwd_pd.reset(new BatchNormBwdPd(
cpu_engine_, prop_kind::backward, diff_src_md, diff_dst_md, src_md,
bwdParams.eps, static_cast<dnnl::normalization_flags>(bn_flags),
fwd_pd));
#endif
context_.src_mem.reset(new memory(src_md, cpu_engine_, DummyData));
context_.diff_dst_mem.reset(
new memory(diff_dst_md, cpu_engine_, DummyData));
context_.variance_mem.reset(
new memory(variance_desc, cpu_engine_, DummyData));
context_.mean_mem.reset(new memory(mean_desc, cpu_engine_, DummyData));
#ifndef ENABLE_ONEDNN_V3
context_.scale_shift_mem.reset(
new memory(scale_shift_desc, cpu_engine_, DummyData));
context_.diff_scale_shift_mem.reset(
new memory(diff_scale_shift_desc, cpu_engine_, DummyData));
#else
context_.scale_mem.reset(
new memory(scale_shift_desc, cpu_engine_, DummyData));
context_.diff_scale_mem.reset(
new memory(diff_scale_shift_desc, cpu_engine_, DummyData));
context_.diff_shift_mem.reset(
new memory(diff_scale_shift_desc, cpu_engine_, DummyData));
#endif
context_.diff_src_mem.reset(new memory(src_md, cpu_engine_, DummyData));
context_.bn_bwd.reset(new batch_normalization_backward(*context_.bwd_pd));
context_.net_args.push_back(
{{DNNL_ARG_SRC, *context_.src_mem},
{DNNL_ARG_MEAN, *context_.mean_mem},
{DNNL_ARG_VARIANCE, *context_.variance_mem},
{DNNL_ARG_DIFF_DST, *context_.diff_dst_mem},
{DNNL_ARG_DIFF_SRC, *context_.diff_src_mem},
#ifndef ENABLE_ONEDNN_V3
{DNNL_ARG_SCALE_SHIFT, *context_.scale_shift_mem},
{ DNNL_ARG_DIFF_SCALE_SHIFT,
*context_.diff_scale_shift_mem }});
#else
{DNNL_ARG_SCALE, *context_.scale_mem},
{DNNL_ARG_DIFF_SCALE, *context_.diff_scale_mem},
{DNNL_ARG_DIFF_SHIFT, *context_.diff_shift_mem}});
#endif
context_.bwd_primitives.push_back(*context_.bn_bwd);
}
struct BatchNormBwdContext context_;
#if defined(DNNL_AARCH64_USE_ACL) && defined(ENABLE_ONEDNN_OPENMP)
mutex primitive_execution_mu_;
#endif
};
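// Factory that caches backward primitives, analogous to the forward factory.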
template <typename T, typename U>
class MklFusedBatchNormBwdPrimitiveFactory : public MklPrimitiveFactory<T> {
public:
static MklFusedBatchNormBwdPrimitive<T, U>* Get(
const MklBatchNormBwdParams& bwdParams) {
auto bn_bwd = static_cast<MklFusedBatchNormBwdPrimitive<T, U>*>(
MklFusedBatchNormBwdPrimitiveFactory<T, U>::GetInstance()
.GetBatchNormBwd(bwdParams));
if (bn_bwd == nullptr) {
bn_bwd = new MklFusedBatchNormBwdPrimitive<T, U>(bwdParams);
MklFusedBatchNormBwdPrimitiveFactory<T, U>::GetInstance().SetBatchNormBwd(
bwdParams, bn_bwd);
}
return bn_bwd;
}
static MklFusedBatchNormBwdPrimitiveFactory& GetInstance() {
static MklFusedBatchNormBwdPrimitiveFactory instance_;
return instance_;
}
private:
MklFusedBatchNormBwdPrimitiveFactory() {}
~MklFusedBatchNormBwdPrimitiveFactory() {}
static string CreateKey(const MklBatchNormBwdParams& bwdParams) {
string prefix = "bn_bwd";
FactoryKeyCreator key_creator;
key_creator.AddAsKey(prefix);
key_creator.AddAsKey(bwdParams.src_dims);
key_creator.AddAsKey(bwdParams.diff_dst_dims);
key_creator.AddAsKey<int>(bwdParams.depth);
key_creator.AddAsKey<float>(bwdParams.eps);
key_creator.AddAsKey<bool>(bwdParams.training);
key_creator.AddAsKey<TensorFormat>(bwdParams.data_format);
key_creator.AddAsKey(typeid(T).name());
key_creator.AddAsKey(typeid(U).name());
return key_creator.GetKey();
}
MklPrimitive* GetBatchNormBwd(const MklBatchNormBwdParams& bwdParams) {
string key = CreateKey(bwdParams);
return this->GetOp(key);
}
void SetBatchNormBwd(const MklBatchNormBwdParams& bwdParams,
MklPrimitive* op) {
string key = CreateKey(bwdParams);
this->SetOp(key, op);
}
};
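// Forward FusedBatchNorm kernel. `reserved_space` selects the V3/Ex variants
// that emit a workspace output, `is_batch_norm_ex` enables the fused-ReLU
// variant, and `native_format` selects the plain-TF-layout (name-change)
// registrations.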
template <typename Device, typename T, typename U, bool reserved_space,
bool is_batch_norm_ex = false, bool native_format = false>
class MklFusedBatchNormOp : public OpKernel {
public:
explicit MklFusedBatchNormOp(OpKernelConstruction* context)
: OpKernel(context) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
epsilon_ = epsilon;
float exponential_avg_factor;
OP_REQUIRES_OK(context, context->GetAttr("exponential_avg_factor",
&exponential_avg_factor));
exponential_avg_factor_ = static_cast<U>(exponential_avg_factor);
string tensor_format;
OP_REQUIRES_OK(context, context->GetAttr("data_format", &tensor_format));
OP_REQUIRES(context, FormatFromString(tensor_format, &tensor_format_),
absl::InvalidArgumentError("Invalid data format"));
OP_REQUIRES_OK(context, context->GetAttr("is_training", &is_training_));
depth_ = 0;
mean_values_ = nullptr;
variance_values_ = nullptr;
if (!is_batch_norm_ex) {
activation_mode_ = FusedBNActivationMode::kIdentity;
} else {
int num_side_inputs;
OP_REQUIRES_OK(context,
context->GetAttr("num_side_inputs", &num_side_inputs));
OP_REQUIRES(context, num_side_inputs == 0,
absl::InvalidArgumentError(
"_MKLFusedBatchNorm do not support side input now."));
OP_REQUIRES_OK(context, ParseActivationMode(context, &activation_mode_));
OP_REQUIRES(context, activation_mode_ == FusedBNActivationMode::kRelu,
absl::InvalidArgumentError(
"_MKLFusedBatchNorm only support Relu activation"));
}
}
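  // Validates inputs, fetches (or builds) the cached forward primitive, packs
  // scale/shift into the layout oneDNN expects, executes the primitive, and
  // post-processes the batch statistics (Bessel's correction of the variance
  // and blending with the running statistics via exponential_avg_factor).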
void Compute(OpKernelContext* context) override {
try {
const size_t kSrcIndex = 0;
const size_t kScaleIndex = 1;
const size_t kShiftIndex = 2;
const size_t kMeanIndex = 3;
const size_t kVarianceIndex = 4;
const Tensor& src_tensor = MklGetInput(context, kSrcIndex);
const Tensor& scale_tensor = MklGetInput(context, kScaleIndex);
const Tensor& shift_tensor = MklGetInput(context, kShiftIndex);
const Tensor& est_mean_tensor = MklGetInput(context, kMeanIndex);
const Tensor& est_variance_tensor = MklGetInput(context, kVarianceIndex);
TensorShape tf_shape_src;
MklDnnShape dnn_shape_src;
GetMklShape(context, kSrcIndex, &dnn_shape_src, native_format);
if (dnn_shape_src.IsMklTensor()) {
tf_shape_src = dnn_shape_src.GetTfShape();
OP_REQUIRES(context, dnn_shape_src.GetDimension() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
src_tensor.shape().DebugString())));
} else {
tf_shape_src = src_tensor.shape();
OP_REQUIRES(context, src_tensor.dims() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
src_tensor.shape().DebugString())));
}
OP_REQUIRES(context, scale_tensor.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("scale must be 1-dimensional",
scale_tensor.shape().DebugString())));
OP_REQUIRES(context, shift_tensor.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("offset must be 1-dimensional",
shift_tensor.shape().DebugString())));
OP_REQUIRES(context, est_mean_tensor.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("estimated_mean must be 1-dimensional",
est_mean_tensor.shape().DebugString())));
OP_REQUIRES(context, est_variance_tensor.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("estimated_variance must be 1-dimensional",
est_variance_tensor.shape().DebugString())));
int num_channels;
if (dnn_shape_src.IsMklTensor()) {
num_channels = dnn_shape_src.DimSize(MklDnnDims::Dim_C);
} else {
num_channels = GetTensorDim(src_tensor, tensor_format_, 'C');
}
OP_REQUIRES(context, scale_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"scale must have the same number of elements "
"as the channels of x, got ",
scale_tensor.NumElements(), " and ", num_channels)));
OP_REQUIRES(context, shift_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"offset must have the same number of elements "
"as the channels of x, got ",
shift_tensor.NumElements(), " and ", num_channels)));
if (!is_training_ || exponential_avg_factor_ != 1.) {
std::string prefix_msg = is_training_
? "When exponential_avg_factor != 1"
: "When is_training=false";
OP_REQUIRES(context, est_mean_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
prefix_msg,
", mean must have the same number "
"of elements as the channels of x, got ",
est_mean_tensor.NumElements(), " and ", num_channels)));
OP_REQUIRES(
context, est_variance_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
prefix_msg,
", variance must have the same "
"number of elements as the channels of x, got ",
est_variance_tensor.NumElements(), " and ", num_channels)));
}
Tensor* dst_tensor = nullptr;
TensorShape workspace_tf_shape;
if (tf_shape_src.num_elements() == 0) {
size_t workspace_bytes = 0;
workspace_tf_shape.AddDim(workspace_bytes);
HandleEmptyInput(context, tf_shape_src, workspace_tf_shape,
scale_tensor.shape(), &dst_tensor);
return;
}
if (dnn_shape_src.IsMklTensor())
depth_ = dnn_shape_src.DimSize(MklDnnDims::Dim_C);
else
ExtractParams(context);
const size_t kDstIndex = 0;
Tensor* batch_mean_tensor = nullptr;
Tensor* batch_variance_tensor = nullptr;
Tensor* saved_mean_tensor = nullptr;
Tensor* saved_variance_tensor = nullptr;
Tensor* reserved_space_tensor = nullptr;
MklDnnData<T> src(&cpu_engine_);
#ifndef ENABLE_ONEDNN_V3
MklDnnData<U> scale_shift(&cpu_engine_);
#else
MklDnnData<U> scale(&cpu_engine_);
MklDnnData<U> shift(&cpu_engine_);
#endif
MklDnnData<U> wksp(&cpu_engine_);
memory::format_tag dnn_fmt;
MklTensorFormat mkl_tensor_fmt;
if (dnn_shape_src.IsMklTensor()) {
if (dnn_shape_src.IsTensorInNCHWFormat()) {
dnn_fmt = memory::format_tag::nchw;
mkl_tensor_fmt = MklTensorFormat::FORMAT_NCHW;
} else {
dnn_fmt = memory::format_tag::nhwc;
mkl_tensor_fmt = MklTensorFormat::FORMAT_NHWC;
}
} else {
mkl_tensor_fmt = TFDataFormatToMklDnnDataFormat(tensor_format_);
dnn_fmt = MklTensorFormatToMklDnnDataFormat(mkl_tensor_fmt);
}
memory::dims src_dims =
dnn_shape_src.IsMklTensor()
? dnn_shape_src.GetSizesAsMklDnnDims()
: TFShapeToMklDnnDimsInNCHW(src_tensor.shape(), tensor_format_);
auto src_md = dnn_shape_src.IsMklTensor()
? dnn_shape_src.GetMklLayout()
: memory::desc(src_dims, MklDnnType<T>(), dnn_fmt);
#ifdef ENABLE_ONEDNN_V3
auto dst_md = memory::desc(src_dims, MklDnnType<T>(), dnn_fmt);
#endif
MklBatchNormFwdParams fwdParams(src_dims, depth_, epsilon_, is_training_,
#ifndef ENABLE_ONEDNN_V3
tensor_format_, src_md, activation_mode_);
#else
tensor_format_, src_md, dst_md,
activation_mode_);
#endif
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
MklFusedBatchNormFwdPrimitive<T, U>* bn_fwd =
MklFusedBatchNormFwdPrimitiveFactory<T, U>::Get(fwdParams);
U* ws_data = nullptr;
if (fwdParams.activation_mode == FusedBNActivationMode::kRelu) {
memory::desc workspace_md =
bn_fwd->GetBatchNormFwdPd()->workspace_desc();
size_t workspace_bytes = workspace_md.get_size();
workspace_tf_shape.AddDim(workspace_bytes);
AllocateTFOutputs(context, scale_tensor.shape(), workspace_tf_shape,
&batch_mean_tensor, &batch_variance_tensor,
&saved_mean_tensor, &saved_variance_tensor,
&reserved_space_tensor);
if (reserved_space) {
wksp.SetUsrMem(workspace_md, reserved_space_tensor);
ws_data = static_cast<U*>(wksp.GetOpMem().get_data_handle());
}
} else {
size_t workspace_bytes = 0;
workspace_tf_shape.AddDim(workspace_bytes);
AllocateTFOutputs(context, scale_tensor.shape(), workspace_tf_shape,
&batch_mean_tensor, &batch_variance_tensor,
&saved_mean_tensor, &saved_variance_tensor,
&reserved_space_tensor);
}
if (is_training_)
SetMeanVariance(*batch_mean_tensor, *batch_variance_tensor);
else
SetMeanVariance(est_mean_tensor, est_variance_tensor);
#ifndef ENABLE_ONEDNN_V3
scale_shift.AllocateBuffer(2 * depth_ * sizeof(U));
U* scale_shift_data =
reinterpret_cast<U*>(scale_shift.GetAllocatedBuffer());
const U* scale_tf = scale_tensor.flat<U>().data();
const U* shift_tf = shift_tensor.flat<U>().data();
std::memcpy(scale_shift_data, scale_tf, depth_ * sizeof(U));
std::memcpy(scale_shift_data + depth_, shift_tf, depth_ * sizeof(U));
#else
scale.AllocateBuffer(depth_ * sizeof(U));
U* scale_data = reinterpret_cast<U*>(scale.GetAllocatedBuffer());
shift.AllocateBuffer(depth_ * sizeof(U));
U* shift_data = reinterpret_cast<U*>(shift.GetAllocatedBuffer());
const U* scale_tf = scale_tensor.flat<U>().data();
const U* shift_tf = shift_tensor.flat<U>().data();
std::memcpy(scale_data, scale_tf, depth_ * sizeof(U));
std::memcpy(shift_data, shift_tf, depth_ * sizeof(U));
#endif
char* saved_mean_data_tf =
reinterpret_cast<char*>(saved_mean_tensor->flat<U>().data());
std::memcpy(saved_mean_data_tf, reinterpret_cast<char*>(mean_values_),
depth_ * sizeof(U));
char* saved_variance_data_tf =
reinterpret_cast<char*>(saved_variance_tensor->flat<U>().data());
std::memcpy(saved_variance_data_tf,
reinterpret_cast<char*>(variance_values_),
depth_ * sizeof(U));
const T* src_data = nullptr;
std::shared_ptr<BatchNormFwdPd> bn_fwd_pd = bn_fwd->GetBatchNormFwdPd();
if (!native_format && src_md != bn_fwd_pd->src_desc()) {
src.SetUsrMem(src_md, &src_tensor);
src.CheckReorderToOpMem(bn_fwd_pd->src_desc(), cpu_engine_, context);
src_data = static_cast<T*>(src.GetOpMem().get_data_handle());
} else {
src_data = static_cast<T*>(const_cast<T*>(src_tensor.flat<T>().data()));
}
MklDnnShape dnn_shape_dst;
TensorShape tf_shape_dst;
dnn_shape_dst.SetMklTensor(true);
auto dst_pd = bn_fwd->GetDstPd();
dnn_shape_dst.SET_MKL_LAYOUT(dst_pd);
dnn_shape_dst.SetElemType(MklDnnType<T>());
auto ndims = dnn_shape_src.IsMklTensor() ? dnn_shape_src.GetDimension()
: src_tensor.shape().dims();
dnn_shape_dst.SetTfLayout(ndims, src_dims, mkl_tensor_fmt);
tf_shape_dst.AddDim(dst_pd.get_size() / sizeof(T));
if (native_format) {
tf_shape_dst = dnn_shape_dst.GetTfShape();
}
AllocateOutputSetMklShape(context, kDstIndex, &dst_tensor, tf_shape_dst,
dnn_shape_dst, native_format);
#ifndef ENABLE_ONEDNN_V3
U* scale_shift_op_data = scale_shift_data;
#else
U* scale_op_data = scale_data;
U* shift_op_data = shift_data;
#endif
U* mean_op_data = saved_mean_tensor->flat<U>().data();
U* variance_op_data = saved_variance_tensor->flat<U>().data();
T* dst_data = dst_tensor->flat<T>().data();
std::shared_ptr<stream> fwd_cpu_stream;
fwd_cpu_stream.reset(CreateStream(&eigen_tp, bn_fwd->GetEngine()));
#ifndef ENABLE_ONEDNN_V3
bn_fwd->Execute(src_data, scale_shift_op_data, dst_data, mean_op_data,
#else
bn_fwd->Execute(src_data, scale_op_data, shift_op_data, dst_data,
mean_op_data,
#endif
variance_op_data, fwd_cpu_stream, ws_data);
float adjust_factor = 1.0;
if (is_training_) {
size_t orig_size = src_dims[0] * src_dims[2] * src_dims[3];
size_t adjust_size = (orig_size > 1) ? (orig_size - 1) : 1;
adjust_factor = (static_cast<float>(orig_size)) / adjust_size;
}
auto mean_data = reinterpret_cast<U*>(saved_mean_data_tf);
auto variance_data = reinterpret_cast<U*>(saved_variance_data_tf);
auto batch_mean_data = batch_mean_tensor->flat<U>().data();
auto batch_variance_data = batch_variance_tensor->flat<U>().data();
auto est_mean_data = est_mean_tensor.flat<U>().data();
auto est_variance_data = est_variance_tensor.flat<U>().data();
if (is_training_) {
if (exponential_avg_factor_ == U(1.0)) {
for (int k = 0; k < depth_; k++) {
batch_mean_data[k] = mean_data[k];
batch_variance_data[k] =
static_cast<U>(adjust_factor) * variance_data[k];
}
} else {
U one_minus_factor = U(1.0) - exponential_avg_factor_;
for (int k = 0; k < depth_; k++) {
batch_mean_data[k] = one_minus_factor * est_mean_data[k] +
exponential_avg_factor_ * mean_data[k];
batch_variance_data[k] = one_minus_factor * est_variance_data[k] +
exponential_avg_factor_ *
static_cast<U>(adjust_factor) *
variance_data[k];
}
}
} else {
std::memcpy(batch_mean_data, mean_data, depth_ * sizeof(U));
std::memcpy(batch_variance_data, variance_data, depth_ * sizeof(U));
}
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(context,
absl::AbortedError(absl::StrCat(
"Operation received an exception:", error_msg)));
}
}
private:
float epsilon_;
U exponential_avg_factor_;
TensorFormat tensor_format_;
bool is_training_;
U* mean_values_;
U* variance_values_;
size_t depth_;
FusedBNActivationMode activation_mode_;
engine cpu_engine_ = engine(engine::kind::cpu, 0);
void ExtractParams(OpKernelContext* context) {
const Tensor& input = MklGetInput(context, 0);
depth_ = static_cast<int>(GetTensorDim(input, tensor_format_, 'C'));
}
void SetMeanVariance(const Tensor& mean, const Tensor& variance) {
mean_values_ = reinterpret_cast<U*>(const_cast<U*>(mean.flat<U>().data()));
variance_values_ =
reinterpret_cast<U*>(const_cast<U*>(variance.flat<U>().data()));
}
void HandleEmptyInput(OpKernelContext* context, TensorShape tf_shape_src,
TensorShape workspace_tf_shape,
TensorShape tf_shape_scale, Tensor** dst_tensor) {
DCHECK(dst_tensor);
const size_t kDstIndex = 0;
MklDnnShape dnn_shape_dst;
dnn_shape_dst.SetMklTensor(false);
AllocateOutputSetMklShape(context, kDstIndex, dst_tensor, tf_shape_src,
dnn_shape_dst, native_format);
DCHECK(*dst_tensor);
memset(const_cast<char*>((*dst_tensor)->tensor_data().data()), 0,
(*dst_tensor)->tensor_data().size());
Tensor* batch_mean_tensor = nullptr;
Tensor* batch_variance_tensor = nullptr;
Tensor* saved_mean_tensor = nullptr;
Tensor* saved_variance_tensor = nullptr;
Tensor* reserved_space_tensor = nullptr;
AllocateTFOutputs(context, tf_shape_scale, workspace_tf_shape,
&batch_mean_tensor, &batch_variance_tensor,
&saved_mean_tensor, &saved_variance_tensor,
&reserved_space_tensor);
}
void AllocateTFOutputs(OpKernelContext* context, TensorShape tf_shape_scale,
TensorShape workspace_tf_shape,
Tensor** batch_mean_tensor,
Tensor** batch_variance_tensor,
Tensor** saved_mean_tensor,
Tensor** saved_variance_tensor,
Tensor** reserved_space_tensor) {
DCHECK(batch_mean_tensor);
DCHECK(batch_variance_tensor);
DCHECK(saved_mean_tensor);
DCHECK(saved_variance_tensor);
const size_t kBatchMeanIndex = 1;
const size_t kBatchVarianceIndex = 2;
const size_t kSavedMeanIndex = 3;
const size_t kSavedVarianceIndex = 4;
const size_t kReservedSpaceIndex = 5;
MklDnnShape mkl_shape_batch_mean;
mkl_shape_batch_mean.SetMklTensor(false);
AllocateOutputSetMklShape(context, kBatchMeanIndex, batch_mean_tensor,
tf_shape_scale, mkl_shape_batch_mean,
native_format);
DCHECK(*batch_mean_tensor);
int num_elements = tf_shape_scale.num_elements();
auto batch_mean_data = (*batch_mean_tensor)->flat<U>().data();
std::fill_n(batch_mean_data, num_elements, static_cast<U>(NAN));
MklDnnShape mkl_shape_batch_variance;
mkl_shape_batch_variance.SetMklTensor(false);
AllocateOutputSetMklShape(context, kBatchVarianceIndex,
batch_variance_tensor, tf_shape_scale,
mkl_shape_batch_variance, native_format);
DCHECK(*batch_variance_tensor);
auto batch_variance_data = (*batch_variance_tensor)->flat<U>().data();
std::fill_n(batch_variance_data, num_elements, static_cast<U>(NAN));
MklDnnShape mkl_shape_saved_mean;
mkl_shape_saved_mean.SetMklTensor(false);
AllocateOutputSetMklShape(context, kSavedMeanIndex, saved_mean_tensor,
tf_shape_scale, mkl_shape_saved_mean,
native_format);
DCHECK(*saved_mean_tensor);
auto saved_mean_data = (*saved_mean_tensor)->flat<U>().data();
std::fill_n(saved_mean_data, num_elements, static_cast<U>(0));
MklDnnShape mkl_shape_saved_variance;
mkl_shape_saved_variance.SetMklTensor(false);
AllocateOutputSetMklShape(context, kSavedVarianceIndex,
saved_variance_tensor, tf_shape_scale,
mkl_shape_saved_variance, native_format);
DCHECK(*saved_variance_tensor);
auto saved_variance_data = (*saved_variance_tensor)->flat<U>().data();
std::fill_n(saved_variance_data, num_elements, static_cast<U>(0));
if (reserved_space) {
DCHECK(reserved_space_tensor != nullptr);
MklDnnShape mkl_shape_reserved_space;
mkl_shape_reserved_space.SetMklTensor(false);
AllocateOutputSetMklShape(context, kReservedSpaceIndex,
reserved_space_tensor, workspace_tf_shape,
mkl_shape_reserved_space, native_format);
DCHECK((*reserved_space_tensor) != nullptr);
}
}
};
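// Backward FusedBatchNormGrad kernel: computes gradients for the input, scale
// and offset using the cached backward primitive.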
template <typename Device, typename T, typename U, bool reserved_space,
bool native_format = false>
class MklFusedBatchNormGradOp : public OpKernel {
public:
explicit MklFusedBatchNormGradOp(OpKernelConstruction* context)
: OpKernel(context) {
float epsilon;
OP_REQUIRES_OK(context, context->GetAttr("epsilon", &epsilon));
epsilon_ = epsilon;
string tensor_format;
OP_REQUIRES_OK(context, context->GetAttr("data_format", &tensor_format));
OP_REQUIRES(context, FormatFromString(tensor_format, &tensor_format_),
absl::InvalidArgumentError("Invalid data format"));
OP_REQUIRES_OK(context, context->GetAttr("is_training", &is_training_));
depth_ = 0;
}
void Compute(OpKernelContext* context) override {
try {
const size_t kDiffDstIndex = 0;
const size_t kSrcIndex = 1;
const size_t kScaleIndex = 2;
const size_t kMeanIndex = 3;
const size_t kVarianceIndex = 4;
const size_t kReservedSpaceIndex = 5;
const Tensor& diff_dst_tensor = MklGetInput(context, kDiffDstIndex);
const Tensor& src_tensor = MklGetInput(context, kSrcIndex);
const Tensor& scale_tensor = MklGetInput(context, kScaleIndex);
const Tensor& saved_mean_tensor = MklGetInput(context, kMeanIndex);
const Tensor& saved_variance_tensor =
MklGetInput(context, kVarianceIndex);
const Tensor& reserved_space_tensor =
(reserved_space) ? MklGetInput(context, kReservedSpaceIndex)
: Tensor();
MklDnnShape dnn_shape_src, dnn_shape_diff_dst;
GetMklShape(context, kSrcIndex, &dnn_shape_src, native_format);
GetMklShape(context, kDiffDstIndex, &dnn_shape_diff_dst, native_format);
TensorShape tf_shape_src, tf_shape_diff_dst;
if (dnn_shape_diff_dst.IsMklTensor()) {
tf_shape_diff_dst = dnn_shape_diff_dst.GetTfShape();
OP_REQUIRES(context, dnn_shape_diff_dst.GetDimension() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
diff_dst_tensor.shape().DebugString())));
} else {
tf_shape_diff_dst = diff_dst_tensor.shape();
OP_REQUIRES(context, diff_dst_tensor.dims() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
diff_dst_tensor.shape().DebugString())));
}
if (dnn_shape_src.IsMklTensor()) {
tf_shape_src = dnn_shape_src.GetTfShape();
OP_REQUIRES(context, dnn_shape_src.GetDimension() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
src_tensor.shape().DebugString())));
} else {
tf_shape_src = src_tensor.shape();
OP_REQUIRES(context, src_tensor.dims() == 4,
absl::InvalidArgumentError(
absl::StrCat("input must be 4-dimensional",
src_tensor.shape().DebugString())));
}
OP_REQUIRES(context, scale_tensor.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("scale must be 1-dimensional",
scale_tensor.shape().DebugString())));
OP_REQUIRES(context, saved_mean_tensor.dims() == 1,
absl::InvalidArgumentError(
absl::StrCat("saved mean must be 1-dimensional",
saved_mean_tensor.shape().DebugString())));
OP_REQUIRES(context, saved_variance_tensor.dims() == 1,
absl::InvalidArgumentError(absl::StrCat(
"saved variance must be 1-dimensional",
saved_variance_tensor.shape().DebugString())));
OP_REQUIRES(
context, tf_shape_src == tf_shape_diff_dst,
absl::InvalidArgumentError(absl::StrCat(
"x and y_backprop must have same shape, but x has shape ",
src_tensor.shape().DebugString(), " and y_backprop has shape ",
diff_dst_tensor.shape().DebugString())));
int num_channels;
if (dnn_shape_src.IsMklTensor()) {
num_channels = dnn_shape_src.DimSize(MklDnnDims::Dim_C);
} else {
num_channels = GetTensorDim(src_tensor, tensor_format_, 'C');
}
OP_REQUIRES(context, scale_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"scale must have the same number of elements "
"as the channels of x, got ",
scale_tensor.NumElements(), " and ", num_channels)));
OP_REQUIRES(context, saved_mean_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"reserve_space_1 must have the same number of "
"elements as the channels of x, got ",
saved_mean_tensor.NumElements(), " and ", num_channels)));
OP_REQUIRES(
context, saved_variance_tensor.NumElements() == num_channels,
absl::InvalidArgumentError(absl::StrCat(
"reserve_space_2 must have the same number of "
"elements as the channels of x, got ",
saved_variance_tensor.NumElements(), " and ", num_channels)));
Tensor* diff_src_tensor = nullptr;
if (tf_shape_src.num_elements() == 0 ||
tf_shape_diff_dst.num_elements() == 0) {
HandleEmptyInput(context, tf_shape_src, scale_tensor.shape(),
&diff_src_tensor);
return;
}
if (dnn_shape_src.IsMklTensor()) {
depth_ = dnn_shape_src.DimSize(MklDnnDims::Dim_C);
} else if (dnn_shape_diff_dst.IsMklTensor()) {
depth_ = dnn_shape_diff_dst.DimSize(MklDnnDims::Dim_C);
} else {
ExtractParams(context);
}
memory::format_tag dnn_fmt;
MklTensorFormat mkl_tensor_fmt;
if (dnn_shape_src.IsMklTensor()) {
if (dnn_shape_src.IsTensorInNCHWFormat()) {
dnn_fmt = memory::format_tag::nchw;
mkl_tensor_fmt = MklTensorFormat::FORMAT_NCHW;
} else {
dnn_fmt = memory::format_tag::nhwc;
mkl_tensor_fmt = MklTensorFormat::FORMAT_NHWC;
}
} else {
mkl_tensor_fmt = TFDataFormatToMklDnnDataFormat(tensor_format_);
dnn_fmt = MklTensorFormatToMklDnnDataFormat(mkl_tensor_fmt);
}
MklDnnData<T> src(&cpu_engine_);
MklDnnData<T> diff_dst(&cpu_engine_);
#ifndef ENABLE_ONEDNN_V3
MklDnnData<U> scale_shift(&cpu_engine_);
MklDnnData<U> diff_scale_shift(&cpu_engine_);
#else
MklDnnData<U> scale(&cpu_engine_);
MklDnnData<U> diff_scale(&cpu_engine_);
MklDnnData<U> diff_shift(&cpu_engine_);
#endif
memory::dims src_dims =
dnn_shape_src.IsMklTensor()
? dnn_shape_src.GetSizesAsMklDnnDims()
: TFShapeToMklDnnDimsInNCHW(src_tensor.shape(), tensor_format_);
memory::dims diff_dst_dims =
dnn_shape_diff_dst.IsMklTensor()
? dnn_shape_diff_dst.GetSizesAsMklDnnDims()
: TFShapeToMklDnnDimsInNCHW(diff_dst_tensor.shape(),
tensor_format_);
memory::desc src_md =
dnn_shape_src.IsMklTensor()
? dnn_shape_src.GetMklLayout()
: memory::desc(src_dims, MklDnnType<T>(), dnn_fmt);
memory::desc diff_dst_md =
dnn_shape_diff_dst.IsMklTensor()
? dnn_shape_diff_dst.GetMklLayout()
: memory::desc(diff_dst_dims, MklDnnType<T>(), dnn_fmt);
#ifdef ENABLE_ONEDNN_V3
memory::desc dst_md = memory::desc(src_dims, MklDnnType<T>(), dnn_fmt);
memory::desc diff_src_md =
memory::desc(diff_dst_dims, MklDnnType<T>(), dnn_fmt);
#endif
MklDnnData<T> reorder_src(&cpu_engine_);
MklDnnData<T> reorder_diff_dst(&cpu_engine_);
T* diff_dst_data =
static_cast<T*>(const_cast<T*>(diff_dst_tensor.flat<T>().data()));
T* src_data =
static_cast<T*>(const_cast<T*>(src_tensor.flat<T>().data()));
if (!native_format) {
if (dnn_shape_src.IsMklTensor() && !dnn_shape_diff_dst.IsMklTensor()) {
reorder_diff_dst.SetUsrMem(diff_dst_md, &diff_dst_tensor);
reorder_diff_dst.CheckReorderToOpMem(src_md, cpu_engine_, context);
diff_dst_md = src_md;
diff_dst_data =
static_cast<T*>(reorder_diff_dst.GetOpMem().get_data_handle());
} else if (!dnn_shape_src.IsMklTensor() &&
dnn_shape_diff_dst.IsMklTensor()) {
reorder_src.SetUsrMem(src_md, &src_tensor);
reorder_src.CheckReorderToOpMem(diff_dst_md, cpu_engine_, context);
src_md = diff_dst_md;
src_data = static_cast<T*>(reorder_src.GetOpMem().get_data_handle());
}
}
#ifndef ENABLE_ONEDNN_V3
scale_shift.AllocateBuffer(2 * depth_ * sizeof(U));
U* scale_shift_data_tf =
reinterpret_cast<U*>(scale_shift.GetAllocatedBuffer());
const U* scale_tf = scale_tensor.flat<U>().data();
for (int k = 0; k < depth_; k++) {
scale_shift_data_tf[k] = scale_tf[k];
scale_shift_data_tf[k + depth_] = static_cast<U>(0);
}
diff_scale_shift.AllocateBuffer(2 * depth_ * sizeof(U));
#else
scale.AllocateBuffer(depth_ * sizeof(U));
U* scale_data_tf = reinterpret_cast<U*>(scale.GetAllocatedBuffer());
const U* scale_tf = scale_tensor.flat<U>().data();
std::memcpy(scale_data_tf, scale_tf, depth_ * sizeof(U));
diff_scale.AllocateBuffer(depth_ * sizeof(U));
diff_shift.AllocateBuffer(depth_ * sizeof(U));
#endif
MklBatchNormBwdParams bwdParams(src_dims, diff_dst_dims, depth_, epsilon_,
is_training_, tensor_format_, src_md,
#ifdef ENABLE_ONEDNN_V3
dst_md, diff_src_md,
#endif
diff_dst_md);
Eigen::ThreadPoolInterface* eigen_interface =
EigenThreadPoolFromTfContext(context);
tsl::OneDnnThreadPool eigen_tp(eigen_interface,
ThreadPoolUseCallerThread());
MklFusedBatchNormBwdPrimitive<T, U>* bn_bwd =
MklFusedBatchNormBwdPrimitiveFactory<T, U>::Get(bwdParams);
std::shared_ptr<BatchNormBwdPd> bn_bwd_pd = bn_bwd->GetBatchNormBwdPd();
if (!native_format && diff_dst_md != bn_bwd_pd->diff_dst_desc()) {
diff_dst.SetUsrMem(diff_dst_md, diff_dst_data);
diff_dst.CheckReorderToOpMem(bn_bwd_pd->diff_dst_desc(), cpu_engine_,
context);
diff_dst_data = static_cast<T*>(diff_dst.GetOpMem().get_data_handle());
}
if (!native_format && (src_md != bn_bwd_pd->src_desc())) {
src.SetUsrMem(src_md, src_data);
src.CheckReorderToOpMem(bn_bwd_pd->src_desc(), cpu_engine_, context);
src_data = static_cast<T*>(src.GetOpMem().get_data_handle());
}
const size_t kDiffSrcIndex = 0;
MklDnnShape dnn_shape_diff_src;
TensorShape tf_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(true);
auto diff_src_pd = bn_bwd->GetDiffSrcPd();
dnn_shape_diff_src.SET_MKL_LAYOUT(diff_src_pd);
dnn_shape_diff_src.SetElemType(MklDnnType<T>());
dnn_shape_diff_src.SetTfLayout(src_dims.size(), src_dims, mkl_tensor_fmt);
dnn_shape_diff_src.SetTfDimOrder(src_dims.size(), tensor_format_);
tf_shape_diff_src.AddDim(diff_src_pd.get_size() / sizeof(T));
if (native_format) {
tf_shape_diff_src = dnn_shape_diff_src.GetTfShape();
}
AllocateOutputSetMklShape(context, kDiffSrcIndex, &diff_src_tensor,
tf_shape_diff_src, dnn_shape_diff_src,
native_format);
U* mean_data =
static_cast<U*>(const_cast<U*>(saved_mean_tensor.flat<U>().data()));
U* variance_data = static_cast<U*>(
const_cast<U*>(saved_variance_tensor.flat<U>().data()));
#ifndef ENABLE_ONEDNN_V3
U* scale_shift_data = scale_shift_data_tf;
U* diff_scale_shift_data =
static_cast<U*>(diff_scale_shift.GetAllocatedBuffer());
#else
U* scale_data = scale_data_tf;
U* diff_scale_data = static_cast<U*>(diff_scale.GetAllocatedBuffer());
U* diff_shift_data = static_cast<U*>(diff_shift.GetAllocatedBuffer());
#endif
T* diff_src_data = static_cast<T*>(diff_src_tensor->flat<T>().data());
U* res_space_data =
((reserved_space) ? static_cast<U*>(const_cast<U*>(
reserved_space_tensor.flat<U>().data()))
: nullptr);
std::shared_ptr<stream> bwd_cpu_stream;
bwd_cpu_stream.reset(CreateStream(&eigen_tp, bn_bwd->GetEngine()));
bn_bwd->Execute(src_data, mean_data, variance_data, diff_dst_data,
GET_SCALE_DATA_BUFFER, diff_src_data,
GET_DIFF_SCALE_SHIFT_DATA_BUFFERS, res_space_data,
bwd_cpu_stream);
Tensor* diff_scale_tensor = nullptr;
Tensor* diff_shift_tensor = nullptr;
AllocateTFOutputs(context, scale_tensor.shape(), &diff_scale_tensor,
&diff_shift_tensor);
auto diff_scale_data_out = diff_scale_tensor->flat<U>().data();
auto diff_shift_data_out = diff_shift_tensor->flat<U>().data();
std::memcpy(reinterpret_cast<char*>(diff_scale_data_out),
reinterpret_cast<char*>(GET_DIFF_SCALE_DATA_BUFFER),
depth_ * sizeof(U));
std::memcpy(reinterpret_cast<char*>(diff_shift_data_out),
reinterpret_cast<char*>(GET_DIFF_SHIFT_DATA_BUFFER),
depth_ * sizeof(U));
} catch (dnnl::error& e) {
string error_msg = "Status: " + std::to_string(e.status) +
", message: " + string(e.message) + ", in file " +
string(__FILE__) + ":" + std::to_string(__LINE__);
OP_REQUIRES_OK(context,
absl::AbortedError(absl::StrCat(
"Operation received an exception:", error_msg)));
}
}
private:
float epsilon_;
TensorFormat tensor_format_;
size_t depth_;
bool is_training_;
engine cpu_engine_ = engine(engine::kind::cpu, 0);
void ExtractParams(OpKernelContext* context) {
const Tensor& input = MklGetInput(context, 0);
depth_ = static_cast<int>(GetTensorDim(input, tensor_format_, 'C'));
}
void HandleEmptyInput(OpKernelContext* context, TensorShape tf_shape_src,
TensorShape tf_shape_scale_shift,
Tensor** diff_src_tensor) {
const size_t kDiffSrcIndex = 0;
MklDnnShape dnn_shape_diff_src;
dnn_shape_diff_src.SetMklTensor(false);
AllocateOutputSetMklShape(context, kDiffSrcIndex, diff_src_tensor,
tf_shape_src, dnn_shape_diff_src, native_format);
auto diff_src_data = (*diff_src_tensor)->flat<T>().data();
std::fill_n(diff_src_data, (*diff_src_tensor)->shape().num_elements(),
static_cast<T>(0));
Tensor* diff_scale_tensor = nullptr;
Tensor* diff_shift_tensor = nullptr;
AllocateTFOutputs(context, tf_shape_scale_shift, &diff_scale_tensor,
&diff_shift_tensor);
}
void AllocateTFOutputs(OpKernelContext* context,
TensorShape tf_shape_scale_shift,
Tensor** diff_scale_tensor,
Tensor** diff_shift_tensor) {
DCHECK(diff_scale_tensor);
DCHECK(diff_shift_tensor);
const size_t kDiffScaleIndex = 1;
const size_t kDiffShiftIndex = 2;
const size_t kP1Index = 3;
const size_t kP2Index = 4;
MklDnnShape mkl_shape_diff_scale;
mkl_shape_diff_scale.SetMklTensor(false);
AllocateOutputSetMklShape(context, kDiffScaleIndex, diff_scale_tensor,
tf_shape_scale_shift, mkl_shape_diff_scale,
native_format);
DCHECK(*diff_scale_tensor);
auto diff_scale_data = (*diff_scale_tensor)->flat<U>().data();
std::fill_n(diff_scale_data, (*diff_scale_tensor)->shape().num_elements(),
static_cast<U>(0));
MklDnnShape mkl_shape_diff_shift;
mkl_shape_diff_shift.SetMklTensor(false);
AllocateOutputSetMklShape(context, kDiffShiftIndex, diff_shift_tensor,
tf_shape_scale_shift, mkl_shape_diff_shift,
native_format);
DCHECK(*diff_shift_tensor);
auto diff_shift_data = (*diff_shift_tensor)->flat<U>().data();
std::fill_n(diff_shift_data, (*diff_shift_tensor)->shape().num_elements(),
static_cast<U>(0));
Tensor *p1_tensor = nullptr, *p2_tensor = nullptr;
MklDnnShape mkl_shape_p;
mkl_shape_p.SetMklTensor(false);
AllocateOutputSetMklShape(context, kP1Index, &p1_tensor, TensorShape({}),
mkl_shape_p, native_format);
std::fill_n(p1_tensor->flat<U>().data(), p1_tensor->shape().num_elements(),
static_cast<U>(0));
AllocateOutputSetMklShape(context, kP2Index, &p2_tensor, TensorShape({}),
mkl_shape_p, native_format);
std::fill_n(p2_tensor->flat<U>().data(), p2_tensor->shape().num_elements(),
static_cast<U>(0));
}
memory::dims GetMeanVarianceDims() { return memory::dims({1, depth_}); }
};
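// Kernel registrations for the MKL layout-dependent and native-format variants
// of _MklFusedBatchNorm and its V2/V3/Ex and Grad counterparts.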
#define REGISTER_MKL_FUSED_BATCHNORM_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNorm") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, T, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNorm") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, T, false, false, true>);
TF_CALL_float(REGISTER_MKL_FUSED_BATCHNORM_CPU);
TF_CALL_bfloat16(REGISTER_MKL_FUSED_BATCHNORM_CPU);
TF_CALL_half(REGISTER_MKL_FUSED_BATCHNORM_CPU);
#undef REGISTER_MKL_FUSED_BATCHNORM_CPU
#define REGISTER_MKL_FUSED_BATCHNORM_V2_CPU(T, U) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, false, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, false, false, true>);
REGISTER_MKL_FUSED_BATCHNORM_V2_CPU(float, float);
REGISTER_MKL_FUSED_BATCHNORM_V2_CPU(bfloat16, float);
REGISTER_MKL_FUSED_BATCHNORM_V2_CPU(Eigen::half, float);
#undef REGISTER_MKL_FUSED_BATCHNORM_V2_CPU
#define REGISTER_MKL_FUSED_BATCHNORM_GRAD_CPU(T) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, T, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormGrad") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, T, false, true>);
TF_CALL_float(REGISTER_MKL_FUSED_BATCHNORM_GRAD_CPU);
TF_CALL_bfloat16(REGISTER_MKL_FUSED_BATCHNORM_GRAD_CPU);
#undef REGISTER_MKL_FUSED_BATCHNORM_GRAD_CPU
#define REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU(T, U) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormGradV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, U, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormGradV2") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, U, false, true>);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU(float, float);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU(bfloat16, float);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU(Eigen::half, float);
#undef REGISTER_MKL_FUSED_BATCHNORM_GRAD_V2_CPU
#define REGISTER_MKL_FUSED_BATCHNORM_V3_CPU(T, U) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormV3") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, true, false>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormEx") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, true, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormV3") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, true, false, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormEx") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormOp<CPUDevice, T, U, true, true, true>);
REGISTER_MKL_FUSED_BATCHNORM_V3_CPU(float, float);
REGISTER_MKL_FUSED_BATCHNORM_V3_CPU(bfloat16, float);
REGISTER_MKL_FUSED_BATCHNORM_V3_CPU(Eigen::half, float);
#undef REGISTER_MKL_FUSED_BATCHNORM_V3_CPU
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx")
.Device(DEVICE_CPU)
.TypeConstraint<float>("T")
.TypeConstraint<float>("U"),
NoOp);
REGISTER_KERNEL_BUILDER(Name("_FusedBatchNormEx")
.Device(DEVICE_CPU)
.TypeConstraint<bfloat16>("T")
.TypeConstraint<float>("U"),
NoOp);
#define REGISTER_MKL_FUSED_BATCHNORM_GRAD_V3_CPU(T, U) \
REGISTER_KERNEL_BUILDER( \
Name("_MklFusedBatchNormGradV3") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklLayoutDependentOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, U, true>); \
REGISTER_KERNEL_BUILDER( \
Name("_MklNativeFusedBatchNormGradV3") \
.Device(DEVICE_CPU) \
.TypeConstraint<T>("T") \
.TypeConstraint<U>("U") \
.Label(mkl_op_registry::kMklNameChangeOpLabel), \
MklFusedBatchNormGradOp<CPUDevice, T, U, true, true>);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V3_CPU(float, float);
REGISTER_MKL_FUSED_BATCHNORM_GRAD_V3_CPU(bfloat16, float);
#undef REGISTER_MKL_FUSED_BATCHNORM_GRAD_V3_CPU
}
#undef FORWARD_INFERENCE
#undef GET_DIFF_SCALE_DATA_BUFFER
#undef GET_DIFF_SCALE_SHIFT_DATA_BUFFERS
#undef GET_DIFF_SHIFT_DATA_BUFFER
#undef GET_SCALE_AND_SHIFT_FLAGS
#undef GET_SCALE_DATA_BUFFER
#undef IS_SCALE_AND_SHIFT_FLAG_SET
#undef SCALE_SHIFT_NET_ARGS
#undef SET_MKL_LAYOUT
#endif | #ifdef INTEL_MKL
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/cc/ops/nn_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/kernels/conv_ops_gpu.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/public/session.h"
#include "tensorflow/core/util/util.h"
namespace tensorflow {
static const uint8 dummy_tensor[] = {0, 0, 0, 0, 0, 0, 0, 0};
static const TensorShape dummy_shape({8});
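// Signatures of the callbacks that run the reference and MKL graphs whose
// outputs are compared by the tests below.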
using GraphRunner = std::function<void(
const Tensor& input, const Tensor& scale, const Tensor& offset,
const Tensor& mean, const Tensor& variance,
const float exponential_avg_factor, const bool is_training, Tensor* output,
Tensor* batch_mean, Tensor* batch_var)>;
using GraphRunnerGrad = std::function<void(
const Tensor& input, const Tensor& filter, const Tensor& y_backprop,
const Tensor& scale, const Tensor& mean, const Tensor& variance,
const Tensor& res_sp3, Tensor* output, Tensor* scale_backprop,
Tensor* offset_backprop, bool disable_grappler_opts)>;
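// Runs the reference and MKL implementations on identical random inputs and
// checks that the outputs, batch statistics and gradients are close.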
template <typename T>
class CommonTestUtilities : public OpsTestBase {
public:
void TestBody() {}
static void VerifyTensorsClose(const float exponential_avg_factor,
const bool is_training, const GraphRunner& run,
const GraphRunner& run_mkl) {
int batch = 1;
int height = 10;
int width = 10;
int depth = 3;
DataType dtype = DataTypeToEnum<T>::v();
Tensor input(dtype, {batch, height, width, depth});
input.flat<T>() = input.flat<T>().template setRandom<random_gen_>();
Tensor scale(dtype, {depth});
scale.flat<T>() = scale.flat<T>().template setRandom<random_gen_>();
Tensor offset(dtype, {depth});
offset.flat<T>() = offset.flat<T>().template setRandom<random_gen_>();
if (is_training && (exponential_avg_factor == 1.0)) {
depth = 0;
}
Tensor mean(dtype, {depth});
mean.flat<T>() = mean.flat<T>().template setRandom<random_gen_>();
Tensor variance(dtype, {depth});
variance.flat<T>() =
variance.flat<T>().template setRandom<random_gen_>().abs();
Tensor output;
Tensor batch_mean;
Tensor batch_var;
Tensor mkl_output;
Tensor mkl_batch_mean;
Tensor mkl_batch_var;
run(input, scale, offset, mean, variance, exponential_avg_factor,
is_training, &output, &batch_mean, &batch_var);
run_mkl(input, scale, offset, mean, variance, exponential_avg_factor,
is_training, &mkl_output, &mkl_batch_mean, &mkl_batch_var);
ASSERT_EQ(output.dtype(), mkl_output.dtype());
ASSERT_EQ(output.shape(), mkl_output.shape());
ASSERT_EQ(batch_mean.dtype(), mkl_batch_mean.dtype());
ASSERT_EQ(batch_mean.shape(), mkl_batch_mean.shape());
ASSERT_EQ(batch_var.dtype(), mkl_batch_var.dtype());
ASSERT_EQ(batch_var.shape(), mkl_batch_var.shape());
test::ExpectClose(output, mkl_output, 1e-5);
test::ExpectClose(batch_mean, mkl_batch_mean, 1e-5);
test::ExpectClose(batch_var, mkl_batch_var, 1e-5);
}
static void VerifyTensorsCloseForGrad(const float epsilon,
const GraphRunnerGrad& run,
const GraphRunnerGrad& run_mkl) {
int batch = 2;
int height = 8;
int width = 8;
int depth = 1;
int filter_height = 3;
int filter_width = 3;
int in_channels = 1;
int out_channels = 6;
DataType dtype = DataTypeToEnum<T>::v();
Tensor input(dtype, {batch, height, width, depth});
input.flat<T>() = input.flat<T>().template setRandom<random_gen_>();
Tensor filter(dtype,
{filter_height, filter_width, in_channels, out_channels});
filter.flat<T>() = filter.flat<T>().template setRandom<random_gen_>();
Tensor y_backprop(dtype, {batch, height, width, out_channels});
y_backprop.flat<T>() =
y_backprop.flat<T>().template setRandom<random_gen_>();
Tensor scale(dtype, {out_channels});
scale.flat<T>() = scale.flat<T>().template setRandom<random_gen_>();
Tensor mean(dtype, {out_channels});
mean.flat<T>() = mean.flat<T>().template setRandom<random_gen_>();
Tensor variance(dtype, {out_channels});
variance.flat<T>() =
variance.flat<T>().template setRandom<random_gen_>().abs();
Tensor res_sp3(dtype, {out_channels});
res_sp3.flat<T>() =
res_sp3.flat<T>().template setRandom<random_gen_>().abs();
Tensor output;
Tensor scale_backprop;
Tensor offset_backprop;
Tensor mkl_output;
Tensor mkl_scale_backprop;
Tensor mkl_offset_backprop;
run(input, filter, y_backprop, scale, mean, variance, res_sp3, &output,
&scale_backprop, &offset_backprop, epsilon);
run_mkl(input, filter, y_backprop, scale, mean, variance, res_sp3,
&mkl_output, &mkl_scale_backprop, &mkl_offset_backprop, epsilon);
ASSERT_EQ(output.dtype(), mkl_output.dtype());
ASSERT_EQ(output.shape(), mkl_output.shape());
ASSERT_EQ(scale_backprop.dtype(), mkl_scale_backprop.dtype());
ASSERT_EQ(scale_backprop.shape(), mkl_scale_backprop.shape());
ASSERT_EQ(offset_backprop.dtype(), mkl_offset_backprop.dtype());
ASSERT_EQ(offset_backprop.shape(), mkl_offset_backprop.shape());
test::ExpectClose(output, mkl_output, 1e-5);
test::ExpectClose(scale_backprop, mkl_scale_backprop, 1e-5,
1e-5);
test::ExpectClose(offset_backprop, mkl_offset_backprop, 1e-5);
}
private:
using random_gen_ = Eigen::internal::NormalRandomGenerator<T>;
};
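// Thin harness around the _MklNativeConv2D kernel; its output feeds the
// batch-norm gradient comparison below.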
template <typename T>
class Conv2DOpTest : public OpsTestBase {
void TestBody() {}
public:
void RunConv2D(const Tensor& input, const Tensor& filter, Tensor* output) {
DataType dtype = DataTypeToEnum<T>::v();
TF_EXPECT_OK(NodeDefBuilder("MklConv2D", "_MklNativeConv2D")
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Attr("strides", {1, 1, 1, 1})
.Attr("padding", "SAME")
.Attr("data_format", "NHWC")
.Attr("_kernel", "MklNameChangeOp")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<T>(input.shape(), input.flat<T>());
AddInputFromArray<T>(filter.shape(), filter.flat<T>());
TF_ASSERT_OK(RunOpKernel());
*output = *GetOutput(0);
}
};
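// Typed fixture that builds the stock TensorFlow graph and the corresponding
// _MklNative* kernel invocation and verifies that both produce the same
// forward results and gradients.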
template <typename T>
class FusedBatchNormOpTest : public OpsTestBase {
protected:
void VerifyFusedBatchNorm(const float exponential_avg_factor,
const bool is_training) {
const GraphRunner run = [this](const Tensor& input, const Tensor& scale,
const Tensor& offset, const Tensor& mean,
const Tensor& variance,
const float exponential_avg_factor,
const bool is_training, Tensor* output,
Tensor* batch_mean, Tensor* batch_var) {
auto root = tensorflow::Scope::NewRootScope();
auto input_op =
ops::Const(root.WithOpName("input"), Input::Initializer(input));
auto scale_op =
ops::Const(root.WithOpName("scale"), Input::Initializer(scale));
auto offset_op =
ops::Const(root.WithOpName("offset"), Input::Initializer(offset));
auto mean_op =
ops::Const(root.WithOpName("mean"), Input::Initializer(mean));
auto var_op =
ops::Const(root.WithOpName("variance"), Input::Initializer(variance));
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(is_training);
attr = attr.ExponentialAvgFactor(exponential_avg_factor);
attr = attr.Epsilon(0.001);
auto bn = ops::FusedBatchNorm(root.WithOpName("FusedBatchNorm"), input_op,
scale_op, offset_op, mean_op, var_op, attr);
auto y = ops::Identity(root.WithOpName("y"), bn.y);
auto y_batch_mean =
ops::Identity(root.WithOpName("y_batch_mean"), bn.batch_mean);
auto y_batch_var =
ops::Identity(root.WithOpName("y_batch_var"), bn.batch_variance);
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(tensorflow::SessionOptions()));
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(session->Run({}, {"y", "y_batch_mean", "y_batch_var"}, {},
&output_tensors));
*output = output_tensors[0];
*batch_mean = output_tensors[1];
*batch_var = output_tensors[2];
};
const GraphRunner run_mkl = [this](const Tensor& input, const Tensor& scale,
const Tensor& offset, const Tensor& mean,
const Tensor& variance,
const float exponential_avg_factor,
const bool is_training, Tensor* output,
Tensor* batch_mean, Tensor* batch_var) {
DataType dtype = DataTypeToEnum<T>::v();
TF_EXPECT_OK(
NodeDefBuilder("MklNativeFusedBatchNorm", "_MklNativeFusedBatchNorm")
.Input(FakeInput(dtype))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("exponential_avg_factor", exponential_avg_factor)
.Attr("epsilon", 0.001)
.Attr("is_training", is_training)
.Attr("_kernel", "MklNameChangeOp")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<T>(input.shape(), input.flat<T>());
AddInputFromArray<float>(scale.shape(), scale.flat<float>());
AddInputFromArray<float>(offset.shape(), offset.flat<float>());
AddInputFromArray<float>(mean.shape(), mean.flat<float>());
AddInputFromArray<float>(variance.shape(), variance.flat<float>());
TF_ASSERT_OK(RunOpKernel());
*output = *GetOutput(0);
*batch_mean = *GetOutput(1);
*batch_var = *GetOutput(2);
};
CommonTestUtilities<T>::VerifyTensorsClose(exponential_avg_factor,
is_training, run, run_mkl);
}
void VerifyFusedBatchNormGradWithConv2D(const float epsilon) {
const GraphRunnerGrad run =
[this](const Tensor& input, const Tensor& filter,
const Tensor& y_backprop, const Tensor& scale,
const Tensor& mean, const Tensor& variance,
const Tensor& res_sp3, Tensor* x_backprop_tensor,
Tensor* scale_backprop_tensor, Tensor* offset_backprop_tensor,
const float epsilon) {
auto root = tensorflow::Scope::NewRootScope();
auto input_op =
ops::Const(root.WithOpName("input"), Input::Initializer(input));
auto filter_op =
ops::Const(root.WithOpName("filter"), Input::Initializer(filter));
ops::Conv2D::Attrs conv_attr;
conv_attr = conv_attr.DataFormat("NHWC");
auto conv = ops::Conv2D(root.WithOpName("Conv"), input_op, filter_op,
{1, 1, 1, 1}, "SAME", conv_attr);
auto y_backprop_op = ops::Const(root.WithOpName("y_backprop"),
Input::Initializer(y_backprop));
auto scale_op =
ops::Const(root.WithOpName("scale"), Input::Initializer(scale));
auto mean_op =
ops::Const(root.WithOpName("mean"), Input::Initializer(mean));
auto var_op = ops::Const(root.WithOpName("variance"),
Input::Initializer(variance));
auto res_sp3_op = ops::Const(root.WithOpName("reserve_space_3"),
Input::Initializer(res_sp3));
ops::FusedBatchNormGradV3::Attrs bn_attr;
bn_attr = bn_attr.IsTraining(true);
bn_attr = bn_attr.Epsilon(epsilon);
bn_attr = bn_attr.DataFormat("NHWC");
auto bn = ops::FusedBatchNormGradV3(
root.WithOpName("FusedBatchNormGrad"), y_backprop_op, conv,
scale_op, mean_op, var_op, res_sp3_op, bn_attr);
auto x_backprop =
ops::Identity(root.WithOpName("x_backprop"), bn.x_backprop);
auto scale_backprop = ops::Identity(root.WithOpName("scale_backprop"),
bn.scale_backprop);
auto offset_backprop = ops::Identity(
root.WithOpName("offset_backprop"), bn.offset_backprop);
tensorflow::GraphDef graph;
TF_ASSERT_OK(root.ToGraphDef(&graph));
tensorflow::SessionOptions session_options;
std::unique_ptr<tensorflow::Session> session(
tensorflow::NewSession(session_options));
TF_ASSERT_OK(session->Create(graph));
std::vector<Tensor> output_tensors;
TF_ASSERT_OK(session->Run(
{}, {"x_backprop", "scale_backprop", "offset_backprop"}, {},
&output_tensors));
*x_backprop_tensor = output_tensors[0];
*scale_backprop_tensor = output_tensors[1];
*offset_backprop_tensor = output_tensors[2];
};
const GraphRunnerGrad run_mkl =
[this](const Tensor& input, const Tensor& filter,
const Tensor& y_backprop, const Tensor& scale,
const Tensor& mean, const Tensor& variance,
const Tensor& res_sp3, Tensor* x_backprop_tensor,
Tensor* scale_backprop_tensor, Tensor* offset_backprop_tensor,
const float epsilon) {
Tensor conv2d_output;
Conv2DOpTest<T> conv2d_test;
conv2d_test.RunConv2D(input, filter, &conv2d_output);
DataType dtype = DataTypeToEnum<T>::v();
TF_EXPECT_OK(NodeDefBuilder("MklFusedBatchNorm",
"_MklNativeFusedBatchNormGradV3")
.Input(FakeInput(dtype))
.Input(FakeInput(dtype))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("epsilon", epsilon)
.Attr("is_training", true)
.Attr("data_format", "NHWC")
.Attr("_kernel", "MklNameChangeOp")
.Finalize(node_def()));
TF_EXPECT_OK(InitOp());
AddInputFromArray<T>(y_backprop.shape(), y_backprop.flat<T>());
AddInputFromArray<T>(conv2d_output.shape(), conv2d_output.flat<T>());
AddInputFromArray<float>(scale.shape(), scale.flat<float>());
AddInputFromArray<float>(mean.shape(), mean.flat<float>());
AddInputFromArray<float>(variance.shape(), variance.flat<float>());
AddInputFromArray<float>(res_sp3.shape(), res_sp3.flat<float>());
TF_ASSERT_OK(RunOpKernel());
*x_backprop_tensor = *GetOutput(0);
*scale_backprop_tensor = *GetOutput(1);
*offset_backprop_tensor = *GetOutput(2);
};
CommonTestUtilities<T>::VerifyTensorsCloseForGrad(epsilon, run, run_mkl);
}
};
TYPED_TEST_SUITE_P(FusedBatchNormOpTest);
TYPED_TEST_P(FusedBatchNormOpTest, Training) {
const float exponential_avg_factor = 1.0;
const bool is_training = true;
this->VerifyFusedBatchNorm(exponential_avg_factor, is_training);
}
TYPED_TEST_P(FusedBatchNormOpTest, TrainingRunningMean) {
const float exponential_avg_factor = 0.5;
const bool is_training = true;
this->VerifyFusedBatchNorm(exponential_avg_factor, is_training);
}
TYPED_TEST_P(FusedBatchNormOpTest, Inference) {
const float exponential_avg_factor = 1.0;
const bool is_training = false;
this->VerifyFusedBatchNorm(exponential_avg_factor, is_training);
}
TYPED_TEST_P(FusedBatchNormOpTest, InferenceIgnoreAvgFactor) {
const float exponential_avg_factor = 0.5;
const bool is_training = false;
this->VerifyFusedBatchNorm(exponential_avg_factor, is_training);
}
TYPED_TEST_P(FusedBatchNormOpTest, FusedBatchNormGradV3) {
const float epsilon = 0.001;
this->VerifyFusedBatchNormGradWithConv2D(epsilon);
}
REGISTER_TYPED_TEST_SUITE_P(FusedBatchNormOpTest, Training, TrainingRunningMean,
Inference, InferenceIgnoreAvgFactor,
FusedBatchNormGradV3);
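// Only float is instantiated here; covering additional dtypes (e.g. bfloat16)
// would presumably require matching MKL kernel registrations.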
using FusedBatchNormDataTypes = ::testing::Types<float>;
INSTANTIATE_TYPED_TEST_SUITE_P(Test, FusedBatchNormOpTest,
FusedBatchNormDataTypes);
}  // namespace tensorflow
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_fused_batch_norm_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/mkl/mkl_fused_batch_norm_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04c6a655-18ac-47eb-9c89-76edaa0f0a6f | cpp | abseil/abseil-cpp | optional | absl/types/internal/optional.h | absl/types/optional_test.cc | #ifndef ABSL_TYPES_INTERNAL_OPTIONAL_H_
#define ABSL_TYPES_INTERNAL_OPTIONAL_H_
#include <functional>
#include <new>
#include <type_traits>
#include <utility>
#include "absl/base/internal/inline_variable.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
template <typename T>
class optional;
namespace optional_internal {
struct init_t {
explicit init_t() = default;
};
struct empty_struct {};
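// Storage base used when T is not trivially destructible: it tracks whether a
// value is engaged and destroys the contained T in destruct() and in its own
// destructor.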
template <typename T, bool unused = std::is_trivially_destructible<T>::value>
class optional_data_dtor_base {
struct dummy_type {
static_assert(sizeof(T) % sizeof(empty_struct) == 0, "");
empty_struct data[sizeof(T) / sizeof(empty_struct)];
};
protected:
bool engaged_;
union {
T data_;
dummy_type dummy_;
};
void destruct() noexcept {
if (engaged_) {
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
#endif
data_.~T();
#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
#pragma GCC diagnostic pop
#endif
engaged_ = false;
}
}
constexpr optional_data_dtor_base() noexcept : engaged_(false), dummy_{{}} {}
template <typename... Args>
constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
: engaged_(true), data_(std::forward<Args>(args)...) {}
~optional_data_dtor_base() { destruct(); }
};
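// Specialization for trivially destructible T: no destructor call is needed,
// which keeps optional<T> itself trivially destructible.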
template <typename T>
class optional_data_dtor_base<T, true> {
struct dummy_type {
static_assert(sizeof(T) % sizeof(empty_struct) == 0, "");
empty_struct data[sizeof(T) / sizeof(empty_struct)];
};
protected:
bool engaged_;
union {
T data_;
dummy_type dummy_;
};
void destruct() noexcept { engaged_ = false; }
constexpr optional_data_dtor_base() noexcept : engaged_(false), dummy_{{}} {}
template <typename... Args>
constexpr explicit optional_data_dtor_base(in_place_t, Args&&... args)
: engaged_(true), data_(std::forward<Args>(args)...) {}
};
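// Adds placement-new construction and value assignment on top of the storage
// base; assign() constructs in place when disengaged and assigns when engaged.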
template <typename T>
class optional_data_base : public optional_data_dtor_base<T> {
protected:
using base = optional_data_dtor_base<T>;
using base::base;
template <typename... Args>
void construct(Args&&... args) {
::new (static_cast<void*>(&this->dummy_)) T(std::forward<Args>(args)...);
this->engaged_ = true;
}
template <typename U>
void assign(U&& u) {
if (this->engaged_) {
this->data_ = std::forward<U>(u);
} else {
construct(std::forward<U>(u));
}
}
};
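// optional_data keeps the compiler-generated special members only when T is
// trivially copy constructible, copy assignable, and destructible; otherwise
// the partial specialization below implements copy and move in terms of
// construct()/assign()/destruct().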
template <typename T,
bool unused = absl::is_trivially_copy_constructible<T>::value&&
absl::is_trivially_copy_assignable<typename std::remove_cv<
T>::type>::value&& std::is_trivially_destructible<T>::value>
class optional_data;
template <typename T>
class optional_data<T, true> : public optional_data_base<T> {
protected:
using optional_data_base<T>::optional_data_base;
};
template <typename T>
class optional_data<T, false> : public optional_data_base<T> {
protected:
using optional_data_base<T>::optional_data_base;
optional_data() = default;
optional_data(const optional_data& rhs) : optional_data_base<T>() {
if (rhs.engaged_) {
this->construct(rhs.data_);
}
}
optional_data(optional_data&& rhs) noexcept(
absl::default_allocator_is_nothrow::value ||
std::is_nothrow_move_constructible<T>::value)
: optional_data_base<T>() {
if (rhs.engaged_) {
this->construct(std::move(rhs.data_));
}
}
optional_data& operator=(const optional_data& rhs) {
if (rhs.engaged_) {
this->assign(rhs.data_);
} else {
this->destruct();
}
return *this;
}
optional_data& operator=(optional_data&& rhs) noexcept(
std::is_nothrow_move_assignable<T>::value&&
std::is_nothrow_move_constructible<T>::value) {
if (rhs.engaged_) {
this->assign(std::move(rhs.data_));
} else {
this->destruct();
}
return *this;
}
};
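// Ordered from most to least permissive; selects which special members of
// optional<T> the base classes below default or delete.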
enum class copy_traits { copyable = 0, movable = 1, non_movable = 2 };
template <copy_traits>
class optional_ctor_base;
template <>
class optional_ctor_base<copy_traits::copyable> {
public:
constexpr optional_ctor_base() = default;
optional_ctor_base(const optional_ctor_base&) = default;
optional_ctor_base(optional_ctor_base&&) = default;
optional_ctor_base& operator=(const optional_ctor_base&) = default;
optional_ctor_base& operator=(optional_ctor_base&&) = default;
};
template <>
class optional_ctor_base<copy_traits::movable> {
public:
constexpr optional_ctor_base() = default;
optional_ctor_base(const optional_ctor_base&) = delete;
optional_ctor_base(optional_ctor_base&&) = default;
optional_ctor_base& operator=(const optional_ctor_base&) = default;
optional_ctor_base& operator=(optional_ctor_base&&) = default;
};
template <>
class optional_ctor_base<copy_traits::non_movable> {
public:
constexpr optional_ctor_base() = default;
optional_ctor_base(const optional_ctor_base&) = delete;
optional_ctor_base(optional_ctor_base&&) = delete;
optional_ctor_base& operator=(const optional_ctor_base&) = default;
optional_ctor_base& operator=(optional_ctor_base&&) = default;
};
template <copy_traits>
class optional_assign_base;
template <>
class optional_assign_base<copy_traits::copyable> {
public:
constexpr optional_assign_base() = default;
optional_assign_base(const optional_assign_base&) = default;
optional_assign_base(optional_assign_base&&) = default;
optional_assign_base& operator=(const optional_assign_base&) = default;
optional_assign_base& operator=(optional_assign_base&&) = default;
};
template <>
class optional_assign_base<copy_traits::movable> {
public:
constexpr optional_assign_base() = default;
optional_assign_base(const optional_assign_base&) = default;
optional_assign_base(optional_assign_base&&) = default;
optional_assign_base& operator=(const optional_assign_base&) = delete;
optional_assign_base& operator=(optional_assign_base&&) = default;
};
template <>
class optional_assign_base<copy_traits::non_movable> {
public:
constexpr optional_assign_base() = default;
optional_assign_base(const optional_assign_base&) = default;
optional_assign_base(optional_assign_base&&) = default;
optional_assign_base& operator=(const optional_assign_base&) = delete;
optional_assign_base& operator=(optional_assign_base&&) = delete;
};
template <typename T>
struct ctor_copy_traits {
static constexpr copy_traits traits =
std::is_copy_constructible<T>::value
? copy_traits::copyable
: std::is_move_constructible<T>::value ? copy_traits::movable
: copy_traits::non_movable;
};
template <typename T>
struct assign_copy_traits {
static constexpr copy_traits traits =
absl::is_copy_assignable<T>::value && std::is_copy_constructible<T>::value
? copy_traits::copyable
: absl::is_move_assignable<T>::value &&
std::is_move_constructible<T>::value
? copy_traits::movable
: copy_traits::non_movable;
};
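// Illustrative sketch (an assumption about the public class defined elsewhere,
// not part of this header): absl::optional<T> presumably combines these pieces
// roughly as
//
//   template <typename T>
//   class optional : private optional_internal::optional_data<T>,
//                    private optional_internal::optional_ctor_base<
//                        optional_internal::ctor_copy_traits<T>::traits>,
//                    private optional_internal::optional_assign_base<
//                        optional_internal::assign_copy_traits<T>::traits> {
//     ...
//   };
//
// so that, e.g., a move-only T yields a deleted copy constructor and a
// defaulted move constructor without extra SFINAE on optional itself.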
template <typename T, typename U>
struct is_constructible_convertible_from_optional
: std::integral_constant<
bool, std::is_constructible<T, optional<U>&>::value ||
std::is_constructible<T, optional<U>&&>::value ||
std::is_constructible<T, const optional<U>&>::value ||
std::is_constructible<T, const optional<U>&&>::value ||
std::is_convertible<optional<U>&, T>::value ||
std::is_convertible<optional<U>&&, T>::value ||
std::is_convertible<const optional<U>&, T>::value ||
std::is_convertible<const optional<U>&&, T>::value> {};
template <typename T, typename U>
struct is_constructible_convertible_assignable_from_optional
: std::integral_constant<
bool, is_constructible_convertible_from_optional<T, U>::value ||
std::is_assignable<T&, optional<U>&>::value ||
std::is_assignable<T&, optional<U>&&>::value ||
std::is_assignable<T&, const optional<U>&>::value ||
std::is_assignable<T&, const optional<U>&&>::value> {};
bool convertible_to_bool(bool);
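// Base for std::hash<absl::optional<T>>. The primary template below deletes
// all members, disabling hashing when std::hash<remove_const_t<T>> is not
// enabled; the SFINAE-selected specialization supplies the actual hash.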
template <typename T, typename = size_t>
struct optional_hash_base {
optional_hash_base() = delete;
optional_hash_base(const optional_hash_base&) = delete;
optional_hash_base(optional_hash_base&&) = delete;
optional_hash_base& operator=(const optional_hash_base&) = delete;
optional_hash_base& operator=(optional_hash_base&&) = delete;
};
template <typename T>
struct optional_hash_base<T, decltype(std::hash<absl::remove_const_t<T> >()(
std::declval<absl::remove_const_t<T> >()))> {
using argument_type = absl::optional<T>;
using result_type = size_t;
size_t operator()(const absl::optional<T>& opt) const {
absl::type_traits_internal::AssertHashEnabled<absl::remove_const_t<T>>();
if (opt) {
return std::hash<absl::remove_const_t<T> >()(*opt);
} else {
return static_cast<size_t>(0x297814aaad196e6dULL);
}
}
};
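// Illustrative usage sketch (not from the original header), assuming the
// public std::hash<absl::optional<T>> specialization derives from this base:
//
//   absl::optional<int> o = 42;
//   size_t engaged = std::hash<absl::optional<int>>()(o);            // hash of *o
//   size_t empty = std::hash<absl::optional<int>>()(absl::nullopt);  // fixed sentinel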
}  // namespace optional_internal
ABSL_NAMESPACE_END
}  // namespace absl
#endif | #include "absl/types/optional.h"
#if !defined(ABSL_USES_STD_OPTIONAL)
#include <string>
#include <type_traits>
#include <utility>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/log/log.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
#if defined(__cplusplus) && __cplusplus >= 202002L
#define ABSL_VOLATILE_RETURN_TYPES_DEPRECATED 1
#endif
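// Helpers for a regression test covering an internal compiler error seen in
// GCC 5 through GCC 10; see InternalCompilerErrorInGcc5ToGcc10 below.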
template <class T, class...>
using GccIceHelper1 = T;
template <typename T>
struct GccIceHelper2 {};
template <typename T>
class GccIce {
template <typename U,
typename SecondTemplateArgHasToExistForSomeReason = void,
typename DependentType = void,
typename = std::is_assignable<GccIceHelper1<T, DependentType>&, U>>
  GccIce& operator=(GccIceHelper2<U> const&) { return *this; }
};
TEST(OptionalTest, InternalCompilerErrorInGcc5ToGcc10) {
GccIce<int> instantiate_ice_with_same_type_as_optional;
static_cast<void>(instantiate_ice_with_same_type_as_optional);
absl::optional<int> val1;
absl::optional<int> val2;
val1 = val2;
}
struct Hashable {};
namespace std {
template <>
struct hash<Hashable> {
size_t operator()(const Hashable&) { return 0; }
};
}  // namespace std
struct NonHashable {};
namespace {
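// Returns a short tag ("&", "&&", "c&", "c&&") describing the value category
// and constness of the argument, used to check which overload of value() and
// operator* is selected.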
std::string TypeQuals(std::string&) { return "&"; }
std::string TypeQuals(std::string&&) { return "&&"; }
std::string TypeQuals(const std::string&) { return "c&"; }
std::string TypeQuals(const std::string&&) { return "c&&"; }
struct StructorListener {
int construct0 = 0;
int construct1 = 0;
int construct2 = 0;
int listinit = 0;
int copy = 0;
int move = 0;
int copy_assign = 0;
int move_assign = 0;
int destruct = 0;
int volatile_copy = 0;
int volatile_move = 0;
int volatile_copy_assign = 0;
int volatile_move_assign = 0;
};
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4521)
#pragma warning(disable : 4522)
#endif
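// Counts constructor, assignment, and destructor calls through the static
// StructorListener pointer so tests can assert exactly which special members
// ran.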
struct Listenable {
static StructorListener* listener;
Listenable() { ++listener->construct0; }
  explicit Listenable(int /*unused*/) { ++listener->construct1; }
  Listenable(int /*unused*/, int /*unused*/) { ++listener->construct2; }
  Listenable(std::initializer_list<int> /*unused*/) { ++listener->listinit; }
  Listenable(const Listenable& /*unused*/) { ++listener->copy; }
  Listenable(const volatile Listenable& /*unused*/) {
    ++listener->volatile_copy;
  }
  Listenable(volatile Listenable&& /*unused*/) { ++listener->volatile_move; }
  Listenable(Listenable&& /*unused*/) { ++listener->move; }
  Listenable& operator=(const Listenable& /*unused*/) {
    ++listener->copy_assign;
    return *this;
  }
  Listenable& operator=(Listenable&& /*unused*/) {
    ++listener->move_assign;
    return *this;
  }
  void operator=(const volatile Listenable& /*unused*/) volatile {
    ++listener->volatile_copy_assign;
  }
  void operator=(volatile Listenable&& /*unused*/) volatile {
    ++listener->volatile_move_assign;
  }
~Listenable() { ++listener->destruct; }
};
#ifdef _MSC_VER
#pragma warning(pop)
#endif
StructorListener* Listenable::listener = nullptr;
struct ConstexprType {
enum CtorTypes {
kCtorDefault,
kCtorInt,
kCtorInitializerList,
kCtorConstChar
};
constexpr ConstexprType() : x(kCtorDefault) {}
constexpr explicit ConstexprType(int i) : x(kCtorInt) {}
constexpr ConstexprType(std::initializer_list<int> il)
: x(kCtorInitializerList) {}
constexpr ConstexprType(const char*)
: x(kCtorConstChar) {}
int x;
};
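// Types with deliberately restricted special members, used to check which of
// optional's copy/move operations end up enabled, deleted, or trivial.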
struct Copyable {
Copyable() {}
Copyable(const Copyable&) {}
Copyable& operator=(const Copyable&) { return *this; }
};
struct MoveableThrow {
MoveableThrow() {}
MoveableThrow(MoveableThrow&&) {}
MoveableThrow& operator=(MoveableThrow&&) { return *this; }
};
struct MoveableNoThrow {
MoveableNoThrow() {}
MoveableNoThrow(MoveableNoThrow&&) noexcept {}
MoveableNoThrow& operator=(MoveableNoThrow&&) noexcept { return *this; }
};
struct NonMovable {
NonMovable() {}
NonMovable(const NonMovable&) = delete;
NonMovable& operator=(const NonMovable&) = delete;
NonMovable(NonMovable&&) = delete;
NonMovable& operator=(NonMovable&&) = delete;
};
struct NoDefault {
NoDefault() = delete;
NoDefault(const NoDefault&) {}
NoDefault& operator=(const NoDefault&) { return *this; }
};
struct ConvertsFromInPlaceT {
ConvertsFromInPlaceT(absl::in_place_t) {}
};
TEST(optionalTest, DefaultConstructor) {
absl::optional<int> empty;
EXPECT_FALSE(empty);
constexpr absl::optional<int> cempty;
static_assert(!cempty.has_value(), "");
EXPECT_TRUE(
std::is_nothrow_default_constructible<absl::optional<int>>::value);
}
TEST(optionalTest, nulloptConstructor) {
absl::optional<int> empty(absl::nullopt);
EXPECT_FALSE(empty);
constexpr absl::optional<int> cempty{absl::nullopt};
static_assert(!cempty.has_value(), "");
EXPECT_TRUE((std::is_nothrow_constructible<absl::optional<int>,
absl::nullopt_t>::value));
}
TEST(optionalTest, CopyConstructor) {
{
absl::optional<int> empty, opt42 = 42;
absl::optional<int> empty_copy(empty);
EXPECT_FALSE(empty_copy);
absl::optional<int> opt42_copy(opt42);
EXPECT_TRUE(opt42_copy);
EXPECT_EQ(42, *opt42_copy);
}
{
absl::optional<const int> empty, opt42 = 42;
absl::optional<const int> empty_copy(empty);
EXPECT_FALSE(empty_copy);
absl::optional<const int> opt42_copy(opt42);
EXPECT_TRUE(opt42_copy);
EXPECT_EQ(42, *opt42_copy);
}
#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
{
absl::optional<volatile int> empty, opt42 = 42;
absl::optional<volatile int> empty_copy(empty);
EXPECT_FALSE(empty_copy);
absl::optional<volatile int> opt42_copy(opt42);
EXPECT_TRUE(opt42_copy);
EXPECT_EQ(42, *opt42_copy);
}
#endif
EXPECT_TRUE(std::is_copy_constructible<absl::optional<int>>::value);
EXPECT_TRUE(std::is_copy_constructible<absl::optional<Copyable>>::value);
EXPECT_FALSE(
std::is_copy_constructible<absl::optional<MoveableThrow>>::value);
EXPECT_FALSE(
std::is_copy_constructible<absl::optional<MoveableNoThrow>>::value);
EXPECT_FALSE(std::is_copy_constructible<absl::optional<NonMovable>>::value);
EXPECT_FALSE(
absl::is_trivially_copy_constructible<absl::optional<Copyable>>::value);
EXPECT_TRUE(
absl::is_trivially_copy_constructible<absl::optional<int>>::value);
EXPECT_TRUE(
absl::is_trivially_copy_constructible<absl::optional<const int>>::value);
#if !defined(_MSC_VER) && !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
EXPECT_TRUE(absl::is_trivially_copy_constructible<
absl::optional<volatile int>>::value);
#endif
{
constexpr absl::optional<int> o1;
constexpr absl::optional<int> o2 = o1;
static_assert(!o2, "");
}
{
constexpr absl::optional<int> o1 = 42;
constexpr absl::optional<int> o2 = o1;
static_assert(o2, "");
static_assert(*o2 == 42, "");
}
{
struct TrivialCopyable {
constexpr TrivialCopyable() : x(0) {}
constexpr explicit TrivialCopyable(int i) : x(i) {}
int x;
};
constexpr absl::optional<TrivialCopyable> o1(42);
constexpr absl::optional<TrivialCopyable> o2 = o1;
static_assert(o2, "");
static_assert((*o2).x == 42, "");
#ifndef ABSL_GLIBCXX_OPTIONAL_TRIVIALITY_BUG
EXPECT_TRUE(absl::is_trivially_copy_constructible<
absl::optional<TrivialCopyable>>::value);
EXPECT_TRUE(absl::is_trivially_copy_constructible<
absl::optional<const TrivialCopyable>>::value);
#endif
#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
EXPECT_FALSE(std::is_copy_constructible<
absl::optional<volatile TrivialCopyable>>::value);
#endif
}
}
TEST(optionalTest, MoveConstructor) {
absl::optional<int> empty, opt42 = 42;
absl::optional<int> empty_move(std::move(empty));
EXPECT_FALSE(empty_move);
absl::optional<int> opt42_move(std::move(opt42));
EXPECT_TRUE(opt42_move);
EXPECT_EQ(42, opt42_move);
EXPECT_TRUE(std::is_move_constructible<absl::optional<int>>::value);
EXPECT_TRUE(std::is_move_constructible<absl::optional<Copyable>>::value);
EXPECT_TRUE(std::is_move_constructible<absl::optional<MoveableThrow>>::value);
EXPECT_TRUE(
std::is_move_constructible<absl::optional<MoveableNoThrow>>::value);
EXPECT_FALSE(std::is_move_constructible<absl::optional<NonMovable>>::value);
EXPECT_TRUE(std::is_nothrow_move_constructible<absl::optional<int>>::value);
EXPECT_EQ(
absl::default_allocator_is_nothrow::value,
std::is_nothrow_move_constructible<absl::optional<MoveableThrow>>::value);
EXPECT_TRUE(std::is_nothrow_move_constructible<
absl::optional<MoveableNoThrow>>::value);
}
TEST(optionalTest, Destructor) {
struct Trivial {};
struct NonTrivial {
NonTrivial(const NonTrivial&) {}
NonTrivial& operator=(const NonTrivial&) { return *this; }
~NonTrivial() {}
};
EXPECT_TRUE(std::is_trivially_destructible<absl::optional<int>>::value);
EXPECT_TRUE(std::is_trivially_destructible<absl::optional<Trivial>>::value);
EXPECT_FALSE(
std::is_trivially_destructible<absl::optional<NonTrivial>>::value);
}
TEST(optionalTest, InPlaceConstructor) {
constexpr absl::optional<ConstexprType> opt0{absl::in_place_t()};
static_assert(opt0, "");
static_assert((*opt0).x == ConstexprType::kCtorDefault, "");
constexpr absl::optional<ConstexprType> opt1{absl::in_place_t(), 1};
static_assert(opt1, "");
static_assert((*opt1).x == ConstexprType::kCtorInt, "");
constexpr absl::optional<ConstexprType> opt2{absl::in_place_t(), {1, 2}};
static_assert(opt2, "");
static_assert((*opt2).x == ConstexprType::kCtorInitializerList, "");
EXPECT_FALSE((std::is_constructible<absl::optional<ConvertsFromInPlaceT>,
absl::in_place_t>::value));
EXPECT_FALSE((std::is_constructible<absl::optional<ConvertsFromInPlaceT>,
const absl::in_place_t&>::value));
EXPECT_TRUE(
(std::is_constructible<absl::optional<ConvertsFromInPlaceT>,
absl::in_place_t, absl::in_place_t>::value));
EXPECT_FALSE((std::is_constructible<absl::optional<NoDefault>,
absl::in_place_t>::value));
EXPECT_FALSE((std::is_constructible<absl::optional<NoDefault>,
absl::in_place_t&&>::value));
}
TEST(optionalTest, ValueConstructor) {
constexpr absl::optional<int> opt0(0);
static_assert(opt0, "");
static_assert(*opt0 == 0, "");
EXPECT_TRUE((std::is_convertible<int, absl::optional<int>>::value));
constexpr absl::optional<ConstexprType> opt1 = {"abc"};
static_assert(opt1, "");
static_assert(ConstexprType::kCtorConstChar == (*opt1).x, "");
EXPECT_TRUE(
(std::is_convertible<const char*, absl::optional<ConstexprType>>::value));
constexpr absl::optional<ConstexprType> opt2{2};
static_assert(opt2, "");
static_assert(ConstexprType::kCtorInt == (*opt2).x, "");
EXPECT_FALSE(
(std::is_convertible<int, absl::optional<ConstexprType>>::value));
#if defined(__GNUC__) && !defined(__clang__) && __GNUC__ == 7 && \
__cplusplus == 201703L
#define ABSL_GCC7_OVER_ICS_LIST_BUG 1
#endif
#ifndef ABSL_GCC7_OVER_ICS_LIST_BUG
constexpr absl::optional<int> opt3({});
static_assert(opt3, "");
static_assert(*opt3 == 0, "");
#endif
absl::optional<ConstexprType> opt4({});
EXPECT_FALSE(opt4);
}
struct Implicit {};
struct Explicit {};
struct Convert {
Convert(const Implicit&)
: implicit(true), move(false) {}
Convert(Implicit&&)
: implicit(true), move(true) {}
explicit Convert(const Explicit&) : implicit(false), move(false) {}
explicit Convert(Explicit&&) : implicit(false), move(true) {}
bool implicit;
bool move;
};
struct ConvertFromOptional {
ConvertFromOptional(const Implicit&)
: implicit(true), move(false), from_optional(false) {}
ConvertFromOptional(Implicit&&)
: implicit(true), move(true), from_optional(false) {}
ConvertFromOptional(
const absl::optional<Implicit>&)
: implicit(true), move(false), from_optional(true) {}
ConvertFromOptional(absl::optional<Implicit>&&)
: implicit(true), move(true), from_optional(true) {}
explicit ConvertFromOptional(const Explicit&)
: implicit(false), move(false), from_optional(false) {}
explicit ConvertFromOptional(Explicit&&)
: implicit(false), move(true), from_optional(false) {}
explicit ConvertFromOptional(const absl::optional<Explicit>&)
: implicit(false), move(false), from_optional(true) {}
explicit ConvertFromOptional(absl::optional<Explicit>&&)
: implicit(false), move(true), from_optional(true) {}
bool implicit;
bool move;
bool from_optional;
};
TEST(optionalTest, ConvertingConstructor) {
absl::optional<Implicit> i_empty;
absl::optional<Implicit> i(absl::in_place);
absl::optional<Explicit> e_empty;
absl::optional<Explicit> e(absl::in_place);
{
absl::optional<Convert> empty = i_empty;
EXPECT_FALSE(empty);
absl::optional<Convert> opt_copy = i;
EXPECT_TRUE(opt_copy);
EXPECT_TRUE(opt_copy->implicit);
EXPECT_FALSE(opt_copy->move);
absl::optional<Convert> opt_move = absl::optional<Implicit>(absl::in_place);
EXPECT_TRUE(opt_move);
EXPECT_TRUE(opt_move->implicit);
EXPECT_TRUE(opt_move->move);
}
{
absl::optional<Convert> empty(e_empty);
EXPECT_FALSE(empty);
absl::optional<Convert> opt_copy(e);
EXPECT_TRUE(opt_copy);
EXPECT_FALSE(opt_copy->implicit);
EXPECT_FALSE(opt_copy->move);
EXPECT_FALSE((std::is_convertible<const absl::optional<Explicit>&,
absl::optional<Convert>>::value));
absl::optional<Convert> opt_move{absl::optional<Explicit>(absl::in_place)};
EXPECT_TRUE(opt_move);
EXPECT_FALSE(opt_move->implicit);
EXPECT_TRUE(opt_move->move);
EXPECT_FALSE((std::is_convertible<absl::optional<Explicit>&&,
absl::optional<Convert>>::value));
}
{
static_assert(
std::is_convertible<absl::optional<Implicit>,
absl::optional<ConvertFromOptional>>::value,
"");
absl::optional<ConvertFromOptional> opt0 = i_empty;
EXPECT_TRUE(opt0);
EXPECT_TRUE(opt0->implicit);
EXPECT_FALSE(opt0->move);
EXPECT_TRUE(opt0->from_optional);
absl::optional<ConvertFromOptional> opt1 = absl::optional<Implicit>();
EXPECT_TRUE(opt1);
EXPECT_TRUE(opt1->implicit);
EXPECT_TRUE(opt1->move);
EXPECT_TRUE(opt1->from_optional);
}
{
absl::optional<ConvertFromOptional> opt0(e_empty);
EXPECT_TRUE(opt0);
EXPECT_FALSE(opt0->implicit);
EXPECT_FALSE(opt0->move);
EXPECT_TRUE(opt0->from_optional);
EXPECT_FALSE(
(std::is_convertible<const absl::optional<Explicit>&,
absl::optional<ConvertFromOptional>>::value));
absl::optional<ConvertFromOptional> opt1{absl::optional<Explicit>()};
EXPECT_TRUE(opt1);
EXPECT_FALSE(opt1->implicit);
EXPECT_TRUE(opt1->move);
EXPECT_TRUE(opt1->from_optional);
EXPECT_FALSE(
(std::is_convertible<absl::optional<Explicit>&&,
absl::optional<ConvertFromOptional>>::value));
}
}
TEST(optionalTest, StructorBasic) {
StructorListener listener;
Listenable::listener = &listener;
{
absl::optional<Listenable> empty;
EXPECT_FALSE(empty);
absl::optional<Listenable> opt0(absl::in_place);
EXPECT_TRUE(opt0);
absl::optional<Listenable> opt1(absl::in_place, 1);
EXPECT_TRUE(opt1);
absl::optional<Listenable> opt2(absl::in_place, 1, 2);
EXPECT_TRUE(opt2);
}
EXPECT_EQ(1, listener.construct0);
EXPECT_EQ(1, listener.construct1);
EXPECT_EQ(1, listener.construct2);
EXPECT_EQ(3, listener.destruct);
}
TEST(optionalTest, CopyMoveStructor) {
StructorListener listener;
Listenable::listener = &listener;
absl::optional<Listenable> original(absl::in_place);
EXPECT_EQ(1, listener.construct0);
EXPECT_EQ(0, listener.copy);
EXPECT_EQ(0, listener.move);
absl::optional<Listenable> copy(original);
EXPECT_EQ(1, listener.construct0);
EXPECT_EQ(1, listener.copy);
EXPECT_EQ(0, listener.move);
absl::optional<Listenable> move(std::move(original));
EXPECT_EQ(1, listener.construct0);
EXPECT_EQ(1, listener.copy);
EXPECT_EQ(1, listener.move);
}
TEST(optionalTest, ListInit) {
StructorListener listener;
Listenable::listener = &listener;
absl::optional<Listenable> listinit1(absl::in_place, {1});
absl::optional<Listenable> listinit2(absl::in_place, {1, 2});
EXPECT_EQ(2, listener.listinit);
}
TEST(optionalTest, AssignFromNullopt) {
absl::optional<int> opt(1);
opt = absl::nullopt;
EXPECT_FALSE(opt);
StructorListener listener;
Listenable::listener = &listener;
absl::optional<Listenable> opt1(absl::in_place);
opt1 = absl::nullopt;
EXPECT_FALSE(opt1);
EXPECT_EQ(1, listener.construct0);
EXPECT_EQ(1, listener.destruct);
EXPECT_TRUE((
std::is_nothrow_assignable<absl::optional<int>, absl::nullopt_t>::value));
EXPECT_TRUE((std::is_nothrow_assignable<absl::optional<Listenable>,
absl::nullopt_t>::value));
}
TEST(optionalTest, CopyAssignment) {
const absl::optional<int> empty, opt1 = 1, opt2 = 2;
absl::optional<int> empty_to_opt1, opt1_to_opt2, opt2_to_empty;
EXPECT_FALSE(empty_to_opt1);
empty_to_opt1 = empty;
EXPECT_FALSE(empty_to_opt1);
empty_to_opt1 = opt1;
EXPECT_TRUE(empty_to_opt1);
EXPECT_EQ(1, empty_to_opt1.value());
EXPECT_FALSE(opt1_to_opt2);
opt1_to_opt2 = opt1;
EXPECT_TRUE(opt1_to_opt2);
EXPECT_EQ(1, opt1_to_opt2.value());
opt1_to_opt2 = opt2;
EXPECT_TRUE(opt1_to_opt2);
EXPECT_EQ(2, opt1_to_opt2.value());
EXPECT_FALSE(opt2_to_empty);
opt2_to_empty = opt2;
EXPECT_TRUE(opt2_to_empty);
EXPECT_EQ(2, opt2_to_empty.value());
opt2_to_empty = empty;
EXPECT_FALSE(opt2_to_empty);
EXPECT_FALSE(absl::is_copy_assignable<absl::optional<const int>>::value);
EXPECT_TRUE(absl::is_copy_assignable<absl::optional<Copyable>>::value);
EXPECT_FALSE(absl::is_copy_assignable<absl::optional<MoveableThrow>>::value);
EXPECT_FALSE(
absl::is_copy_assignable<absl::optional<MoveableNoThrow>>::value);
EXPECT_FALSE(absl::is_copy_assignable<absl::optional<NonMovable>>::value);
EXPECT_TRUE(absl::is_trivially_copy_assignable<int>::value);
EXPECT_TRUE(absl::is_trivially_copy_assignable<volatile int>::value);
struct Trivial {
int i;
};
struct NonTrivial {
NonTrivial& operator=(const NonTrivial&) { return *this; }
int i;
};
EXPECT_TRUE(absl::is_trivially_copy_assignable<Trivial>::value);
EXPECT_FALSE(absl::is_copy_assignable<const Trivial>::value);
EXPECT_FALSE(absl::is_copy_assignable<volatile Trivial>::value);
EXPECT_TRUE(absl::is_copy_assignable<NonTrivial>::value);
EXPECT_FALSE(absl::is_trivially_copy_assignable<NonTrivial>::value);
#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
{
StructorListener listener;
Listenable::listener = &listener;
absl::optional<volatile Listenable> empty, set(absl::in_place);
EXPECT_EQ(1, listener.construct0);
absl::optional<volatile Listenable> empty_to_empty, empty_to_set,
set_to_empty(absl::in_place), set_to_set(absl::in_place);
EXPECT_EQ(3, listener.construct0);
empty_to_empty = empty;
empty_to_set = set;
set_to_empty = empty;
set_to_set = set;
EXPECT_EQ(1, listener.volatile_copy);
EXPECT_EQ(0, listener.volatile_move);
EXPECT_EQ(1, listener.destruct);
EXPECT_EQ(1, listener.volatile_copy_assign);
}
#endif
}
TEST(optionalTest, MoveAssignment) {
{
StructorListener listener;
Listenable::listener = &listener;
absl::optional<Listenable> empty1, empty2, set1(absl::in_place),
set2(absl::in_place);
EXPECT_EQ(2, listener.construct0);
absl::optional<Listenable> empty_to_empty, empty_to_set,
set_to_empty(absl::in_place), set_to_set(absl::in_place);
EXPECT_EQ(4, listener.construct0);
empty_to_empty = std::move(empty1);
empty_to_set = std::move(set1);
set_to_empty = std::move(empty2);
set_to_set = std::move(set2);
EXPECT_EQ(0, listener.copy);
EXPECT_EQ(1, listener.move);
EXPECT_EQ(1, listener.destruct);
EXPECT_EQ(1, listener.move_assign);
}
#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
{
StructorListener listener;
Listenable::listener = &listener;
absl::optional<volatile Listenable> empty1, empty2, set1(absl::in_place),
set2(absl::in_place);
EXPECT_EQ(2, listener.construct0);
absl::optional<volatile Listenable> empty_to_empty, empty_to_set,
set_to_empty(absl::in_place), set_to_set(absl::in_place);
EXPECT_EQ(4, listener.construct0);
empty_to_empty = std::move(empty1);
empty_to_set = std::move(set1);
set_to_empty = std::move(empty2);
set_to_set = std::move(set2);
EXPECT_EQ(0, listener.volatile_copy);
EXPECT_EQ(1, listener.volatile_move);
EXPECT_EQ(1, listener.destruct);
EXPECT_EQ(1, listener.volatile_move_assign);
}
#endif
EXPECT_FALSE(absl::is_move_assignable<absl::optional<const int>>::value);
EXPECT_TRUE(absl::is_move_assignable<absl::optional<Copyable>>::value);
EXPECT_TRUE(absl::is_move_assignable<absl::optional<MoveableThrow>>::value);
EXPECT_TRUE(absl::is_move_assignable<absl::optional<MoveableNoThrow>>::value);
EXPECT_FALSE(absl::is_move_assignable<absl::optional<NonMovable>>::value);
EXPECT_FALSE(
std::is_nothrow_move_assignable<absl::optional<MoveableThrow>>::value);
EXPECT_TRUE(
std::is_nothrow_move_assignable<absl::optional<MoveableNoThrow>>::value);
}
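// Probe types for the assignment tests below: they distinguish assignment
// from a NoConvertToOptional value versus assignment from an
// absl::optional<NoConvertToOptional>.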
struct NoConvertToOptional {
NoConvertToOptional(const NoConvertToOptional&) = delete;
};
struct CopyConvert {
CopyConvert(const NoConvertToOptional&);
CopyConvert& operator=(const CopyConvert&) = delete;
CopyConvert& operator=(const NoConvertToOptional&);
};
struct CopyConvertFromOptional {
CopyConvertFromOptional(const NoConvertToOptional&);
CopyConvertFromOptional(const absl::optional<NoConvertToOptional>&);
CopyConvertFromOptional& operator=(const CopyConvertFromOptional&) = delete;
CopyConvertFromOptional& operator=(const NoConvertToOptional&);
CopyConvertFromOptional& operator=(
const absl::optional<NoConvertToOptional>&);
};
struct MoveConvert {
MoveConvert(NoConvertToOptional&&);
MoveConvert& operator=(const MoveConvert&) = delete;
MoveConvert& operator=(NoConvertToOptional&&);
};
struct MoveConvertFromOptional {
MoveConvertFromOptional(NoConvertToOptional&&);
MoveConvertFromOptional(absl::optional<NoConvertToOptional>&&);
MoveConvertFromOptional& operator=(const MoveConvertFromOptional&) = delete;
MoveConvertFromOptional& operator=(NoConvertToOptional&&);
MoveConvertFromOptional& operator=(absl::optional<NoConvertToOptional>&&);
};
TEST(optionalTest, ValueAssignment) {
absl::optional<int> opt;
EXPECT_FALSE(opt);
opt = 42;
EXPECT_TRUE(opt);
EXPECT_EQ(42, opt.value());
opt = absl::nullopt;
EXPECT_FALSE(opt);
opt = 42;
EXPECT_TRUE(opt);
EXPECT_EQ(42, opt.value());
opt = 43;
EXPECT_TRUE(opt);
EXPECT_EQ(43, opt.value());
opt = {};
EXPECT_FALSE(opt);
opt = {44};
EXPECT_TRUE(opt);
EXPECT_EQ(44, opt.value());
EXPECT_TRUE((std::is_assignable<absl::optional<CopyConvert>&,
const NoConvertToOptional&>::value));
EXPECT_TRUE((std::is_assignable<absl::optional<CopyConvertFromOptional>&,
const NoConvertToOptional&>::value));
EXPECT_FALSE((std::is_assignable<absl::optional<MoveConvert>&,
const NoConvertToOptional&>::value));
EXPECT_TRUE((std::is_assignable<absl::optional<MoveConvert>&,
NoConvertToOptional&&>::value));
EXPECT_FALSE((std::is_assignable<absl::optional<MoveConvertFromOptional>&,
const NoConvertToOptional&>::value));
EXPECT_TRUE((std::is_assignable<absl::optional<MoveConvertFromOptional>&,
NoConvertToOptional&&>::value));
EXPECT_TRUE(
(std::is_assignable<absl::optional<CopyConvertFromOptional>&,
const absl::optional<NoConvertToOptional>&>::value));
EXPECT_TRUE(
(std::is_assignable<absl::optional<MoveConvertFromOptional>&,
absl::optional<NoConvertToOptional>&&>::value));
}
TEST(optionalTest, ConvertingAssignment) {
absl::optional<int> opt_i;
absl::optional<char> opt_c('c');
opt_i = opt_c;
EXPECT_TRUE(opt_i);
EXPECT_EQ(*opt_c, *opt_i);
opt_i = absl::optional<char>();
EXPECT_FALSE(opt_i);
opt_i = absl::optional<char>('d');
EXPECT_TRUE(opt_i);
EXPECT_EQ('d', *opt_i);
absl::optional<std::string> opt_str;
absl::optional<const char*> opt_cstr("abc");
opt_str = opt_cstr;
EXPECT_TRUE(opt_str);
EXPECT_EQ(std::string("abc"), *opt_str);
opt_str = absl::optional<const char*>();
EXPECT_FALSE(opt_str);
opt_str = absl::optional<const char*>("def");
EXPECT_TRUE(opt_str);
EXPECT_EQ(std::string("def"), *opt_str);
EXPECT_TRUE(
(std::is_assignable<absl::optional<CopyConvert>,
const absl::optional<NoConvertToOptional>&>::value));
EXPECT_FALSE(
(std::is_assignable<absl::optional<MoveConvert>&,
const absl::optional<NoConvertToOptional>&>::value));
EXPECT_TRUE(
(std::is_assignable<absl::optional<MoveConvert>&,
absl::optional<NoConvertToOptional>&&>::value));
EXPECT_FALSE(
(std::is_assignable<absl::optional<MoveConvertFromOptional>&,
const absl::optional<NoConvertToOptional>&>::value));
}
TEST(optionalTest, ResetAndHasValue) {
StructorListener listener;
Listenable::listener = &listener;
absl::optional<Listenable> opt;
EXPECT_FALSE(opt);
EXPECT_FALSE(opt.has_value());
opt.emplace();
EXPECT_TRUE(opt);
EXPECT_TRUE(opt.has_value());
opt.reset();
EXPECT_FALSE(opt);
EXPECT_FALSE(opt.has_value());
EXPECT_EQ(1, listener.destruct);
opt.reset();
EXPECT_FALSE(opt);
EXPECT_FALSE(opt.has_value());
constexpr absl::optional<int> empty;
static_assert(!empty.has_value(), "");
constexpr absl::optional<int> nonempty(1);
static_assert(nonempty.has_value(), "");
}
TEST(optionalTest, Emplace) {
StructorListener listener;
Listenable::listener = &listener;
absl::optional<Listenable> opt;
EXPECT_FALSE(opt);
opt.emplace(1);
EXPECT_TRUE(opt);
opt.emplace(1, 2);
EXPECT_EQ(1, listener.construct1);
EXPECT_EQ(1, listener.construct2);
EXPECT_EQ(1, listener.destruct);
absl::optional<std::string> o;
EXPECT_TRUE((std::is_same<std::string&, decltype(o.emplace("abc"))>::value));
std::string& ref = o.emplace("abc");
EXPECT_EQ(&ref, &o.value());
}
TEST(optionalTest, ListEmplace) {
StructorListener listener;
Listenable::listener = &listener;
absl::optional<Listenable> opt;
EXPECT_FALSE(opt);
opt.emplace({1});
EXPECT_TRUE(opt);
opt.emplace({1, 2});
EXPECT_EQ(2, listener.listinit);
EXPECT_EQ(1, listener.destruct);
absl::optional<Listenable> o;
EXPECT_TRUE((std::is_same<Listenable&, decltype(o.emplace({1}))>::value));
Listenable& ref = o.emplace({1});
EXPECT_EQ(&ref, &o.value());
}
TEST(optionalTest, Swap) {
absl::optional<int> opt_empty, opt1 = 1, opt2 = 2;
EXPECT_FALSE(opt_empty);
EXPECT_TRUE(opt1);
EXPECT_EQ(1, opt1.value());
EXPECT_TRUE(opt2);
EXPECT_EQ(2, opt2.value());
swap(opt_empty, opt1);
EXPECT_FALSE(opt1);
EXPECT_TRUE(opt_empty);
EXPECT_EQ(1, opt_empty.value());
EXPECT_TRUE(opt2);
EXPECT_EQ(2, opt2.value());
swap(opt_empty, opt1);
EXPECT_FALSE(opt_empty);
EXPECT_TRUE(opt1);
EXPECT_EQ(1, opt1.value());
EXPECT_TRUE(opt2);
EXPECT_EQ(2, opt2.value());
swap(opt1, opt2);
EXPECT_FALSE(opt_empty);
EXPECT_TRUE(opt1);
EXPECT_EQ(2, opt1.value());
EXPECT_TRUE(opt2);
EXPECT_EQ(1, opt2.value());
EXPECT_TRUE(noexcept(opt1.swap(opt2)));
EXPECT_TRUE(noexcept(swap(opt1, opt2)));
}
template <int v>
struct DeletedOpAddr {
int value = v;
constexpr DeletedOpAddr() = default;
constexpr const DeletedOpAddr<v>* operator&() const = delete;
DeletedOpAddr<v>* operator&() = delete;
};
TEST(optionalTest, OperatorAddr) {
constexpr int v = -1;
{
constexpr absl::optional<DeletedOpAddr<v>> opt(absl::in_place_t{});
static_assert(opt.has_value(), "");
static_assert((*opt).value == v, "");
}
{
const absl::optional<DeletedOpAddr<v>> opt(absl::in_place_t{});
EXPECT_TRUE(opt.has_value());
EXPECT_TRUE(opt->value == v);
EXPECT_TRUE((*opt).value == v);
}
}
TEST(optionalTest, PointerStuff) {
absl::optional<std::string> opt(absl::in_place, "foo");
EXPECT_EQ("foo", *opt);
const auto& opt_const = opt;
EXPECT_EQ("foo", *opt_const);
EXPECT_EQ(opt->size(), 3u);
EXPECT_EQ(opt_const->size(), 3u);
constexpr absl::optional<ConstexprType> opt1(1);
static_assert((*opt1).x == ConstexprType::kCtorInt, "");
}
TEST(optionalTest, Value) {
using O = absl::optional<std::string>;
using CO = const absl::optional<std::string>;
using OC = absl::optional<const std::string>;
O lvalue(absl::in_place, "lvalue");
CO clvalue(absl::in_place, "clvalue");
OC lvalue_c(absl::in_place, "lvalue_c");
EXPECT_EQ("lvalue", lvalue.value());
EXPECT_EQ("clvalue", clvalue.value());
EXPECT_EQ("lvalue_c", lvalue_c.value());
EXPECT_EQ("xvalue", O(absl::in_place, "xvalue").value());
EXPECT_EQ("xvalue_c", OC(absl::in_place, "xvalue_c").value());
EXPECT_EQ("cxvalue", CO(absl::in_place, "cxvalue").value());
EXPECT_EQ("&", TypeQuals(lvalue.value()));
EXPECT_EQ("c&", TypeQuals(clvalue.value()));
EXPECT_EQ("c&", TypeQuals(lvalue_c.value()));
EXPECT_EQ("&&", TypeQuals(O(absl::in_place, "xvalue").value()));
EXPECT_EQ("c&&", TypeQuals(CO(absl::in_place, "cxvalue").value()));
EXPECT_EQ("c&&", TypeQuals(OC(absl::in_place, "xvalue_c").value()));
#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
using OV = absl::optional<volatile int>;
OV lvalue_v(absl::in_place, 42);
EXPECT_EQ(42, lvalue_v.value());
EXPECT_EQ(42, OV(42).value());
EXPECT_TRUE((std::is_same<volatile int&, decltype(lvalue_v.value())>::value));
EXPECT_TRUE((std::is_same<volatile int&&, decltype(OV(42).value())>::value));
#endif
absl::optional<int> empty;
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW((void)empty.value(), absl::bad_optional_access);
#else
EXPECT_DEATH_IF_SUPPORTED((void)empty.value(), "Bad optional access");
#endif
constexpr absl::optional<int> o1(1);
static_assert(1 == o1.value(), "");
#ifndef _MSC_VER
using COI = const absl::optional<int>;
static_assert(2 == COI(2).value(), "");
#endif
}
TEST(optionalTest, DerefOperator) {
using O = absl::optional<std::string>;
using CO = const absl::optional<std::string>;
using OC = absl::optional<const std::string>;
O lvalue(absl::in_place, "lvalue");
CO clvalue(absl::in_place, "clvalue");
OC lvalue_c(absl::in_place, "lvalue_c");
EXPECT_EQ("lvalue", *lvalue);
EXPECT_EQ("clvalue", *clvalue);
EXPECT_EQ("lvalue_c", *lvalue_c);
EXPECT_EQ("xvalue", *O(absl::in_place, "xvalue"));
EXPECT_EQ("xvalue_c", *OC(absl::in_place, "xvalue_c"));
EXPECT_EQ("cxvalue", *CO(absl::in_place, "cxvalue"));
EXPECT_EQ("&", TypeQuals(*lvalue));
EXPECT_EQ("c&", TypeQuals(*clvalue));
EXPECT_EQ("&&", TypeQuals(*O(absl::in_place, "xvalue")));
EXPECT_EQ("c&&", TypeQuals(*CO(absl::in_place, "cxvalue")));
EXPECT_EQ("c&&", TypeQuals(*OC(absl::in_place, "xvalue_c")));
#if !defined(ABSL_VOLATILE_RETURN_TYPES_DEPRECATED)
using OV = absl::optional<volatile int>;
OV lvalue_v(absl::in_place, 42);
EXPECT_EQ(42, *lvalue_v);
EXPECT_EQ(42, *OV(42));
EXPECT_TRUE((std::is_same<volatile int&, decltype(*lvalue_v)>::value));
EXPECT_TRUE((std::is_same<volatile int&&, decltype(*OV(42))>::value));
#endif
constexpr absl::optional<int> opt1(1);
static_assert(*opt1 == 1, "");
#if !defined(_MSC_VER) && !defined(ABSL_SKIP_OVERLOAD_TEST_DUE_TO_GCC_BUG)
using COI = const absl::optional<int>;
static_assert(*COI(2) == 2, "");
#endif
}
TEST(optionalTest, ValueOr) {
absl::optional<double> opt_empty, opt_set = 1.2;
EXPECT_EQ(42.0, opt_empty.value_or(42));
EXPECT_EQ(1.2, opt_set.value_or(42));
EXPECT_EQ(42.0, absl::optional<double>().value_or(42));
EXPECT_EQ(1.2, absl::optional<double>(1.2).value_or(42));
constexpr absl::optional<double> copt_empty, copt_set = {1.2};
static_assert(42.0 == copt_empty.value_or(42), "");
static_assert(1.2 == copt_set.value_or(42), "");
using COD = const absl::optional<double>;
static_assert(42.0 == COD().value_or(42), "");
static_assert(1.2 == COD(1.2).value_or(42), "");
}
TEST(optionalTest, make_optional) {
auto opt_int = absl::make_optional(42);
EXPECT_TRUE((std::is_same<decltype(opt_int), absl::optional<int>>::value));
EXPECT_EQ(42, opt_int);
StructorListener listener;
Listenable::listener = &listener;
absl::optional<Listenable> opt0 = absl::make_optional<Listenable>();
EXPECT_EQ(1, listener.construct0);
absl::optional<Listenable> opt1 = absl::make_optional<Listenable>(1);
EXPECT_EQ(1, listener.construct1);
absl::optional<Listenable> opt2 = absl::make_optional<Listenable>(1, 2);
EXPECT_EQ(1, listener.construct2);
absl::optional<Listenable> opt3 = absl::make_optional<Listenable>({1});
absl::optional<Listenable> opt4 = absl::make_optional<Listenable>({1, 2});
EXPECT_EQ(2, listener.listinit);
{
constexpr absl::optional<int> c_opt = absl::make_optional(42);
static_assert(c_opt.value() == 42, "");
}
{
struct TrivialCopyable {
constexpr TrivialCopyable() : x(0) {}
constexpr explicit TrivialCopyable(int i) : x(i) {}
int x;
};
constexpr TrivialCopyable v;
constexpr absl::optional<TrivialCopyable> c_opt0 = absl::make_optional(v);
static_assert((*c_opt0).x == 0, "");
constexpr absl::optional<TrivialCopyable> c_opt1 =
absl::make_optional<TrivialCopyable>();
static_assert((*c_opt1).x == 0, "");
constexpr absl::optional<TrivialCopyable> c_opt2 =
absl::make_optional<TrivialCopyable>(42);
static_assert((*c_opt2).x == 42, "");
}
}
template <typename T, typename U>
void optionalTest_Comparisons_EXPECT_LESS(T x, U y) {
EXPECT_FALSE(x == y);
EXPECT_TRUE(x != y);
EXPECT_TRUE(x < y);
EXPECT_FALSE(x > y);
EXPECT_TRUE(x <= y);
EXPECT_FALSE(x >= y);
}
template <typename T, typename U>
void optionalTest_Comparisons_EXPECT_SAME(T x, U y) {
EXPECT_TRUE(x == y);
EXPECT_FALSE(x != y);
EXPECT_FALSE(x < y);
EXPECT_FALSE(x > y);
EXPECT_TRUE(x <= y);
EXPECT_TRUE(x >= y);
}
template <typename T, typename U>
void optionalTest_Comparisons_EXPECT_GREATER(T x, U y) {
EXPECT_FALSE(x == y);
EXPECT_TRUE(x != y);
EXPECT_FALSE(x < y);
EXPECT_TRUE(x > y);
EXPECT_FALSE(x <= y);
EXPECT_TRUE(x >= y);
}
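// Exercises every ordering relation among empty optionals, engaged optionals,
// and raw values across the three types T, U, and V.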
template <typename T, typename U, typename V>
void TestComparisons() {
absl::optional<T> ae, a2{2}, a4{4};
absl::optional<U> be, b2{2}, b4{4};
V v3 = 3;
optionalTest_Comparisons_EXPECT_SAME(absl::nullopt, be);
optionalTest_Comparisons_EXPECT_LESS(absl::nullopt, b2);
optionalTest_Comparisons_EXPECT_LESS(absl::nullopt, b4);
optionalTest_Comparisons_EXPECT_SAME(ae, absl::nullopt);
optionalTest_Comparisons_EXPECT_SAME(ae, be);
optionalTest_Comparisons_EXPECT_LESS(ae, b2);
optionalTest_Comparisons_EXPECT_LESS(ae, v3);
optionalTest_Comparisons_EXPECT_LESS(ae, b4);
optionalTest_Comparisons_EXPECT_GREATER(a2, absl::nullopt);
optionalTest_Comparisons_EXPECT_GREATER(a2, be);
optionalTest_Comparisons_EXPECT_SAME(a2, b2);
optionalTest_Comparisons_EXPECT_LESS(a2, v3);
optionalTest_Comparisons_EXPECT_LESS(a2, b4);
optionalTest_Comparisons_EXPECT_GREATER(v3, be);
optionalTest_Comparisons_EXPECT_GREATER(v3, b2);
optionalTest_Comparisons_EXPECT_SAME(v3, v3);
optionalTest_Comparisons_EXPECT_LESS(v3, b4);
optionalTest_Comparisons_EXPECT_GREATER(a4, absl::nullopt);
optionalTest_Comparisons_EXPECT_GREATER(a4, be);
optionalTest_Comparisons_EXPECT_GREATER(a4, b2);
optionalTest_Comparisons_EXPECT_GREATER(a4, v3);
optionalTest_Comparisons_EXPECT_SAME(a4, b4);
}
struct Int1 {
Int1() = default;
Int1(int i) : i(i) {}
int i;
};
struct Int2 {
Int2() = default;
Int2(int i) : i(i) {}
int i;
};
constexpr bool operator==(const Int1& lhs, const Int2& rhs) {
return lhs.i == rhs.i;
}
constexpr bool operator!=(const Int1& lhs, const Int2& rhs) {
return !(lhs == rhs);
}
constexpr bool operator<(const Int1& lhs, const Int2& rhs) {
return lhs.i < rhs.i;
}
constexpr bool operator<=(const Int1& lhs, const Int2& rhs) {
return lhs < rhs || lhs == rhs;
}
constexpr bool operator>(const Int1& lhs, const Int2& rhs) {
return !(lhs <= rhs);
}
constexpr bool operator>=(const Int1& lhs, const Int2& rhs) {
return !(lhs < rhs);
}
TEST(optionalTest, Comparisons) {
TestComparisons<int, int, int>();
TestComparisons<const int, int, int>();
TestComparisons<Int1, int, int>();
TestComparisons<int, Int2, int>();
TestComparisons<Int1, Int2, int>();
absl::optional<std::string> opt_str = "abc";
const char* cstr = "abc";
EXPECT_TRUE(opt_str == cstr);
absl::optional<const char*> opt_cstr = cstr;
EXPECT_TRUE(opt_str == opt_cstr);
absl::optional<absl::string_view> e1;
absl::optional<std::string> e2;
EXPECT_TRUE(e1 == e2);
}
TEST(optionalTest, SwapRegression) {
StructorListener listener;
Listenable::listener = &listener;
{
absl::optional<Listenable> a;
absl::optional<Listenable> b(absl::in_place);
a.swap(b);
}
EXPECT_EQ(1, listener.construct0);
EXPECT_EQ(1, listener.move);
EXPECT_EQ(2, listener.destruct);
{
absl::optional<Listenable> a(absl::in_place);
absl::optional<Listenable> b;
a.swap(b);
}
EXPECT_EQ(2, listener.construct0);
EXPECT_EQ(2, listener.move);
EXPECT_EQ(4, listener.destruct);
}
TEST(optionalTest, BigStringLeakCheck) {
constexpr size_t n = 1 << 16;
using OS = absl::optional<std::string>;
OS a;
OS b = absl::nullopt;
OS c = std::string(n, 'c');
std::string sd(n, 'd');
OS d = sd;
OS e(absl::in_place, n, 'e');
OS f;
f.emplace(n, 'f');
OS ca(a);
OS cb(b);
OS cc(c);
OS cd(d);
OS ce(e);
OS oa;
OS ob = absl::nullopt;
OS oc = std::string(n, 'c');
std::string sod(n, 'd');
OS od = sod;
OS oe(absl::in_place, n, 'e');
OS of;
of.emplace(n, 'f');
OS ma(std::move(oa));
OS mb(std::move(ob));
OS mc(std::move(oc));
OS md(std::move(od));
OS me(std::move(oe));
OS mf(std::move(of));
OS aa1;
OS ab1 = absl::nullopt;
OS ac1 = std::string(n, 'c');
std::string sad1(n, 'd');
OS ad1 = sad1;
OS ae1(absl::in_place, n, 'e');
OS af1;
af1.emplace(n, 'f');
OS aa2;
OS ab2 = absl::nullopt;
OS ac2 = std::string(n, 'c');
std::string sad2(n, 'd');
OS ad2 = sad2;
OS ae2(absl::in_place, n, 'e');
OS af2;
af2.emplace(n, 'f');
aa1 = af2;
ab1 = ae2;
ac1 = ad2;
ad1 = ac2;
ae1 = ab2;
af1 = aa2;
OS aa3;
OS ab3 = absl::nullopt;
OS ac3 = std::string(n, 'c');
std::string sad3(n, 'd');
OS ad3 = sad3;
OS ae3(absl::in_place, n, 'e');
OS af3;
af3.emplace(n, 'f');
aa3 = absl::nullopt;
ab3 = absl::nullopt;
ac3 = absl::nullopt;
ad3 = absl::nullopt;
ae3 = absl::nullopt;
af3 = absl::nullopt;
OS aa4;
OS ab4 = absl::nullopt;
OS ac4 = std::string(n, 'c');
std::string sad4(n, 'd');
OS ad4 = sad4;
OS ae4(absl::in_place, n, 'e');
OS af4;
af4.emplace(n, 'f');
aa4 = OS(absl::in_place, n, 'a');
ab4 = OS(absl::in_place, n, 'b');
ac4 = OS(absl::in_place, n, 'c');
ad4 = OS(absl::in_place, n, 'd');
ae4 = OS(absl::in_place, n, 'e');
af4 = OS(absl::in_place, n, 'f');
OS aa5;
OS ab5 = absl::nullopt;
OS ac5 = std::string(n, 'c');
std::string sad5(n, 'd');
OS ad5 = sad5;
OS ae5(absl::in_place, n, 'e');
OS af5;
af5.emplace(n, 'f');
std::string saa5(n, 'a');
std::string sab5(n, 'a');
std::string sac5(n, 'a');
std::string sad52(n, 'a');
std::string sae5(n, 'a');
std::string saf5(n, 'a');
aa5 = saa5;
ab5 = sab5;
ac5 = sac5;
ad5 = sad52;
ae5 = sae5;
af5 = saf5;
OS aa6;
OS ab6 = absl::nullopt;
OS ac6 = std::string(n, 'c');
std::string sad6(n, 'd');
OS ad6 = sad6;
OS ae6(absl::in_place, n, 'e');
OS af6;
af6.emplace(n, 'f');
aa6 = std::string(n, 'a');
ab6 = std::string(n, 'b');
ac6 = std::string(n, 'c');
ad6 = std::string(n, 'd');
ae6 = std::string(n, 'e');
af6 = std::string(n, 'f');
OS aa7;
OS ab7 = absl::nullopt;
OS ac7 = std::string(n, 'c');
std::string sad7(n, 'd');
OS ad7 = sad7;
OS ae7(absl::in_place, n, 'e');
OS af7;
af7.emplace(n, 'f');
aa7.emplace(n, 'A');
ab7.emplace(n, 'B');
ac7.emplace(n, 'C');
ad7.emplace(n, 'D');
ae7.emplace(n, 'E');
af7.emplace(n, 'F');
}
TEST(optionalTest, MoveAssignRegression) {
StructorListener listener;
Listenable::listener = &listener;
{
absl::optional<Listenable> a;
Listenable b;
a = std::move(b);
}
EXPECT_EQ(1, listener.construct0);
EXPECT_EQ(1, listener.move);
EXPECT_EQ(2, listener.destruct);
}
TEST(optionalTest, ValueType) {
EXPECT_TRUE((std::is_same<absl::optional<int>::value_type, int>::value));
EXPECT_TRUE((std::is_same<absl::optional<std::string>::value_type,
std::string>::value));
EXPECT_FALSE(
(std::is_same<absl::optional<int>::value_type, absl::nullopt_t>::value));
}
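// Detection idiom: the default template argument
// decltype(std::hash<U>()(std::declval<U>())) makes test(int) viable only when
// std::hash<U> can be constructed and invoked on a U, so `value` reports
// whether hashing is usable for T without triggering a hard compile error.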
template <typename T>
struct is_hash_enabled_for {
template <typename U, typename = decltype(std::hash<U>()(std::declval<U>()))>
static std::true_type test(int);
template <typename U>
static std::false_type test(...);
static constexpr bool value = decltype(test<T>(0))::value;
};
TEST(optionalTest, Hash) {
std::hash<absl::optional<int>> hash;
std::set<size_t> hashcodes;
hashcodes.insert(hash(absl::nullopt));
for (int i = 0; i < 100; ++i) {
hashcodes.insert(hash(i));
}
EXPECT_GT(hashcodes.size(), 90u);
static_assert(is_hash_enabled_for<absl::optional<int>>::value, "");
static_assert(is_hash_enabled_for<absl::optional<Hashable>>::value, "");
static_assert(
absl::type_traits_internal::IsHashable<absl::optional<int>>::value, "");
static_assert(
absl::type_traits_internal::IsHashable<absl::optional<Hashable>>::value,
"");
absl::type_traits_internal::AssertHashEnabled<absl::optional<int>>();
absl::type_traits_internal::AssertHashEnabled<absl::optional<Hashable>>();
#if ABSL_META_INTERNAL_STD_HASH_SFINAE_FRIENDLY_
static_assert(!is_hash_enabled_for<absl::optional<NonHashable>>::value, "");
static_assert(!absl::type_traits_internal::IsHashable<
absl::optional<NonHashable>>::value,
"");
#endif
#ifndef __GLIBCXX__
static_assert(is_hash_enabled_for<absl::optional<const int>>::value, "");
static_assert(is_hash_enabled_for<absl::optional<const Hashable>>::value, "");
std::hash<absl::optional<const int>> c_hash;
for (int i = 0; i < 100; ++i) {
EXPECT_EQ(hash(i), c_hash(i));
}
#endif
}
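// MoveMeNoThrow's move constructor is noexcept, so operations that prefer
// non-throwing moves (e.g. vector reallocation) should never fall back to the
// copy constructor; the copy constructor aborts to catch any such fallback.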
struct MoveMeNoThrow {
MoveMeNoThrow() : x(0) {}
[[noreturn]] MoveMeNoThrow(const MoveMeNoThrow& other) : x(other.x) {
LOG(FATAL) << "Should not be called.";
}
MoveMeNoThrow(MoveMeNoThrow&& other) noexcept : x(other.x) {}
int x;
};
struct MoveMeThrow {
MoveMeThrow() : x(0) {}
MoveMeThrow(const MoveMeThrow& other) : x(other.x) {}
MoveMeThrow(MoveMeThrow&& other) : x(other.x) {}
int x;
};
TEST(optionalTest, NoExcept) {
static_assert(
std::is_nothrow_move_constructible<absl::optional<MoveMeNoThrow>>::value,
"");
static_assert(absl::default_allocator_is_nothrow::value ==
std::is_nothrow_move_constructible<
absl::optional<MoveMeThrow>>::value,
"");
std::vector<absl::optional<MoveMeNoThrow>> v;
for (int i = 0; i < 10; ++i) v.emplace_back();
}
struct AnyLike {
AnyLike(AnyLike&&) = default;
AnyLike(const AnyLike&) = default;
template <typename ValueType,
typename T = typename std::decay<ValueType>::type,
typename std::enable_if<
!absl::disjunction<
std::is_same<AnyLike, T>,
absl::negation<std::is_copy_constructible<T>>>::value,
int>::type = 0>
AnyLike(ValueType&&) {}
AnyLike& operator=(AnyLike&&) = default;
AnyLike& operator=(const AnyLike&) = default;
template <typename ValueType,
typename T = typename std::decay<ValueType>::type>
typename std::enable_if<
absl::conjunction<absl::negation<std::is_same<AnyLike, T>>,
std::is_copy_constructible<T>>::value,
AnyLike&>::type
operator=(ValueType&& ) {
return *this;
}
};
TEST(optionalTest, ConstructionConstraints) {
EXPECT_TRUE((std::is_constructible<AnyLike, absl::optional<AnyLike>>::value));
EXPECT_TRUE(
(std::is_constructible<AnyLike, const absl::optional<AnyLike>&>::value));
EXPECT_TRUE((std::is_constructible<absl::optional<AnyLike>, AnyLike>::value));
EXPECT_TRUE(
(std::is_constructible<absl::optional<AnyLike>, const AnyLike&>::value));
EXPECT_TRUE((std::is_convertible<absl::optional<AnyLike>, AnyLike>::value));
EXPECT_TRUE(
(std::is_convertible<const absl::optional<AnyLike>&, AnyLike>::value));
EXPECT_TRUE((std::is_convertible<AnyLike, absl::optional<AnyLike>>::value));
EXPECT_TRUE(
(std::is_convertible<const AnyLike&, absl::optional<AnyLike>>::value));
EXPECT_TRUE(std::is_move_constructible<absl::optional<AnyLike>>::value);
EXPECT_TRUE(std::is_copy_constructible<absl::optional<AnyLike>>::value);
}
TEST(optionalTest, AssignmentConstraints) {
EXPECT_TRUE((std::is_assignable<AnyLike&, absl::optional<AnyLike>>::value));
EXPECT_TRUE(
(std::is_assignable<AnyLike&, const absl::optional<AnyLike>&>::value));
EXPECT_TRUE((std::is_assignable<absl::optional<AnyLike>&, AnyLike>::value));
EXPECT_TRUE(
(std::is_assignable<absl::optional<AnyLike>&, const AnyLike&>::value));
EXPECT_TRUE(std::is_move_assignable<absl::optional<AnyLike>>::value);
EXPECT_TRUE(absl::is_copy_assignable<absl::optional<AnyLike>>::value);
}
#if !defined(__EMSCRIPTEN__)
struct NestedClassBug {
struct Inner {
bool dummy = false;
};
absl::optional<Inner> value;
};
TEST(optionalTest, InPlaceTSFINAEBug) {
NestedClassBug b;
((void)b);
using Inner = NestedClassBug::Inner;
EXPECT_TRUE((std::is_default_constructible<Inner>::value));
EXPECT_TRUE((std::is_constructible<Inner>::value));
EXPECT_TRUE(
(std::is_constructible<absl::optional<Inner>, absl::in_place_t>::value));
absl::optional<Inner> o(absl::in_place);
EXPECT_TRUE(o.has_value());
o.emplace();
EXPECT_TRUE(o.has_value());
}
#endif
}  // namespace
#endif | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/types/internal/optional.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/types/optional_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
bdecb66a-e4c2-4a63-858d-085c8dc2db48 | cpp | google/quiche | quiche_callbacks | quiche/common/quiche_callbacks.h | quiche/common/quiche_callbacks_test.cc | #ifndef QUICHE_COMMON_QUICHE_CALLBACKS_H_
#define QUICHE_COMMON_QUICHE_CALLBACKS_H_
#include <type_traits>
#include "absl/functional/any_invocable.h"
#include "absl/functional/function_ref.h"
#include "quiche/common/platform/api/quiche_export.h"
namespace quiche {
namespace callbacks_internal {
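// SignatureChanger rewrites a plain function signature R(Args...) into the
// rvalue-ref-qualified form R(Args...) && (an invocable that must be called as
// an rvalue, i.e. at most once) and the const-qualified form R(Args...) const
// (an invocable that may be called repeatedly through a const reference).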
template <class Sig>
class QUICHE_EXPORT SignatureChanger {};
template <typename ReturnType, typename... Args>
class QUICHE_NO_EXPORT SignatureChanger<ReturnType(Args...)> {
public:
using Rvalue = ReturnType(Args...) &&;
using Const = ReturnType(Args...) const;
};
}  // namespace callbacks_internal
template <class T>
using UnretainedCallback = absl::FunctionRef<T>;
template <class T>
using SingleUseCallback = absl::AnyInvocable<
typename callbacks_internal::SignatureChanger<T>::Rvalue>;
static_assert(std::is_same_v<SingleUseCallback<void(int, int &, int &&)>,
absl::AnyInvocable<void(int, int &, int &&) &&>>);
template <class T>
using MultiUseCallback =
absl::AnyInvocable<typename callbacks_internal::SignatureChanger<T>::Const>;
static_assert(
std::is_same_v<MultiUseCallback<void()>, absl::AnyInvocable<void() const>>);
}  // namespace quiche
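// Illustrative usage (a minimal sketch; ForEach and the lambdas below are
// hypothetical, not part of this header):
//
//   void ForEach(const std::vector<int>& xs,
//                quiche::UnretainedCallback<void(int)> f) {
//     for (int x : xs) f(x);   // `f` is only borrowed for the duration of the call.
//   }
//
//   quiche::SingleUseCallback<void()> on_done = [] { /* ... */ };
//   std::move(on_done)();      // must be invoked as an rvalue, at most once
//
//   quiche::MultiUseCallback<void()> on_tick = [] { /* ... */ };
//   on_tick();
//   on_tick();                 // may be invoked any number of times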
#endif | #include "quiche/common/quiche_callbacks.h"
#include <memory>
#include <utility>
#include <vector>
#include "quiche/common/platform/api/quiche_test.h"
namespace quiche {
namespace {
void Apply(const std::vector<int>& container,
UnretainedCallback<void(int)> function) {
for (int n : container) {
function(n);
}
}
TEST(QuicheCallbacksTest, UnretainedCallback) {
std::vector<int> nums = {1, 2, 3, 4};
int sum = 0;
Apply(nums, [&sum](int n) { sum += n; });
EXPECT_EQ(sum, 10);
}
TEST(QuicheCallbacksTest, SingleUseCallback) {
int called = 0;
SingleUseCallback<void()> callback = [&called]() { called++; };
EXPECT_EQ(called, 0);
SingleUseCallback<void()> new_callback = std::move(callback);
EXPECT_EQ(called, 0);
std::move(new_callback)();
EXPECT_EQ(called, 1);
EXPECT_QUICHE_DEBUG_DEATH(
std::move(new_callback)(),
"AnyInvocable");
}
class SetFlagOnDestruction {
public:
SetFlagOnDestruction(bool* flag) : flag_(flag) {}
~SetFlagOnDestruction() { *flag_ = true; }
private:
bool* flag_;
};
TEST(QuicheCallbacksTest, SingleUseCallbackOwnership) {
bool deleted = false;
auto flag_setter = std::make_unique<SetFlagOnDestruction>(&deleted);
{
SingleUseCallback<void()> callback = [setter = std::move(flag_setter)]() {};
EXPECT_FALSE(deleted);
}
EXPECT_TRUE(deleted);
}
TEST(QuicheCallbacksTest, MultiUseCallback) {
int called = 0;
MultiUseCallback<void()> callback = [&called]() { called++; };
EXPECT_EQ(called, 0);
callback();
EXPECT_EQ(called, 1);
callback();
callback();
EXPECT_EQ(called, 3);
}
TEST(QuicheCallbacksTest, MultiUseCallbackOwnership) {
bool deleted = false;
auto flag_setter = std::make_unique<SetFlagOnDestruction>(&deleted);
{
MultiUseCallback<void()> callback = [setter = std::move(flag_setter)]() {};
EXPECT_FALSE(deleted);
}
EXPECT_TRUE(deleted);
}
}  // namespace
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_callbacks.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/common/quiche_callbacks_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
3cff052a-121d-4eb7-af05-2568e0a596f9 | cpp | tensorflow/tensorflow | tfrt_op_kernel | tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.cc | tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel_test.cc | #include "tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_split.h"
#include "absl/strings/strip.h"
#include "llvm/Support/raw_ostream.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/runtime_fallback/kernel/attr_util.h"
#include "tensorflow/core/tfrt/utils/error_util.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/kernel_frame.h"
namespace tensorflow {
TFRTOpKernelConstruction::TFRTOpKernelConstruction(
const tfrt::OpAttrsRef& attributes)
: attributes_(std::move(attributes)) {}
Status MissingAttributeError(StringPiece attr_name) {
return errors::InvalidArgument("Missing attribute: ", attr_name);
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
std::string* value) const {
tfrt::string_view view;
bool success = attributes_.GetString(
llvm::StringRef(attr_name.data(), attr_name.size()), &view);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = view.str();
return absl::OkStatus();
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
DataType* value) const {
tfrt::OpAttrType attrtype;
bool success = attributes_.Get<tfrt::OpAttrType>(
llvm::StringRef(attr_name.data(), attr_name.size()), &attrtype);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = tfd::ConvertToTfDataType(attrtype);
return absl::OkStatus();
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
Padding* value) const {
std::string padding_str;
TF_RETURN_IF_ERROR(GetAttr<std::string>(attr_name, &padding_str));
return GetPaddingFromString(padding_str, value);
}
template <>
Status TFRTOpKernelConstruction::GetAttr(StringPiece attr_name,
std::vector<int32>* value) const {
llvm::ArrayRef<int32> arrayref;
bool success = attributes_.GetArray<int32>(
llvm::StringRef(attr_name.data(), attr_name.size()), &arrayref);
if (!success) {
return MissingAttributeError(attr_name);
}
*value = arrayref;
return absl::OkStatus();
}
void TFRTOpKernelConstruction::CtxFailure(const Status& s) {
error_ = tfrt::MakeStatusString(s);
}
void TFRTOpKernelConstruction::CtxFailureWithWarning(const Status& s) {
CtxFailure(s);
}
namespace {
std::string FillFailureMessage(const char* file, int line, const Status& s) {
std::string error;
llvm::raw_string_ostream sstr(error);
sstr << "OP_REQUIRES failed at " << file << ":" << line << " : "
<< tfrt::MakeStatusString(s);
  sstr.str();  // Force the stream to flush its contents into |error|.
return error;
}
}  // namespace
void TFRTOpKernelConstruction::CtxFailure(const char* file, int line,
const Status& s) {
error_ = FillFailureMessage(file, line, s);
}
void TFRTOpKernelConstruction::CtxFailureWithWarning(const char* file, int line,
const Status& s) {
CtxFailure(file, line, s);
}
const std::optional<std::string>& TFRTOpKernelConstruction::error() {
return error_;
}
TFRTOpKernelContext::TFRTOpKernelContext(
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> inputs, int num_outputs,
const TFRTOpMeta* op_meta, tfrt::HostContext* host)
: inputs_(inputs),
op_meta_(op_meta),
outputs_(num_outputs),
eigen_host_context_(host) {}
const Tensor& TFRTOpKernelContext::output(int index) { return outputs_[index]; }
const std::optional<std::string>& TFRTOpKernelContext::error() {
return error_;
}
bool TFRTOpKernelContext::ValidateInputsAreSameShape(TFRTOpKernel* op) {
return true;
}
const Tensor& TFRTOpKernelContext::input(int index) {
return inputs_[index]->get<Tensor>();
}
int TFRTOpKernelContext::num_inputs() const { return inputs_.size(); }
int TFRTOpKernelContext::num_outputs() const { return outputs_.size(); }
void TFRTOpKernelContext::set_output(int index, const Tensor& tensor) {
outputs_[index] = tensor;
}
Status TFRTOpKernelContext::allocate_temp(DataType type,
const TensorShape& shape,
Tensor* out_temp) {
*out_temp = Tensor(type, shape);
return absl::OkStatus();
}
Status TFRTOpKernelContext::allocate_output(int index, const TensorShape& shape,
Tensor** tensor) {
DataType output_type = op_meta_->output_type(index);
outputs_[index] = Tensor(output_type, shape);
*tensor = &outputs_[index];
return absl::OkStatus();
}
DataType TFRTOpKernelContext::expected_output_dtype(int i) const {
return op_meta_->output_type(i);
}
void TFRTOpKernelContext::CtxFailure(const Status& s) { error_ = s.message(); }
void TFRTOpKernelContext::CtxFailureWithWarning(const Status& s) {
CtxFailure(s);
}
void TFRTOpKernelContext::CtxFailure(const char* file, int line,
const Status& s) {
error_ = FillFailureMessage(file, line, s);
}
void TFRTOpKernelContext::CtxFailureWithWarning(const char* file, int line,
const Status& s) {
CtxFailure(file, line, s);
}
template <>
const Eigen::ThreadPoolDevice& TFRTOpKernelContext::eigen_device() const {
return eigen_host_context_.Device();
}
TFRTOpMeta::TFRTOpMeta(std::vector<DataType> output_types)
: output_types_(std::move(output_types)) {}
DataType TFRTOpMeta::output_type(int index) const {
return output_types_[index];
}
TFRTOpMetaBuilder::TFRTOpMetaBuilder(StringPiece op_name) : op_name_(op_name) {}
namespace {
DataType ParseInputOutputSpec(StringPiece spec) {
std::vector<absl::string_view> name_type =
absl::StrSplit(spec, absl::MaxSplits(':', 2));
DataType data_type;
bool success =
DataTypeFromString(absl::StripAsciiWhitespace(name_type[1]), &data_type);
assert(success && "Failed to parse DataType");
(void)success;
return data_type;
}
}  // namespace
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Output(StringPiece output_spec) {
output_types_.push_back(ParseInputOutputSpec(output_spec));
return *this;
}
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Input(StringPiece input_spec) {
return *this;
}
TFRTOpMetaBuilder& TFRTOpMetaBuilder::Attr(StringPiece attr_spec) {
return *this;
}
const string& TFRTOpMetaBuilder::op_name() const { return op_name_; }
TFRTOpMeta TFRTOpMetaBuilder::BuildMeta() const {
return TFRTOpMeta(output_types_);
}
TFRTOpMetaMap::TFRTOpMetaMap() = default;
void TFRTOpMetaMap::RegisterOpMeta(const TFRTOpMetaBuilder& op_builder) {
auto insert_result = op_metas_.insert(
std::make_pair(op_builder.op_name(), op_builder.BuildMeta()));
assert(insert_result.second && "Multiple registrations for the same op_name");
(void)insert_result;
}
const TFRTOpMeta* TFRTOpMetaMap::GetOpMeta(StringPiece op_name) const {
auto it = op_metas_.find(llvm::StringRef(op_name.data(), op_name.size()));
if (it == op_metas_.end()) return nullptr;
return &it->second;
}
TFRTOpRegisterer::TFRTOpRegisterer(const TFRTOpMetaBuilder& op_builder) {
tfrt_forwarding_op_meta_map->RegisterOpMeta(op_builder);
}
llvm::ManagedStatic<TFRTOpMetaMap> tfrt_forwarding_op_meta_map;
llvm::ManagedStatic<TFRTOpKernelFactories> tfrt_forwarding_kernel_factories;
TFRTOpKernelFactories::TFRTOpKernelFactories() = default;
void TFRTOpKernelFactories::RegisterFactory(StringPiece kernel_class_name,
TFRTOpKernelReg kernel_info) {
factories_[std::string(kernel_class_name)].push_back(kernel_info);
}
Status ValidKernelAttr(StringPiece kernel_class_name,
TFRTOpKernelConstruction* construction,
const llvm::StringMap<DataType>& constraints) {
for (const auto& constraint : constraints) {
auto attr_name = std::string(constraint.first());
DataType type;
Status s = construction->GetAttr(attr_name, &type);
if (!s.ok()) {
return errors::InvalidArgument(
"Kernel ", kernel_class_name,
" has constraint for unset tfdtype attribute ", attr_name, ".");
}
if (type != constraint.second) {
return errors::InvalidArgument(
"Kernel ", kernel_class_name, " with type constraint ", attr_name,
": ", DataTypeString(constraint.second),
" does not match attribute type ", DataTypeString(type), ".");
}
}
return absl::OkStatus();
}
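// Walks the factories registered under |kernel_class_name| in registration
// order and instantiates the first one whose type constraints are satisfied by
// the attributes; if none match, the accumulated error is reported on the
// construction context and nullptr is returned.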
std::unique_ptr<TFRTOpKernel> TFRTOpKernelFactories::CreateKernel(
StringPiece kernel_class_name,
TFRTOpKernelConstruction* op_kernel_construction) const {
auto it = factories_.find(std::string(kernel_class_name));
if (it == factories_.end()) {
op_kernel_construction->CtxFailure(errors::NotFound(
"Could not find kernel ", kernel_class_name, " in the registry."));
return std::unique_ptr<TFRTOpKernel>(nullptr);
}
Status status;
for (const auto& kernel_info : it->second) {
Status s = ValidKernelAttr(kernel_class_name, op_kernel_construction,
kernel_info.type_constraints);
if (s.ok()) {
return kernel_info.callback(op_kernel_construction);
}
status.Update(s);
}
op_kernel_construction->CtxFailure(status);
return std::unique_ptr<TFRTOpKernel>(nullptr);
}
} | #include "tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.h"
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/lib/core/error_codes.pb.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/padding.h"
#include "tfrt/core_runtime/op_attrs.h"
#include "tfrt/host_context/async_value.h"
#include "tfrt/host_context/concurrent_work_queue.h"
#include "tfrt/host_context/diagnostic.h"
#include "tfrt/host_context/host_allocator.h"
#include "tfrt/host_context/host_context.h"
namespace tensorflow {
namespace {
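// Builds a HostContext that swallows diagnostics and allocates with malloc.
// Note that the num_threads argument is currently ignored: the work queue is
// always single-threaded.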
std::unique_ptr<tfrt::HostContext> CreateTestHostContext(int num_threads) {
return std::make_unique<tfrt::HostContext>(
[](const tfrt::DecodedDiagnostic&) {}, tfrt::CreateMallocAllocator(),
tfrt::CreateSingleThreadedWorkQueue());
}
TEST(TFRTOpKernelTest, TestGetBoolAttr) {
tfrt::OpAttrs attrs;
attrs.Set<bool>("foo", true);
attrs.Set<bool>("bar", false);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
bool value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_TRUE(value);
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_FALSE(value);
}
TEST(TFRTOpKernelTest, TestGetIntAttr) {
tfrt::OpAttrs attrs;
attrs.Set<int32>("foo", -2);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
int32_t value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, -2);
}
TEST(TFRTOpKernelTest, TestGetIntListAttr) {
tfrt::OpAttrs attrs;
attrs.SetArray<int32>("foo", {});
attrs.SetArray<int32>("bar", {1});
attrs.SetArray<int32>("baz", {1, 2, 3});
attrs.SetString("bar", "test");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
std::vector<int32> v1, v2, v3;
std::vector<int32> expected_v1;
std::vector<int32> expected_v2 = {1};
std::vector<int32> expected_v3 = {1, 2, 3};
TF_ASSERT_OK(ctx.GetAttr("foo", &v1));
ASSERT_EQ(v1, expected_v1);
TF_ASSERT_OK(ctx.GetAttr("bar", &v2));
ASSERT_EQ(v2, expected_v2);
TF_ASSERT_OK(ctx.GetAttr("baz", &v3));
ASSERT_EQ(v3, expected_v3);
}
TEST(TFRTOpKernelTest, TestGetStrAttr) {
tfrt::OpAttrs attrs;
attrs.SetString("foo", "");
attrs.SetString("bar", "test");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
std::string value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, "");
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_EQ(value, "test");
}
TEST(TFRTOpKernelTest, TestGetPaddingAttr) {
tfrt::OpAttrs attrs;
attrs.SetString("foo", "VALID");
attrs.SetString("bar", "SAME");
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
Padding value;
TF_ASSERT_OK(ctx.GetAttr("foo", &value));
ASSERT_EQ(value, Padding::VALID);
TF_ASSERT_OK(ctx.GetAttr("bar", &value));
ASSERT_EQ(value, Padding::SAME);
}
TEST(TFRTOpKernelTest, TestMissingAttr) {
tfrt::OpAttrs attrs;
attrs.Set<bool>("foo", true);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
bool value;
auto status = ctx.GetAttr("bar", &value);
EXPECT_EQ(status.code(), error::INVALID_ARGUMENT);
}
class TestKernel : public TFRTOpKernel {
public:
explicit TestKernel(TFRTOpKernelConstruction* construction)
: TFRTOpKernel(construction) {}
void Compute(TFRTOpKernelContext* context) override {}
};
TEST(TFRTOpKernelTest, TestKernelMatchesTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::F32);
attrs.Set<tfrt::OpAttrType>("bar", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg.type_constraints["foo"] = DT_FLOAT;
reg.type_constraints["bar"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernelFloatInt", reg);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernelFloatInt",
&ctx);
ASSERT_NE(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestSecondKernelMatchesTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg1([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
TFRTOpKernelReg reg2([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg1.type_constraints["foo"] = DT_FLOAT;
reg2.type_constraints["foo"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernel2ndConstraint", reg1);
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernel2ndConstraint", reg2);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernel2ndConstraint",
&ctx);
ASSERT_NE(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestKernelDoesNotMatchTypeConstraints) {
tfrt::OpAttrs attrs;
attrs.Set<tfrt::OpAttrType>("foo", tfrt::OpAttrType::I32);
attrs.Set<tfrt::OpAttrType>("bar", tfrt::OpAttrType::I32);
tfrt::OpAttrsRef attrsref(attrs);
TFRTOpKernelConstruction ctx(attrsref);
TFRTOpKernelReg reg([](TFRTOpKernelConstruction* construction)
-> std::unique_ptr<TFRTOpKernel> {
return std::make_unique<TestKernel>(construction);
});
reg.type_constraints["foo"] = DT_FLOAT;
reg.type_constraints["bar"] = DT_INT32;
::tensorflow::tfrt_forwarding_kernel_factories->RegisterFactory(
"TestKernelIntInt", reg);
std::unique_ptr<TFRTOpKernel> op =
tfrt_forwarding_kernel_factories->CreateKernel("TestKernelIntInt", &ctx);
ASSERT_EQ(op.get(), nullptr);
}
TEST(TFRTOpKernelTest, TestAllocateTemp) {
auto host_context = CreateTestHostContext(1);
int num_outputs = 1;
llvm::ArrayRef<tfrt::RCReference<tfrt::AsyncValue>> inputs;
TFRTOpMeta op_meta({DT_INT32});
TFRTOpKernelContext ctx(inputs, num_outputs, &op_meta, host_context.get());
Tensor out;
ASSERT_EQ(out.AllocatedBytes(), 0);
TF_EXPECT_OK(ctx.allocate_temp(DT_INT32, {}, &out));
ASSERT_GT(out.AllocatedBytes(), 0);
out.scalar<int32>()() = 123;
ASSERT_EQ(out.dtype(), DT_INT32);
ASSERT_EQ(out.shape().dims(), 0);
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/runtime_fallback/kernel/tfrt_op_kernel_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
665bfea1-89ee-49d0-99c1-042637e13beb | cpp | tensorflow/tensorflow | outfeed_receiver | third_party/xla/xla/python/outfeed_receiver.cc | third_party/xla/xla/python/outfeed_receiver_test.cc | #include "xla/python/outfeed_receiver.h"
#include <sys/types.h>
#include <cstdint>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/sharding_builder.h"
#include "xla/client/xla_builder.h"
#include "xla/client/xla_computation.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_executable.h"
#include "xla/python/pjrt_ifrt/pjrt_client.h"
#include "xla/python/pjrt_ifrt/pjrt_device.h"
#include "xla/service/computation_placer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/casts.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/traceme.h"
namespace xla {
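// Wire format: every outfeed transfer is prefixed by a two-word uint32 header
// holding a start marker followed by the consumer id. Consumer id 0 is
// reserved; it carries no data and tells the listener thread to shut down.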
int constexpr kOutfeedHeaderWords = 2;
uint32_t constexpr kOutfeedHeaderStart = 271828;
uint32_t constexpr kOutfeedCidShutdown = 0;
class OutfeedData {
public:
OutfeedData(ifrt::PjRtDevice* device, uint32_t consumer_id, Shape shape)
: device_(device),
consumer_id_(consumer_id),
shape_(shape),
literal_(nullptr),
literal_size_bytes_(0) {}
ifrt::PjRtDevice* device() { return device_; }
uint32_t consumer_id() const { return consumer_id_; }
Shape shape() const { return shape_; }
std::unique_ptr<Literal> literal() {
CHECK(literal_);
return std::move(literal_);
}
void SetLiteral(std::unique_ptr<Literal> literal);
ssize_t literal_size_bytes() const { return literal_size_bytes_; }
std::string DebugString() const;
private:
ifrt::PjRtDevice* device_;
uint32_t consumer_id_;
Shape shape_;
std::unique_ptr<Literal> literal_;
ssize_t literal_size_bytes_;
};
void OutfeedData::SetLiteral(std::unique_ptr<Literal> literal) {
literal_ = std::move(literal);
shape_ = literal_->shape();
int total_size_bytes = 0;
ShapeUtil::ForEachSubshape(
shape_, [&](const Shape& literal_subshape, const ShapeIndex& index) {
if (!literal_subshape.IsTuple()) {
total_size_bytes += ShapeUtil::ByteSizeOf(literal_subshape, 8);
}
});
literal_size_bytes_ = total_size_bytes;
}
std::string OutfeedData::DebugString() const {
return absl::StrFormat("dev=%s; cons=%d; shape=%s", device_->DebugString(),
consumer_id_, shape_.ToString());
}
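// Threading model: Start() schedules one listener thread and one callback
// thread per device. Listeners block in TransferFromOutfeed and enqueue
// received literals; callback threads drain their device's queue and invoke
// callback_. EnqueueReceivedData blocks while the queued bytes exceed
// max_callback_queue_size_bytes_, so slow callbacks apply backpressure to the
// listeners.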
class OutfeedReceiverImpl {
public:
OutfeedReceiverImpl(
OutfeedReceiver::Callback callback,
absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options);
OutfeedReceiverImpl(const OutfeedReceiverImpl&) = delete;
OutfeedReceiverImpl& operator=(const OutfeedReceiverImpl&) = delete;
~OutfeedReceiverImpl();
void Start();
absl::StatusOr<XlaOp> AddOutfeedToBuilder(XlaBuilder* builder, XlaOp token,
uint32_t consumer_id,
std::vector<XlaOp> arrays,
uint32_t device_idx);
absl::Status RegisterOutfeed(uint32_t consumer_id, const Shape& shape);
private:
bool CallbackQueueHasSpace() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return callback_queue_size_bytes_ < max_callback_queue_size_bytes_;
}
bool ShutdownDone() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
return (num_working_callback_threads_ == 0 && num_listening_threads_ == 0);
}
void CallbackThreadLoop(int device_idx);
void DeviceListenerThreadLoop(int device_idx);
absl::Status SendShutdownOutfeedHeader(int device_idx);
absl::StatusOr<std::unique_ptr<Literal>> ReceiveRawFromOutfeed(
ifrt::PjRtDevice* device, const Shape& shape);
void EnqueueReceivedData(uint32_t device_idx,
std::unique_ptr<OutfeedData> received)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_);
void Shutdown();
OutfeedReceiver::Callback callback_;
std::vector<ifrt::PjRtDevice*> devices_;
uint64_t max_callback_queue_size_bytes_;
std::optional<ExecutableBuildOptions> executable_build_options_;
absl::Mutex mu_;
absl::flat_hash_map<uint32_t, Shape> shape_registry_ ABSL_GUARDED_BY(mu_);
uint64_t callback_queue_size_bytes_ ABSL_GUARDED_BY(mu_);
int num_listening_threads_ ABSL_GUARDED_BY(mu_);
bool shutdown_started_ ABSL_GUARDED_BY(mu_);
int num_working_callback_threads_ ABSL_GUARDED_BY(mu_);
std::vector<std::queue<std::unique_ptr<OutfeedData>>> callback_queues_
ABSL_GUARDED_BY(mu_);
std::unique_ptr<tsl::thread::ThreadPool> threads_;
};
OutfeedReceiverImpl::OutfeedReceiverImpl(
OutfeedReceiver::Callback callback,
absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options)
: executable_build_options_(executable_build_options) {
callback_ = callback;
max_callback_queue_size_bytes_ = max_callback_queue_size_bytes;
for (const auto& client : clients) {
for (auto device : client->addressable_devices()) {
devices_.push_back(tensorflow::down_cast<ifrt::PjRtDevice*>(device));
}
}
CHECK_GT(devices_.size(), 0);
callback_queues_ =
std::vector<std::queue<std::unique_ptr<OutfeedData>>>(devices_.size());
callback_queue_size_bytes_ = 0;
num_listening_threads_ = 0;
num_working_callback_threads_ = 0;
shutdown_started_ = false;
}
void OutfeedReceiverImpl::Start() {
{
absl::MutexLock lock(&mu_);
CHECK(!shutdown_started_);
}
int num_threads = 2 * devices_.size();
threads_ = std::make_unique<tsl::thread::ThreadPool>(
tsl::Env::Default(), "outfeed_receiver", num_threads);
for (int device_idx = 0; device_idx < devices_.size(); ++device_idx) {
threads_->Schedule(
[this, device_idx]() { DeviceListenerThreadLoop(device_idx); });
threads_->Schedule(
[this, device_idx]() { CallbackThreadLoop(device_idx); });
}
}
void OutfeedReceiverImpl::Shutdown() {
VLOG(2) << "Shutdown start";
{
absl::MutexLock lock(&mu_);
CHECK(!shutdown_started_);
shutdown_started_ = true;
}
for (int device_idx = 0; device_idx < devices_.size(); ++device_idx) {
TF_CHECK_OK(SendShutdownOutfeedHeader(device_idx));
}
VLOG(2) << "Shutdown waiting for listening and callback threads to stop";
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(this, &OutfeedReceiverImpl::ShutdownDone));
VLOG(2) << "Shutdown done";
}
OutfeedReceiverImpl::~OutfeedReceiverImpl() {
VLOG(2) << "~OutfeedReceiverImpl";
Shutdown();
}
void OutfeedReceiverImpl::DeviceListenerThreadLoop(int device_idx) {
{
absl::MutexLock lock(&mu_);
++num_listening_threads_;
}
ifrt::PjRtDevice* device = devices_[device_idx];
while (true) {
Shape header_shape = ShapeUtil::MakeShape(U32, {kOutfeedHeaderWords});
std::unique_ptr<Literal> header =
ReceiveRawFromOutfeed(device, header_shape).value();
absl::Span<uint32_t> header_data = header->data<uint32_t>();
CHECK_EQ(header_data.size(), kOutfeedHeaderWords);
CHECK_EQ(header_data[0], kOutfeedHeaderStart);
uint32_t consumer_id = header_data[1];
Shape shape;
{
absl::MutexLock lock(&mu_);
auto registered_shape = shape_registry_.find(consumer_id);
if (registered_shape == shape_registry_.end()) {
LOG(FATAL)
<< "[" << device->DebugString()
<< "] Cannot find registered shape for consumer ID " << consumer_id
<< ". Perhaps the code was compiled with a different instance "
<< "of OutfeedReceiver.";
}
shape = registered_shape->second;
}
auto received = std::make_unique<OutfeedData>(device, consumer_id, shape);
VLOG(2) << "Listener received header " << received->DebugString();
if (consumer_id == kOutfeedCidShutdown) {
VLOG(2) << "[" << device->DebugString()
<< "] Listener received shutdown header";
absl::MutexLock lock(&mu_);
--num_listening_threads_;
VLOG(2) << "[" << device->DebugString() << "] Enqueue shutdown callback";
EnqueueReceivedData(device_idx, std::move(received));
return;
}
std::unique_ptr<Literal> data =
ReceiveRawFromOutfeed(device, shape).value();
received->SetLiteral(std::move(data));
absl::MutexLock lock(&mu_);
EnqueueReceivedData(device_idx, std::move(received));
}
}
void OutfeedReceiverImpl::EnqueueReceivedData(
uint32_t device_idx, std::unique_ptr<OutfeedData> received)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
mu_.Await(absl::Condition(this, &OutfeedReceiverImpl::CallbackQueueHasSpace));
ssize_t literal_size_bytes = received->literal_size_bytes();
callback_queue_size_bytes_ += literal_size_bytes;
VLOG(2) << "Listener enqueues data " << received->DebugString() << " of size "
<< literal_size_bytes << " bytes; "
<< (1 + callback_queues_[device_idx].size())
<< " callbacks in queue of total size " << callback_queue_size_bytes_
<< " bytes.\n";
callback_queues_[device_idx].push(std::move(received));
}
absl::StatusOr<std::unique_ptr<Literal>>
OutfeedReceiverImpl::ReceiveRawFromOutfeed(ifrt::PjRtDevice* device,
const Shape& shape) {
auto literal = std::make_unique<Literal>(shape);
TF_RETURN_IF_ERROR(
device->client()->TransferFromOutfeed(device, literal.get()));
return literal;
}
void OutfeedReceiverImpl::CallbackThreadLoop(int device_idx) {
const ifrt::PjRtDevice* device = devices_[device_idx];
{
absl::MutexLock lock(&mu_);
num_working_callback_threads_++;
}
while (true) {
std::unique_ptr<OutfeedData> received;
{
absl::MutexLock lock(&mu_);
mu_.Await(absl::Condition(
+[](std::queue<std::unique_ptr<OutfeedData>>* queue) {
return !queue->empty();
},
&callback_queues_[device_idx]));
received = std::move(callback_queues_[device_idx].front());
callback_queues_[device_idx].pop();
callback_queue_size_bytes_ -= received->literal_size_bytes();
VLOG(2) << "[" << device->DebugString() << "] Dequeued callback for "
<< received->DebugString() << "; "
<< callback_queues_[device_idx].size()
<< " callbacks in queue of total size "
<< callback_queue_size_bytes_ << " bytes.\n";
}
if (received->consumer_id() == kOutfeedCidShutdown) {
VLOG(2) << "[" << device->DebugString()
<< "] Callback loop received shutdown signal";
{
absl::MutexLock lock(&mu_);
CHECK(callback_queues_[device_idx].empty());
--num_working_callback_threads_;
}
VLOG(2) << "[" << device->DebugString() << "] Callback loop done";
return;
}
{
tsl::profiler::TraceMe traceme("OutfeedReceiver::Callback");
callback_(received->device(), received->consumer_id(),
received->literal());
}
}
}
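// Compiles and runs a tiny computation on the device that outfeeds only the
// reserved shutdown header (consumer id 0); the device's listener thread sees
// it, exits, and forwards a shutdown marker to the callback thread.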
absl::Status OutfeedReceiverImpl::SendShutdownOutfeedHeader(int device_idx) {
const ifrt::PjRtDevice* device = devices_[device_idx];
constexpr int consumer_id = kOutfeedCidShutdown;
VLOG(2) << "[" << device->DebugString()
<< "] SendSpecialHeader cons=" << consumer_id;
XlaBuilder builder(
absl::StrFormat("special_outfeed_header_%d_%d", consumer_id, device_idx));
XlaOp cst_operand = xla::ConstantR0<int32_t>(&builder, 0);
XlaOp outfeed =
AddOutfeedToBuilder(&builder, CreateToken(&builder), consumer_id, {}, 0)
.value();
XlaOp add_dep = xla::internal::XlaBuilderFriend::BuildAddDependency(
&builder, cst_operand, outfeed, ShapeUtil::MakeScalarShape(S32));
XlaComputation computation = builder.Build(add_dep).value();
CompileOptions compile_options;
if (executable_build_options_) {
compile_options.executable_build_options = *executable_build_options_;
}
compile_options.executable_build_options.set_num_replicas(1);
compile_options.executable_build_options.set_num_partitions(1);
DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = device->Id().value();
compile_options.executable_build_options.set_device_assignment(
device_assignment);
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
devices_[device_idx]->client()->pjrt_client()->Compile(
computation, std::move(compile_options)));
ExecuteOptions execute_options;
TF_ASSIGN_OR_RETURN(
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> output_buffers,
executable->Execute({{}}, execute_options));
return absl::OkStatus();
}
absl::Status OutfeedReceiverImpl::RegisterOutfeed(uint32_t consumer_id,
const Shape& shape) {
VLOG(2) << "RegisterShape cons=" << consumer_id
<< "; shape=" << shape.ToString();
{
absl::MutexLock lock(&mu_);
auto found = shape_registry_.find(consumer_id);
if (found != shape_registry_.end()) {
if (!ShapeUtil::Equal(shape, found->second)) {
return InvalidArgument(
"Shape %s does not match previous shape %s used "
"for consumer id %d",
shape.DebugString(), found->second.DebugString(), consumer_id);
}
} else {
shape_registry_.insert({consumer_id, shape});
}
}
return absl::OkStatus();
}
absl::StatusOr<XlaOp> OutfeedReceiverImpl::AddOutfeedToBuilder(
XlaBuilder* builder, XlaOp token, uint32_t consumer_id,
std::vector<XlaOp> arrays, uint32_t device_idx) {
XlaOp data = Tuple(builder, std::move(arrays));
Shape shape_with_layout = builder->GetShape(data).value();
ShapeUtil::ForEachMutableSubshape(
&shape_with_layout, [](Shape* subshape, const ShapeIndex&) {
if (!subshape->has_layout()) {
LayoutUtil::SetToDefaultLayout(subshape);
}
});
TF_RETURN_IF_ERROR(RegisterOutfeed(consumer_id, shape_with_layout));
std::vector<uint32_t> header{kOutfeedHeaderStart, consumer_id};
XlaOp header_op = ConstantR1<uint32_t>(builder, header);
builder->SetSharding(sharding_builder::AssignDevice(device_idx));
token = OutfeedWithToken(
header_op, token, ShapeUtil::MakeShape(U32, {kOutfeedHeaderWords}), "");
if (consumer_id != kOutfeedCidShutdown) {
token = OutfeedWithToken(data, token, shape_with_layout, "");
}
builder->ClearSharding();
return token;
}
OutfeedReceiver::OutfeedReceiver(
Callback callback, absl::Span<ifrt::PjRtClient* const> clients,
ssize_t max_callback_queue_size_bytes,
const std::optional<ExecutableBuildOptions>& executable_build_options) {
p_impl_ = std::make_unique<OutfeedReceiverImpl>(callback, clients,
max_callback_queue_size_bytes,
executable_build_options);
}
OutfeedReceiver::~OutfeedReceiver() = default;
void OutfeedReceiver::Start() { p_impl_->Start(); }
absl::StatusOr<XlaOp> OutfeedReceiver::AddOutfeedToBuilder(
XlaBuilder* builder, XlaOp token, uint32_t consumer_id,
std::vector<XlaOp> arrays, uint32_t device_idx) {
if (consumer_id == kOutfeedCidShutdown) {
return InvalidArgument("Consumer ID cannot be a reserved value: %d",
consumer_id);
}
return p_impl_->AddOutfeedToBuilder(builder, token, consumer_id, arrays,
device_idx);
}
absl::Status OutfeedReceiver::RegisterOutfeed(uint32_t consumer_id,
const Shape& shape) {
if (consumer_id == kOutfeedCidShutdown) {
return InvalidArgument("Consumer ID cannot be a reserved value: %d",
consumer_id);
}
return p_impl_->RegisterOutfeed(consumer_id, shape);
}
} | #include "xla/python/outfeed_receiver.h"
#include <memory>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/client/client_library.h"
#include "xla/client/executable_build_options.h"
#include "xla/client/xla_builder.h"
#include "xla/pjrt/cpu/cpu_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/pjrt/pjrt_stream_executor_client.h"
#include "xla/service/platform_util.h"
#include "xla/test.h"
namespace xla {
namespace {
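// Compiles |root| for a single replica/partition pinned to |device_id| and
// executes it once with no arguments.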
absl::Status CompileAndExecute(XlaBuilder* builder, XlaOp root, int device_id,
PjRtClient* client) {
XlaComputation computation = builder->Build(root).value();
CompileOptions compile_options;
compile_options.executable_build_options.set_num_replicas(1);
compile_options.executable_build_options.set_num_partitions(1);
DeviceAssignment device_assignment(1, 1);
device_assignment(0, 0) = device_id;
compile_options.executable_build_options.set_device_assignment(
device_assignment);
TF_ASSIGN_OR_RETURN(std::unique_ptr<PjRtLoadedExecutable> executable,
client->Compile(computation, std::move(compile_options)));
ExecuteOptions execute_options;
TF_ASSIGN_OR_RETURN(
std::vector<std::vector<std::unique_ptr<PjRtBuffer>>> output_buffers,
executable->Execute({{}}, execute_options));
return absl::OkStatus();
}
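// Thread-safe collector for the (consumer_id, literal) pairs delivered by the
// outfeed callback, so tests can assert on them after the receiver shuts down.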
class Accumulator {
public:
struct Data {
uint32_t consumer_id;
std::shared_ptr<Literal> data;
};
void Receive(uint32_t consumer_id, std::shared_ptr<Literal> data) {
absl::MutexLock lock(&mutex_);
received_.push_back(Data{consumer_id, data});
}
std::vector<Data> received() {
absl::MutexLock lock(&mutex_);
return received_;
}
private:
absl::Mutex mutex_;
std::vector<Data> received_ ABSL_GUARDED_BY(mutex_);
};
TEST(OutfeedReceiverTest, ReceiveOutfeedSimple) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data = Iota(&builder, shape0, 0);
XlaOp send = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder, send, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(1, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
}
TEST(OutfeedReceiverTest, ReceiveOutfeedTwoComputations) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder0("execute_test_outfeed_0");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder0, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder0, CreateToken(&builder0),
consumer_id0, {data0}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder0, send0, 0, cpu_client.get()).ok());
XlaBuilder builder1("execute_test_outfeed_1");
constexpr int consumer_id1 = 6;
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder1, shape1, 0);
XlaOp send1 = outfeed_receiver
->AddOutfeedToBuilder(&builder1, CreateToken(&builder1),
consumer_id1, {data1}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder1, send1, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(2, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
EXPECT_EQ(consumer_id1, received[1].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape1}), received[1].data->shape());
}
TEST(OutfeedReceiverTest, ReceiveOutfeedTwoOutfeed) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data0}, 0)
.value();
constexpr int consumer_id1 = 6;
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder, shape1, 0);
XlaOp send1 =
outfeed_receiver
->AddOutfeedToBuilder(&builder, send0, consumer_id1, {data1}, 0)
.value();
EXPECT_TRUE(CompileAndExecute(&builder, send1, 0, cpu_client.get()).ok());
outfeed_receiver = nullptr;
std::vector<Accumulator::Data> received = receiver->received();
EXPECT_EQ(2, received.size());
EXPECT_EQ(consumer_id0, received[0].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape0}), received[0].data->shape());
EXPECT_EQ(consumer_id1, received[1].consumer_id);
EXPECT_EQ(ShapeUtil::MakeTupleShape({shape1}), received[1].data->shape());
}
TEST(OutfeedReceiverTest, DifferentShapeForConsumerIdError) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
constexpr int consumer_id0 = 5;
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
XlaOp send0 = outfeed_receiver
->AddOutfeedToBuilder(&builder, CreateToken(&builder),
consumer_id0, {data0}, 0)
.value();
const Shape shape1 = ShapeUtil::MakeShape(U32, {128});
XlaOp data1 = Iota(&builder, shape1, 0);
absl::StatusOr<XlaOp> send1 = outfeed_receiver->AddOutfeedToBuilder(
&builder, send0, consumer_id0, {data1}, 0);
EXPECT_FALSE(send1.ok());
EXPECT_THAT(
send1.status().ToString(),
testing::ContainsRegex(
#if defined(PLATFORM_WINDOWS)
"does not match previous shape \\w*/*\\w* *\\n?element_type"));
#else
"does not match previous shape (go/\\w+[ "
"]+\\n)?element_type"));
#endif
}
TEST(OutfeedReceiverTest, InvalidConsumerIdError) {
TF_ASSERT_OK_AND_ASSIGN(std::shared_ptr<PjRtClient> cpu_client,
GetTfrtCpuClient(CpuClientOptions()));
auto ifrt_cpu_client = ifrt::PjRtClient::Create(cpu_client);
std::vector<ifrt::PjRtClient*> clients{ifrt_cpu_client.get()};
auto receiver = std::make_unique<Accumulator>();
OutfeedReceiver::Callback callback =
[&receiver](xla::ifrt::PjRtDevice* device, uint32_t consumer_id,
std::shared_ptr<Literal> data) {
receiver->Receive(consumer_id, data);
};
auto outfeed_receiver =
std::make_shared<OutfeedReceiver>(callback, clients, 128, std::nullopt);
outfeed_receiver->Start();
XlaBuilder builder("execute_test_outfeed");
const Shape shape0 = ShapeUtil::MakeShape(U32, {16});
XlaOp data0 = Iota(&builder, shape0, 0);
absl::StatusOr<XlaOp> send0 = outfeed_receiver->AddOutfeedToBuilder(
&builder, CreateToken(&builder), 0, {data0}, 0);
EXPECT_FALSE(send0.ok());
EXPECT_THAT(send0.status().ToString(),
testing::HasSubstr("Consumer ID cannot be a reserved value"));
}
}  // namespace
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/outfeed_receiver.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/outfeed_receiver_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6c806b30-785a-4e6a-bc6a-047845ca3876 | cpp | tensorflow/tensorflow | device_util | tensorflow/compiler/jit/device_util.cc | tensorflow/compiler/jit/device_util_test.cc | #include "tensorflow/compiler/jit/device_util.h"
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "xla/status_macros.h"
namespace tensorflow {
namespace jit {
void DeviceSet::Insert(DeviceId device_id) {
int word_index = device_id.id() / kWordSize;
int bit_index = device_id.id() % kWordSize;
const int storage_size = storage_.size();
if (word_index >= storage_size) {
storage_.resize(word_index + 1, 0);
}
storage_[word_index] |= (1ull << bit_index);
}
void DeviceSet::UnionWith(const DeviceSet& other) {
if (other.storage_.size() > storage_.size()) {
storage_.resize(other.storage_.size(), 0);
}
for (int i = 0, end = other.storage_.size(); i < end; i++) {
storage_[i] |= other.storage_[i];
}
}
bool DeviceSet::IsEmpty() const {
return absl::c_all_of(storage_, [&](uint64 val) { return val == 0; });
}
absl::StatusOr<DeviceId> DeviceInfoCache::GetIdFor(absl::string_view name) {
TF_RET_CHECK(!name.empty());
auto it = name_to_id_.find(name);
if (it != name_to_id_.end()) {
return it->second;
}
int new_id = names_.size();
names_.push_back(string(name));
id_to_device_type_.push_back(std::make_unique<DeviceType>(""));
DeviceType* device_type = id_to_device_type_.back().get();
TF_RETURN_IF_ERROR(DeviceNameToDeviceType(names_.back(), device_type));
is_cpu_.push_back(device_type->type_string() == DEVICE_CPU);
is_gpu_.push_back(device_type->type_string() == DEVICE_GPU);
name_to_id_.emplace(string(name), DeviceId(new_id));
const XlaOpRegistry::DeviceRegistration* compilation_device;
if (!XlaOpRegistry::GetCompilationDevice(device_type->type(),
&compilation_device)) {
compilation_device = nullptr;
}
id_to_compilation_device_.push_back(compilation_device);
return DeviceId(new_id);
}
string DeviceInfoCache::DebugString(const DeviceSet& device_set) const {
std::vector<string> names;
device_set.ForEach([&](DeviceId device_id) {
names.push_back(string(GetNameFor(device_id)));
return true;
});
return absl::StrCat("[", absl::StrJoin(names, ","), "]");
}
}  // namespace jit
Status DeviceNameToDeviceType(const string& device, DeviceType* device_type) {
DeviceNameUtils::ParsedName parsed;
if (!DeviceNameUtils::ParseFullName(device, &parsed)) {
return errors::Internal("Malformed assigned device '", device, "'");
}
*device_type = DeviceType(parsed.type);
return absl::OkStatus();
}
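// Chooses the device an XLA cluster should target from a set of assigned
// devices. Devices are bucketed into GPU, CPU, and "unknown" (e.g. a custom
// accelerator) types; picking fails (or yields nullopt when
// failure_to_pick_is_error is false) if a bucket contains two genuinely
// different devices, if a GPU is mixed with an unknown type, or (unless
// allow_mixing_unknown_and_cpu) if an unknown type is mixed with a CPU.
// Otherwise the preference order is GPU, then the unknown device, then CPU.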
absl::StatusOr<std::optional<jit::DeviceId>> PickDeviceForXlaImpl(
const jit::DeviceInfoCache& device_info_cache,
const jit::DeviceSet& devices, bool allow_mixing_unknown_and_cpu,
bool failure_to_pick_is_error) {
#define FAILED_TO_PICK_DEVICE(failing_status) \
do { \
if (failure_to_pick_is_error) { \
return failing_status; \
} else { \
return {std::nullopt}; \
} \
} while (false)
std::optional<jit::DeviceId> maybe_gpu_device;
std::optional<jit::DeviceId> maybe_cpu_device;
std::optional<jit::DeviceId> maybe_unknown_device;
bool multiple_cpu_devices = false;
bool multiple_gpu_devices = false;
bool multiple_unknown_devices = false;
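  // Treats two compatible names where one is only a partial specification of
  // the other as the same device, keeping the more fully specified name in
  // *d1; only genuinely different devices count as a conflict.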
const auto is_multiple_devices =
[&](const jit::DeviceId& d0, std::optional<jit::DeviceId>* d1) -> bool {
const absl::string_view name0 = device_info_cache.GetNameFor(d0);
const absl::string_view name1 = device_info_cache.GetNameFor(d1->value());
DeviceNameUtils::ParsedName parsed0, parsed1;
if (!DeviceNameUtils::ParseFullName(name0, &parsed0) ||
!DeviceNameUtils::ParseFullName(name1, &parsed1) ||
!DeviceNameUtils::AreCompatibleDevNames(parsed0, parsed1)) {
return true;
}
if (DeviceNameUtils::IsSpecification(parsed0, parsed1)) {
return false;
}
if (DeviceNameUtils::IsSpecification(parsed1, parsed0)) {
*d1 = d0;
return false;
}
return true;
};
devices.ForEach([&](jit::DeviceId device) {
if (device_info_cache.IsGpu(device)) {
if (maybe_gpu_device) {
multiple_gpu_devices = is_multiple_devices(device, &maybe_gpu_device);
if (multiple_gpu_devices) return false;
} else {
maybe_gpu_device = device;
}
} else if (device_info_cache.IsCpu(device)) {
if (maybe_cpu_device) {
multiple_cpu_devices = is_multiple_devices(device, &maybe_cpu_device);
if (multiple_cpu_devices) return false;
} else {
maybe_cpu_device = device;
}
} else {
if (maybe_unknown_device) {
multiple_unknown_devices = true;
return false;
}
maybe_unknown_device = device;
}
return true;
});
if (multiple_cpu_devices) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Multiple CPU devices ", device_info_cache.DebugString(devices)));
}
if (multiple_gpu_devices) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Multiple GPU devices ", device_info_cache.DebugString(devices)));
}
if (multiple_unknown_devices) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Multiple unknown devices ", device_info_cache.DebugString(devices)));
}
if (maybe_unknown_device && maybe_gpu_device) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Found both unknown and GPU devices: ",
device_info_cache.GetNameFor(*maybe_unknown_device), ", ",
device_info_cache.GetNameFor(*maybe_gpu_device)));
}
if (!allow_mixing_unknown_and_cpu) {
if (maybe_unknown_device && maybe_cpu_device) {
FAILED_TO_PICK_DEVICE(errors::Internal(
"Found both unknown and CPU devices: ",
device_info_cache.GetNameFor(*maybe_unknown_device), ", ",
device_info_cache.GetNameFor(*maybe_cpu_device)));
}
}
if (maybe_gpu_device) {
return {*maybe_gpu_device};
} else if (maybe_unknown_device) {
return {*maybe_unknown_device};
} else if (maybe_cpu_device) {
return {*maybe_cpu_device};
}
FAILED_TO_PICK_DEVICE(errors::Internal("Empty device set!"));
#undef FAILED_TO_PICK_DEVICE
}
absl::StatusOr<jit::DeviceId> PickDeviceForXla(
const jit::DeviceInfoCache& device_info_cache,
const jit::DeviceSet& devices, bool allow_mixing_unknown_and_cpu) {
TF_ASSIGN_OR_RETURN(std::optional<jit::DeviceId> device_id,
PickDeviceForXlaImpl(device_info_cache, devices,
allow_mixing_unknown_and_cpu,
true));
return *device_id;
}
absl::StatusOr<std::optional<jit::DeviceId>> MaybePickDeviceForXla(
const jit::DeviceInfoCache& device_info_cache,
const jit::DeviceSet& devices, bool allow_mixing_unknown_and_cpu) {
return PickDeviceForXlaImpl(device_info_cache, devices,
allow_mixing_unknown_and_cpu,
false);
}
} | #include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
Status PickDeviceHelper(bool allow_mixing_unknown_and_cpu,
absl::Span<const absl::string_view> device_names,
string* result) {
jit::DeviceInfoCache cache;
jit::DeviceSet device_set;
for (absl::string_view name : device_names) {
TF_ASSIGN_OR_RETURN(jit::DeviceId device_id, cache.GetIdFor(name));
device_set.Insert(device_id);
}
TF_ASSIGN_OR_RETURN(
jit::DeviceId result_id,
PickDeviceForXla(cache, device_set, allow_mixing_unknown_and_cpu));
*result = string(cache.GetNameFor(result_id));
return absl::OkStatus();
}
void CheckPickDeviceResult(absl::string_view expected_result,
bool allow_mixing_unknown_and_cpu,
absl::Span<const absl::string_view> inputs) {
string result;
TF_ASSERT_OK(PickDeviceHelper(allow_mixing_unknown_and_cpu, inputs, &result))
<< "inputs = [" << absl::StrJoin(inputs, ", ")
<< "], allow_mixing_unknown_and_cpu=" << allow_mixing_unknown_and_cpu
<< ", expected_result=" << expected_result;
EXPECT_EQ(result, expected_result);
}
void CheckPickDeviceHasError(bool allow_mixing_unknown_and_cpu,
absl::Span<const absl::string_view> inputs) {
string result;
EXPECT_FALSE(
PickDeviceHelper(allow_mixing_unknown_and_cpu, inputs, &result).ok());
}
const char* kCPU0 = "/job:localhost/replica:0/task:0/device:CPU:0";
const char* kGPU0 = "/job:localhost/replica:0/task:0/device:GPU:0";
const char* kXPU0 = "/job:localhost/replica:0/task:0/device:XPU:0";
const char* kYPU0 = "/job:localhost/replica:0/task:0/device:YPU:0";
const char* kCPU1 = "/job:localhost/replica:0/task:0/device:CPU:1";
const char* kGPU1 = "/job:localhost/replica:0/task:0/device:GPU:1";
const char* kXPU1 = "/job:localhost/replica:0/task:0/device:XPU:1";
const char* kCPU0Partial = "/device:CPU:0";
const char* kGPU0Partial = "/device:GPU:0";
const char* kXPU0Partial = "/device:XPU:0";
TEST(PickDeviceForXla, UniqueDevice) {
CheckPickDeviceResult(kGPU0, false, {kGPU0, kGPU0});
}
TEST(PickDeviceForXla, MoreSpecificDevice) {
CheckPickDeviceResult(kCPU0, false, {kCPU0, kCPU0Partial});
CheckPickDeviceResult(kGPU0, false, {kGPU0, kGPU0Partial});
CheckPickDeviceHasError(false, {kXPU1, kXPU0Partial});
}
TEST(PickDeviceForXla, DeviceOrder) {
CheckPickDeviceResult(kGPU0, false, {kGPU0, kCPU0});
CheckPickDeviceResult(kGPU0, false, {kCPU0, kGPU0});
CheckPickDeviceResult(kXPU0, true, {kXPU0, kCPU0});
}
TEST(PickDeviceForXla, MultipleUnknownDevices) {
CheckPickDeviceHasError(false, {kXPU0, kYPU0});
}
TEST(PickDeviceForXla, GpuAndUnknown) {
CheckPickDeviceHasError(false, {kGPU0, kXPU1});
}
TEST(PickDeviceForXla, UnknownAndCpu) {
CheckPickDeviceHasError(false, {kXPU0, kCPU1});
}
TEST(PickDeviceForXla, MultipleDevicesOfSameType) {
CheckPickDeviceHasError(true, {kCPU0, kCPU1});
CheckPickDeviceHasError(false, {kCPU0, kCPU1});
CheckPickDeviceHasError(false, {kGPU0, kGPU1});
CheckPickDeviceHasError(false, {kXPU0, kXPU1});
CheckPickDeviceHasError(false, {kCPU0, kCPU1, kGPU0});
}
void SimpleRoundTripTestForDeviceSet(int num_devices) {
jit::DeviceSet device_set;
jit::DeviceInfoCache device_info_cache;
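// Insert devices in id order, then verify that ForEach visits them in the same order.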
std::vector<string> expected_devices, actual_devices;
for (int i = 0; i < num_devices; i++) {
string device_name =
absl::StrCat("/job:localhost/replica:0/task:0/device:XPU:", i);
TF_ASSERT_OK_AND_ASSIGN(jit::DeviceId device_id,
device_info_cache.GetIdFor(device_name));
device_set.Insert(device_id);
expected_devices.push_back(device_name);
}
device_set.ForEach([&](jit::DeviceId device_id) {
actual_devices.push_back(string(device_info_cache.GetNameFor(device_id)));
return true;
});
EXPECT_EQ(expected_devices, actual_devices);
}
TEST(DeviceSetTest, SimpleRoundTrip_One) { SimpleRoundTripTestForDeviceSet(1); }
TEST(DeviceSetTest, SimpleRoundTrip_Small) {
SimpleRoundTripTestForDeviceSet(8);
}
TEST(DeviceSetTest, SimpleRoundTrip_Large) {
SimpleRoundTripTestForDeviceSet(800);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d2386078-ce90-43a2-9dde-276049b32c97 | cpp | google/quiche | push_promise_payload_decoder | quiche/http2/decoder/payload_decoders/push_promise_payload_decoder.cc | quiche/http2/decoder/payload_decoders/push_promise_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/push_promise_payload_decoder.h"
#include <stddef.h>
#include <ostream>
#include "absl/base/macros.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
std::ostream& operator<<(std::ostream& out,
PushPromisePayloadDecoder::PayloadState v) {
switch (v) {
case PushPromisePayloadDecoder::PayloadState::kReadPadLength:
return out << "kReadPadLength";
case PushPromisePayloadDecoder::PayloadState::
kStartDecodingPushPromiseFields:
return out << "kStartDecodingPushPromiseFields";
case PushPromisePayloadDecoder::PayloadState::kReadPayload:
return out << "kReadPayload";
case PushPromisePayloadDecoder::PayloadState::kSkipPadding:
return out << "kSkipPadding";
case PushPromisePayloadDecoder::PayloadState::
kResumeDecodingPushPromiseFields:
return out << "kResumeDecodingPushPromiseFields";
}
return out << static_cast<int>(v);
}
DecodeStatus PushPromisePayloadDecoder::StartDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
const Http2FrameHeader& frame_header = state->frame_header();
const uint32_t total_length = frame_header.payload_length;
QUICHE_DVLOG(2) << "PushPromisePayloadDecoder::StartDecodingPayload: "
<< frame_header;
QUICHE_DCHECK_EQ(Http2FrameType::PUSH_PROMISE, frame_header.type);
QUICHE_DCHECK_LE(db->Remaining(), total_length);
QUICHE_DCHECK_EQ(0, frame_header.flags & ~(Http2FrameFlag::END_HEADERS |
Http2FrameFlag::PADDED));
if (!frame_header.IsPadded()) {
payload_state_ = PayloadState::kStartDecodingPushPromiseFields;
} else {
payload_state_ = PayloadState::kReadPadLength;
}
state->InitializeRemainders();
return ResumeDecodingPayload(state, db);
}
DecodeStatus PushPromisePayloadDecoder::ResumeDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "UnknownPayloadDecoder::ResumeDecodingPayload"
<< " remaining_payload=" << state->remaining_payload()
<< " db->Remaining=" << db->Remaining();
const Http2FrameHeader& frame_header = state->frame_header();
QUICHE_DCHECK_EQ(Http2FrameType::PUSH_PROMISE, frame_header.type);
QUICHE_DCHECK_LE(state->remaining_payload(), frame_header.payload_length);
QUICHE_DCHECK_LE(db->Remaining(), frame_header.payload_length);
DecodeStatus status;
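// Decode as a state machine: each completed stage falls through to the next, while an incomplete stage records payload_state_ and returns so a later call resumes here.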
while (true) {
QUICHE_DVLOG(2)
<< "PushPromisePayloadDecoder::ResumeDecodingPayload payload_state_="
<< payload_state_;
switch (payload_state_) {
case PayloadState::kReadPadLength:
QUICHE_DCHECK_EQ(state->remaining_payload(),
frame_header.payload_length);
status = state->ReadPadLength(db, false);
if (status != DecodeStatus::kDecodeDone) {
payload_state_ = PayloadState::kReadPadLength;
return status;
}
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kStartDecodingPushPromiseFields:
status =
state->StartDecodingStructureInPayload(&push_promise_fields_, db);
if (status != DecodeStatus::kDecodeDone) {
payload_state_ = PayloadState::kResumeDecodingPushPromiseFields;
return status;
}
ReportPushPromise(state);
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kReadPayload:
QUICHE_DCHECK_LT(state->remaining_payload(),
frame_header.payload_length);
QUICHE_DCHECK_LE(state->remaining_payload(),
frame_header.payload_length -
Http2PushPromiseFields::EncodedSize());
QUICHE_DCHECK_LE(
state->remaining_payload(),
frame_header.payload_length -
Http2PushPromiseFields::EncodedSize() -
(frame_header.IsPadded() ? (1 + state->remaining_padding())
: 0));
{
size_t avail = state->AvailablePayload(db);
state->listener()->OnHpackFragment(db->cursor(), avail);
db->AdvanceCursor(avail);
state->ConsumePayload(avail);
}
if (state->remaining_payload() > 0) {
payload_state_ = PayloadState::kReadPayload;
return DecodeStatus::kDecodeInProgress;
}
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kSkipPadding:
if (state->SkipPadding(db)) {
state->listener()->OnPushPromiseEnd();
return DecodeStatus::kDecodeDone;
}
payload_state_ = PayloadState::kSkipPadding;
return DecodeStatus::kDecodeInProgress;
case PayloadState::kResumeDecodingPushPromiseFields:
status =
state->ResumeDecodingStructureInPayload(&push_promise_fields_, db);
if (status == DecodeStatus::kDecodeDone) {
ReportPushPromise(state);
payload_state_ = PayloadState::kReadPayload;
continue;
}
payload_state_ = PayloadState::kResumeDecodingPushPromiseFields;
return status;
}
QUICHE_BUG(http2_bug_183_1) << "PayloadState: " << payload_state_;
}
}
void PushPromisePayloadDecoder::ReportPushPromise(FrameDecoderState* state) {
const Http2FrameHeader& frame_header = state->frame_header();
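// For padded frames the reported padding covers the pad-length byte itself plus the trailing pad bytes.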
if (frame_header.IsPadded()) {
state->listener()->OnPushPromiseStart(frame_header, push_promise_fields_,
1 + state->remaining_padding());
} else {
state->listener()->OnPushPromiseStart(frame_header, push_promise_fields_,
0);
}
}
} | #include "quiche/http2/decoder/payload_decoders/push_promise_payload_decoder.h"
#include <stddef.h>
#include <string>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class PushPromisePayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() {
return Http2FrameType::PUSH_PROMISE;
}
static constexpr uint8_t FlagsAffectingPayloadDecoding() {
return Http2FrameFlag::PADDED;
}
};
namespace {
struct Listener : public FramePartsCollector {
void OnPushPromiseStart(const Http2FrameHeader& header,
const Http2PushPromiseFields& promise,
size_t total_padding_length) override {
QUICHE_VLOG(1) << "OnPushPromiseStart header: " << header
<< " promise: " << promise
<< " total_padding_length: " << total_padding_length;
EXPECT_EQ(Http2FrameType::PUSH_PROMISE, header.type);
StartFrame(header)->OnPushPromiseStart(header, promise,
total_padding_length);
}
void OnHpackFragment(const char* data, size_t len) override {
QUICHE_VLOG(1) << "OnHpackFragment: len=" << len;
CurrentFrame()->OnHpackFragment(data, len);
}
void OnPushPromiseEnd() override {
QUICHE_VLOG(1) << "OnPushPromiseEnd";
EndFrame()->OnPushPromiseEnd();
}
void OnPadding(const char* padding, size_t skipped_length) override {
QUICHE_VLOG(1) << "OnPadding: " << skipped_length;
CurrentFrame()->OnPadding(padding, skipped_length);
}
void OnPaddingTooLong(const Http2FrameHeader& header,
size_t missing_length) override {
QUICHE_VLOG(1) << "OnPaddingTooLong: " << header
<< "; missing_length: " << missing_length;
FrameError(header)->OnPaddingTooLong(header, missing_length);
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class PushPromisePayloadDecoderTest
: public AbstractPaddablePayloadDecoderTest<
PushPromisePayloadDecoder, PushPromisePayloadDecoderPeer, Listener> {
};
INSTANTIATE_TEST_SUITE_P(VariousPadLengths, PushPromisePayloadDecoderTest,
::testing::Values(0, 1, 2, 3, 4, 254, 255, 256));
TEST_P(PushPromisePayloadDecoderTest, VariousHpackPayloadSizes) {
for (size_t hpack_size : {0, 1, 2, 3, 255, 256, 1024}) {
QUICHE_LOG(INFO) << "########### hpack_size = " << hpack_size
<< " ###########";
Reset();
std::string hpack_payload = Random().RandString(hpack_size);
Http2PushPromiseFields push_promise{RandStreamId()};
frame_builder_.Append(push_promise);
frame_builder_.Append(hpack_payload);
MaybeAppendTrailingPadding();
Http2FrameHeader frame_header(frame_builder_.size(),
Http2FrameType::PUSH_PROMISE, RandFlags(),
RandStreamId());
set_frame_header(frame_header);
FrameParts expected(frame_header, hpack_payload, total_pad_length_);
expected.SetOptPushPromise(push_promise);
EXPECT_TRUE(
DecodePayloadAndValidateSeveralWays(frame_builder_.buffer(), expected));
}
}
TEST_P(PushPromisePayloadDecoderTest, Truncated) {
auto approve_size = [](size_t size) {
return size != Http2PushPromiseFields::EncodedSize();
};
Http2PushPromiseFields push_promise{RandStreamId()};
Http2FrameBuilder fb;
fb.Append(push_promise);
EXPECT_TRUE(VerifyDetectsMultipleFrameSizeErrors(0, fb.buffer(), approve_size,
total_pad_length_));
}
TEST_P(PushPromisePayloadDecoderTest, PaddingTooLong) {
EXPECT_TRUE(VerifyDetectsPaddingTooLong());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/push_promise_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/push_promise_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
95881b06-48de-4930-9377-92d811d54452 | cpp | abseil/abseil-cpp | substitute | absl/strings/substitute.cc | absl/strings/substitute_test.cc | #include "absl/strings/substitute.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/nullability.h"
#include "absl/strings/ascii.h"
#include "absl/strings/escaping.h"
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace substitute_internal {
void SubstituteAndAppendArray(
absl::Nonnull<std::string*> output, absl::string_view format,
absl::Nullable<const absl::string_view*> args_array, size_t num_args) {
size_t size = 0;
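// First pass: validate the format string and compute the exact output size; the second pass below fills the resized buffer in place.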
for (size_t i = 0; i < format.size(); i++) {
if (format[i] == '$') {
if (i + 1 >= format.size()) {
#ifndef NDEBUG
ABSL_RAW_LOG(FATAL,
"Invalid absl::Substitute() format string: \"%s\".",
absl::CEscape(format).c_str());
#endif
return;
} else if (absl::ascii_isdigit(
static_cast<unsigned char>(format[i + 1]))) {
int index = format[i + 1] - '0';
if (static_cast<size_t>(index) >= num_args) {
#ifndef NDEBUG
ABSL_RAW_LOG(
FATAL,
"Invalid absl::Substitute() format string: asked for \"$"
"%d\", but only %d args were given. Full format string was: "
"\"%s\".",
index, static_cast<int>(num_args), absl::CEscape(format).c_str());
#endif
return;
}
size += args_array[index].size();
++i;
} else if (format[i + 1] == '$') {
++size;
++i;
} else {
#ifndef NDEBUG
ABSL_RAW_LOG(FATAL,
"Invalid absl::Substitute() format string: \"%s\".",
absl::CEscape(format).c_str());
#endif
return;
}
} else {
++size;
}
}
if (size == 0) return;
size_t original_size = output->size();
ABSL_INTERNAL_CHECK(
size <= std::numeric_limits<size_t>::max() - original_size,
"size_t overflow");
strings_internal::STLStringResizeUninitializedAmortized(output,
original_size + size);
char* target = &(*output)[original_size];
for (size_t i = 0; i < format.size(); i++) {
if (format[i] == '$') {
if (absl::ascii_isdigit(static_cast<unsigned char>(format[i + 1]))) {
const absl::string_view src = args_array[format[i + 1] - '0'];
target = std::copy(src.begin(), src.end(), target);
++i;
} else if (format[i + 1] == '$') {
*target++ = '$';
++i;
}
} else {
*target++ = format[i];
}
}
assert(target == output->data() + output->size());
}
Arg::Arg(absl::Nullable<const void*> value) {
static_assert(sizeof(scratch_) >= sizeof(value) * 2 + 2,
"fix sizeof(scratch_)");
if (value == nullptr) {
piece_ = "NULL";
} else {
char* ptr = scratch_ + sizeof(scratch_);
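// Write the hex digits backwards from the end of scratch_, then prepend the "0x" prefix.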
uintptr_t num = reinterpret_cast<uintptr_t>(value);
do {
*--ptr = absl::numbers_internal::kHexChar[num & 0xf];
num >>= 4;
} while (num != 0);
*--ptr = 'x';
*--ptr = '0';
piece_ = absl::string_view(
ptr, static_cast<size_t>(scratch_ + sizeof(scratch_) - ptr));
}
}
Arg::Arg(Hex hex) {
char* const end = &scratch_[numbers_internal::kFastToBufferSize];
char* writer = end;
uint64_t value = hex.value;
do {
*--writer = absl::numbers_internal::kHexChar[value & 0xF];
value >>= 4;
} while (value != 0);
char* beg;
if (end - writer < hex.width) {
beg = end - hex.width;
std::fill_n(beg, writer - beg, hex.fill);
} else {
beg = writer;
}
piece_ = absl::string_view(beg, static_cast<size_t>(end - beg));
}
Arg::Arg(Dec dec) {
assert(dec.width <= numbers_internal::kFastToBufferSize);
char* const end = &scratch_[numbers_internal::kFastToBufferSize];
char* const minfill = end - dec.width;
char* writer = end;
uint64_t value = dec.value;
bool neg = dec.neg;
while (value > 9) {
*--writer = '0' + (value % 10);
value /= 10;
}
*--writer = '0' + static_cast<char>(value);
if (neg) *--writer = '-';
ptrdiff_t fillers = writer - minfill;
if (fillers > 0) {
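// With '0' fill on a negative value, temporarily drop the '-' so the zeros land between the sign and the digits, then re-add it.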
bool add_sign_again = false;
if (neg && dec.fill == '0') {
++writer;
add_sign_again = true;
}
writer -= fillers;
std::fill_n(writer, fillers, dec.fill);
if (add_sign_again) *--writer = '-';
}
piece_ = absl::string_view(writer, static_cast<size_t>(end - writer));
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/substitute.h"
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace {
struct MyStruct {
template <typename Sink>
friend void AbslStringify(Sink& sink, const MyStruct& s) {
sink.Append("MyStruct{.value = ");
sink.Append(absl::StrCat(s.value));
sink.Append("}");
}
int value;
};
TEST(SubstituteTest, Substitute) {
EXPECT_EQ("Hello, world!", absl::Substitute("$0, $1!", "Hello", "world"));
EXPECT_EQ("123 0.2 0.1 foo true false x",
absl::Substitute("$0 $1 $2 $3 $4 $5 $6", 123, 0.2, 0.1f,
std::string("foo"), true, false, 'x'));
EXPECT_EQ(
"-32767 65535 "
"-1234567890 3234567890 "
"-1234567890 3234567890 "
"-1234567890123456789 9234567890123456789",
absl::Substitute(
"$0 $1 $2 $3 $4 $5 $6 $7",
static_cast<short>(-32767),
static_cast<unsigned short>(65535),
-1234567890, 3234567890U, -1234567890L, 3234567890UL,
-int64_t{1234567890123456789}, uint64_t{9234567890123456789u}));
EXPECT_EQ("0 1 f ffff0ffff 0123456789abcdef",
absl::Substitute("$0$1$2$3$4 $5",
absl::Hex(0), absl::Hex(1, absl::kSpacePad2),
absl::Hex(0xf, absl::kSpacePad2),
absl::Hex(int16_t{-1}, absl::kSpacePad5),
absl::Hex(int16_t{-1}, absl::kZeroPad5),
absl::Hex(0x123456789abcdef, absl::kZeroPad16)));
EXPECT_EQ("0 115 -1-0001 81985529216486895",
absl::Substitute("$0$1$2$3$4 $5",
absl::Dec(0), absl::Dec(1, absl::kSpacePad2),
absl::Dec(0xf, absl::kSpacePad2),
absl::Dec(int16_t{-1}, absl::kSpacePad5),
absl::Dec(int16_t{-1}, absl::kZeroPad5),
absl::Dec(0x123456789abcdef, absl::kZeroPad16)));
const int* int_p = reinterpret_cast<const int*>(0x12345);
std::string str = absl::Substitute("$0", int_p);
EXPECT_EQ(absl::StrCat("0x", absl::Hex(int_p)), str);
volatile int vol = 237;
volatile int* volatile volptr = &vol;
str = absl::Substitute("$0", volptr);
EXPECT_EQ("true", str);
const uint64_t* null_p = nullptr;
str = absl::Substitute("$0", null_p);
EXPECT_EQ("NULL", str);
const char* char_p = "print me";
str = absl::Substitute("$0", char_p);
EXPECT_EQ("print me", str);
char char_buf[16];
strncpy(char_buf, "print me too", sizeof(char_buf));
str = absl::Substitute("$0", char_buf);
EXPECT_EQ("print me too", str);
char_p = nullptr;
str = absl::Substitute("$0", char_p);
EXPECT_EQ("", str);
EXPECT_EQ("b, a, c, b", absl::Substitute("$1, $0, $2, $1", "a", "b", "c"));
EXPECT_EQ("$", absl::Substitute("$$"));
EXPECT_EQ("$1", absl::Substitute("$$1"));
EXPECT_EQ("a", absl::Substitute("$0", "a"));
EXPECT_EQ("a b", absl::Substitute("$0 $1", "a", "b"));
EXPECT_EQ("a b c", absl::Substitute("$0 $1 $2", "a", "b", "c"));
EXPECT_EQ("a b c d", absl::Substitute("$0 $1 $2 $3", "a", "b", "c", "d"));
EXPECT_EQ("a b c d e",
absl::Substitute("$0 $1 $2 $3 $4", "a", "b", "c", "d", "e"));
EXPECT_EQ("a b c d e f", absl::Substitute("$0 $1 $2 $3 $4 $5", "a", "b", "c",
"d", "e", "f"));
EXPECT_EQ("a b c d e f g", absl::Substitute("$0 $1 $2 $3 $4 $5 $6", "a", "b",
"c", "d", "e", "f", "g"));
EXPECT_EQ("a b c d e f g h",
absl::Substitute("$0 $1 $2 $3 $4 $5 $6 $7", "a", "b", "c", "d", "e",
"f", "g", "h"));
EXPECT_EQ("a b c d e f g h i",
absl::Substitute("$0 $1 $2 $3 $4 $5 $6 $7 $8", "a", "b", "c", "d",
"e", "f", "g", "h", "i"));
EXPECT_EQ("a b c d e f g h i j",
absl::Substitute("$0 $1 $2 $3 $4 $5 $6 $7 $8 $9", "a", "b", "c",
"d", "e", "f", "g", "h", "i", "j"));
EXPECT_EQ("a b c d e f g h i j b0",
absl::Substitute("$0 $1 $2 $3 $4 $5 $6 $7 $8 $9 $10", "a", "b", "c",
"d", "e", "f", "g", "h", "i", "j"));
const char* null_cstring = nullptr;
EXPECT_EQ("Text: ''", absl::Substitute("Text: '$0'", null_cstring));
MyStruct s1 = MyStruct{17};
MyStruct s2 = MyStruct{1043};
EXPECT_EQ("MyStruct{.value = 17}, MyStruct{.value = 1043}",
absl::Substitute("$0, $1", s1, s2));
}
TEST(SubstituteTest, SubstituteAndAppend) {
std::string str = "Hello";
absl::SubstituteAndAppend(&str, ", $0!", "world");
EXPECT_EQ("Hello, world!", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0", "a");
EXPECT_EQ("a", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1", "a", "b");
EXPECT_EQ("a b", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1 $2", "a", "b", "c");
EXPECT_EQ("a b c", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1 $2 $3", "a", "b", "c", "d");
EXPECT_EQ("a b c d", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1 $2 $3 $4", "a", "b", "c", "d", "e");
EXPECT_EQ("a b c d e", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1 $2 $3 $4 $5", "a", "b", "c", "d", "e",
"f");
EXPECT_EQ("a b c d e f", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1 $2 $3 $4 $5 $6", "a", "b", "c", "d",
"e", "f", "g");
EXPECT_EQ("a b c d e f g", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1 $2 $3 $4 $5 $6 $7", "a", "b", "c", "d",
"e", "f", "g", "h");
EXPECT_EQ("a b c d e f g h", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1 $2 $3 $4 $5 $6 $7 $8", "a", "b", "c",
"d", "e", "f", "g", "h", "i");
EXPECT_EQ("a b c d e f g h i", str);
str.clear();
absl::SubstituteAndAppend(&str, "$0 $1 $2 $3 $4 $5 $6 $7 $8 $9", "a", "b",
"c", "d", "e", "f", "g", "h", "i", "j");
EXPECT_EQ("a b c d e f g h i j", str);
str.clear();
MyStruct s1 = MyStruct{17};
MyStruct s2 = MyStruct{1043};
absl::SubstituteAndAppend(&str, "$0, $1", s1, s2);
EXPECT_EQ("MyStruct{.value = 17}, MyStruct{.value = 1043}", str);
}
TEST(SubstituteTest, VectorBoolRef) {
std::vector<bool> v = {true, false};
const auto& cv = v;
EXPECT_EQ("true false true false",
absl::Substitute("$0 $1 $2 $3", v[0], v[1], cv[0], cv[1]));
std::string str = "Logic be like: ";
absl::SubstituteAndAppend(&str, "$0 $1 $2 $3", v[0], v[1], cv[0], cv[1]);
EXPECT_EQ("Logic be like: true false true false", str);
}
TEST(SubstituteTest, Enums) {
enum UnscopedEnum { kEnum0 = 0, kEnum1 = 1 };
EXPECT_EQ("0 1", absl::Substitute("$0 $1", UnscopedEnum::kEnum0,
UnscopedEnum::kEnum1));
enum class ScopedEnum { kEnum0 = 0, kEnum1 = 1 };
EXPECT_EQ("0 1",
absl::Substitute("$0 $1", ScopedEnum::kEnum0, ScopedEnum::kEnum1));
enum class ScopedEnumInt32 : int32_t { kEnum0 = 989, kEnum1 = INT32_MIN };
EXPECT_EQ("989 -2147483648",
absl::Substitute("$0 $1", ScopedEnumInt32::kEnum0,
ScopedEnumInt32::kEnum1));
enum class ScopedEnumUInt32 : uint32_t { kEnum0 = 1, kEnum1 = UINT32_MAX };
EXPECT_EQ("1 4294967295", absl::Substitute("$0 $1", ScopedEnumUInt32::kEnum0,
ScopedEnumUInt32::kEnum1));
enum class ScopedEnumInt64 : int64_t { kEnum0 = -1, kEnum1 = 42949672950 };
EXPECT_EQ("-1 42949672950", absl::Substitute("$0 $1", ScopedEnumInt64::kEnum0,
ScopedEnumInt64::kEnum1));
enum class ScopedEnumUInt64 : uint64_t { kEnum0 = 1, kEnum1 = 42949672950 };
EXPECT_EQ("1 42949672950", absl::Substitute("$0 $1", ScopedEnumUInt64::kEnum0,
ScopedEnumUInt64::kEnum1));
enum class ScopedEnumChar : signed char { kEnum0 = -1, kEnum1 = 1 };
EXPECT_EQ("-1 1", absl::Substitute("$0 $1", ScopedEnumChar::kEnum0,
ScopedEnumChar::kEnum1));
enum class ScopedEnumUChar : unsigned char {
kEnum0 = 0,
kEnum1 = 1,
kEnumMax = 255
};
EXPECT_EQ("0 1 255", absl::Substitute("$0 $1 $2", ScopedEnumUChar::kEnum0,
ScopedEnumUChar::kEnum1,
ScopedEnumUChar::kEnumMax));
enum class ScopedEnumInt16 : int16_t { kEnum0 = -100, kEnum1 = 10000 };
EXPECT_EQ("-100 10000", absl::Substitute("$0 $1", ScopedEnumInt16::kEnum0,
ScopedEnumInt16::kEnum1));
enum class ScopedEnumUInt16 : uint16_t { kEnum0 = 0, kEnum1 = 10000 };
EXPECT_EQ("0 10000", absl::Substitute("$0 $1", ScopedEnumUInt16::kEnum0,
ScopedEnumUInt16::kEnum1));
}
enum class EnumWithStringify { Many = 0, Choices = 1 };
template <typename Sink>
void AbslStringify(Sink& sink, EnumWithStringify e) {
sink.Append(e == EnumWithStringify::Many ? "Many" : "Choices");
}
TEST(SubstituteTest, AbslStringifyWithEnum) {
const auto e = EnumWithStringify::Choices;
EXPECT_EQ(absl::Substitute("$0", e), "Choices");
}
#if GTEST_HAS_DEATH_TEST
TEST(SubstituteDeathTest, SubstituteDeath) {
EXPECT_DEBUG_DEATH(
static_cast<void>(absl::Substitute(absl::string_view("-$2"), "a", "b")),
"Invalid absl::Substitute\\(\\) format string: asked for \"\\$2\", "
"but only 2 args were given.");
EXPECT_DEBUG_DEATH(
static_cast<void>(absl::Substitute(absl::string_view("-$z-"))),
"Invalid absl::Substitute\\(\\) format string: \"-\\$z-\"");
EXPECT_DEBUG_DEATH(
static_cast<void>(absl::Substitute(absl::string_view("-$"))),
"Invalid absl::Substitute\\(\\) format string: \"-\\$\"");
}
#endif
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/substitute.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/substitute_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
744b13d6-0162-45d3-85b9-beec62d260a6 | cpp | tensorflow/tensorflow | copy_fusion | third_party/xla/xla/service/gpu/transforms/copy_fusion.cc | third_party/xla/xla/service/gpu/transforms/copy_fusion_test.cc | #include "xla/service/gpu/transforms/copy_fusion.h"
#include <cstdint>
#include <queue>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
bool OnlyElementwiseOpsReachableFromParams(HloComputation* fused_computation) {
std::queue<const HloInstruction*> q;
absl::flat_hash_set<const HloInstruction*> visited;
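// BFS from the fusion parameters; fail if any reachable user is a copy or a non-elementwise op other than bitcast/tuple.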
for (auto param : fused_computation->parameter_instructions()) {
q.push(param);
visited.insert(param);
}
while (!q.empty()) {
const HloInstruction* hlo = q.front();
q.pop();
for (auto user : hlo->users()) {
if ((!user->IsElementwiseOnOperand(user->operand_index(hlo)) ||
user->opcode() == HloOpcode::kCopy) &&
user->opcode() != HloOpcode::kBitcast &&
user->opcode() != HloOpcode::kTuple) {
return false;
}
if (visited.insert(user).second) {
q.push(user);
}
}
}
return true;
}
absl::StatusOr<bool> CopyFusion::DoCopyFusion(HloComputation* computation) {
bool changed = false;
std::vector<HloInstruction*> defs_before_uses =
computation->MakeInstructionPostOrder();
for (HloInstruction* hlo : defs_before_uses) {
if (hlo->opcode() != HloOpcode::kFusion) {
continue;
}
std::vector<HloInstruction*> copies;
std::vector<HloInstruction*> other_users;
HloComputation* fused_computation = hlo->fused_instructions_computation();
if (!OnlyElementwiseOpsReachableFromParams(fused_computation)) {
continue;
}
HloInstruction* root = fused_computation->root_instruction();
if (IsReductionFromOrToContiguousDimensions(*root) ||
root->opcode() == HloOpcode::kScatter ||
(hlo->IsMultiOutputFusion() &&
absl::c_all_of(root->operands(), [](const HloInstruction* slice) {
return slice->opcode() == HloOpcode::kSlice;
}))) {
continue;
}
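// Partition the fusion's users into copies that can be absorbed as extra fusion outputs (possibly behind a get-tuple-element and/or bitcast) and all other users.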
for (auto user : hlo->users()) {
HloInstruction* copy_user = user;
if (copy_user->opcode() == HloOpcode::kGetTupleElement &&
copy_user->user_count() == 1) {
if (IsReductionFromOrToContiguousDimensions(
*(root->operand(copy_user->tuple_index())))) {
other_users.push_back(user);
continue;
}
copy_user = copy_user->users()[0];
}
if (copy_user->opcode() == HloOpcode::kBitcast &&
copy_user->user_count() == 1) {
copy_user = copy_user->users()[0];
}
if (copy_user->opcode() == HloOpcode::kCopy &&
copy_user->shape() == copy_user->operand(0)->shape() &&
!copy_user->shape().IsTuple() &&
!copy_user->HasControlDependencies()) {
copies.push_back(copy_user);
} else {
other_users.push_back(user);
}
}
if (copies.empty()) {
continue;
}
auto fusion_adaptor = HloFusionAdaptor::ForComputation(fused_computation);
auto dynamic_update_slices =
GetOutputDefiningDynamicUpdateSlices(fusion_adaptor->GetRoots());
if (!dynamic_update_slices.empty() &&
(root->opcode() != HloOpcode::kTuple ||
dynamic_update_slices.size() == root->shape().tuple_shapes_size())) {
continue;
}
changed = true;
HloInstruction::InstructionVector tuple_elements;
int64_t num_outputs =
hlo->IsMultiOutputFusion() ? root->operand_count() : int64_t{1};
tuple_elements.reserve(copies.size() + num_outputs);
if (hlo->IsMultiOutputFusion()) {
for (HloInstruction* operand : root->operands()) {
tuple_elements.push_back(operand);
}
} else {
tuple_elements.push_back(root);
}
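// For each absorbed copy, rebuild its bitcast/copy chain inside the fused computation so it becomes an additional tuple output.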
for (auto copy : copies) {
HloInstruction* user = copy;
std::vector<HloInstruction*> operand_chain;
operand_chain.push_back(user);
while (user->operand(0) != hlo) {
user = user->mutable_operand(0);
operand_chain.push_back(user);
}
HloInstruction* clone_operand = root;
if (hlo->IsMultiOutputFusion()) {
clone_operand = root->mutable_operand(user->tuple_index());
CHECK_EQ(operand_chain.back()->opcode(), HloOpcode::kGetTupleElement);
operand_chain.pop_back();
}
for (int64_t i = operand_chain.size() - 1; i >= 0; --i) {
HloInstruction* user = operand_chain[i];
clone_operand = fused_computation->AddInstruction(
user->CloneWithNewOperands(user->shape(), {clone_operand}));
}
tuple_elements.push_back(clone_operand);
}
HloInstruction* new_root = fused_computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
fused_computation->set_root_instruction(new_root,
true);
*hlo->mutable_shape() = new_root->shape();
if (root->opcode() == HloOpcode::kTuple) {
TF_RETURN_IF_ERROR(fused_computation->RemoveInstruction(root));
} else {
auto get_tuple_element_root = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(hlo, 0));
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWithDifferentShape(
other_users, get_tuple_element_root));
}
for (int64_t i = 0; i < copies.size(); ++i) {
auto get_tuple_element = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(hlo, num_outputs + i));
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(copies[i], get_tuple_element));
}
}
return changed;
}
absl::StatusOr<bool> CopyFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return DoCopyFusion(module->entry_computation());
}
}
} | #include "xla/service/gpu/transforms/copy_fusion.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace m = ::xla::match;
class CopyFusionTest : public HloTestBase {
public:
CopyFusion cf_;
};
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
scalar_mul_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
ROOT mul.1 = f32[] multiply(scalar_lhs.1, scalar_rhs.1)
})";
TEST_F(CopyFusionTest, CopyFusionTransposeOfBroadcastedConstantTwoCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
two = f32[] constant(2.0)
broadcast = f32[16,32]{1,0} broadcast(two), dimensions={}
s.1 = f32[16,32]{1,0} sqrt(broadcast)
ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
fusion = f32[32,16]{1,0} fusion(), kind=kInput, calls=fused_computation
copy.1 = f32[32,16]{1,0} copy(fusion)
copy.2 = f32[32,16]{1,0} copy(fusion)
ROOT t = (f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.2, copy.1)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Transpose(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionTransposeTwoCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
param_0.1 = f32[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
copy.1 = f32[32,16]{1,0} copy(fusion)
copy.2 = f32[32,16]{1,0} copy(fusion)
ROOT t = (f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.2, copy.1)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionNegateAndTwoCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
copy.2 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
fusion = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation
copy.1 = f32[512]{0} copy(fusion)
copy.2 = f32[512]{0} copy(fusion)
ROOT root = (f32[512]{0}, f32[512]{0}) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionShouldRunWithUncopiedReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
two = f32[] constant(2.0)
broadcast = f32[128,512,28,28]{3,2,1,0} broadcast(two)
mul = f32[128,512,28,28]{3,2,1,0} multiply(broadcast, broadcast)
const = f32[] constant(0.0)
reduce = f32[512]{0} reduce(mul, const), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[512]{0}) tuple(mul, reduce)
}
ENTRY entry {
fusion = (f32[128,512,28,28]{3,2,1,0}, f32[512]) fusion(), kind=kInput, calls=fused_computation
gte = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=0
gte.2 = f32[512]{0} get-tuple-element(fusion), index=1
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512]{0}) tuple(copy.1, gte.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Reduce(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionShouldNotFuseForSliceMultioutputFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)
slice1 = f32[128,100,28,28]{3,2,1,0} slice(mul), slice={[0:128],[0:100],[0:28],[0:28]}
slice2 = f32[128,200,28,28]{3,2,1,0} slice(mul), slice={[0:128],[50:250],[0:28],[0:28]}
ROOT tuple = (f32[128,100,28,28]{3,2,1,0}, f32[128,200,28,28]{3,2,1,0}) tuple(slice1, slice2)
}
ENTRY entry {
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[128,100,28,28]{3,2,1,0}, f32[128,200,28,28]{3,2,1,0}) fusion(p1), kind=kInput, calls=fused_computation
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithScatter) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p0 = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} negate(p0)
ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(input_tensor, scatter_indices, updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, to_apply=scalar_add_computation
}
ENTRY entry {
param.0 = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
param.1 = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
param.2 = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
fusion = f32[50,49,48,47,46]{4,3,2,1,0} fusion(param.0, param.1, param.2), kind=kInput, calls=fused_computation
ROOT copy = f32[50,49,48,47,46]{4,3,2,1,0} copy(fusion)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionShouldNotRunOutsideEntryComputation) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation.549 {
param_0.8511 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} parameter(0)
bitcast.52601 = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} bitcast(param_0.8511)
slice = bf16[15,1,2,48,128,1]{5,4,3,2,1,0} slice(bitcast.52601), slice={[0:15:1], [0:1:1], [0:2:1], [0:48:1], [0:128:1], [0:1:1]}
bitcast = bf16[15,1,2,48,128]{4,3,2,1,0} bitcast(slice)
ROOT broadcast = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} broadcast(bitcast), dimensions={0,1,2,3,4}
}
condition {
constant_6915 = s32[] constant(15)
param.218 = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) parameter(0)
get-tuple-element.3714 = s32[] get-tuple-element(param.218), index=1
ROOT compare.1738 = pred[] compare(get-tuple-element.3714, constant_6915), direction=LT
}
body {
tuple_param = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) parameter(0)
param_0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} get-tuple-element(tuple_param), index=0
param_1 = s32[] get-tuple-element(tuple_param), index=1
fusion.549 = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} fusion(param_0), kind=kLoop, calls=fused_computation.549
bitcast = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} bitcast(fusion.549)
copy = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} copy(bitcast)
constant_one = s32[] constant(1)
add = s32[] add(param_1, constant_one), control-predecessors={fusion.549}
ROOT tuple = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) tuple(copy, add)
}
ENTRY main {
param_0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} parameter(0)
zero = s32[] constant(0)
copy.0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} copy(param_0)
copy.1 = s32[] copy(zero)
tuple = tuple(copy.0, copy.1)
ROOT while = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) while(tuple), condition=condition, body=body, backend_config="{\"known_trip_count\":{\"n\":\"15\"}}"
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithDynamicUpdateSliceInplace) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p.0 = f16[50,96,1024]{2,1,0} parameter(0)
p.1 = f16[1,96,1024]{2,1,0} parameter(1)
c.0 = s32[3]{0} constant({0, 0, 0})
ROOT %dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0)
}
ENTRY entry {
p0 = f16[50,96,1024]{2,1,0} parameter(0)
p1 = f16[1,96,1024]{2,1,0} parameter(1)
fusion = f16[50,96,1024]{2,1,0} fusion(p0, p1), kind=kInput, calls=fused_computation
copy.1 = f16[50,96,1024]{2,1,0} copy(fusion)
copy.2 = f16[50,96,1024]{2,1,0} copy(fusion)
ROOT root = (f16[50,96,1024]{2,1,0}, f16[50,96,1024]{2,1,0}) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionWithDynamicUpdateSliceNotInplace) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
one = f32[] constant(1.0)
zero = f32[] constant(0.0)
p.0 = f16[50,96,1024]{2,1,0} broadcast(one), dimensions={}
p.1 = f16[1,96,1024]{2,1,0} broadcast(zero), dimensions={}
c.0 = s32[3]{0} constant({0, 0, 0})
dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0)
neg = f16[50,96,1024]{2,1,0} negate(dynamic-update-slice)
ROOT tuple = (f16[50,96,1024]{2,1,0}, f16[50,96,1024]{2,1,0}) tuple(dynamic-update-slice, neg)
}
ENTRY entry {
fusion = (f16[50,96,1024]{2,1,0}, f16[50,96,1024]{2,1,0}) fusion(), kind=kInput, calls=fused_computation
gte.0 = f16[50,96,1024]{2,1,0} get-tuple-element(fusion), index=0
gte.1 = f16[50,96,1024]{2,1,0} get-tuple-element(fusion), index=1
bitcast = f16[1,50,96,1024]{3,2,1,0} bitcast(gte.0)
copy = f16[1,50,96,1024]{3,2,1,0} copy(bitcast)
ROOT root = (f16[1,50,96,1024]{3,2,1,0}, f16[50,96,1024]{2,1,0}) tuple(copy, gte.1)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::DynamicUpdateSlice(), m::Negate(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionTransposeAndThreeCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
two = f32[] constant(2.0)
param_0.1 = f32[16,32]{1,0} broadcast(two), dimensions={}
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY entry {
fusion = f32[32,16]{1,0} fusion(), kind=kInput, calls=fused_computation
copy.1 = f32[32,16]{1,0} copy(fusion)
copy.2 = f32[32,16]{1,0} copy(fusion)
copy.3 = f32[32,16]{1,0} copy(fusion)
ROOT root = (f32[32,16]{1,0}, f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.1, copy.2, copy.3)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(), m::GetTupleElement())));
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Transpose(), m::Copy(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionRunWithOnlyOneCopy) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation
ROOT copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::GetTupleElement(m::Fusion(&fusion))));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionNegateAndTwoCopiesAndTransposeCopy) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
transpose = f32[128,512,28,28]{2,3,0,1} copy(fusion)
bitcast = f32[512,128,28,28]{3,2,1,0} bitcast(transpose)
copy.2 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, bitcast, copy.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::Bitcast(), m::GetTupleElement())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionRunWithOnlyOneNonTransposeCopy) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
transpose.1 = f32[128,512,28,28]{2,3,0,1} copy(fusion)
bitcast.1 = f32[512,128,28,28]{3,2,1,0} bitcast(transpose.1)
transpose.2 = f32[128,512,28,28]{2,3,0,1} copy(fusion)
bitcast.2 = f32[512,128,28,28]{3,2,1,0} bitcast(transpose.2)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}) tuple(copy.1, bitcast.1, bitcast.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::Bitcast(), m::Bitcast())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionSkipTupleCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul)
neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul)
ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation
copy.1 = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) copy(fusion)
copy.2 = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) copy(fusion)
ROOT root = ((f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}),(f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0})) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionTupleAndGetTuple) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul)
neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul)
ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation
gte.1 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=0
gte.2 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=1
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte.1)
copy.2 = f32[128,512,28,28]{3,2,1,0} copy(gte.2)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Negate(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionWithFusionReturningTupleAndOtherUser) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul)
neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul)
ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation
gte.1 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=0
gte.2 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=1
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte.1)
copy.2 = f32[128,512,28,28]{3,2,1,0} copy(gte.2)
transpose = f32[128,512,28,28]{2,3,0,1} copy(gte.1)
bitcast = f32[512,128,28,28]{3,2,1,0} bitcast(transpose)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, bitcast, copy.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Tuple(m::Copy(), m::Bitcast(),
m::GetTupleElement(m::Fusion(&fusion)))));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Negate(), m::Copy())));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/copy_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/copy_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8e55df92-c213-4cd5-876f-69f4af4f9644 | cpp | google/tsl | str_util | tsl/platform/str_util.cc | tsl/platform/str_util_test.cc | #include "tsl/platform/str_util.h"
#include <cctype>
#include <cstdint>
#include <string>
#include "absl/strings/ascii.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/stringpiece.h"
namespace tsl {
namespace str_util {
size_t RemoveLeadingWhitespace(absl::string_view* text) {
absl::string_view new_text = absl::StripLeadingAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
size_t RemoveTrailingWhitespace(absl::string_view* text) {
absl::string_view new_text = absl::StripTrailingAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
size_t RemoveWhitespaceContext(absl::string_view* text) {
absl::string_view new_text = absl::StripAsciiWhitespace(*text);
size_t count = text->size() - new_text.size();
*text = new_text;
return count;
}
bool ConsumeLeadingDigits(absl::string_view* s, uint64_t* val) {
const char* p = s->data();
const char* limit = p + s->size();
uint64_t v = 0;
while (p < limit) {
const char c = *p;
if (c < '0' || c > '9') break;
uint64_t new_v = (v * 10) + (c - '0');
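// Overflow check: without wraparound new_v >= 10 * v, so new_v / 8 < v can only hold if the multiply-add wrapped past 2^64.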
if (new_v / 8 < v) {
return false;
}
v = new_v;
p++;
}
if (p > s->data()) {
s->remove_prefix(p - s->data());
*val = v;
return true;
} else {
return false;
}
}
bool ConsumeNonWhitespace(absl::string_view* s, absl::string_view* val) {
const char* p = s->data();
const char* limit = p + s->size();
while (p < limit) {
const char c = *p;
if (isspace(c)) break;
p++;
}
const size_t n = p - s->data();
if (n > 0) {
*val = absl::string_view(s->data(), n);
s->remove_prefix(n);
return true;
} else {
*val = absl::string_view();
return false;
}
}
void TitlecaseString(string* s, absl::string_view delimiters) {
bool upper = true;
for (string::iterator ss = s->begin(); ss != s->end(); ++ss) {
if (upper) {
*ss = toupper(*ss);
}
upper = (delimiters.find(*ss) != absl::string_view::npos);
}
}
string StringReplace(absl::string_view s, absl::string_view oldsub,
absl::string_view newsub, bool replace_all) {
string res(s);
size_t pos = 0;
while ((pos = res.find(oldsub.data(), pos, oldsub.size())) != string::npos) {
res.replace(pos, oldsub.size(), newsub.data(), newsub.size());
pos += newsub.size();
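// An empty oldsub matches at every position; advance one extra character to avoid an infinite loop.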
if (oldsub.empty()) {
pos++;
}
if (!replace_all) {
break;
}
}
return res;
}
size_t Strnlen(const char* str, const size_t string_max_len) {
size_t len = 0;
while (len < string_max_len && str[len] != '\0') {
++len;
}
return len;
}
string ArgDefCase(absl::string_view s) {
const size_t n = s.size();
size_t extra_us = 0;
size_t to_skip = 0;
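// First pass: count the leading non-alphabetic characters to drop and the underscores to insert before interior capitals; the second pass writes the lower_snake_case result.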
for (size_t i = 0; i < n; ++i) {
if (i == to_skip && !isalpha(s[i])) {
++to_skip;
continue;
}
if (isupper(s[i]) && i != to_skip && i > 0 && isalnum(s[i - 1])) {
++extra_us;
}
}
string result(n + extra_us - to_skip, '_');
for (size_t i = to_skip, j = 0; i < n; ++i, ++j) {
DCHECK_LT(j, result.size());
char c = s[i];
if (isalnum(c)) {
if (isupper(c)) {
if (i != to_skip) {
DCHECK_GT(j, 0);
if (result[j - 1] != '_') ++j;
}
result[j] = tolower(c);
} else {
result[j] = c;
}
}
}
return result;
}
}
} | #include "tsl/platform/str_util.h"
#include <vector>
#include "tsl/platform/test.h"
namespace tsl {
TEST(CEscape, Basic) {
EXPECT_EQ(absl::CEscape("hello"), "hello");
EXPECT_EQ(absl::CEscape("hello\n"), "hello\\n");
EXPECT_EQ(absl::CEscape("hello\r"), "hello\\r");
EXPECT_EQ(absl::CEscape("\t\r\"'"), "\\t\\r\\\"\\'");
EXPECT_EQ(absl::CEscape("\320hi\200"), "\\320hi\\200");
}
string ExpectCUnescapeSuccess(absl::string_view source) {
string dest;
string error;
EXPECT_TRUE(absl::CUnescape(source, &dest, &error)) << error;
return dest;
}
TEST(CUnescape, Basic) {
EXPECT_EQ("hello", ExpectCUnescapeSuccess("hello"));
EXPECT_EQ("hello\n", ExpectCUnescapeSuccess("hello\\n"));
EXPECT_EQ("hello\r", ExpectCUnescapeSuccess("hello\\r"));
EXPECT_EQ("\t\r\"'", ExpectCUnescapeSuccess("\\t\\r\\\"\\'"));
EXPECT_EQ("\320hi\200", ExpectCUnescapeSuccess("\\320hi\\200"));
}
TEST(CUnescape, HandlesCopyOnWriteStrings) {
string dest = "hello";
string read = dest;
string error;
absl::string_view source = "llohe";
EXPECT_TRUE(absl::CUnescape(source, &dest, &error));
EXPECT_EQ("hello", read);
}
TEST(StripTrailingWhitespace, Basic) {
string test;
test = "hello";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "hello");
test = "foo ";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "foo");
test = " ";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "");
test = "";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, "");
test = " abc\t";
absl::StripTrailingAsciiWhitespace(&test);
EXPECT_EQ(test, " abc");
}
TEST(RemoveLeadingWhitespace, Basic) {
string text = " \t \n \r Quick\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 11);
EXPECT_EQ(data, absl::string_view("Quick\t"));
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view("Quick\t"));
}
TEST(RemoveLeadingWhitespace, TerminationHandling) {
string text = "\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 1);
EXPECT_EQ(data, absl::string_view(""));
EXPECT_EQ(str_util::RemoveLeadingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
TEST(RemoveTrailingWhitespace, Basic) {
string text = " \t \n \r Quick \t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 2);
EXPECT_EQ(data, absl::string_view(" \t \n \r Quick"));
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(" \t \n \r Quick"));
}
TEST(RemoveTrailingWhitespace, TerminationHandling) {
string text = "\t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 1);
EXPECT_EQ(data, absl::string_view(""));
EXPECT_EQ(str_util::RemoveTrailingWhitespace(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
TEST(RemoveWhitespaceContext, Basic) {
string text = " \t \n \r Quick \t";
absl::string_view data(text);
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 13);
EXPECT_EQ(data, absl::string_view("Quick"));
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0);
EXPECT_EQ(data, absl::string_view("Quick"));
text = "";
data = text;
EXPECT_EQ(str_util::RemoveWhitespaceContext(&data), 0);
EXPECT_EQ(data, absl::string_view(""));
}
void TestConsumeLeadingDigits(absl::string_view s, int64_t expected,
absl::string_view remaining) {
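// A negative expected value signals that ConsumeLeadingDigits should fail and leave the input unchanged.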
uint64 v;
absl::string_view input(s);
if (str_util::ConsumeLeadingDigits(&input, &v)) {
EXPECT_EQ(v, static_cast<uint64>(expected));
EXPECT_EQ(input, remaining);
} else {
EXPECT_LT(expected, 0);
EXPECT_EQ(input, remaining);
}
}
TEST(ConsumeLeadingDigits, Basic) {
using str_util::ConsumeLeadingDigits;
TestConsumeLeadingDigits("123", 123, "");
TestConsumeLeadingDigits("a123", -1, "a123");
TestConsumeLeadingDigits("9_", 9, "_");
TestConsumeLeadingDigits("11111111111xyz", 11111111111ll, "xyz");
TestConsumeLeadingDigits("1111111111111111111111111111111xyz", -1,
"1111111111111111111111111111111xyz");
TestConsumeLeadingDigits("18446744073709551616xyz", -1,
"18446744073709551616xyz");
TestConsumeLeadingDigits("18446744073709551615xyz", 18446744073709551615ull,
"xyz");
TestConsumeLeadingDigits("184467440737095516159yz", -1,
"184467440737095516159yz");
}
void TestConsumeNonWhitespace(absl::string_view s, absl::string_view expected,
absl::string_view remaining) {
absl::string_view v;
absl::string_view input(s);
if (str_util::ConsumeNonWhitespace(&input, &v)) {
EXPECT_EQ(v, expected);
EXPECT_EQ(input, remaining);
} else {
EXPECT_EQ(expected, "");
EXPECT_EQ(input, remaining);
}
}
TEST(ConsumeNonWhitespace, Basic) {
TestConsumeNonWhitespace("", "", "");
TestConsumeNonWhitespace(" ", "", " ");
TestConsumeNonWhitespace("abc", "abc", "");
TestConsumeNonWhitespace("abc ", "abc", " ");
}
TEST(ConsumePrefix, Basic) {
string s("abcdef");
absl::string_view input(s);
EXPECT_FALSE(absl::ConsumePrefix(&input, "abcdefg"));
EXPECT_EQ(input, "abcdef");
EXPECT_FALSE(absl::ConsumePrefix(&input, "abce"));
EXPECT_EQ(input, "abcdef");
EXPECT_TRUE(absl::ConsumePrefix(&input, ""));
EXPECT_EQ(input, "abcdef");
EXPECT_FALSE(absl::ConsumePrefix(&input, "abcdeg"));
EXPECT_EQ(input, "abcdef");
EXPECT_TRUE(absl::ConsumePrefix(&input, "abcdef"));
EXPECT_EQ(input, "");
input = s;
EXPECT_TRUE(absl::ConsumePrefix(&input, "abcde"));
EXPECT_EQ(input, "f");
}
TEST(StripPrefix, Basic) {
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdefg"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abce"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", ""), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdeg"), "abcdef");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcdef"), "");
EXPECT_EQ(absl::StripPrefix("abcdef", "abcde"), "f");
}
TEST(JoinStrings, Basic) {
std::vector<string> s;
s = {"hi"};
EXPECT_EQ(absl::StrJoin(s, " "), "hi");
s = {"hi", "there", "strings"};
EXPECT_EQ(absl::StrJoin(s, " "), "hi there strings");
std::vector<absl::string_view> sp;
sp = {"hi"};
EXPECT_EQ(absl::StrJoin(sp, ",,"), "hi");
sp = {"hi", "there", "strings"};
EXPECT_EQ(absl::StrJoin(sp, "--"), "hi--there--strings");
}
TEST(JoinStrings, Join3) {
std::vector<string> s;
s = {"hi"};
auto l1 = [](string* out, string s) { *out += s; };
EXPECT_EQ(str_util::Join(s, " ", l1), "hi");
s = {"hi", "there", "strings"};
auto l2 = [](string* out, string s) { *out += s[0]; };
EXPECT_EQ(str_util::Join(s, " ", l2), "h t s");
}
TEST(Split, Basic) {
EXPECT_TRUE(str_util::Split("", ',').empty());
EXPECT_EQ(absl::StrJoin(str_util::Split("a", ','), "|"), "a");
EXPECT_EQ(absl::StrJoin(str_util::Split(",", ','), "|"), "|");
EXPECT_EQ(absl::StrJoin(str_util::Split("a,b,c", ','), "|"), "a|b|c");
EXPECT_EQ(absl::StrJoin(str_util::Split("a,,,b,,c,", ','), "|"), "a|||b||c|");
EXPECT_EQ(absl::StrJoin(str_util::Split("a!,!b,!c,", ",!"), "|"),
"a|||b||c|");
EXPECT_EQ(absl::StrJoin(
str_util::Split("a,,,b,,c,", ',', str_util::SkipEmpty()), "|"),
"a|b|c");
EXPECT_EQ(
absl::StrJoin(
str_util::Split("a, ,b,,c,", ',', str_util::SkipWhitespace()), "|"),
"a|b|c");
EXPECT_EQ(absl::StrJoin(str_util::Split("a. !b,;c,", ".,;!",
str_util::SkipWhitespace()),
"|"),
"a|b|c");
}
TEST(Lowercase, Basic) {
EXPECT_EQ("", absl::AsciiStrToLower(""));
EXPECT_EQ("hello", absl::AsciiStrToLower("hello"));
EXPECT_EQ("hello world", absl::AsciiStrToLower("Hello World"));
}
TEST(Uppercase, Basic) {
EXPECT_EQ("", absl::AsciiStrToUpper(""));
EXPECT_EQ("HELLO", absl::AsciiStrToUpper("hello"));
EXPECT_EQ("HELLO WORLD", absl::AsciiStrToUpper("Hello World"));
}
TEST(SnakeCase, Basic) {
EXPECT_EQ("", str_util::ArgDefCase(""));
EXPECT_EQ("", str_util::ArgDefCase("!"));
EXPECT_EQ("", str_util::ArgDefCase("5"));
EXPECT_EQ("", str_util::ArgDefCase("!:"));
EXPECT_EQ("", str_util::ArgDefCase("5-5"));
EXPECT_EQ("", str_util::ArgDefCase("_!"));
EXPECT_EQ("", str_util::ArgDefCase("_5"));
EXPECT_EQ("a", str_util::ArgDefCase("_a"));
EXPECT_EQ("a", str_util::ArgDefCase("_A"));
EXPECT_EQ("i", str_util::ArgDefCase("I"));
EXPECT_EQ("i", str_util::ArgDefCase("i"));
EXPECT_EQ("i_", str_util::ArgDefCase("I%"));
EXPECT_EQ("i_", str_util::ArgDefCase("i%"));
EXPECT_EQ("i", str_util::ArgDefCase("%I"));
EXPECT_EQ("i", str_util::ArgDefCase("-i"));
EXPECT_EQ("i", str_util::ArgDefCase("3i"));
EXPECT_EQ("i", str_util::ArgDefCase("32i"));
EXPECT_EQ("i3", str_util::ArgDefCase("i3"));
EXPECT_EQ("i_a3", str_util::ArgDefCase("i_A3"));
EXPECT_EQ("i_i", str_util::ArgDefCase("II"));
EXPECT_EQ("i_i", str_util::ArgDefCase("I_I"));
EXPECT_EQ("i__i", str_util::ArgDefCase("I__I"));
EXPECT_EQ("i_i_32", str_util::ArgDefCase("II-32"));
EXPECT_EQ("ii_32", str_util::ArgDefCase("Ii-32"));
EXPECT_EQ("hi_there", str_util::ArgDefCase("HiThere"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi!Hi"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("HiHi"));
EXPECT_EQ("hihi", str_util::ArgDefCase("Hihi"));
EXPECT_EQ("hi_hi", str_util::ArgDefCase("Hi_Hi"));
}
TEST(TitlecaseString, Basic) {
string s = "sparse_lookup";
str_util::TitlecaseString(&s, "_");
ASSERT_EQ(s, "Sparse_Lookup");
s = "sparse_lookup";
str_util::TitlecaseString(&s, " ");
ASSERT_EQ(s, "Sparse_lookup");
s = "dense";
str_util::TitlecaseString(&s, " ");
ASSERT_EQ(s, "Dense");
}
TEST(StringReplace, Basic) {
EXPECT_EQ("XYZ_XYZ_XYZ", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ",
true));
}
TEST(StringReplace, OnlyFirst) {
EXPECT_EQ("XYZ_ABC_ABC", str_util::StringReplace("ABC_ABC_ABC", "ABC", "XYZ",
false));
}
TEST(StringReplace, IncreaseLength) {
EXPECT_EQ("a b c",
str_util::StringReplace("abc", "b", " b ", true));
}
TEST(StringReplace, IncreaseLengthMultipleMatches) {
EXPECT_EQ("a b b c",
str_util::StringReplace("abbc", "b", " b ", true));
}
TEST(StringReplace, NoChange) {
EXPECT_EQ("abc",
str_util::StringReplace("abc", "d", "X", true));
}
TEST(StringReplace, EmptyStringReplaceFirst) {
EXPECT_EQ("", str_util::StringReplace("", "a", "X", false));
}
TEST(StringReplace, EmptyStringReplaceAll) {
EXPECT_EQ("", str_util::StringReplace("", "a", "X", true));
}
TEST(Strnlen, Basic) {
EXPECT_EQ(0, str_util::Strnlen("ab", 0));
EXPECT_EQ(1, str_util::Strnlen("a", 1));
EXPECT_EQ(2, str_util::Strnlen("abcd", 2));
EXPECT_EQ(3, str_util::Strnlen("abc", 10));
EXPECT_EQ(4, str_util::Strnlen("a \t\n", 10));
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/str_util.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/str_util_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
a7e207fa-a826-4269-ba9a-fa266b94b2de | cpp | tensorflow/tensorflow | custom_kernel_fusion_autotuner | third_party/xla/xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.cc | third_party/xla/xla/service/gpu/autotuning/custom_kernel_fusion_autotuner_test.cc | #include "xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <tuple>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuning/autotuner_compile_util.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion.h"
#include "xla/service/shaped_buffer.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
absl::StatusOr<std::unique_ptr<HloModule>> ExtractFusionModule(
HloInstruction* fusion_instruction, int64_t kernel_index) {
std::unique_ptr<HloModule> hlo_module =
ExtractInstructionIntoNewModule(*fusion_instruction);
HloInstruction* instruction =
hlo_module->entry_computation()->root_instruction();
GpuBackendConfig gpu_config =
instruction->backend_config<GpuBackendConfig>().value();
gpu_config.mutable_fusion_backend_config()
->mutable_custom_fusion_config()
->set_kernel_index(kernel_index);
TF_RETURN_IF_ERROR(instruction->set_backend_config(gpu_config));
return hlo_module;
}
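// Compiles and benchmarks each candidate kernel for the fusion by extracting
// the fusion into a standalone module with kernel_index set to the candidate,
// returning (kernel index, measured duration) pairs.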
absl::StatusOr<std::vector<std::tuple<int, absl::Duration>>> ProfileKernels(
std::vector<CustomKernel>& kernels, HloInstruction* fusion_instruction,
AutotunerCompileUtil& compile_util, const AutotuneConfig& autotune_config,
const DebugOptions& debug_options) {
se::StreamExecutor* stream_exec = autotune_config.GetExecutor();
std::vector<std::tuple<int, absl::Duration>> results;
for (int i = 0; i < kernels.size(); ++i) {
TF_ASSIGN_OR_RETURN(absl::StatusOr<std::unique_ptr<Executable>> executable,
compile_util.Compile([&](const DebugOptions& opt) {
return ExtractFusionModule(fusion_instruction, i);
}));
se::DeviceMemoryAllocator* allocator = autotune_config.GetAllocator();
std::unique_ptr<se::DeviceMemoryAllocator> owned_allocator;
if (allocator == nullptr) {
owned_allocator =
std::make_unique<se::StreamExecutorMemoryAllocator>(stream_exec);
allocator = owned_allocator.get();
}
TF_ASSIGN_OR_RETURN(se::Stream* const stream, autotune_config.GetStream());
TF_ASSIGN_OR_RETURN(auto rz_buffers,
RedzoneBuffers::FromInstruction(
*fusion_instruction, autotune_config, debug_options,
RedzoneBuffers::kAllInputs));
std::optional<ScopedShapedBuffer> reference_buffer;
std::optional<AutotunerCompileUtil::ProfilingOutput> profiling_output;
TF_ASSIGN_OR_RETURN(profiling_output, compile_util.ProfileExecutable(
executable->get(), stream,
rz_buffers.input_buffers(),
rz_buffers.input_shapes()));
results.push_back({i, profiling_output->duration});
}
return results;
}
absl::StatusOr<int> FindFastestKernel(
const std::vector<std::tuple<int, absl::Duration>>& results) {
auto iter = absl::c_min_element(
results, [](const std::tuple<int, absl::Duration>& lhs,
const std::tuple<int, absl::Duration>& rhs) {
return std::get<1>(lhs) < std::get<1>(rhs);
});
if (iter == results.end()) {
return absl::InternalError("Failed to find fastest kernel.");
}
return std::get<0>(*iter);
}
absl::Status UpdateFusionInstructionKernelIndex(
HloInstruction* fusion_instruction, int kernel_index) {
GpuBackendConfig gpu_config =
fusion_instruction->backend_config<GpuBackendConfig>().value();
gpu_config.mutable_fusion_backend_config()
->mutable_custom_fusion_config()
->set_kernel_index(kernel_index);
TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(gpu_config));
return absl::OkStatus();
}
absl::StatusOr<std::vector<CustomKernel>> LoadKernels(
const HloInstruction* fusion_instruction,
const AutotuneConfig& autotune_config) {
auto config = fusion_instruction->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.custom_fusion_config();
auto* registry = CustomKernelFusionRegistry::Default();
auto* custom_kernel_fusion = registry->Lookup(config.name());
if (custom_kernel_fusion == nullptr) {
return absl::InternalError(
absl::StrCat("Custom kernel fusion ", config.name(),
" not found in a default registry."));
}
se::StreamExecutor* stream_exec = autotune_config.GetExecutor();
if (!stream_exec->SynchronizeAllActivity()) {
return Internal("Failed to synchronize GPU for autotuning.");
}
se::DeviceDescription device_description =
stream_exec->GetDeviceDescription();
TF_ASSIGN_OR_RETURN(
std::vector<CustomKernel> kernels,
custom_kernel_fusion->LoadKernels(
device_description,
fusion_instruction->fused_instructions_computation()));
return kernels;
}
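// Profiles all kernels registered for the custom fusion, writes the fastest
// kernel_index back into the fusion's backend config, and reports whether the
// index changed.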
absl::StatusOr<bool> AutotuneCustomKernelFusion(
HloInstruction* fusion_instruction, const AutotuneConfig& autotune_config,
AutotunerCompileUtil& compile_util, const DebugOptions& debug_options) {
int previous_kernel_index =
fusion_instruction->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.custom_fusion_config()
.kernel_index();
TF_ASSIGN_OR_RETURN(std::vector<CustomKernel> kernels,
LoadKernels(fusion_instruction, autotune_config));
std::vector<std::tuple<int, absl::Duration>> results;
TF_ASSIGN_OR_RETURN(results,
ProfileKernels(kernels, fusion_instruction, compile_util,
autotune_config, debug_options));
TF_ASSIGN_OR_RETURN(int fastest_kernel_index, FindFastestKernel(results));
TF_RETURN_IF_ERROR(UpdateFusionInstructionKernelIndex(fusion_instruction,
fastest_kernel_index));
return previous_kernel_index != fastest_kernel_index;
}
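// A computation is a custom fusion if it is a fusion computation of kind
// kCustom whose backend config carries the __custom_fusion kind.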
bool IsCustomFusion(const HloComputation* computation) {
if (!computation->IsFusionComputation()) {
return false;
}
HloInstruction* instruction = computation->FusionInstruction();
absl::StatusOr<GpuBackendConfig> gpu_backend_config =
instruction->backend_config<GpuBackendConfig>();
if (!gpu_backend_config.ok()) {
return false;
}
if (instruction->fusion_kind() != HloInstruction::FusionKind::kCustom) {
return false;
}
if (!gpu_backend_config->has_fusion_backend_config()) {
return false;
}
return gpu_backend_config->fusion_backend_config().kind() ==
kCustomFusionKind;
}
}
absl::StatusOr<bool> CustomKernelFusionAutotuner::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (config_.IsDeviceless()) {
return false;
}
const DebugOptions& debug_options = module->config().debug_options();
TF_ASSIGN_OR_RETURN(std::optional<AutotunerCompileUtil> compile_util,
AutotunerCompileUtil::Create(config_, debug_options));
TF_RET_CHECK(compile_util.has_value());
bool hlo_changed = false;
for (const HloComputation* computation : module->computations()) {
if (IsCustomFusion(computation)) {
TF_ASSIGN_OR_RETURN(
bool instruction_changed,
AutotuneCustomKernelFusion(computation->FusionInstruction(), config_,
compile_util.value(), debug_options));
if (instruction_changed) {
hlo_changed = true;
}
}
}
return hlo_changed;
}
}
} | #include "xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.h"
#include <memory>
#include <string>
#include <utility>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/gpu/autotuning/autotuner_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
class CustomKernelFusionAutotunerTest : public HloTestBase {
public:
CustomKernelFusionAutotunerTest()
: HloTestBase(false,
true) {}
void SetUp() override { HloTestBase::SetUp(); }
void TearDown() override { HloTestBase::TearDown(); }
};
TEST_F(CustomKernelFusionAutotunerTest, DontRunOnNonCustomFusions) {
const std::string hlo_string = R"(
HloModule test_module, entry_computation_layout={(f32[20000,20000]{1,0}, f32[20000,20000]{1,0})->(f32[20000,20000]{1,0}, f32[20000,20000]{1,0})}
%fused_computation (p0.param_0: f32[20000,20000], p1.param_1: f32[20000,20000]) -> (f32[20000,20000], f32[20000,20000]) {
%p0.param_0 = f32[20000,20000]{1,0} parameter(0)
%p1.param_1 = f32[20000,20000]{1,0} parameter(1)
%add = f32[20000,20000]{1,0} add(f32[20000,20000]{1,0} %p0.param_0, f32[20000,20000]{1,0} %p1.param_1)
%mul = f32[20000,20000]{1,0} multiply(f32[20000,20000]{1,0} %p0.param_0, f32[20000,20000]{1,0} %p1.param_1)
ROOT %tuple = (f32[20000,20000]{1,0}, f32[20000,20000]{1,0}) tuple(f32[20000,20000]{1,0} %add, f32[20000,20000]{1,0} %mul)
}
ENTRY %BroadcastIntoAdd (p0: f32[20000,20000], p1: f32[20000,20000]) -> (f32[20000,20000], f32[20000,20000]) {
%p0 = f32[20000,20000]{1,0} parameter(0)
%p1 = f32[20000,20000]{1,0} parameter(1)
ROOT %fusion = (f32[20000,20000]{1,0}, f32[20000,20000]{1,0}) fusion(f32[20000,20000]{1,0} %p0, f32[20000,20000]{1,0} %p1), kind=kLoop, calls=%fused_computation
}
)";
std::unique_ptr<HloModule> hlo_module =
ParseAndReturnVerifiedModule(hlo_string).value();
HloPassPipeline pipeline("custom_kernel_fusion_autotuner");
DebugOptions debug_options;
AutotuneConfig autotune_config =
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
debug_options};
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
ASSERT_TRUE(pipeline.Run(hlo_module.get()).ok());
}
TEST_F(CustomKernelFusionAutotunerTest,
CustomKernelFusionAutotunerPassSucceeds) {
const std::string hlo_string = R"(
HloModule extracted
cutlass_gemm {
p0 = f32[15,19]{1,0} parameter(0)
p1 = f32[19,17]{1,0} parameter(1)
ROOT r = f32[15, 17]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY region_198.14436 {
p.0 = f32[15,19]{1,0} parameter(0)
p.1 = f32[19,17]{1,0} parameter(1)
ROOT cutlass_gemm = f32[15,17]{1,0} fusion(p.0, p.1), kind=kCustom, calls=cutlass_gemm, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm","kernel_index":0}},"force_earliest_schedule":false}
}
)";
std::unique_ptr<HloModule> hlo_module =
ParseAndReturnVerifiedModule(hlo_string).value();
HloPassPipeline pipeline("custom_kernel_fusion_autotuner");
DebugOptions debug_options;
AutotuneConfig autotune_config =
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
debug_options};
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
ASSERT_TRUE(pipeline.Run(hlo_module.get()).ok());
}
TEST_F(CustomKernelFusionAutotunerTest,
CustomKernelFusionAutotunerPassUpdatesUpdatesKernelIndex) {
const std::string hlo_string = R"(
HloModule extracted
cutlass_gemm {
p0 = f32[15,19]{1,0} parameter(0)
p1 = f32[19,17]{1,0} parameter(1)
ROOT r = f32[15, 17]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
}
ENTRY region_198.14436 {
p.0 = f32[15,19]{1,0} parameter(0)
p.1 = f32[19,17]{1,0} parameter(1)
ROOT cutlass_gemm = f32[15,17]{1,0} fusion(p.0, p.1), kind=kCustom,
calls=cutlass_gemm,
backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm","kernel_index":-1}},"force_earliest_schedule":false}
}
)";
HloPassPipeline pipeline("custom_kernel_fusion_autotuner");
DebugOptions debug_options;
AutotuneConfig autotune_config =
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
debug_options};
pipeline.AddPass<CustomKernelFusionAutotuner>(autotune_config);
std::string expected = R"(
CHECK: "kernel_index":0
)";
RunAndFilecheckHloRewrite(hlo_string, std::move(pipeline), expected);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/custom_kernel_fusion_autotuner.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/autotuning/custom_kernel_fusion_autotuner_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3cc7cc66-874f-47a7-a4f0-c1960ee89b92 | cpp | tensorflow/tensorflow | transitive_fanin | tensorflow/core/grappler/utils/transitive_fanin.cc | tensorflow/core/grappler/utils/transitive_fanin_test.cc | #include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include <queue>
#include <vector>
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/errors.h"
namespace tensorflow {
namespace grappler {
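// Walks the graph backwards from `terminal_nodes` with a depth-first search,
// following node inputs and pairing _Recv ops with the _Send ops that share
// their tensor_name attribute.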
Status ComputeTransitiveFanin(
const GraphDef& graph, const std::vector<string>& terminal_nodes,
std::unordered_map<string, const NodeDef*>* name_to_fanin_node,
std::vector<const NodeDef*>* fanin_nodes) {
std::unordered_map<string, const NodeDef*> name_to_node;
std::unordered_map<string, const NodeDef*> name_to_send;
for (const auto& node : graph.node()) {
name_to_node[node.name()] = &node;
if (node.op() == "_Send") {
const auto& attr = node.attr();
name_to_send[attr.at("tensor_name").s()] = &node;
}
}
std::vector<const NodeDef*> queue;
for (const string& root : terminal_nodes) {
const NodeDef* node = name_to_node[NodeName(root)];
if (!node) {
return errors::InvalidArgument("Graph does not contain terminal node ",
root, ".");
}
queue.push_back(node);
}
std::unordered_set<const NodeDef*> visited;
while (!queue.empty()) {
const NodeDef* node = queue.back();
queue.pop_back();
if (!visited.insert(node).second) {
continue;
}
fanin_nodes->push_back(node);
if (name_to_fanin_node) {
name_to_fanin_node->insert(
std::pair<string, const NodeDef*>(node->name(), node));
}
for (const string& input : node->input()) {
const NodeDef* in = name_to_node[NodeName(input)];
if (!in) {
return errors::InvalidArgument("Graph does not contain input ",
NodeName(input), " of node ",
node->name(), ".");
}
queue.push_back(in);
}
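    // A _Recv has no explicit edge to its producer, so keep the matching
    // _Send (same tensor_name) in the fanin as well.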
if (node->op() == "_Recv") {
const auto& attr = node->attr();
const NodeDef* send = name_to_send[attr.at("tensor_name").s()];
if (send) {
queue.push_back(send);
}
}
}
return absl::OkStatus();
}
Status ComputeTransitiveFanin(const GraphDef& graph,
const std::vector<string>& terminal_nodes,
std::vector<const NodeDef*>* fanin_nodes) {
return ComputeTransitiveFanin(graph, terminal_nodes, nullptr, fanin_nodes);
}
Status SetTransitiveFaninGraph(const GraphDef& input_graph,
GraphDef* output_graph,
const std::vector<string>& terminal_nodes) {
std::vector<const NodeDef*> keep;
TF_RETURN_IF_ERROR(
ComputeTransitiveFanin(input_graph, terminal_nodes, &keep));
output_graph->mutable_node()->Reserve(keep.size());
for (int i = keep.size() - 1; i >= 0; --i) {
*output_graph->add_node() = *keep[i];
}
return absl::OkStatus();
}
}
} | #include "tensorflow/core/grappler/utils/transitive_fanin.h"
#include <vector>
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
class TransitiveFaninTest : public ::testing::Test {
protected:
struct NodeConfig {
NodeConfig(string name, std::vector<string> inputs)
: name(std::move(name)), inputs(std::move(inputs)) {}
NodeConfig(string name, string op, std::vector<string> inputs)
: name(std::move(name)), op(std::move(op)), inputs(std::move(inputs)) {}
string name;
string op;
std::vector<string> inputs;
};
static GraphDef CreateGraph(const std::vector<NodeConfig>& nodes) {
GraphDef graph;
for (const NodeConfig& node : nodes) {
NodeDef node_def;
node_def.set_name(node.name);
node_def.set_op(node.op);
for (const string& input : node.inputs) {
node_def.add_input(input);
}
*graph.add_node() = std::move(node_def);
}
return graph;
}
};
TEST_F(TransitiveFaninTest, NoPruning) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
}
TEST_F(TransitiveFaninTest, PruneNodesUnreachableFromSingleTerminalNode) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"1"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
ASSERT_FALSE(node_map.NodeExists("5"));
}
TEST_F(TransitiveFaninTest, PruneNodesUnreachableFromMultipleTerminalNodes) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"2"}},
{"6", {"1"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1", "5"};
TF_EXPECT_OK(SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes));
NodeMap node_map(&output_graph);
ASSERT_TRUE(node_map.NodeExists("1"));
ASSERT_TRUE(node_map.NodeExists("2"));
ASSERT_TRUE(node_map.NodeExists("3"));
ASSERT_TRUE(node_map.NodeExists("4"));
ASSERT_TRUE(node_map.NodeExists("5"));
ASSERT_FALSE(node_map.NodeExists("6"));
}
TEST_F(TransitiveFaninTest, InvalidGraphOrTerminalNodes) {
GraphDef graph = CreateGraph({
{"1", {"2"}},
{"2", {"3"}},
{"3", {"4"}},
{"4", {}},
{"5", {"6"}},
{"7", {"8"}}
});
GraphDef output_graph;
const std::vector<string> terminal_nodes = {"1", "5"};
auto s = SetTransitiveFaninGraph(graph, &output_graph, terminal_nodes);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Graph does not contain input 6 of node 5.");
const std::vector<string> invalid_terminal_nodes = {"0", "1", "5"};
s = SetTransitiveFaninGraph(graph, &output_graph, invalid_terminal_nodes);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Graph does not contain terminal node 0.");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/transitive_fanin.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/utils/transitive_fanin_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
17ebd479-a5a2-4872-9fc6-00ddd8f99eb1 | cpp | google/arolla | algorithms | arolla/util/algorithms.cc | arolla/util/algorithms_test.cc | #include "arolla/util/algorithms.h"
#include <array>
#include <cstdint>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace arolla {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
TEST(Algorithms, ExponentialLowerBound) {
std::vector<int> v{2, 4, 5, 6};
auto i1 = exp_lower_bound(v.begin(), v.end(), 4);
EXPECT_THAT(i1, Eq(v.begin() + 1));
}
TEST(Algorithms, InplaceLogicalAndWithOffsets) {
const std::array<uint32_t, 3> a = {0xf0ff0000, 0xff0fffff, 0x0000fff0};
int a_bit_offset = 16;
const std::array<uint32_t, 2> b = {0x87654321, 0x0fedcba9};
int b_bit_offset = 0;
const std::array<uint32_t, 3> c = {0x43210000, 0xcba98765, 0x00000fed};
int c_bit_offset = 16;
auto a_copy = a;
InplaceLogicalAndWithOffsets(64, b.data(), b_bit_offset, a_copy.data(),
a_bit_offset);
EXPECT_THAT(a_copy, ElementsAre(0x40210000, 0xcb098765, 0x00000fe0));
auto b_copy = b;
InplaceLogicalAndWithOffsets(64, a.data(), a_bit_offset, b_copy.data(),
b_bit_offset);
EXPECT_THAT(b_copy, ElementsAre(0x87654021, 0x0fe0cb09));
auto c_copy = c;
InplaceLogicalAndWithOffsets(64, a.data(), a_bit_offset, c_copy.data(),
c_bit_offset);
EXPECT_THAT(a_copy, ElementsAre(0x40210000, 0xcb098765, 0x00000fe0));
}
TEST(Algorithms, CopyBits) {
const std::array<uint32_t, 3> src = {0x3210dead, 0xba987654, 0xbeeffedc};
const std::array<uint32_t, 3> empty = {0x5a5a5a5a, 0x5a5a5a5a, 0x5a5a5a5a};
auto dest1 = empty;
CopyBits(64, src.data(), 16, dest1.data(), 16);
EXPECT_THAT(dest1, ElementsAre(0x32105a5a, 0xba987654, 0x5a5afedc));
auto dest2 = empty;
CopyBits(64, src.data(), 16, dest2.data(), 8);
EXPECT_THAT(dest2, ElementsAre(0x5432105a, 0xdcba9876, 0x5a5a5afe));
auto dest3 = empty;
CopyBits(64, src.data(), 16, dest3.data(), 24);
EXPECT_THAT(dest3, ElementsAre(0x105a5a5a, 0x98765432, 0x5afedcba));
uint32_t dest4 = 0xffffffff;
CopyBits(16, src.data(), 16, &dest4, 8);
EXPECT_THAT(dest4, Eq(0xff3210ff));
uint32_t src5 = 0xdcba;
std::array<uint32_t, 2> dest5 = {0xffffffff, 0xffffffff};
CopyBits(16, &src5, 0, dest5.data(), 24);
EXPECT_THAT(dest5, ElementsAre(0xbaffffff, 0xffffffdc));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/algorithms.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/algorithms_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
|
d5aedaf9-d758-4aa4-98f7-1f005b8f5a21 | cpp | google/quiche | priority_write_scheduler | quiche/http2/core/priority_write_scheduler.h | quiche/http2/core/priority_write_scheduler_test.cc | #ifndef QUICHE_HTTP2_CORE_PRIORITY_WRITE_SCHEDULER_H_
#define QUICHE_HTTP2_CORE_PRIORITY_WRITE_SCHEDULER_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_export.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/quiche_circular_deque.h"
namespace http2 {
namespace test {
template <typename StreamIdType>
class PriorityWriteSchedulerPeer;
}
struct QUICHE_EXPORT SpdyPriorityToSpdyPriority {
spdy::SpdyPriority operator()(spdy::SpdyPriority priority) {
return priority;
}
};
template <typename StreamIdType, typename PriorityType = spdy::SpdyPriority,
typename PriorityTypeToInt = SpdyPriorityToSpdyPriority,
typename IntToPriorityType = SpdyPriorityToSpdyPriority>
class QUICHE_EXPORT PriorityWriteScheduler {
public:
static constexpr int kHighestPriority = 0;
static constexpr int kLowestPriority = 7;
static_assert(spdy::kV3HighestPriority == kHighestPriority);
static_assert(spdy::kV3LowestPriority == kLowestPriority);
void RegisterStream(StreamIdType stream_id, PriorityType priority) {
auto stream_info = std::make_unique<StreamInfo>(
StreamInfo{std::move(priority), stream_id, false});
bool inserted =
stream_infos_.insert(std::make_pair(stream_id, std::move(stream_info)))
.second;
QUICHE_BUG_IF(spdy_bug_19_2, !inserted)
<< "Stream " << stream_id << " already registered";
}
void UnregisterStream(StreamIdType stream_id) {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_BUG(spdy_bug_19_3) << "Stream " << stream_id << " not registered";
return;
}
const StreamInfo* const stream_info = it->second.get();
if (stream_info->ready) {
bool erased =
Erase(&priority_infos_[PriorityTypeToInt()(stream_info->priority)]
.ready_list,
stream_info);
QUICHE_DCHECK(erased);
}
stream_infos_.erase(it);
}
bool StreamRegistered(StreamIdType stream_id) const {
return stream_infos_.find(stream_id) != stream_infos_.end();
}
PriorityType GetStreamPriority(StreamIdType stream_id) const {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_DVLOG(1) << "Stream " << stream_id << " not registered";
return IntToPriorityType()(kLowestPriority);
}
return it->second->priority;
}
void UpdateStreamPriority(StreamIdType stream_id, PriorityType priority) {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_DVLOG(1) << "Stream " << stream_id << " not registered";
return;
}
StreamInfo* const stream_info = it->second.get();
if (stream_info->priority == priority) {
return;
}
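    // If the integer priority bucket changes while the stream is ready, move
    // it to the back of the new bucket's ready list.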
if (PriorityTypeToInt()(stream_info->priority) !=
PriorityTypeToInt()(priority) &&
stream_info->ready) {
bool erased =
Erase(&priority_infos_[PriorityTypeToInt()(stream_info->priority)]
.ready_list,
stream_info);
QUICHE_DCHECK(erased);
priority_infos_[PriorityTypeToInt()(priority)].ready_list.push_back(
stream_info);
++num_ready_streams_;
}
stream_info->priority = std::move(priority);
}
void RecordStreamEventTime(StreamIdType stream_id, absl::Time now) {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_BUG(spdy_bug_19_4) << "Stream " << stream_id << " not registered";
return;
}
PriorityInfo& priority_info =
priority_infos_[PriorityTypeToInt()(it->second->priority)];
priority_info.last_event_time =
std::max(priority_info.last_event_time, absl::make_optional(now));
}
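  // Returns the most recent event time recorded for any priority strictly
  // higher than the given stream's priority, if one exists.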
std::optional<absl::Time> GetLatestEventWithPriority(
StreamIdType stream_id) const {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_BUG(spdy_bug_19_5) << "Stream " << stream_id << " not registered";
return std::nullopt;
}
std::optional<absl::Time> last_event_time;
const StreamInfo* const stream_info = it->second.get();
for (int p = kHighestPriority;
p < PriorityTypeToInt()(stream_info->priority); ++p) {
last_event_time =
std::max(last_event_time, priority_infos_[p].last_event_time);
}
return last_event_time;
}
StreamIdType PopNextReadyStream() {
return std::get<0>(PopNextReadyStreamAndPriority());
}
std::tuple<StreamIdType, PriorityType> PopNextReadyStreamAndPriority() {
for (int p = kHighestPriority; p <= kLowestPriority; ++p) {
ReadyList& ready_list = priority_infos_[p].ready_list;
if (!ready_list.empty()) {
StreamInfo* const info = ready_list.front();
ready_list.pop_front();
--num_ready_streams_;
QUICHE_DCHECK(stream_infos_.find(info->stream_id) !=
stream_infos_.end());
info->ready = false;
return std::make_tuple(info->stream_id, info->priority);
}
}
QUICHE_BUG(spdy_bug_19_6) << "No ready streams available";
return std::make_tuple(0, IntToPriorityType()(kLowestPriority));
}
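  // Returns true if a stream of strictly higher priority is ready, or if
  // another stream at the same priority is ahead of stream_id in its ready
  // list.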
bool ShouldYield(StreamIdType stream_id) const {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_BUG(spdy_bug_19_7) << "Stream " << stream_id << " not registered";
return false;
}
const StreamInfo* const stream_info = it->second.get();
for (int p = kHighestPriority;
p < PriorityTypeToInt()(stream_info->priority); ++p) {
if (!priority_infos_[p].ready_list.empty()) {
return true;
}
}
const auto& ready_list =
priority_infos_[PriorityTypeToInt()(it->second->priority)].ready_list;
if (ready_list.empty() || ready_list.front()->stream_id == stream_id) {
return false;
}
return true;
}
void MarkStreamReady(StreamIdType stream_id, bool add_to_front) {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_BUG(spdy_bug_19_8) << "Stream " << stream_id << " not registered";
return;
}
StreamInfo* const stream_info = it->second.get();
if (stream_info->ready) {
return;
}
ReadyList& ready_list =
priority_infos_[PriorityTypeToInt()(stream_info->priority)].ready_list;
if (add_to_front) {
ready_list.push_front(stream_info);
} else {
ready_list.push_back(stream_info);
}
++num_ready_streams_;
stream_info->ready = true;
}
void MarkStreamNotReady(StreamIdType stream_id) {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_BUG(spdy_bug_19_9) << "Stream " << stream_id << " not registered";
return;
}
StreamInfo* const stream_info = it->second.get();
if (!stream_info->ready) {
return;
}
bool erased = Erase(
&priority_infos_[PriorityTypeToInt()(stream_info->priority)].ready_list,
stream_info);
QUICHE_DCHECK(erased);
stream_info->ready = false;
}
bool HasReadyStreams() const { return num_ready_streams_ > 0; }
size_t NumReadyStreams() const { return num_ready_streams_; }
size_t NumRegisteredStreams() const { return stream_infos_.size(); }
std::string DebugString() const {
return absl::StrCat(
"PriorityWriteScheduler {num_streams=", stream_infos_.size(),
" num_ready_streams=", NumReadyStreams(), "}");
}
bool IsStreamReady(StreamIdType stream_id) const {
auto it = stream_infos_.find(stream_id);
if (it == stream_infos_.end()) {
QUICHE_DLOG(INFO) << "Stream " << stream_id << " not registered";
return false;
}
return it->second->ready;
}
private:
friend class test::PriorityWriteSchedulerPeer<StreamIdType>;
struct QUICHE_EXPORT StreamInfo {
PriorityType priority;
StreamIdType stream_id;
bool ready;
};
using ReadyList = quiche::QuicheCircularDeque<StreamInfo*>;
struct QUICHE_EXPORT PriorityInfo {
ReadyList ready_list;
std::optional<absl::Time> last_event_time;
};
using StreamInfoMap =
absl::flat_hash_map<StreamIdType, std::unique_ptr<StreamInfo>>;
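  // Removes `info` from `ready_list` and decrements num_ready_streams_;
  // returns false if the stream was not in the list.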
bool Erase(ReadyList* ready_list, const StreamInfo* info) {
auto it = std::remove(ready_list->begin(), ready_list->end(), info);
if (it == ready_list->end()) {
return false;
}
ready_list->pop_back();
--num_ready_streams_;
return true;
}
size_t num_ready_streams_ = 0;
PriorityInfo priority_infos_[kLowestPriority + 1];
StreamInfoMap stream_infos_;
};
}
#endif | #include "quiche/http2/core/priority_write_scheduler.h"
#include "quiche/http2/core/spdy_protocol.h"
#include "quiche/http2/test_tools/spdy_test_utils.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
using ::spdy::SpdyPriority;
using ::spdy::SpdyStreamId;
using ::testing::Eq;
using ::testing::Optional;
template <typename StreamIdType>
class PriorityWriteSchedulerPeer {
public:
explicit PriorityWriteSchedulerPeer(
PriorityWriteScheduler<StreamIdType>* scheduler)
: scheduler_(scheduler) {}
size_t NumReadyStreams(SpdyPriority priority) const {
return scheduler_->priority_infos_[priority].ready_list.size();
}
private:
PriorityWriteScheduler<StreamIdType>* scheduler_;
};
namespace {
class PriorityWriteSchedulerTest : public quiche::test::QuicheTest {
public:
static constexpr int kLowestPriority =
PriorityWriteScheduler<SpdyStreamId>::kLowestPriority;
PriorityWriteSchedulerTest() : peer_(&scheduler_) {}
PriorityWriteScheduler<SpdyStreamId> scheduler_;
PriorityWriteSchedulerPeer<SpdyStreamId> peer_;
};
TEST_F(PriorityWriteSchedulerTest, RegisterUnregisterStreams) {
EXPECT_FALSE(scheduler_.HasReadyStreams());
EXPECT_FALSE(scheduler_.StreamRegistered(1));
EXPECT_EQ(0u, scheduler_.NumRegisteredStreams());
scheduler_.RegisterStream(1, 1);
EXPECT_TRUE(scheduler_.StreamRegistered(1));
EXPECT_EQ(1u, scheduler_.NumRegisteredStreams());
EXPECT_QUICHE_BUG(scheduler_.RegisterStream(1, 1),
"Stream 1 already registered");
EXPECT_EQ(1u, scheduler_.NumRegisteredStreams());
EXPECT_QUICHE_BUG(scheduler_.RegisterStream(1, 2),
"Stream 1 already registered");
EXPECT_EQ(1u, scheduler_.NumRegisteredStreams());
scheduler_.RegisterStream(2, 3);
EXPECT_EQ(2u, scheduler_.NumRegisteredStreams());
EXPECT_FALSE(scheduler_.HasReadyStreams());
scheduler_.UnregisterStream(1);
EXPECT_EQ(1u, scheduler_.NumRegisteredStreams());
scheduler_.UnregisterStream(2);
EXPECT_EQ(0u, scheduler_.NumRegisteredStreams());
EXPECT_QUICHE_BUG(scheduler_.UnregisterStream(1), "Stream 1 not registered");
EXPECT_QUICHE_BUG(scheduler_.UnregisterStream(2), "Stream 2 not registered");
EXPECT_EQ(0u, scheduler_.NumRegisteredStreams());
}
TEST_F(PriorityWriteSchedulerTest, GetStreamPriority) {
EXPECT_EQ(kLowestPriority, scheduler_.GetStreamPriority(1));
scheduler_.RegisterStream(1, 3);
EXPECT_EQ(3, scheduler_.GetStreamPriority(1));
EXPECT_QUICHE_BUG(scheduler_.RegisterStream(1, 4),
"Stream 1 already registered");
EXPECT_EQ(3, scheduler_.GetStreamPriority(1));
scheduler_.UpdateStreamPriority(1, 5);
EXPECT_EQ(5, scheduler_.GetStreamPriority(1));
scheduler_.MarkStreamReady(1, true);
EXPECT_EQ(5, scheduler_.GetStreamPriority(1));
EXPECT_EQ(1u, peer_.NumReadyStreams(5));
scheduler_.UpdateStreamPriority(1, 6);
EXPECT_EQ(6, scheduler_.GetStreamPriority(1));
EXPECT_EQ(0u, peer_.NumReadyStreams(5));
EXPECT_EQ(1u, peer_.NumReadyStreams(6));
EXPECT_EQ(1u, scheduler_.PopNextReadyStream());
EXPECT_EQ(6, scheduler_.GetStreamPriority(1));
scheduler_.UnregisterStream(1);
EXPECT_EQ(kLowestPriority, scheduler_.GetStreamPriority(1));
}
TEST_F(PriorityWriteSchedulerTest, PopNextReadyStreamAndPriority) {
scheduler_.RegisterStream(1, 3);
scheduler_.MarkStreamReady(1, true);
EXPECT_EQ(std::make_tuple(1u, 3), scheduler_.PopNextReadyStreamAndPriority());
scheduler_.UnregisterStream(1);
}
TEST_F(PriorityWriteSchedulerTest, UpdateStreamPriority) {
EXPECT_EQ(kLowestPriority, scheduler_.GetStreamPriority(3));
EXPECT_FALSE(scheduler_.StreamRegistered(3));
scheduler_.UpdateStreamPriority(3, 1);
EXPECT_FALSE(scheduler_.StreamRegistered(3));
EXPECT_EQ(kLowestPriority, scheduler_.GetStreamPriority(3));
scheduler_.RegisterStream(3, 1);
EXPECT_EQ(1, scheduler_.GetStreamPriority(3));
scheduler_.UpdateStreamPriority(3, 2);
EXPECT_EQ(2, scheduler_.GetStreamPriority(3));
scheduler_.UpdateStreamPriority(3, 2);
EXPECT_EQ(2, scheduler_.GetStreamPriority(3));
scheduler_.RegisterStream(4, 1);
scheduler_.MarkStreamReady(3, false);
EXPECT_TRUE(scheduler_.IsStreamReady(3));
scheduler_.MarkStreamReady(4, false);
EXPECT_TRUE(scheduler_.IsStreamReady(4));
EXPECT_EQ(4u, scheduler_.PopNextReadyStream());
EXPECT_FALSE(scheduler_.IsStreamReady(4));
EXPECT_EQ(3u, scheduler_.PopNextReadyStream());
EXPECT_FALSE(scheduler_.IsStreamReady(3));
scheduler_.MarkStreamReady(3, false);
scheduler_.MarkStreamReady(4, false);
scheduler_.UpdateStreamPriority(4, 3);
EXPECT_EQ(3u, scheduler_.PopNextReadyStream());
EXPECT_EQ(4u, scheduler_.PopNextReadyStream());
scheduler_.UnregisterStream(3);
}
TEST_F(PriorityWriteSchedulerTest, MarkStreamReadyBack) {
EXPECT_FALSE(scheduler_.HasReadyStreams());
EXPECT_QUICHE_BUG(scheduler_.MarkStreamReady(1, false),
"Stream 1 not registered");
EXPECT_FALSE(scheduler_.HasReadyStreams());
EXPECT_QUICHE_BUG(EXPECT_EQ(0u, scheduler_.PopNextReadyStream()),
"No ready streams available");
scheduler_.RegisterStream(1, 3);
scheduler_.MarkStreamReady(1, false);
EXPECT_TRUE(scheduler_.HasReadyStreams());
scheduler_.RegisterStream(2, 3);
scheduler_.MarkStreamReady(2, false);
scheduler_.RegisterStream(3, 3);
scheduler_.MarkStreamReady(3, false);
scheduler_.RegisterStream(4, 2);
scheduler_.MarkStreamReady(4, false);
scheduler_.RegisterStream(5, 5);
scheduler_.MarkStreamReady(5, false);
EXPECT_EQ(4u, scheduler_.PopNextReadyStream());
EXPECT_EQ(1u, scheduler_.PopNextReadyStream());
EXPECT_EQ(2u, scheduler_.PopNextReadyStream());
EXPECT_EQ(3u, scheduler_.PopNextReadyStream());
EXPECT_EQ(5u, scheduler_.PopNextReadyStream());
EXPECT_QUICHE_BUG(EXPECT_EQ(0u, scheduler_.PopNextReadyStream()),
"No ready streams available");
}
TEST_F(PriorityWriteSchedulerTest, MarkStreamReadyFront) {
EXPECT_FALSE(scheduler_.HasReadyStreams());
EXPECT_QUICHE_BUG(scheduler_.MarkStreamReady(1, true),
"Stream 1 not registered");
EXPECT_FALSE(scheduler_.HasReadyStreams());
EXPECT_QUICHE_BUG(EXPECT_EQ(0u, scheduler_.PopNextReadyStream()),
"No ready streams available");
scheduler_.RegisterStream(1, 3);
scheduler_.MarkStreamReady(1, true);
EXPECT_TRUE(scheduler_.HasReadyStreams());
scheduler_.RegisterStream(2, 3);
scheduler_.MarkStreamReady(2, true);
scheduler_.RegisterStream(3, 3);
scheduler_.MarkStreamReady(3, true);
scheduler_.RegisterStream(4, 2);
scheduler_.MarkStreamReady(4, true);
scheduler_.RegisterStream(5, 5);
scheduler_.MarkStreamReady(5, true);
EXPECT_EQ(4u, scheduler_.PopNextReadyStream());
EXPECT_EQ(3u, scheduler_.PopNextReadyStream());
EXPECT_EQ(2u, scheduler_.PopNextReadyStream());
EXPECT_EQ(1u, scheduler_.PopNextReadyStream());
EXPECT_EQ(5u, scheduler_.PopNextReadyStream());
EXPECT_QUICHE_BUG(EXPECT_EQ(0u, scheduler_.PopNextReadyStream()),
"No ready streams available");
}
TEST_F(PriorityWriteSchedulerTest, MarkStreamReadyBackAndFront) {
scheduler_.RegisterStream(1, 4);
scheduler_.RegisterStream(2, 3);
scheduler_.RegisterStream(3, 3);
scheduler_.RegisterStream(4, 3);
scheduler_.RegisterStream(5, 4);
scheduler_.RegisterStream(6, 1);
scheduler_.MarkStreamReady(1, true);
scheduler_.MarkStreamReady(2, true);
scheduler_.MarkStreamReady(3, false);
scheduler_.MarkStreamReady(4, true);
scheduler_.MarkStreamReady(5, false);
scheduler_.MarkStreamReady(6, true);
EXPECT_EQ(6u, scheduler_.PopNextReadyStream());
EXPECT_EQ(4u, scheduler_.PopNextReadyStream());
EXPECT_EQ(2u, scheduler_.PopNextReadyStream());
EXPECT_EQ(3u, scheduler_.PopNextReadyStream());
EXPECT_EQ(1u, scheduler_.PopNextReadyStream());
EXPECT_EQ(5u, scheduler_.PopNextReadyStream());
EXPECT_QUICHE_BUG(EXPECT_EQ(0u, scheduler_.PopNextReadyStream()),
"No ready streams available");
}
TEST_F(PriorityWriteSchedulerTest, MarkStreamNotReady) {
scheduler_.RegisterStream(1, 1);
EXPECT_EQ(0u, scheduler_.NumReadyStreams());
scheduler_.MarkStreamReady(1, false);
EXPECT_EQ(1u, scheduler_.NumReadyStreams());
scheduler_.MarkStreamNotReady(1);
EXPECT_EQ(0u, scheduler_.NumReadyStreams());
EXPECT_QUICHE_BUG(EXPECT_EQ(0u, scheduler_.PopNextReadyStream()),
"No ready streams available");
scheduler_.MarkStreamNotReady(1);
EXPECT_EQ(0u, scheduler_.NumReadyStreams());
EXPECT_QUICHE_BUG(scheduler_.MarkStreamNotReady(3),
"Stream 3 not registered");
}
TEST_F(PriorityWriteSchedulerTest, UnregisterRemovesStream) {
scheduler_.RegisterStream(3, 4);
scheduler_.MarkStreamReady(3, false);
EXPECT_EQ(1u, scheduler_.NumReadyStreams());
scheduler_.UnregisterStream(3);
EXPECT_EQ(0u, scheduler_.NumReadyStreams());
EXPECT_QUICHE_BUG(EXPECT_EQ(0u, scheduler_.PopNextReadyStream()),
"No ready streams available");
}
TEST_F(PriorityWriteSchedulerTest, ShouldYield) {
scheduler_.RegisterStream(1, 1);
scheduler_.RegisterStream(4, 4);
scheduler_.RegisterStream(5, 4);
scheduler_.RegisterStream(7, 7);
EXPECT_FALSE(scheduler_.ShouldYield(1));
scheduler_.MarkStreamReady(4, false);
EXPECT_FALSE(scheduler_.ShouldYield(4));
EXPECT_TRUE(scheduler_.ShouldYield(7));
EXPECT_TRUE(scheduler_.ShouldYield(5));
EXPECT_FALSE(scheduler_.ShouldYield(1));
scheduler_.MarkStreamReady(5, false);
EXPECT_FALSE(scheduler_.ShouldYield(4));
EXPECT_TRUE(scheduler_.ShouldYield(5));
}
TEST_F(PriorityWriteSchedulerTest, GetLatestEventWithPriority) {
EXPECT_QUICHE_BUG(
scheduler_.RecordStreamEventTime(3, absl::FromUnixMicros(5)),
"Stream 3 not registered");
EXPECT_QUICHE_BUG(
EXPECT_FALSE(scheduler_.GetLatestEventWithPriority(4).has_value()),
"Stream 4 not registered");
for (int i = 1; i < 5; ++i) {
scheduler_.RegisterStream(i, i);
}
for (int i = 1; i < 5; ++i) {
EXPECT_FALSE(scheduler_.GetLatestEventWithPriority(i).has_value());
}
for (int i = 1; i < 5; ++i) {
scheduler_.RecordStreamEventTime(i, absl::FromUnixMicros(i * 100));
}
EXPECT_FALSE(scheduler_.GetLatestEventWithPriority(1).has_value());
for (int i = 2; i < 5; ++i) {
EXPECT_THAT(scheduler_.GetLatestEventWithPriority(i),
Optional(Eq(absl::FromUnixMicros((i - 1) * 100))));
}
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/priority_write_scheduler.h | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/priority_write_scheduler_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
889988ce-fdc3-4f01-8bf7-b5a597041eae | cpp | tensorflow/tensorflow | mlir_dump | tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.cc | tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump_test.cc | #include "tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/log.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/file_system.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/stringpiece.h"
namespace tensorflow {
namespace quantization {
namespace {
absl::StatusOr<std::string> GetMlirDumpDir() {
auto dump_dir = std::string(
absl::NullSafeStringView(std::getenv("TF_QUANT_MLIR_DUMP_PREFIX")));
if (dump_dir.empty()) {
return absl::FailedPreconditionError(
"Environment variable not set: TF_QUANT_MLIR_DUMP_PREFIX, "
"IR dump file for TF quantization is not created.");
}
if (absl::EqualsIgnoreCase(dump_dir, "sponge")) {
if (!tsl::io::GetTestUndeclaredOutputsDir(&dump_dir)) {
return absl::FailedPreconditionError(
"Environment variable TF_QUANT_MLIR_DUMP_PREFIX=sponge but "
"TEST_UNDECLARED_OUTPUT_DIRS not set.");
}
}
return dump_dir;
}
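// Adapts tsl::WritableFile to the llvm::raw_ostream interface so MLIR IR can
// be printed straight into the dump file.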
class WritableFileWrapper : public llvm::raw_ostream {
public:
~WritableFileWrapper() override { flush(); }
static absl::StatusOr<std::unique_ptr<WritableFileWrapper>> Create(
const std::string& filepath) {
std::unique_ptr<tsl::WritableFile> file;
TF_RETURN_IF_ERROR(tsl::Env::Default()->NewWritableFile(filepath, &file));
return absl::WrapUnique(new WritableFileWrapper(std::move(file)));
}
private:
explicit WritableFileWrapper(std::unique_ptr<tsl::WritableFile> file)
: file_(std::move(file)) {
SetBuffered();
}
uint64_t current_pos() const override {
int64_t position;
if (file_->Tell(&position).ok()) {
return position;
} else {
return -1;
}
}
void write_impl(const char* ptr, size_t size) override {
if (file_ && !file_->Append(absl::string_view(ptr, size)).ok()) {
file_ = nullptr;
}
}
std::unique_ptr<tsl::WritableFile> file_;
};
absl::StatusOr<std::unique_ptr<llvm::raw_ostream>> CreateMlirDumpFile(
const absl::string_view dump_file_name) {
const absl::StatusOr<std::string> dump_dir = GetMlirDumpDir();
if (!dump_dir.ok()) {
return dump_dir.status();
}
auto* env = tsl::Env::Default();
TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(*dump_dir));
const std::string dump_file_path =
tsl::io::JoinPath(*dump_dir, dump_file_name);
TF_ASSIGN_OR_RETURN(std::unique_ptr<llvm::raw_ostream> file,
WritableFileWrapper::Create(dump_file_path));
LOG(INFO) << "IR dump file created: " << dump_file_path;
return file;
}
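// Pass instrumentation config that lazily opens one dump file per pass and
// per before/after stage, numbering files in the order the passes are first
// encountered.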
class PrinterConfig : public mlir::PassManager::IRPrinterConfig {
public:
explicit PrinterConfig(
absl::string_view dump_file_prefix, bool print_module_scope = false,
bool print_after_only_on_change = true,
mlir::OpPrintingFlags op_printing_flags = mlir::OpPrintingFlags())
: mlir::PassManager::IRPrinterConfig(
print_module_scope, print_after_only_on_change,
/*print_after_only_on_failure=*/false, op_printing_flags),
mlir_pass_count_(1),
dump_file_prefix_(dump_file_prefix) {}
void printBeforeIfEnabled(mlir::Pass* pass, mlir::Operation* op,
PrintCallbackFn print_callback) override {
Dump(pass, print_callback, true);
}
void printAfterIfEnabled(mlir::Pass* pass, mlir::Operation* op,
PrintCallbackFn print_callback) override {
Dump(pass, print_callback, false);
}
private:
int64_t mlir_pass_count_;
absl::string_view dump_file_prefix_;
llvm::DenseMap<mlir::Pass*, std::unique_ptr<llvm::raw_ostream>>
pass_to_dump_file_before_map_;
llvm::DenseMap<mlir::Pass*, std::unique_ptr<llvm::raw_ostream>>
pass_to_dump_file_after_map_;
llvm::DenseMap<mlir::Pass*, int64_t> pass_to_number_map_;
int64_t GetPassNumber(mlir::Pass* pass) {
if (!pass_to_number_map_.contains(pass)) {
pass_to_number_map_[pass] = mlir_pass_count_++;
}
return pass_to_number_map_[pass];
}
void Dump(mlir::Pass* pass, PrintCallbackFn print_callback, bool is_before) {
auto& pass_to_dump_file_map = is_before ? pass_to_dump_file_before_map_
: pass_to_dump_file_after_map_;
if (!pass_to_dump_file_map.contains(pass)) {
std::string filename = llvm::formatv(
"{0}_{1,0+4}_{2}_{3}.mlir", dump_file_prefix_, GetPassNumber(pass),
pass->getName().str(), is_before ? "before" : "after");
absl::StatusOr<std::unique_ptr<llvm::raw_ostream>> dump_file =
CreateMlirDumpFile(filename);
if (!dump_file.ok()) {
LOG(WARNING) << "Failed to dump MLIR module to " << filename;
return;
}
pass_to_dump_file_map[pass] = std::move(*dump_file);
}
return print_callback(*(pass_to_dump_file_map[pass]));
}
};
}
void EnableIrPrinting(mlir::PassManager& pm,
absl::string_view file_name_prefix) {
mlir::OpPrintingFlags flag{};
flag.useLocalScope().elideLargeElementsAttrs().enableDebugInfo();
if (pm.getContext()->isMultithreadingEnabled()) {
pm.getContext()->disableMultithreading();
}
pm.enableIRPrinting(std::make_unique<PrinterConfig>(
file_name_prefix, /*print_module_scope=*/false,
/*print_after_only_on_change=*/true, flag));
}
absl::Status MaybeEnableIrPrinting(mlir::PassManager& pm,
absl::string_view file_name_prefix) {
if (!VLOG_IS_ON(1)) {
LOG(INFO) << "Verbosity level too low to enable IR printing.";
return absl::OkStatus();
}
EnableIrPrinting(pm, file_name_prefix);
LOG(INFO) << "IR dump for TensorFlow quantization pipeline enabled.";
return absl::OkStatus();
}
}
} | #include "tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.h"
#include <memory>
#include <string>
#include <vector>
#include "absl/cleanup/cleanup.h"
#include "absl/strings/string_view.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/LogicalResult.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinDialect.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Support/TypeID.h"
#include "mlir/Transforms/Passes.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/path.h"
#include "tsl/platform/test.h"
namespace tensorflow {
namespace quantization {
namespace mlir_dump_test {
class NoOpPass
: public mlir::PassWrapper<NoOpPass, mlir::OperationPass<mlir::ModuleOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(NoOpPass)
NoOpPass() = default;
llvm::StringRef getArgument() const final { return "no-op-pass"; }
void runOnOperation() override {
}
};
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateNoOpPass() {
return std::make_unique<NoOpPass>();
}
class ParentPass
: public mlir::PassWrapper<ParentPass,
mlir::OperationPass<mlir::ModuleOp>> {
public:
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ParentPass)
ParentPass() = default;
llvm::StringRef getArgument() const final { return "parent-pass"; }
void runOnOperation() override {
mlir::MLIRContext* ctx = &getContext();
mlir::ModuleOp module_op = getOperation();
mlir::PassManager pm(ctx);
pm.addPass(CreateNoOpPass());
EnableIrPrinting(pm, "dump2");
if (failed(pm.run(module_op))) {
signalPassFailure();
}
}
};
std::unique_ptr<mlir::OperationPass<mlir::ModuleOp>> CreateParentPass() {
return std::make_unique<ParentPass>();
}
}
namespace {
using namespace tensorflow::quantization::mlir_dump_test;
class EnableIrPrintingTest : public ::testing::Test {
protected:
EnableIrPrintingTest() : env_(tsl::Env::Default()) {
if (!tsl::io::GetTestUndeclaredOutputsDir(&test_dir_)) {
test_dir_ = tsl::testing::TmpDir();
}
}
void SetUp() override {
tsl::setenv("TF_QUANT_MLIR_DUMP_PREFIX", test_dir_.c_str(), 1);
mlir::DialectRegistry dialects;
dialects.insert<mlir::BuiltinDialect, mlir::func::FuncDialect,
mlir::stablehlo::StablehloDialect>();
ctx_ = std::make_unique<mlir::MLIRContext>(dialects);
ctx_->loadAllAvailableDialects();
}
void TearDown() override {
std::vector<std::string> files;
TF_ASSERT_OK(
env_->GetMatchingPaths(tsl::io::JoinPath(test_dir_, "*"), &files));
for (const std::string& file : files) {
TF_ASSERT_OK(env_->DeleteFile(file));
}
}
tsl::Env* env_;
std::string test_dir_;
std::unique_ptr<mlir::MLIRContext> ctx_;
};
TEST_F(EnableIrPrintingTest, PassSuccessfullyRuns) {
mlir::PassManager pm = {ctx_.get()};
pm.addPass(CreateNoOpPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
EnableIrPrinting(pm, "dump");
constexpr absl::string_view program = R"mlir(
module{
func.func @main(%arg0: tensor<10xf32>) -> tensor<10xf32> {
return %arg0 : tensor<10xf32>
}
func.func @func1(%arg0: tensor<10xf32>, %arg1: tensor<10xf32>) -> tensor<10xf32> {
%0 = stablehlo.add %arg0, %arg1 : tensor<10xf32>
%1 = stablehlo.add %arg0, %arg1 : tensor<10xf32>
return %0 : tensor<10xf32>
}
})mlir";
auto module_op = mlir::parseSourceString<mlir::ModuleOp>(program, ctx_.get());
const mlir::LogicalResult result = pm.run(module_op.get());
EXPECT_FALSE(failed(result));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump_0001_tensorflow::quantization::mlir_dump_test"
"::NoOpPass_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0002_Canonicalizer_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0002_Canonicalizer_after.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_, "dump_0003_Canonicalizer_before.mlir")));
}
TEST_F(EnableIrPrintingTest, NestedPassSuccessfullyRuns) {
mlir::MLIRContext ctx{};
mlir::PassManager pm = {&ctx};
pm.addPass(CreateParentPass());
EnableIrPrinting(pm, "dump");
mlir::OpBuilder builder(&ctx);
auto module_op = builder.create<mlir::ModuleOp>(builder.getUnknownLoc());
const absl::Cleanup module_op_cleanup = [module_op] { module_op->destroy(); };
const mlir::LogicalResult result = pm.run(module_op);
EXPECT_FALSE(failed(result));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump_0001_tensorflow::quantization::mlir_dump_test"
"::ParentPass_before.mlir")));
TF_EXPECT_OK(tsl::Env::Default()->FileExists(
tsl::io::JoinPath(test_dir_,
"dump2_0001_tensorflow::quantization::mlir_dump_test"
"::NoOpPass_before.mlir")));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/tensorflow/debugging/mlir_dump_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7cb8f19e-77cd-4976-a55e-25bc52421108 | cpp | google/cel-cpp | parsed_map_field_value | common/values/parsed_map_field_value.cc | common/values/parsed_map_field_value_test.cc | #include "common/values/parsed_map_field_value.h"
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include "absl/base/nullability.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/log/die_if_null.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/status_macros.h"
#include "google/protobuf/message.h"
namespace cel {
std::string ParsedMapFieldValue::DebugString() const {
if (ABSL_PREDICT_FALSE(field_ == nullptr)) {
return "INVALID";
}
return "VALID";
}
absl::Status ParsedMapFieldValue::SerializeTo(AnyToJsonConverter& converter,
absl::Cord& value) const {
return absl::UnimplementedError("SerializeTo is not yet implemented");
}
absl::StatusOr<Json> ParsedMapFieldValue::ConvertToJson(
AnyToJsonConverter& converter) const {
return absl::UnimplementedError("ConvertToJson is not yet implemented");
}
absl::StatusOr<JsonObject> ParsedMapFieldValue::ConvertToJsonObject(
AnyToJsonConverter& converter) const {
return absl::UnimplementedError("ConvertToJsonObject is not yet implemented");
}
absl::Status ParsedMapFieldValue::Equal(ValueManager& value_manager,
const Value& other,
Value& result) const {
return absl::UnimplementedError("Equal is not yet implemented");
}
absl::StatusOr<Value> ParsedMapFieldValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
bool ParsedMapFieldValue::IsZeroValue() const { return IsEmpty(); }
bool ParsedMapFieldValue::IsEmpty() const { return Size() == 0; }
size_t ParsedMapFieldValue::Size() const {
ABSL_DCHECK(*this);
if (ABSL_PREDICT_FALSE(field_ == nullptr)) {
return 0;
}
return static_cast<size_t>(
GetReflectionOrDie()->FieldSize(*message_, field_));
}
absl::Status ParsedMapFieldValue::Get(ValueManager& value_manager,
const Value& key, Value& result) const {
return absl::UnimplementedError("Get is not yet implemented");
}
absl::StatusOr<Value> ParsedMapFieldValue::Get(ValueManager& value_manager,
const Value& key) const {
Value result;
CEL_RETURN_IF_ERROR(Get(value_manager, key, result));
return result;
}
absl::StatusOr<bool> ParsedMapFieldValue::Find(ValueManager& value_manager,
const Value& key,
Value& result) const {
return absl::UnimplementedError("Find is not yet implemented");
}
absl::StatusOr<std::pair<Value, bool>> ParsedMapFieldValue::Find(
ValueManager& value_manager, const Value& key) const {
Value result;
CEL_ASSIGN_OR_RETURN(auto found, Find(value_manager, key, result));
if (found) {
return std::pair{std::move(result), found};
}
return std::pair{NullValue(), found};
}
absl::Status ParsedMapFieldValue::Has(ValueManager& value_manager,
const Value& key, Value& result) const {
return absl::UnimplementedError("Has is not yet implemented");
}
absl::StatusOr<Value> ParsedMapFieldValue::Has(ValueManager& value_manager,
const Value& key) const {
Value result;
CEL_RETURN_IF_ERROR(Has(value_manager, key, result));
return result;
}
absl::Status ParsedMapFieldValue::ListKeys(ValueManager& value_manager,
ListValue& result) const {
return absl::UnimplementedError("ListKeys is not yet implemented");
}
absl::StatusOr<ListValue> ParsedMapFieldValue::ListKeys(
ValueManager& value_manager) const {
ListValue result;
CEL_RETURN_IF_ERROR(ListKeys(value_manager, result));
return result;
}
absl::Status ParsedMapFieldValue::ForEach(ValueManager& value_manager,
ForEachCallback callback) const {
return absl::UnimplementedError("ForEach is not yet implemented");
}
absl::StatusOr<absl::Nonnull<std::unique_ptr<ValueIterator>>>
ParsedMapFieldValue::NewIterator(ValueManager& value_manager) const {
return absl::UnimplementedError("NewIterator is not yet implemented");
}
absl::Nonnull<const google::protobuf::Reflection*>
ParsedMapFieldValue::GetReflectionOrDie() const {
return ABSL_DIE_IF_NULL(message_->GetReflection());
}
} | #include "absl/base/nullability.h"
#include "absl/log/die_if_null.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/allocator.h"
#include "common/memory.h"
#include "common/type.h"
#include "common/type_reflector.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "common/value_manager.h"
#include "internal/parse_text_proto.h"
#include "internal/testing.h"
#include "internal/testing_descriptor_pool.h"
#include "internal/testing_message_factory.h"
#include "proto/test/v1/proto3/test_all_types.pb.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel {
namespace {
using ::absl_testing::StatusIs;
using ::cel::internal::DynamicParseTextProto;
using ::cel::internal::GetTestingDescriptorPool;
using ::cel::internal::GetTestingMessageFactory;
using ::testing::_;
using ::testing::PrintToStringParamName;
using ::testing::TestWithParam;
using TestAllTypesProto3 = ::google::api::expr::test::v1::proto3::TestAllTypes;
class ParsedMapFieldValueTest : public TestWithParam<AllocatorKind> {
public:
void SetUp() override {
switch (GetParam()) {
case AllocatorKind::kArena:
arena_.emplace();
value_manager_ = NewThreadCompatibleValueManager(
MemoryManager::Pooling(arena()),
NewThreadCompatibleTypeReflector(MemoryManager::Pooling(arena())));
break;
case AllocatorKind::kNewDelete:
value_manager_ = NewThreadCompatibleValueManager(
MemoryManager::ReferenceCounting(),
NewThreadCompatibleTypeReflector(
MemoryManager::ReferenceCounting()));
break;
}
}
void TearDown() override {
value_manager_.reset();
arena_.reset();
}
Allocator<> allocator() {
return arena_ ? ArenaAllocator(&*arena_) : NewDeleteAllocator();
}
absl::Nullable<google::protobuf::Arena*> arena() { return allocator().arena(); }
absl::Nonnull<const google::protobuf::DescriptorPool*> descriptor_pool() {
return GetTestingDescriptorPool();
}
absl::Nonnull<google::protobuf::MessageFactory*> message_factory() {
return GetTestingMessageFactory();
}
ValueManager& value_manager() { return **value_manager_; }
private:
absl::optional<google::protobuf::Arena> arena_;
absl::optional<Shared<ValueManager>> value_manager_;
};
TEST_P(ParsedMapFieldValueTest, Default) {
ParsedMapFieldValue value;
EXPECT_FALSE(value);
}
TEST_P(ParsedMapFieldValueTest, Field) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_TRUE(value);
}
TEST_P(ParsedMapFieldValueTest, Kind) {
ParsedMapFieldValue value;
EXPECT_EQ(value.kind(), ParsedMapFieldValue::kKind);
EXPECT_EQ(value.kind(), ValueKind::kMap);
}
TEST_P(ParsedMapFieldValueTest, GetTypeName) {
ParsedMapFieldValue value;
EXPECT_EQ(value.GetTypeName(), ParsedMapFieldValue::kName);
EXPECT_EQ(value.GetTypeName(), "map");
}
TEST_P(ParsedMapFieldValueTest, GetRuntimeType) {
ParsedMapFieldValue value;
EXPECT_EQ(value.GetRuntimeType(), MapType());
}
TEST_P(ParsedMapFieldValueTest, DebugString) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.DebugString(), _);
}
TEST_P(ParsedMapFieldValueTest, IsZeroValue) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_TRUE(valid_value.IsZeroValue());
}
TEST_P(ParsedMapFieldValueTest, SerializeTo) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
absl::Cord serialized;
EXPECT_THAT(valid_value.SerializeTo(value_manager(), serialized),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMapFieldValueTest, ConvertToJson) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.ConvertToJson(value_manager()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMapFieldValueTest, Equal) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.Equal(value_manager(), BoolValue()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMapFieldValueTest, Empty) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_TRUE(valid_value.IsEmpty());
}
TEST_P(ParsedMapFieldValueTest, Size) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_EQ(valid_value.Size(), 0);
}
TEST_P(ParsedMapFieldValueTest, Get) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.Get(value_manager(), BoolValue()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMapFieldValueTest, Find) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.Find(value_manager(), BoolValue()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMapFieldValueTest, Has) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.Has(value_manager(), BoolValue()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMapFieldValueTest, ListKeys) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.ListKeys(value_manager()),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMapFieldValueTest, ForEach) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.ForEach(value_manager(),
[](const Value&, const Value&)
-> absl::StatusOr<bool> { return true; }),
StatusIs(absl::StatusCode::kUnimplemented));
}
TEST_P(ParsedMapFieldValueTest, NewIterator) {
auto message = DynamicParseTextProto<TestAllTypesProto3>(
allocator(), R"pb()pb", descriptor_pool(), message_factory());
ParsedMapFieldValue valid_value(
message, ABSL_DIE_IF_NULL(message->GetDescriptor()->FindFieldByName(
"map_int64_int64")));
EXPECT_THAT(valid_value.NewIterator(value_manager()),
StatusIs(absl::StatusCode::kUnimplemented));
}
INSTANTIATE_TEST_SUITE_P(ParsedMapFieldValueTest, ParsedMapFieldValueTest,
::testing::Values(AllocatorKind::kArena,
AllocatorKind::kNewDelete),
PrintToStringParamName());
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_map_field_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/parsed_map_field_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
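Editor's note: a small hedged sketch of constructing ParsedMapFieldValue directly, reusing helpers from the test above (DynamicParseTextProto, GetTestingDescriptorPool, GetTestingMessageFactory, the TestAllTypesProto3 alias and Allocator<>). Since most accessors still return kUnimplemented, only the size/emptiness queries are meaningful; the populated map entry below is an assumption for the example.
// Illustrative sketch only; the surrounding names are taken from the test file.
size_t CountMapEntries(Allocator<> allocator) {
  auto message = DynamicParseTextProto<TestAllTypesProto3>(
      allocator, R"pb(map_int64_int64 { key: 1 value: 2 })pb",
      GetTestingDescriptorPool(), GetTestingMessageFactory());
  ParsedMapFieldValue value(
      message,
      message->GetDescriptor()->FindFieldByName("map_int64_int64"));
  return value.Size();  // 1: reflection FieldSize() over the map field
}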
457e74e8-b51e-4693-99c9-01e8f9492e91 | cpp | tensorflow/tensorflow | composite_tensor_variant | tensorflow/core/kernels/composite_tensor_variant.cc | tensorflow/core/kernels/composite_tensor_variant_test.cc | #include "tensorflow/core/kernels/composite_tensor_variant.h"
#include "tensorflow/core/framework/variant_op_registry.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/protobuf/composite_tensor_variant.pb.h"
#include "tensorflow/core/protobuf/struct.pb.h"
namespace tensorflow {
constexpr const char CompositeTensorVariant::kTypeName[];
CompositeTensorVariant::CompositeTensorVariant(
const CompositeTensorVariantMetadata& metadata,
absl::Span<Tensor> flat_components)
: flat_components_(flat_components.begin(), flat_components.end()),
metadata_(new CompositeTensorVariantMetadata()) {
*metadata_ = metadata;
}
CompositeTensorVariant::CompositeTensorVariant()
: metadata_(new CompositeTensorVariantMetadata()) {}
CompositeTensorVariant::CompositeTensorVariant(
const CompositeTensorVariant& other)
: flat_components_(other.flat_components_),
metadata_(new CompositeTensorVariantMetadata()) {
*metadata_ = *other.metadata_;
}
void CompositeTensorVariant::Encode(VariantTensorData* data) const {
data->set_type_name(TypeName());
metadata_->SerializeToString(&data->metadata_string());
for (const Tensor& tensor : flat_components_) {
data->add_tensor(tensor);
}
}
bool CompositeTensorVariant::Decode(const VariantTensorData& data) {
if (!metadata_->ParseFromString(data.metadata_string())) {
return false;
}
flat_components_ = data.tensors();
return true;
}
string CompositeTensorVariant::DebugString() const {
string result("<CompositeTensorVariant type=");
result.append(TypeSpecProto::TypeSpecClass_Name(
metadata_->type_spec_proto().type_spec_class()));
result.append(", components=[");
for (const auto& tensor : flat_components_) {
if (&tensor != &flat_components_[0]) {
result.append(", ");
}
result.append(tensor.DebugString());
}
result.append("]>");
return result;
}
REGISTER_UNARY_VARIANT_DECODE_FUNCTION(CompositeTensorVariant,
CompositeTensorVariant::kTypeName);
} | #include "tensorflow/core/kernels/composite_tensor_variant.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/variant.h"
#include "tensorflow/core/framework/variant_encode_decode.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/composite_tensor_variant.pb.h"
namespace tensorflow {
namespace {
constexpr const char* k2DRaggedTensorSpec = R"(
type_spec_proto: {
type_spec_class: RAGGED_TENSOR_SPEC
type_state: {
tuple_value: {
values: [
{tensor_shape_value:
{dim: [{size: -1}, {size: -1}]}}, # shape
{tensor_dtype_value: DT_INT32}, # dtype
{int64_value: 1}, # ragged_rank
{tensor_dtype_value: DT_INT64} # row_splits_dtype
]
}
}
})";
CompositeTensorVariant Make2DRaggedTensor(const std::vector<int32>& values,
const std::vector<int64_t>& splits) {
CompositeTensorVariantMetadata metadata;
EXPECT_TRUE(
protobuf::TextFormat::ParseFromString(k2DRaggedTensorSpec, &metadata));
std::vector<Tensor> components;
components.push_back(test::AsTensor<int32>(values));
components.push_back(test::AsTensor<int64_t>(splits));
CompositeTensorVariant v(metadata, absl::MakeSpan(components));
return v;
}
TEST(CompositeTensorVariantTest, EncodeAndDecodeRagged) {
CompositeTensorVariant v = Make2DRaggedTensor(
{5, 5, 3, 4, 1, 8},
{0, 2, 3, 6});
Tensor t(DT_VARIANT, {});
t.flat<Variant>()(0) = v;
auto* decoded = t.flat<Variant>()(0).get<CompositeTensorVariant>();
EXPECT_EQ(v.metadata().SerializeAsString(),
decoded->metadata().SerializeAsString());
EXPECT_EQ(v.flat_components().size(), 2);
test::ExpectTensorEqual<int32>(v.flat_components()[0],
decoded->flat_components()[0]);
test::ExpectTensorEqual<int64_t>(v.flat_components()[1],
decoded->flat_components()[1]);
}
TEST(CompositeTensorVariantTest, DebugStringForDefaultConstructed) {
CompositeTensorVariant v;
EXPECT_EQ(v.DebugString(),
"<CompositeTensorVariant type=UNKNOWN, components=[]>");
}
TEST(CompositeTensorVariantTest, DebugStringForRagged) {
CompositeTensorVariant v = Make2DRaggedTensor(
{5, 5, 3, 4, 1},
{0, 2, 3, 5});
EXPECT_EQ(v.DebugString(),
"<CompositeTensorVariant type=RAGGED_TENSOR_SPEC, "
"components=[Tensor<type: int32 shape: [5] values: 5 5 3...>, "
"Tensor<type: int64 shape: [4] values: 0 2 3...>]>");
}
TEST(CompositeTensorVariantTest, TypeName) {
CompositeTensorVariant v;
EXPECT_EQ(v.TypeName(), "CompositeTensorVariant");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/composite_tensor_variant.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/composite_tensor_variant_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
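Editor's note: a hedged sketch of how the variant above typically travels inside a DT_VARIANT tensor, mirroring the EncodeAndDecodeRagged test; Make2DRaggedTensor is the helper defined in that test, not a library function.
// Illustrative sketch only. Encode()/Decode() run when the tensor is
// serialized/deserialized; the assignment below just stores the variant.
Tensor WrapRaggedInVariantTensor() {
  CompositeTensorVariant v = Make2DRaggedTensor({1, 2, 3},
                                                {0, 1, 3});
  Tensor t(DT_VARIANT, {});
  t.flat<Variant>()(0) = v;  // carries the metadata proto + flat components
  return t;
}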
b145ff1e-2b2e-43d8-9b7d-e505ea1f4c60 | cpp | tensorflow/tensorflow | xla_platform_info | tensorflow/compiler/jit/xla_platform_info.cc | tensorflow/compiler/jit/xla_platform_info_test.cc | #include "tensorflow/compiler/jit/xla_platform_info.h"
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "tensorflow/compiler/jit/device_executable_persistor.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/pjrt_device_compiler_client.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/compiler/jit/xla_device_compiler_client.h"
#include "xla/client/client_library.h"
#include "xla/client/local_client.h"
#include "xla/pjrt/pjrt_client.h"
#include "xla/service/compiler.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/tsl/framework/device_type.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "tensorflow/core/tfrt/common/global_state.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
using XlaDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceExecutablePersistor =
DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>;
XlaDeviceCompiler* CreateXlaDeviceCompiler(
const XlaDeviceExecutablePersistor::Config& persistor_config,
DeviceType compilation_device_type, xla::LocalClient* local_client) {
return new XlaDeviceCompiler(
std::make_unique<XlaDeviceExecutablePersistor>(
std::move(persistor_config), compilation_device_type),
std::make_unique<XlaDeviceCompilerClient>(local_client));
}
PjRtDeviceCompiler* CreatePjRtDeviceCompiler(DeviceType compilation_device_type,
xla::PjRtClient* pjrt_client) {
std::string persistent_cache_directory =
GetPersistentCacheDirectory(compilation_device_type);
PjRtDeviceExecutablePersistor::Config persistor_config(
persistent_cache_directory,
GetMarkForCompilationPassFlags()->tf_xla_disable_strict_signature_checks,
GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_prefix,
GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_read_only);
return new PjRtDeviceCompiler(
std::make_unique<PjRtDeviceExecutablePersistor>(
std::move(persistor_config), compilation_device_type),
std::make_unique<PjRtDeviceCompilerClient>(pjrt_client));
}
absl::StatusOr<std::optional<std::set<int>>> GetAllowedGpus(
FunctionLibraryRuntime* flr) {
std::optional<std::set<int>> gpu_ids = std::nullopt;
if (flr->config_proto()) {
string allowed_gpus =
flr->config_proto()->gpu_options().visible_device_list();
TF_ASSIGN_OR_RETURN(gpu_ids, ParseVisibleDeviceList(allowed_gpus));
}
return gpu_ids;
}
Status GetCompilationDeviceTypeAndPjRtClient(
const XlaPlatformInfo& platform_info, FunctionLibraryRuntime* flr,
DeviceType* compilation_device_type, xla::PjRtClient** pjrt_client) {
DeviceType device_type = platform_info.device_type();
if (platform_info.xla_device_metadata()) {
VLOG(2) << "Building PjRtDeviceCompiler using "
"platform_info.xla_device_metadata().";
*compilation_device_type =
platform_info.xla_device_metadata()->jit_device_type();
TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type));
return absl::OkStatus();
}
if (platform_info.pjrt_device_metadata()) {
VLOG(2) << "Building PjRtDeviceCompiler using "
"platform_info.pjrt_device_metadata().";
*compilation_device_type =
platform_info.pjrt_device_metadata()->jit_device_type();
TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type));
return absl::OkStatus();
}
if (device_type == DEVICE_TPU) {
*compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type));
return absl::OkStatus();
}
VLOG(2) << "platform_info.xla_device_metadata not found and "
"platform_info.device_type() != DEVICE_TPU. Building "
"PjRtDeviceCompiler for non-XLA device.";
const XlaOpRegistry::DeviceRegistration* registration;
  if (!XlaOpRegistry::GetCompilationDevice(device_type.type(), &registration)) {
return errors::InvalidArgument("No JIT device registered for ",
device_type.type());
}
*compilation_device_type = DeviceType(registration->compilation_device_name);
TF_ASSIGN_OR_RETURN(auto allowed_gpus, GetAllowedGpus(flr));
TF_ASSIGN_OR_RETURN(*pjrt_client,
GetOrCreatePjRtClient(device_type, allowed_gpus));
return absl::OkStatus();
}
}
std::string GetPersistentCacheDirectory(
const DeviceType& compilation_device_type) {
if (!GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types.empty() &&
!absl::c_any_of(absl::StrSplit(GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types,
','),
[&](absl::string_view device) {
return compilation_device_type == DeviceType(device);
})) {
return "";
}
return GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_directory;
}
absl::StatusOr<std::optional<std::set<int>>> ParseVisibleDeviceList(
absl::string_view visible_device_list) {
std::set<int> gpu_ids;
if (visible_device_list.empty()) {
return {{std::nullopt}};
}
const std::vector<string> visible_devices =
absl::StrSplit(visible_device_list, ',');
for (const string& platform_device_id_str : visible_devices) {
int32_t platform_device_id;
if (!absl::SimpleAtoi(platform_device_id_str, &platform_device_id)) {
return errors::InvalidArgument(
"Could not parse entry in 'visible_device_list': '",
platform_device_id_str,
"'. visible_device_list = ", visible_device_list);
}
gpu_ids.insert(platform_device_id);
}
return {{gpu_ids}};
}
absl::StatusOr<DeviceType> GetCompilationDeviceType(
const DeviceType& platform_device_type) {
DeviceType compilation_device_type = platform_device_type;
const XlaOpRegistry::DeviceRegistration* registration = nullptr;
if (!XlaOpRegistry::GetCompilationDevice(platform_device_type.type(),
                                           &registration)) {
return errors::InvalidArgument("No JIT device registered for ",
platform_device_type.type());
}
compilation_device_type = DeviceType(registration->compilation_device_name);
return compilation_device_type;
}
Status BuildXlaDeviceCompiler(DeviceBase* device, FunctionLibraryRuntime* flr,
const XlaPlatformInfo& platform_info,
DeviceType compilation_device_type,
XlaDeviceCompiler** xla_device_compiler) {
if (platform_info.platform_id() == nullptr &&
platform_info.device_type() == DEVICE_GPU) {
*xla_device_compiler = new XlaDeviceCompiler(nullptr,
nullptr);
return absl::OkStatus();
}
std::string persistent_cache_directory =
GetPersistentCacheDirectory(platform_info.device_type());
XlaDeviceExecutablePersistor::Config persistor_config(
persistent_cache_directory,
GetMarkForCompilationPassFlags()->tf_xla_disable_strict_signature_checks,
GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_prefix,
GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_read_only);
if (platform_info.xla_device_metadata()) {
*xla_device_compiler = CreateXlaDeviceCompiler(
persistor_config,
platform_info.xla_device_metadata()->jit_device_type(),
platform_info.xla_device_metadata()->client());
return absl::OkStatus();
}
if (platform_info.device_type() == DEVICE_TPU) {
*xla_device_compiler = CreateXlaDeviceCompiler(
persistor_config, DeviceType(DEVICE_TPU_XLA_JIT), nullptr);
return absl::OkStatus();
}
if (platform_info.platform_id() == nullptr) {
return errors::InvalidArgument("platform_id is null.");
}
auto platform =
se::PlatformManager::PlatformWithId(platform_info.platform_id());
if (!platform.ok()) {
return platform.status();
}
absl::StatusOr<xla::Compiler*> compiler_for_platform =
xla::Compiler::GetForPlatform(platform.value());
if (!compiler_for_platform.ok()) {
const Status& status = compiler_for_platform.status();
if (status.code() == error::NOT_FOUND) {
return errors::Unimplemented("Could not find compiler for platform ",
platform.value()->Name(), ": ",
status.ToString());
}
}
xla::LocalClientOptions client_options;
client_options.set_platform(platform.value());
if (device != nullptr) {
client_options.set_intra_op_parallelism_threads(
device->tensorflow_cpu_worker_threads()->num_threads);
}
if (flr != nullptr) {
TF_ASSIGN_OR_RETURN(auto allowed_gpus, GetAllowedGpus(flr));
client_options.set_allowed_devices(allowed_gpus);
}
TF_ASSIGN_OR_RETURN(
auto client, xla::ClientLibrary::GetOrCreateLocalClient(client_options));
*xla_device_compiler = CreateXlaDeviceCompiler(
persistor_config, compilation_device_type, client);
return absl::OkStatus();
}
Status GetOrCreatePjRtDeviceCompilerAndProfiler(
const XlaPlatformInfo& platform_info, ResourceMgr* rm,
FunctionLibraryRuntime* flr, PjRtDeviceCompiler** pjrt_device_compiler,
DeviceCompilationProfiler** profiler) {
const auto& device_type = platform_info.device_type();
const std::string& compiler_name =
GetPjRtDeviceCompilerResourceName(device_type);
const std::string& profiler_name =
GetPjRtDeviceCompilationProfilerResourceName(device_type);
bool deleted_old_device_compiler = false;
Status s = rm->Lookup<PjRtDeviceCompiler>(
rm->default_container(), compiler_name, pjrt_device_compiler);
if (s.ok() && device_type == DEVICE_TPU) {
auto* existing_pjrt_client = (*pjrt_device_compiler)->client();
TF_ASSIGN_OR_RETURN(auto* latest_pjrt_client, GetPjRtClient(device_type));
if (existing_pjrt_client != latest_pjrt_client) {
TF_RETURN_IF_ERROR(rm->Delete<PjRtDeviceCompiler>(rm->default_container(),
compiler_name));
TF_RETURN_IF_ERROR(rm->Delete<DeviceCompilationProfiler>(
rm->default_container(), profiler_name));
deleted_old_device_compiler = true;
}
}
if (!s.ok() || deleted_old_device_compiler) {
DeviceType compilation_device_type("");
xla::PjRtClient* pjrt_client = nullptr;
TF_RETURN_IF_ERROR(GetCompilationDeviceTypeAndPjRtClient(
platform_info, flr, &compilation_device_type, &pjrt_client));
TF_RETURN_IF_ERROR(rm->LookupOrCreate<PjRtDeviceCompiler>(
rm->default_container(), compiler_name, pjrt_device_compiler,
[&](PjRtDeviceCompiler** pjrt_device_compiler) {
*pjrt_device_compiler =
CreatePjRtDeviceCompiler(compilation_device_type, pjrt_client);
return absl::OkStatus();
}));
}
TF_RETURN_IF_ERROR(rm->LookupOrCreate<DeviceCompilationProfiler>(
rm->default_container(), profiler_name, profiler,
[](DeviceCompilationProfiler** profiler) {
*profiler = new DeviceCompilationProfiler();
return absl::OkStatus();
}));
return absl::OkStatus();
}
Status GetOrCreatePjRtDeviceCompilerAndProfiler(
const OpKernelContext& ctx, const XlaPlatformInfo& platform_info,
FunctionLibraryRuntime* flr,
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>**
pjrt_device_compiler,
DeviceCompilationProfiler** profiler) {
TF_ASSIGN_OR_RETURN(ResourceMgr * rm, GetResourceMgrForDeviceCompiler(
ctx, platform_info.device_type()));
return GetOrCreatePjRtDeviceCompilerAndProfiler(
platform_info, rm, flr, pjrt_device_compiler, profiler);
}
XlaPlatformInfo XlaPlatformInfoFromDevice(DeviceBase* device_base) {
se::Platform::Id platform_id = nullptr;
const XlaDevice::Metadata* xla_device_metadata = nullptr;
const PjRtBaseDevice::Metadata* pjrt_device_metadata = nullptr;
std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator;
const std::string& device_type = device_base->device_type();
if (device_type == DEVICE_CPU) {
platform_id = se::host::kHostPlatformId;
} else if (device_type == DEVICE_GPU) {
auto device = static_cast<Device*>(device_base);
platform_id = device->tensorflow_accelerator_device_info()
->stream->parent()
->GetPlatform()
->id();
} else if (XlaDevice::GetMetadataFromDevice(device_base, &xla_device_metadata)
.ok()) {
platform_id = xla_device_metadata->platform()->id();
custom_allocator =
xla_device_metadata->client()->backend().shared_memory_allocator();
} else if (auto metadata = PjRtBaseDevice::GetMetadataFromDevice(device_base);
metadata.ok()) {
pjrt_device_metadata = *metadata;
}
return XlaPlatformInfo(DeviceType(device_type), platform_id,
xla_device_metadata, pjrt_device_metadata,
custom_allocator);
}
std::shared_ptr<se::DeviceMemoryAllocator> GetAllocator(
DeviceBase* device, se::Stream* stream,
const XlaPlatformInfo& platform_info) {
if (platform_info.custom_allocator()) {
return platform_info.custom_allocator();
}
auto* alloc = device->GetAllocator({});
if (!stream) {
se::Platform* platform =
se::PlatformManager::PlatformWithId(platform_info.platform_id())
.value();
return std::make_shared<se::TfAllocatorAdapter>(alloc, platform);
}
return std::make_shared<se::TfAllocatorAdapter>(alloc, stream);
}
} | #include "tensorflow/compiler/jit/xla_platform_info.h"
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "xla/pjrt/tfrt_cpu_pjrt_client.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
#include "tensorflow/core/tfrt/common/create_pjrt_client_util.h"
#include "tensorflow/core/tfrt/common/pjrt_util.h"
#include "tensorflow/core/tpu/tpu_defs.h"
namespace tensorflow {
namespace {
using XlaDeviceCompiler =
DeviceCompiler<xla::LocalExecutable, xla::LocalClient>;
using PjRtDeviceCompiler =
DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>;
class XlaPlatformInfoTest : public ::testing::Test {
protected:
void SetUp() override {
tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true;
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = "";
}
DeviceSetup device_setup_;
};
class StubDevice : public DeviceBase {
public:
StubDevice() : DeviceBase(nullptr) {}
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerXlaDeviceMetadata) {
device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU);
const XlaDevice::Metadata* metadata = nullptr;
TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata));
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
TF_ASSERT_OK_AND_ASSIGN(
DeviceType compilation_device_type,
GetCompilationDeviceType(platform_info.device_type()));
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(),
platform_info, compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), metadata->jit_device_type());
EXPECT_EQ(xla_device_compiler->client(), metadata->client());
}
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerXlaDeviceCacheEnabled) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = DEVICE_XLA_GPU;
device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU});
Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU);
const XlaDevice::Metadata* metadata = nullptr;
TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata));
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
TF_ASSERT_OK_AND_ASSIGN(
DeviceType compilation_device_type,
GetCompilationDeviceType(platform_info.device_type()));
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(),
platform_info, compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), metadata->jit_device_type());
EXPECT_EQ(xla_device_compiler->client(), metadata->client());
EXPECT_EQ(xla_device_compiler->persistor()->persistent_cache_directory(),
"/tmp/xla_cache");
}
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerNonXlaDevice) {
device_setup_.AddDevicesAndSetUp({DEVICE_GPU});
Device* device = device_setup_.GetDevice(DEVICE_GPU);
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
TF_ASSERT_OK_AND_ASSIGN(
DeviceType compilation_device_type,
GetCompilationDeviceType(platform_info.device_type()));
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(),
platform_info, compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), DeviceType(DEVICE_GPU_XLA_JIT));
EXPECT_TRUE(xla_device_compiler->client() != nullptr);
}
TEST_F(XlaPlatformInfoTest, GetOrCreatePjRtDeviceCompilerAndProfilerXlaDevice) {
DeviceType device_type = DeviceType(DEVICE_XLA_GPU);
device_setup_.AddDevicesAndSetUp({device_type.type()});
Device* device = device_setup_.GetDevice(device_type.type());
const XlaDevice::Metadata* metadata = nullptr;
TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata));
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
params.device = device;
  OpKernelContext ctx(&params, 0);
PjRtDeviceCompiler* pjrt_device_compiler = nullptr;
DeviceCompilationProfiler* profiler = nullptr;
TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler(
ctx, platform_info, device_setup_.flr(), &pjrt_device_compiler,
&profiler));
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
core::ScopedUnref profiler_ref(profiler);
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetOrCreatePjRtClient(device_type));
EXPECT_EQ(pjrt_device_compiler->device_type(), metadata->jit_device_type());
EXPECT_EQ(pjrt_device_compiler->client(), pjrt_client);
}
TEST_F(XlaPlatformInfoTest,
GetOrCreatePjRtDeviceCompilerAndProfilerGpuDeviceCacheEnabled) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = DEVICE_GPU_XLA_JIT;
device_setup_.AddDevicesAndSetUp({DEVICE_GPU});
Device* device = device_setup_.GetDevice(DEVICE_GPU);
XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device);
ResourceMgr resource_mgr("");
OpKernelContext::Params params;
params.resource_manager = &resource_mgr;
params.device = device;
  OpKernelContext ctx(&params, 0);
PjRtDeviceCompiler* pjrt_device_compiler = nullptr;
DeviceCompilationProfiler* profiler = nullptr;
TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler(
ctx, platform_info, device_setup_.flr(), &pjrt_device_compiler,
&profiler));
EXPECT_EQ(pjrt_device_compiler->persistor()->persistent_cache_directory(),
"/tmp/xla_cache");
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
core::ScopedUnref profiler_ref(profiler);
}
#endif
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerTpuDevice) {
DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
Device* device = nullptr;
XlaPlatformInfo platform_info(DeviceType(DEVICE_TPU), nullptr,
nullptr,
nullptr,
nullptr);
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, nullptr, platform_info,
compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), compilation_device_type);
EXPECT_EQ(xla_device_compiler->client(), nullptr);
}
TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerNoCompilationCache) {
DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = DEVICE_XLA_GPU;
Device* device = nullptr;
XlaPlatformInfo platform_info(DeviceType(DEVICE_TPU), nullptr,
nullptr,
nullptr,
nullptr);
XlaDeviceCompiler* xla_device_compiler = nullptr;
TF_EXPECT_OK(BuildXlaDeviceCompiler(device, nullptr, platform_info,
compilation_device_type,
&xla_device_compiler));
core::ScopedUnref xla_device_compiler_ref(xla_device_compiler);
EXPECT_EQ(xla_device_compiler->device_type(), compilation_device_type);
EXPECT_TRUE(
xla_device_compiler->persistor()->persistent_cache_directory().empty());
}
TEST_F(XlaPlatformInfoTest,
GetOrCreatePjRtDeviceCompilerAndProfilerTpuDeviceNoCompilationCache) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = DEVICE_GPU_XLA_JIT;
DeviceType device_type = DeviceType(DEVICE_TPU);
DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT);
TF_CHECK_OK(SetPjRtClientInTFGlobalResourceManager(
device_type,
xla::GetTfrtCpuClient(true, 1)
.value()));
TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetOrCreatePjRtClient(device_type));
XlaPlatformInfo platform_info(device_type, nullptr,
nullptr,
nullptr,
nullptr);
OpKernelContext::Params params;
StubDevice stub_device;
params.device = &stub_device;
  OpKernelContext ctx(&params, 0);
PjRtDeviceCompiler* pjrt_device_compiler = nullptr;
DeviceCompilationProfiler* profiler = nullptr;
TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler(
ctx, platform_info, nullptr, &pjrt_device_compiler, &profiler));
core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler);
core::ScopedUnref profiler_ref(profiler);
EXPECT_EQ(pjrt_device_compiler->device_type(), compilation_device_type);
EXPECT_EQ(pjrt_device_compiler->client(), pjrt_client);
EXPECT_TRUE(
pjrt_device_compiler->persistor()->persistent_cache_directory().empty());
}
TEST_F(XlaPlatformInfoTest, GetPersistentCacheDirectoryMultiple) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = "GPU,CPU";
DeviceType device_gpu = DeviceType(DEVICE_GPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_gpu), "/tmp/xla_cache");
DeviceType device_cpu = DeviceType(DEVICE_CPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_cpu), "/tmp/xla_cache");
DeviceType device_tpu = DeviceType(DEVICE_TPU);
EXPECT_TRUE(GetPersistentCacheDirectory(device_tpu).empty());
}
TEST_F(XlaPlatformInfoTest, GetPersistentCacheDirectoryNoDeviceTypes) {
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_directory = "/tmp/xla_cache";
tensorflow::GetMarkForCompilationPassFlags()
->tf_xla_persistent_cache_device_types = "";
DeviceType device_gpu = DeviceType(DEVICE_GPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_gpu), "/tmp/xla_cache");
DeviceType device_cpu = DeviceType(DEVICE_CPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_cpu), "/tmp/xla_cache");
DeviceType device_tpu = DeviceType(DEVICE_TPU);
EXPECT_EQ(GetPersistentCacheDirectory(device_tpu), "/tmp/xla_cache");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_platform_info.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/xla_platform_info_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
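Editor's note: a hedged sketch of the flag-driven cache-directory helper above, mirroring the GetPersistentCacheDirectoryMultiple test; the flag fields and DeviceType constants are the ones already used in this file, and namespace tensorflow is assumed.
// Illustrative sketch only.
void DemoPersistentCacheDirectorySelection() {
  GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_directory =
      "/tmp/xla_cache";
  GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_device_types =
      "GPU,CPU";
  // Device types named in the flag resolve to the configured directory...
  std::string gpu_dir = GetPersistentCacheDirectory(DeviceType(DEVICE_GPU));
  // ...while unlisted ones (TPU here) get an empty string, i.e. no cache.
  std::string tpu_dir = GetPersistentCacheDirectory(DeviceType(DEVICE_TPU));
}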
61b557ea-0031-4b41-95a5-59c75cafefa9 | cpp | google/tsl | stacktrace | tsl/platform/windows/stacktrace.cc | tsl/platform/stacktrace_test.cc | #include "tsl/platform/windows/stacktrace.h"
#include <windows.h>
#include <dbghelp.h>
#include <string>
#include "tsl/platform/mutex.h"
#pragma comment(lib, "dbghelp.lib")
namespace tsl {
static bool SymbolsAreAvailableInit() {
SymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS);
return SymInitialize(GetCurrentProcess(), NULL, true);
}
static bool SymbolsAreAvailable() {
static bool kSymbolsAvailable = SymbolsAreAvailableInit();
return kSymbolsAvailable;
}
std::string CurrentStackTrace() {
HANDLE current_process = GetCurrentProcess();
static constexpr int kMaxStackFrames = 64;
void* trace[kMaxStackFrames];
int num_frames = CaptureStackBackTrace(0, kMaxStackFrames, trace, NULL);
static mutex mu(tsl::LINKER_INITIALIZED);
std::string stacktrace;
for (int i = 0; i < num_frames; ++i) {
const char* symbol = "(unknown)";
if (SymbolsAreAvailable()) {
char symbol_info_buffer[sizeof(SYMBOL_INFO) +
MAX_SYM_NAME * sizeof(TCHAR)];
SYMBOL_INFO* symbol_ptr =
reinterpret_cast<SYMBOL_INFO*>(symbol_info_buffer);
symbol_ptr->SizeOfStruct = sizeof(SYMBOL_INFO);
symbol_ptr->MaxNameLen = MAX_SYM_NAME;
mutex_lock lock(mu);
if (SymFromAddr(current_process, reinterpret_cast<DWORD64>(trace[i]), 0,
symbol_ptr)) {
symbol = symbol_ptr->Name;
}
}
char buffer[256];
snprintf(buffer, sizeof(buffer), "0x%p\t%s", trace[i], symbol);
stacktrace += buffer;
stacktrace += "\n";
}
return stacktrace;
}
} | #include "tsl/platform/stacktrace.h"
#include <string>
#include "tsl/platform/logging.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace {
#if defined(TF_HAS_STACKTRACE)
TEST(StacktraceTest, StacktraceWorks) {
std::string stacktrace = CurrentStackTrace();
LOG(INFO) << "CurrentStackTrace():\n" << stacktrace;
std::string expected_frame = "testing::internal::UnitTestImpl::RunAllTests";
EXPECT_NE(stacktrace.find(expected_frame), std::string::npos);
}
#endif
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/windows/stacktrace.cc | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/stacktrace_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
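Editor's note: a minimal hedged sketch of the public entry point above; symbolization quality depends on the platform (see SymbolsAreAvailable() in the Windows implementation), so frames may print as "(unknown)".
// Illustrative sketch only.
#include <iostream>
#include <string>
#include "tsl/platform/stacktrace.h"

void LogCurrentStack() {
  const std::string trace = tsl::CurrentStackTrace();
  std::cerr << "Current stack:\n" << trace;
}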
513dcbb4-fb4b-4c9a-9804-aae7b9470e23 | cpp | google/cel-cpp | regex_precompilation | runtime/regex_precompilation.cc | runtime/regex_precompilation_test.cc | #include "runtime/regex_precompilation.h"
#include "absl/base/macros.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "common/native_type.h"
#include "eval/compiler/regex_precompilation_optimization.h"
#include "internal/casts.h"
#include "internal/status_macros.h"
#include "runtime/internal/runtime_friend_access.h"
#include "runtime/internal/runtime_impl.h"
#include "runtime/runtime.h"
#include "runtime/runtime_builder.h"
namespace cel::extensions {
namespace {
using ::cel::internal::down_cast;
using ::cel::runtime_internal::RuntimeFriendAccess;
using ::cel::runtime_internal::RuntimeImpl;
using ::google::api::expr::runtime::CreateRegexPrecompilationExtension;
absl::StatusOr<RuntimeImpl*> RuntimeImplFromBuilder(RuntimeBuilder& builder) {
Runtime& runtime = RuntimeFriendAccess::GetMutableRuntime(builder);
if (RuntimeFriendAccess::RuntimeTypeId(runtime) !=
NativeTypeId::For<RuntimeImpl>()) {
return absl::UnimplementedError(
"regex precompilation only supported on the default cel::Runtime "
"implementation.");
}
RuntimeImpl& runtime_impl = down_cast<RuntimeImpl&>(runtime);
return &runtime_impl;
}
}
absl::Status EnableRegexPrecompilation(RuntimeBuilder& builder) {
CEL_ASSIGN_OR_RETURN(RuntimeImpl * runtime_impl,
RuntimeImplFromBuilder(builder));
ABSL_ASSERT(runtime_impl != nullptr);
runtime_impl->expr_builder().AddProgramOptimizer(
CreateRegexPrecompilationExtension(
runtime_impl->expr_builder().options().regex_max_program_size));
return absl::OkStatus();
}
} | #include "runtime/regex_precompilation.h"
#include <string>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "base/function_adapter.h"
#include "common/value.h"
#include "extensions/protobuf/runtime_adapter.h"
#include "internal/testing.h"
#include "parser/parser.h"
#include "runtime/activation.h"
#include "runtime/constant_folding.h"
#include "runtime/managed_value_factory.h"
#include "runtime/register_function_helper.h"
#include "runtime/runtime_builder.h"
#include "runtime/runtime_options.h"
#include "runtime/standard_runtime_builder_factory.h"
namespace cel::extensions {
namespace {
using ::absl_testing::StatusIs;
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::parser::Parse;
using ::testing::_;
using ::testing::HasSubstr;
using ValueMatcher = testing::Matcher<Value>;
struct TestCase {
std::string name;
std::string expression;
ValueMatcher result_matcher;
absl::Status create_status;
};
MATCHER_P(IsIntValue, expected, "") {
const Value& value = arg;
return value->Is<IntValue>() && value.GetInt().NativeValue() == expected;
}
MATCHER_P(IsBoolValue, expected, "") {
const Value& value = arg;
return value->Is<BoolValue>() && value.GetBool().NativeValue() == expected;
}
MATCHER_P(IsErrorValue, expected_substr, "") {
const Value& value = arg;
return value->Is<ErrorValue>() &&
absl::StrContains(value.GetError().NativeValue().message(),
expected_substr);
}
class RegexPrecompilationTest : public testing::TestWithParam<TestCase> {};
TEST_P(RegexPrecompilationTest, Basic) {
RuntimeOptions options;
const TestCase& test_case = GetParam();
ASSERT_OK_AND_ASSIGN(cel::RuntimeBuilder builder,
CreateStandardRuntimeBuilder(options));
auto status = RegisterHelper<BinaryFunctionAdapter<
absl::StatusOr<Value>, const StringValue&, const StringValue&>>::
RegisterGlobalOverload(
"prepend",
[](ValueManager& f, const StringValue& value,
const StringValue& prefix) {
return StringValue::Concat(f, prefix, value);
},
builder.function_registry());
ASSERT_OK(status);
ASSERT_OK(EnableRegexPrecompilation(builder));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse(test_case.expression));
auto program_or =
ProtobufRuntimeAdapter::CreateProgram(*runtime, parsed_expr);
if (!test_case.create_status.ok()) {
ASSERT_THAT(program_or.status(),
StatusIs(test_case.create_status.code(),
HasSubstr(test_case.create_status.message())));
return;
}
ASSERT_OK_AND_ASSIGN(auto program, std::move(program_or));
ManagedValueFactory value_factory(program->GetTypeProvider(),
MemoryManagerRef::ReferenceCounting());
Activation activation;
ASSERT_OK_AND_ASSIGN(auto var,
value_factory.get().CreateStringValue("string_var"));
activation.InsertOrAssignValue("string_var", var);
ASSERT_OK_AND_ASSIGN(Value value,
program->Evaluate(activation, value_factory.get()));
EXPECT_THAT(value, test_case.result_matcher);
}
TEST_P(RegexPrecompilationTest, WithConstantFolding) {
RuntimeOptions options;
const TestCase& test_case = GetParam();
ASSERT_OK_AND_ASSIGN(cel::RuntimeBuilder builder,
CreateStandardRuntimeBuilder(options));
auto status = RegisterHelper<BinaryFunctionAdapter<
absl::StatusOr<Value>, const StringValue&, const StringValue&>>::
RegisterGlobalOverload(
"prepend",
[](ValueManager& f, const StringValue& value,
const StringValue& prefix) {
return StringValue::Concat(f, prefix, value);
},
builder.function_registry());
ASSERT_OK(status);
ASSERT_OK(
EnableConstantFolding(builder, MemoryManagerRef::ReferenceCounting()));
ASSERT_OK(EnableRegexPrecompilation(builder));
ASSERT_OK_AND_ASSIGN(auto runtime, std::move(builder).Build());
ASSERT_OK_AND_ASSIGN(ParsedExpr parsed_expr, Parse(test_case.expression));
auto program_or =
ProtobufRuntimeAdapter::CreateProgram(*runtime, parsed_expr);
if (!test_case.create_status.ok()) {
ASSERT_THAT(program_or.status(),
StatusIs(test_case.create_status.code(),
HasSubstr(test_case.create_status.message())));
return;
}
ASSERT_OK_AND_ASSIGN(auto program, std::move(program_or));
ManagedValueFactory value_factory(program->GetTypeProvider(),
MemoryManagerRef::ReferenceCounting());
Activation activation;
ASSERT_OK_AND_ASSIGN(auto var,
value_factory.get().CreateStringValue("string_var"));
activation.InsertOrAssignValue("string_var", var);
ASSERT_OK_AND_ASSIGN(Value value,
program->Evaluate(activation, value_factory.get()));
EXPECT_THAT(value, test_case.result_matcher);
}
INSTANTIATE_TEST_SUITE_P(
Cases, RegexPrecompilationTest,
testing::ValuesIn(std::vector<TestCase>{
{"matches_receiver", R"(string_var.matches(r's\w+_var'))",
IsBoolValue(true)},
{"matches_receiver_false", R"(string_var.matches(r'string_var\d+'))",
IsBoolValue(false)},
{"matches_global_true", R"(matches(string_var, r's\w+_var'))",
IsBoolValue(true)},
{"matches_global_false", R"(matches(string_var, r'string_var\d+'))",
IsBoolValue(false)},
{"matches_bad_re2_expression", "matches('123', r'(?<!a)123')", _,
absl::InvalidArgumentError("unsupported RE2")},
{"matches_unsupported_call_signature",
"matches('123', r'(?<!a)123', 'gi')", _,
absl::InvalidArgumentError("No overloads")},
{"constant_computation",
"matches(string_var, r'string' + '_' + r'var')", IsBoolValue(true)},
}),
[](const testing::TestParamInfo<TestCase>& info) {
return info.param.name;
});
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/regex_precompilation.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/runtime/regex_precompilation_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
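Editor's note: a hedged sketch of the minimal wiring needed to enable the extension above on the default runtime, following the test; CEL_ASSIGN_OR_RETURN and CEL_RETURN_IF_ERROR are the status macros already included by regex_precompilation.cc.
// Illustrative sketch only.
absl::Status BuildRuntimeWithPrecompiledRegex() {
  cel::RuntimeOptions options;
  CEL_ASSIGN_OR_RETURN(cel::RuntimeBuilder builder,
                       cel::CreateStandardRuntimeBuilder(options));
  CEL_RETURN_IF_ERROR(cel::extensions::EnableRegexPrecompilation(builder));
  CEL_ASSIGN_OR_RETURN(auto runtime, std::move(builder).Build());
  (void)runtime;  // a real caller would hold on to the built runtime
  // Constant regex arguments to matches() should now be compiled at plan time
  // (subject to regex_max_program_size in the runtime options).
  return absl::OkStatus();
}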
a820520b-9e83-4f87-bd88-7914067953c6 | cpp | google/quiche | oblivious_http_client | quiche/oblivious_http/oblivious_http_client.cc | quiche/oblivious_http/oblivious_http_client_test.cc | #include "quiche/oblivious_http/oblivious_http_client.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/common/quiche_crypto_logging.h"
namespace quiche {
namespace {
absl::Status ValidateClientParameters(
absl::string_view hpke_public_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config) {
bssl::UniquePtr<EVP_HPKE_CTX> client_ctx(EVP_HPKE_CTX_new());
if (client_ctx == nullptr) {
return SslErrorAsStatus(
"Failed to initialize HPKE ObliviousHttpClient Context.");
}
std::string encapsulated_key(EVP_HPKE_MAX_ENC_LENGTH, '\0');
size_t enc_len;
absl::string_view info = "verify if given HPKE public key is valid";
if (!EVP_HPKE_CTX_setup_sender(
client_ctx.get(), reinterpret_cast<uint8_t*>(encapsulated_key.data()),
&enc_len, encapsulated_key.size(), ohttp_key_config.GetHpkeKem(),
ohttp_key_config.GetHpkeKdf(), ohttp_key_config.GetHpkeAead(),
reinterpret_cast<const uint8_t*>(hpke_public_key.data()),
hpke_public_key.size(), reinterpret_cast<const uint8_t*>(info.data()),
info.size())) {
return SslErrorAsStatus(
"Failed to setup HPKE context with given public key param "
"hpke_public_key.");
}
return absl::OkStatus();
}
}
ObliviousHttpClient::ObliviousHttpClient(
std::string client_public_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config)
: hpke_public_key_(std::move(client_public_key)),
ohttp_key_config_(ohttp_key_config) {}
absl::StatusOr<ObliviousHttpClient> ObliviousHttpClient::Create(
absl::string_view hpke_public_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config) {
if (hpke_public_key.empty()) {
return absl::InvalidArgumentError("Invalid/Empty HPKE public key.");
}
auto is_valid_input =
ValidateClientParameters(hpke_public_key, ohttp_key_config);
if (!is_valid_input.ok()) {
return absl::InvalidArgumentError(
absl::StrCat("Invalid input received in method parameters. ",
is_valid_input.message()));
}
return ObliviousHttpClient(std::string(hpke_public_key), ohttp_key_config);
}
absl::StatusOr<ObliviousHttpRequest>
ObliviousHttpClient::CreateObliviousHttpRequest(
std::string plaintext_data) const {
return ObliviousHttpRequest::CreateClientObliviousRequest(
std::move(plaintext_data), hpke_public_key_, ohttp_key_config_);
}
absl::StatusOr<ObliviousHttpResponse>
ObliviousHttpClient::DecryptObliviousHttpResponse(
std::string encrypted_data,
ObliviousHttpRequest::Context& oblivious_http_request_context) const {
return ObliviousHttpResponse::CreateClientObliviousResponse(
std::move(encrypted_data), oblivious_http_request_context);
}
} | #include "quiche/oblivious_http/oblivious_http_client.h"
#include <stdint.h>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/platform/api/quiche_thread.h"
namespace quiche {
std::string GetHpkePrivateKey() {
absl::string_view hpke_key_hex =
"b77431ecfa8f4cfc30d6e467aafa06944dffe28cb9dd1409e33a3045f5adc8a1";
std::string hpke_key_bytes;
EXPECT_TRUE(absl::HexStringToBytes(hpke_key_hex, &hpke_key_bytes));
return hpke_key_bytes;
}
std::string GetHpkePublicKey() {
absl::string_view public_key =
"6d21cfe09fbea5122f9ebc2eb2a69fcc4f06408cd54aac934f012e76fcdcef62";
std::string public_key_bytes;
EXPECT_TRUE(absl::HexStringToBytes(public_key, &public_key_bytes));
return public_key_bytes;
}
const ObliviousHttpHeaderKeyConfig GetOhttpKeyConfig(uint8_t key_id,
uint16_t kem_id,
uint16_t kdf_id,
uint16_t aead_id) {
auto ohttp_key_config =
ObliviousHttpHeaderKeyConfig::Create(key_id, kem_id, kdf_id, aead_id);
EXPECT_TRUE(ohttp_key_config.ok());
return ohttp_key_config.value();
}
bssl::UniquePtr<EVP_HPKE_KEY> ConstructHpkeKey(
absl::string_view hpke_key,
const ObliviousHttpHeaderKeyConfig& ohttp_key_config) {
bssl::UniquePtr<EVP_HPKE_KEY> bssl_hpke_key(EVP_HPKE_KEY_new());
EXPECT_NE(bssl_hpke_key, nullptr);
EXPECT_TRUE(EVP_HPKE_KEY_init(
bssl_hpke_key.get(), ohttp_key_config.GetHpkeKem(),
reinterpret_cast<const uint8_t*>(hpke_key.data()), hpke_key.size()));
return bssl_hpke_key;
}
TEST(ObliviousHttpClient, TestEncapsulate) {
auto client = ObliviousHttpClient::Create(
GetHpkePublicKey(),
GetOhttpKeyConfig(8, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM));
ASSERT_TRUE(client.ok());
auto encrypted_req = client->CreateObliviousHttpRequest("test string 1");
ASSERT_TRUE(encrypted_req.ok());
auto serialized_encrypted_req = encrypted_req->EncapsulateAndSerialize();
ASSERT_FALSE(serialized_encrypted_req.empty());
}
TEST(ObliviousHttpClient, TestEncryptingMultipleRequestsWithSingleInstance) {
auto client = ObliviousHttpClient::Create(
GetHpkePublicKey(),
GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM));
ASSERT_TRUE(client.ok());
auto ohttp_req_1 = client->CreateObliviousHttpRequest("test string 1");
ASSERT_TRUE(ohttp_req_1.ok());
auto serialized_ohttp_req_1 = ohttp_req_1->EncapsulateAndSerialize();
ASSERT_FALSE(serialized_ohttp_req_1.empty());
auto ohttp_req_2 = client->CreateObliviousHttpRequest("test string 2");
ASSERT_TRUE(ohttp_req_2.ok());
auto serialized_ohttp_req_2 = ohttp_req_2->EncapsulateAndSerialize();
ASSERT_FALSE(serialized_ohttp_req_2.empty());
EXPECT_NE(serialized_ohttp_req_1, serialized_ohttp_req_2);
}
TEST(ObliviousHttpClient, TestInvalidHPKEKey) {
EXPECT_EQ(ObliviousHttpClient::Create(
"Invalid HPKE key",
GetOhttpKeyConfig(50, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM))
.status()
.code(),
absl::StatusCode::kInvalidArgument);
EXPECT_EQ(ObliviousHttpClient::Create(
"",
GetOhttpKeyConfig(50, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM))
.status()
.code(),
absl::StatusCode::kInvalidArgument);
}
TEST(ObliviousHttpClient,
TestTwoSamePlaintextsWillGenerateDifferentEncryptedPayloads) {
auto client = ObliviousHttpClient::Create(
GetHpkePublicKey(),
GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM));
ASSERT_TRUE(client.ok());
auto encrypted_request_1 =
client->CreateObliviousHttpRequest("same plaintext");
ASSERT_TRUE(encrypted_request_1.ok());
auto serialized_encrypted_request_1 =
encrypted_request_1->EncapsulateAndSerialize();
ASSERT_FALSE(serialized_encrypted_request_1.empty());
auto encrypted_request_2 =
client->CreateObliviousHttpRequest("same plaintext");
ASSERT_TRUE(encrypted_request_2.ok());
auto serialized_encrypted_request_2 =
encrypted_request_2->EncapsulateAndSerialize();
ASSERT_FALSE(serialized_encrypted_request_2.empty());
EXPECT_NE(serialized_encrypted_request_1, serialized_encrypted_request_2);
}
TEST(ObliviousHttpClient, TestObliviousResponseHandling) {
auto ohttp_key_config =
GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM);
auto encapsulate_req_on_client =
ObliviousHttpRequest::CreateClientObliviousRequest(
"test", GetHpkePublicKey(), ohttp_key_config);
ASSERT_TRUE(encapsulate_req_on_client.ok());
auto decapsulate_req_on_gateway =
ObliviousHttpRequest::CreateServerObliviousRequest(
encapsulate_req_on_client->EncapsulateAndSerialize(),
*(ConstructHpkeKey(GetHpkePrivateKey(), ohttp_key_config)),
ohttp_key_config);
ASSERT_TRUE(decapsulate_req_on_gateway.ok());
auto gateway_request_context =
std::move(decapsulate_req_on_gateway.value()).ReleaseContext();
auto encapsulate_resp_on_gateway =
ObliviousHttpResponse::CreateServerObliviousResponse(
"test response", gateway_request_context);
ASSERT_TRUE(encapsulate_resp_on_gateway.ok());
auto client =
ObliviousHttpClient::Create(GetHpkePublicKey(), ohttp_key_config);
ASSERT_TRUE(client.ok());
auto client_request_context =
std::move(encapsulate_req_on_client.value()).ReleaseContext();
auto decapsulate_resp_on_client = client->DecryptObliviousHttpResponse(
encapsulate_resp_on_gateway->EncapsulateAndSerialize(),
client_request_context);
ASSERT_TRUE(decapsulate_resp_on_client.ok());
EXPECT_EQ(decapsulate_resp_on_client->GetPlaintextData(), "test response");
}
TEST(ObliviousHttpClient,
DecryptResponseReceivedByTheClientUsingServersObliviousContext) {
auto ohttp_key_config =
GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM);
auto encapsulate_req_on_client =
ObliviousHttpRequest::CreateClientObliviousRequest(
"test", GetHpkePublicKey(), ohttp_key_config);
ASSERT_TRUE(encapsulate_req_on_client.ok());
auto decapsulate_req_on_gateway =
ObliviousHttpRequest::CreateServerObliviousRequest(
encapsulate_req_on_client->EncapsulateAndSerialize(),
*(ConstructHpkeKey(GetHpkePrivateKey(), ohttp_key_config)),
ohttp_key_config);
ASSERT_TRUE(decapsulate_req_on_gateway.ok());
auto gateway_request_context =
std::move(decapsulate_req_on_gateway.value()).ReleaseContext();
auto encapsulate_resp_on_gateway =
ObliviousHttpResponse::CreateServerObliviousResponse(
"test response", gateway_request_context);
ASSERT_TRUE(encapsulate_resp_on_gateway.ok());
auto client =
ObliviousHttpClient::Create(GetHpkePublicKey(), ohttp_key_config);
ASSERT_TRUE(client.ok());
auto decapsulate_resp_on_client = client->DecryptObliviousHttpResponse(
encapsulate_resp_on_gateway->EncapsulateAndSerialize(),
gateway_request_context);
ASSERT_TRUE(decapsulate_resp_on_client.ok());
EXPECT_EQ(decapsulate_resp_on_client->GetPlaintextData(), "test response");
}
TEST(ObliviousHttpClient, TestWithMultipleThreads) {
class TestQuicheThread : public QuicheThread {
public:
TestQuicheThread(const ObliviousHttpClient& client,
std::string request_payload,
ObliviousHttpHeaderKeyConfig ohttp_key_config)
: QuicheThread("client_thread"),
client_(client),
request_payload_(request_payload),
ohttp_key_config_(ohttp_key_config) {}
protected:
void Run() override {
auto encrypted_request =
client_.CreateObliviousHttpRequest(request_payload_);
ASSERT_TRUE(encrypted_request.ok());
ASSERT_FALSE(encrypted_request->EncapsulateAndSerialize().empty());
auto decapsulate_req_on_gateway =
ObliviousHttpRequest::CreateServerObliviousRequest(
encrypted_request->EncapsulateAndSerialize(),
*(ConstructHpkeKey(GetHpkePrivateKey(), ohttp_key_config_)),
ohttp_key_config_);
ASSERT_TRUE(decapsulate_req_on_gateway.ok());
auto gateway_request_context =
std::move(decapsulate_req_on_gateway.value()).ReleaseContext();
auto encapsulate_resp_on_gateway =
ObliviousHttpResponse::CreateServerObliviousResponse(
"test response", gateway_request_context);
ASSERT_TRUE(encapsulate_resp_on_gateway.ok());
ASSERT_FALSE(
encapsulate_resp_on_gateway->EncapsulateAndSerialize().empty());
auto client_request_context =
std::move(encrypted_request.value()).ReleaseContext();
auto decrypted_response = client_.DecryptObliviousHttpResponse(
encapsulate_resp_on_gateway->EncapsulateAndSerialize(),
client_request_context);
ASSERT_TRUE(decrypted_response.ok());
ASSERT_FALSE(decrypted_response->GetPlaintextData().empty());
}
private:
const ObliviousHttpClient& client_;
std::string request_payload_;
ObliviousHttpHeaderKeyConfig ohttp_key_config_;
};
auto ohttp_key_config =
GetOhttpKeyConfig(1, EVP_HPKE_DHKEM_X25519_HKDF_SHA256,
EVP_HPKE_HKDF_SHA256, EVP_HPKE_AES_256_GCM);
auto client =
ObliviousHttpClient::Create(GetHpkePublicKey(), ohttp_key_config);
TestQuicheThread t1(*client, "test request 1", ohttp_key_config);
TestQuicheThread t2(*client, "test request 2", ohttp_key_config);
t1.Start();
t2.Start();
t1.Join();
t2.Join();
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/oblivious_http/oblivious_http_client.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/oblivious_http/oblivious_http_client_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
2007a2bf-7f3f-423a-b1e9-e55eef2b0485 | cpp | google/quiche | hpack_varint_encoder | quiche/http2/hpack/varint/hpack_varint_encoder.cc | quiche/http2/hpack/varint/hpack_varint_encoder_test.cc | #include "quiche/http2/hpack/varint/hpack_varint_encoder.h"
#include <limits>
#include <string>
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
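// Encodes |varint| into |output| using the HPACK prefixed-integer scheme
// (RFC 7541, Section 5.1): if the value fits in the low |prefix_length| bits
// of the first byte it is stored there directly; otherwise those bits are set
// to all ones and the remainder is appended as 7-bit groups, least significant
// first, with the continuation bit (0x80) on every byte except the last.
//
// Worked example, matching the {5-bit prefix, 1337, "1f9a0a"} entry in
// kLongTestData of the companion unit test:
//   prefix_mask = 0b00011111 = 31; 1337 >= 31, so the first byte is 0x1f.
//   1337 - 31 = 1306; 1306 % 128 = 26 -> emit 0x80 | 26 = 0x9a; 1306 >> 7 = 10.
//   10 < 128 -> emit 0x0a. Encoded bytes: 1f 9a 0a.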
void HpackVarintEncoder::Encode(uint8_t high_bits, uint8_t prefix_length,
uint64_t varint, std::string* output) {
QUICHE_DCHECK_LE(1u, prefix_length);
QUICHE_DCHECK_LE(prefix_length, 8u);
const uint8_t prefix_mask = (1 << prefix_length) - 1;
QUICHE_DCHECK_EQ(0, high_bits & prefix_mask);
if (varint < prefix_mask) {
unsigned char first_byte = high_bits | static_cast<unsigned char>(varint);
output->push_back(first_byte);
return;
}
unsigned char first_byte = high_bits | prefix_mask;
output->push_back(first_byte);
varint -= prefix_mask;
while (varint >= 128) {
output->push_back(0b10000000 | (varint % 128));
varint >>= 7;
}
output->push_back(varint);
}
} | #include "quiche/http2/hpack/varint/hpack_varint_encoder.h"
#include <cstddef>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
struct {
uint8_t high_bits;
uint8_t prefix_length;
uint64_t value;
uint8_t expected_encoding;
} kShortTestData[] = {{0b10110010, 1, 0, 0b10110010},
{0b10101100, 2, 2, 0b10101110},
{0b10100000, 3, 6, 0b10100110},
{0b10110000, 4, 13, 0b10111101},
{0b10100000, 5, 8, 0b10101000},
{0b11000000, 6, 48, 0b11110000},
{0b10000000, 7, 99, 0b11100011},
{0b00000000, 5, 10, 0b00001010}};
TEST(HpackVarintEncoderTest, Short) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(kShortTestData); ++i) {
std::string output;
HpackVarintEncoder::Encode(kShortTestData[i].high_bits,
kShortTestData[i].prefix_length,
kShortTestData[i].value, &output);
ASSERT_EQ(1u, output.size());
EXPECT_EQ(kShortTestData[i].expected_encoding,
static_cast<uint8_t>(output[0]));
}
}
struct {
uint8_t high_bits;
uint8_t prefix_length;
uint64_t value;
const char* expected_encoding;
} kLongTestData[] = {
{0b10011000, 3, 103, "9f60"},
{0b10010000, 4, 57, "9f2a"},
{0b11000000, 5, 158, "df7f"},
{0b01000000, 6, 65, "7f02"},
{0b00000000, 7, 200, "7f49"},
{0b10011000, 3, 12345, "9fb260"},
{0b10010000, 4, 5401, "9f8a2a"},
{0b11000000, 5, 16327, "dfa87f"},
{0b01000000, 6, 399, "7fd002"},
{0b00000000, 7, 9598, "7fff49"},
{0b10011000, 3, 1579281, "9f8ab260"},
{0b10010000, 4, 689488, "9fc18a2a"},
{0b11000000, 5, 2085964, "dfada87f"},
{0b01000000, 6, 43103, "7fa0d002"},
{0b00000000, 7, 1212541, "7ffeff49"},
{0b10011000, 3, 202147110, "9f9f8ab260"},
{0b10010000, 4, 88252593, "9fa2c18a2a"},
{0b11000000, 5, 266999535, "dfd0ada87f"},
{0b01000000, 6, 5509304, "7ff9a0d002"},
{0b00000000, 7, 155189149, "7f9efeff49"},
{0b10011000, 3, 3311978140938, "9f83aa9f8ab260"},
{0b10010000, 4, 1445930244223, "9ff0b0a2c18a2a"},
{0b11000000, 5, 4374519874169, "dfda84d0ada87f"},
{0b01000000, 6, 90263420404, "7fb5fbf9a0d002"},
{0b00000000, 7, 2542616951118, "7fcff19efeff49"},
{0b10011000, 3, 54263449861016696, "9ff19883aa9f8ab260"},
{0b10010000, 4, 23690121121119891, "9f84fdf0b0a2c18a2a"},
{0b11000000, 5, 71672133617889215, "dfa0dfda84d0ada87f"},
{0b01000000, 6, 1478875878881374, "7f9ff0b5fbf9a0d002"},
{0b00000000, 7, 41658236125045114, "7ffbc1cff19efeff49"},
{0b10011000, 3, 12832019021693745307u, "9f94f1f19883aa9f8ab201"},
{0b10010000, 4, 9980690937382242223u, "9fa08f84fdf0b0a2c18a01"},
{0b11000000, 5, 12131360551794650846u, "dfbfdda0dfda84d0ada801"},
{0b01000000, 6, 15006530362736632796u, "7f9dc79ff0b5fbf9a0d001"},
{0b00000000, 7, 18445754019193211014u, "7f8790fbc1cff19efeff01"},
{0b10011000, 3, 18446744073709551615u, "9ff8ffffffffffffffff01"},
{0b10010000, 4, 18446744073709551615u, "9ff0ffffffffffffffff01"},
{0b11000000, 5, 18446744073709551615u, "dfe0ffffffffffffffff01"},
{0b01000000, 6, 18446744073709551615u, "7fc0ffffffffffffffff01"},
{0b00000000, 7, 18446744073709551615u, "7f80ffffffffffffffff01"},
{0b00000000, 5, 1337, "1f9a0a"},
};
TEST(HpackVarintEncoderTest, Long) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(kLongTestData); ++i) {
std::string expected_encoding;
ASSERT_TRUE(absl::HexStringToBytes(kLongTestData[i].expected_encoding,
&expected_encoding));
std::string output;
HpackVarintEncoder::Encode(kLongTestData[i].high_bits,
kLongTestData[i].prefix_length,
kLongTestData[i].value, &output);
EXPECT_EQ(expected_encoding, output);
}
}
struct {
uint8_t high_bits;
uint8_t prefix_length;
uint64_t value;
uint8_t expected_encoding_first_byte;
} kLastByteIsZeroTestData[] = {
{0b10110010, 1, 1, 0b10110011}, {0b10101100, 2, 3, 0b10101111},
{0b10101000, 3, 7, 0b10101111}, {0b10110000, 4, 15, 0b10111111},
{0b10100000, 5, 31, 0b10111111}, {0b11000000, 6, 63, 0b11111111},
{0b10000000, 7, 127, 0b11111111}, {0b00000000, 8, 255, 0b11111111}};
TEST(HpackVarintEncoderTest, LastByteIsZero) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(kLastByteIsZeroTestData); ++i) {
std::string output;
HpackVarintEncoder::Encode(kLastByteIsZeroTestData[i].high_bits,
kLastByteIsZeroTestData[i].prefix_length,
kLastByteIsZeroTestData[i].value, &output);
ASSERT_EQ(2u, output.size());
EXPECT_EQ(kLastByteIsZeroTestData[i].expected_encoding_first_byte,
static_cast<uint8_t>(output[0]));
EXPECT_EQ(0b00000000, output[1]);
}
}
TEST(HpackVarintEncoderTest, Append) {
std::string output("foo");
std::string expected_encoding;
ASSERT_TRUE(absl::HexStringToBytes("666f6f", &expected_encoding));
EXPECT_EQ(expected_encoding, output);
HpackVarintEncoder::Encode(0b10011000, 3, 103, &output);
ASSERT_TRUE(absl::HexStringToBytes("666f6f9f60", &expected_encoding));
EXPECT_EQ(expected_encoding, output);
HpackVarintEncoder::Encode(0b10100000, 5, 8, &output);
ASSERT_TRUE(absl::HexStringToBytes("666f6f9f60a8", &expected_encoding));
EXPECT_EQ(expected_encoding, output);
HpackVarintEncoder::Encode(0b10011000, 3, 202147110, &output);
ASSERT_TRUE(
absl::HexStringToBytes("666f6f9f60a89f9f8ab260", &expected_encoding));
EXPECT_EQ(expected_encoding, output);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/varint/hpack_varint_encoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/varint/hpack_varint_encoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
131efe9b-b444-418c-9737-d0c1475dd8de | cpp | tensorflow/tensorflow | pjrt_attribute_map_util | third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util.cc | third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util_test.cc | #include "xla/python/pjrt_ifrt/pjrt_attribute_map_util.h"
#include <cstdint>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt/attribute_map.h"
namespace xla {
namespace ifrt {
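// Converts between PjRt's attribute map (std::variant-valued
// xla::PjRtValueType) and IFRT's AttributeMap by dispatching on the held
// alternative with std::visit; string and int64-list payloads are moved rather
// than copied.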
AttributeMap FromPjRtAttributeMap(
absl::flat_hash_map<std::string, xla::PjRtValueType> attributes) {
AttributeMap::Map result;
result.reserve(attributes.size());
for (auto& item : attributes) {
std::visit(
[&](auto& value) {
using T = std::decay_t<decltype(value)>;
const auto& key = item.first;
if constexpr (std::is_same_v<T, std::string>) {
result.insert({key, AttributeMap::StringValue(std::move(value))});
} else if constexpr (std::is_same_v<T, bool>) {
result.insert({key, AttributeMap::BoolValue(value)});
} else if constexpr (std::is_same_v<T, int64_t>) {
result.insert({key, AttributeMap::Int64Value(value)});
} else if constexpr (std::is_same_v<T, std::vector<int64_t>>) {
result.insert(
{key, AttributeMap::Int64ListValue(std::move(value))});
} else if constexpr (std::is_same_v<T, float>) {
result.insert({key, AttributeMap::FloatValue(value)});
}
},
item.second);
}
return AttributeMap(std::move(result));
}
absl::flat_hash_map<std::string, xla::PjRtValueType> ToPjRtAttributeMap(
AttributeMap attributes) {
absl::flat_hash_map<std::string, xla::PjRtValueType> result;
result.reserve(attributes.map().size());
for (auto& item : attributes.map()) {
std::visit(
[&](auto& value) {
using T = std::decay_t<decltype(value)>;
const auto& key = item.first;
if constexpr (std::is_same_v<T, AttributeMap::StringValue>) {
result.insert({key, std::move(value.value)});
} else if constexpr (std::is_same_v<T, AttributeMap::BoolValue>) {
result.insert({key, value.value});
} else if constexpr (std::is_same_v<T, AttributeMap::Int64Value>) {
result.insert({key, value.value});
} else if constexpr (std::is_same_v<T,
AttributeMap::Int64ListValue>) {
result.insert({key, std::move(value.value)});
} else if constexpr (std::is_same_v<T, AttributeMap::FloatValue>) {
result.insert({key, value.value});
}
},
item.second);
}
return result;
}
}
} | #include "xla/python/pjrt_ifrt/pjrt_attribute_map_util.h"
#include <cstdint>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "xla/pjrt/pjrt_common.h"
#include "xla/python/ifrt/attribute_map.h"
namespace xla {
namespace ifrt {
namespace {
TEST(PjRtAttributeMapUtilTest, FromPjRtAttributeMap) {
absl::flat_hash_map<std::string, PjRtValueType> pjrt_map({
{"string", xla::PjRtValueType(std::string("value"))},
{"bool", xla::PjRtValueType(true)},
{"int64", xla::PjRtValueType(int64_t{123})},
{"int64_list",
xla::PjRtValueType(std::vector<int64_t>({int64_t{1}, int64_t{2}}))},
{"float", xla::PjRtValueType(1.23f)},
});
EXPECT_EQ(FromPjRtAttributeMap(pjrt_map).map(),
AttributeMap::Map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list",
AttributeMap::Int64ListValue({int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
}));
}
TEST(PjRtAttributeMapUtilTest, ToPjRtAttributeMap) {
AttributeMap map({
{"string", AttributeMap::StringValue("value")},
{"bool", AttributeMap::BoolValue(true)},
{"int64", AttributeMap::Int64Value(123)},
{"int64_list", AttributeMap::Int64ListValue({int64_t{1}, int64_t{2}})},
{"float", AttributeMap::FloatValue(1.23f)},
});
EXPECT_EQ(
ToPjRtAttributeMap(map),
(absl::flat_hash_map<std::string, xla::PjRtValueType>({
{"string", xla::PjRtValueType(std::string("value"))},
{"bool", xla::PjRtValueType(true)},
{"int64", xla::PjRtValueType(int64_t{123})},
{"int64_list",
xla::PjRtValueType(std::vector<int64_t>({int64_t{1}, int64_t{2}}))},
{"float", xla::PjRtValueType(1.23f)},
})));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/python/pjrt_ifrt/pjrt_attribute_map_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a8cc5b4-5166-4312-8960-1ad8d88de028 | cpp | google/arolla | refcount | arolla/util/refcount.h | arolla/util/refcount_test.cc | #ifndef AROLLA_UTIL_REFCOUNT_H_
#define AROLLA_UTIL_REFCOUNT_H_
#include <atomic>
#include <cstdint>
namespace arolla {
class Refcount {
public:
constexpr Refcount() noexcept : count_{1} {}
void increment() noexcept { count_.fetch_add(1, std::memory_order_relaxed); }
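  // Returns true while other references remain, and false when this call
  // released the last reference (the counter dropped from 1 to 0); the caller
  // is then responsible for destroying the shared object.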
[[nodiscard]] bool decrement() noexcept {
return count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
}
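  // Variant of decrement() skewed toward the uniquely-owned case: if the
  // acquire load already observes a count of 1, the atomic read-modify-write
  // is skipped and false is returned immediately, presumably on the assumption
  // that a thread observing a count of 1 while holding a reference is the sole
  // owner, so no concurrent increment can occur.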
[[nodiscard]] bool skewed_decrement() noexcept {
auto refcount = count_.load(std::memory_order_acquire);
return refcount != 1 && count_.fetch_sub(1, std::memory_order_acq_rel) != 1;
}
struct TestOnly {};
constexpr Refcount(TestOnly, int initial_count) noexcept
: count_{initial_count} {}
private:
std::atomic<int32_t> count_;
};
}
#endif | #include "arolla/util/refcount.h"
#include "gtest/gtest.h"
namespace arolla {
namespace {
TEST(RefcountTest, Decrement) {
{
Refcount refcount;
EXPECT_FALSE(refcount.decrement());
}
{
Refcount refcount;
EXPECT_FALSE(refcount.skewed_decrement());
}
}
TEST(RefcountTest, IncrementDecrement) {
constexpr int N = 10;
{
Refcount refcount;
for (int i = 0; i < N; ++i) {
refcount.increment();
}
for (int i = 0; i < N; ++i) {
ASSERT_TRUE(refcount.decrement());
}
ASSERT_FALSE(refcount.decrement());
}
{
Refcount refcount;
for (int i = 0; i < N; ++i) {
refcount.increment();
}
for (int i = 0; i < N; ++i) {
ASSERT_TRUE(refcount.skewed_decrement());
}
ASSERT_FALSE(refcount.skewed_decrement());
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/refcount.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/refcount_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
c592c6a0-9729-40a5-b62b-7619780bc1a4 | cpp | abseil/abseil-cpp | leak_check | absl/debugging/leak_check.cc | absl/debugging/leak_check_test.cc | #include "absl/debugging/leak_check.h"
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#if defined(ABSL_HAVE_LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#if ABSL_HAVE_ATTRIBUTE_WEAK
extern "C" ABSL_ATTRIBUTE_WEAK int __lsan_is_turned_off();
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
bool HaveLeakSanitizer() { return true; }
#if ABSL_HAVE_ATTRIBUTE_WEAK
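// __lsan_is_turned_off is only weakly declared above, so its address is null
// when the leak sanitizer runtime does not provide it. The checker is reported
// active unless the symbol exists and returns non-zero (leak checking disabled
// at runtime).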
bool LeakCheckerIsActive() {
return !(&__lsan_is_turned_off && __lsan_is_turned_off());
}
#else
bool LeakCheckerIsActive() { return true; }
#endif
bool FindAndReportLeaks() { return __lsan_do_recoverable_leak_check() != 0; }
void DoIgnoreLeak(const void* ptr) { __lsan_ignore_object(ptr); }
void RegisterLivePointers(const void* ptr, size_t size) {
__lsan_register_root_region(ptr, size);
}
void UnRegisterLivePointers(const void* ptr, size_t size) {
__lsan_unregister_root_region(ptr, size);
}
LeakCheckDisabler::LeakCheckDisabler() { __lsan_disable(); }
LeakCheckDisabler::~LeakCheckDisabler() { __lsan_enable(); }
ABSL_NAMESPACE_END
}
#else
namespace absl {
ABSL_NAMESPACE_BEGIN
bool HaveLeakSanitizer() { return false; }
bool LeakCheckerIsActive() { return false; }
void DoIgnoreLeak(const void*) { }
void RegisterLivePointers(const void*, size_t) { }
void UnRegisterLivePointers(const void*, size_t) { }
LeakCheckDisabler::LeakCheckDisabler() = default;
LeakCheckDisabler::~LeakCheckDisabler() = default;
ABSL_NAMESPACE_END
}
#endif | #include <string>
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/debugging/leak_check.h"
#include "absl/log/log.h"
namespace {
TEST(LeakCheckTest, IgnoreLeakSuppressesLeakedMemoryErrors) {
if (!absl::LeakCheckerIsActive()) {
GTEST_SKIP() << "LeakChecker is not active";
}
auto foo = absl::IgnoreLeak(new std::string("some ignored leaked string"));
LOG(INFO) << "Ignoring leaked string " << foo;
}
TEST(LeakCheckTest, LeakCheckDisablerIgnoresLeak) {
if (!absl::LeakCheckerIsActive()) {
GTEST_SKIP() << "LeakChecker is not active";
}
absl::LeakCheckDisabler disabler;
auto foo = new std::string("some string leaked while checks are disabled");
LOG(INFO) << "Ignoring leaked string " << foo;
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/leak_check.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/debugging/leak_check_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
7681854c-90d0-4abf-9fa0-5e36c5c9fe1f | cpp | google/cel-cpp | activation_bind_helper | eval/public/activation_bind_helper.cc | eval/public/activation_bind_helper_test.cc | #include "eval/public/activation_bind_helper.h"
#include "absl/status/status.h"
#include "eval/public/containers/field_access.h"
#include "eval/public/containers/field_backed_list_impl.h"
#include "eval/public/containers/field_backed_map_impl.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using google::protobuf::Arena;
using google::protobuf::Message;
using google::protobuf::FieldDescriptor;
using google::protobuf::Descriptor;
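// Wraps a single proto field of |msg| as a CelValue: map fields become an
// arena-allocated FieldBackedMapImpl, repeated fields a FieldBackedListImpl,
// and all other fields are delegated to CreateValueFromSingleField. Tying the
// wrappers to |arena| keeps their lifetime bound to the evaluation arena.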
absl::Status CreateValueFromField(const google::protobuf::Message* msg,
const FieldDescriptor* field_desc,
google::protobuf::Arena* arena, CelValue* result) {
if (field_desc->is_map()) {
*result = CelValue::CreateMap(google::protobuf::Arena::Create<FieldBackedMapImpl>(
arena, msg, field_desc, arena));
return absl::OkStatus();
} else if (field_desc->is_repeated()) {
*result = CelValue::CreateList(google::protobuf::Arena::Create<FieldBackedListImpl>(
arena, msg, field_desc, arena));
return absl::OkStatus();
} else {
return CreateValueFromSingleField(msg, field_desc, arena, result);
}
}
}
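// Binds every field of |message| into |activation| under the field's name.
// With ProtoUnsetFieldOptions::kSkip, unset singular fields are omitted; with
// kBindDefault they are bound to their default values. Repeated and map fields
// are always bound, so an unset repeated field appears as an empty list.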
absl::Status BindProtoToActivation(const Message* message, Arena* arena,
Activation* activation,
ProtoUnsetFieldOptions options) {
if (arena == nullptr) {
return absl::InvalidArgumentError(
"arena must not be null for BindProtoToActivation.");
}
const Descriptor* desc = message->GetDescriptor();
const google::protobuf::Reflection* reflection = message->GetReflection();
for (int i = 0; i < desc->field_count(); i++) {
CelValue value;
const FieldDescriptor* field_desc = desc->field(i);
if (options == ProtoUnsetFieldOptions::kSkip) {
if (!field_desc->is_repeated() &&
!reflection->HasField(*message, field_desc)) {
continue;
}
}
auto status = CreateValueFromField(message, field_desc, arena, &value);
if (!status.ok()) {
return status;
}
activation->InsertValue(field_desc->name(), value);
}
return absl::OkStatus();
}
}
}
}
} | #include "eval/public/activation_bind_helper.h"
#include "absl/status/status.h"
#include "eval/public/activation.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using testutil::EqualsProto;
TEST(ActivationBindHelperTest, TestSingleBoolBind) {
TestMessage message;
message.set_bool_value(true);
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation));
auto result = activation.FindValue("bool_value", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsBool());
EXPECT_EQ(value.BoolOrDie(), true);
}
TEST(ActivationBindHelperTest, TestSingleInt32Bind) {
TestMessage message;
message.set_int32_value(42);
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation));
auto result = activation.FindValue("int32_value", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsInt64());
EXPECT_EQ(value.Int64OrDie(), 42);
}
TEST(ActivationBindHelperTest, TestUnsetRepeatedIsEmptyList) {
TestMessage message;
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation));
auto result = activation.FindValue("int32_list", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsList());
EXPECT_TRUE(value.ListOrDie()->empty());
}
TEST(ActivationBindHelperTest, TestSkipUnsetFields) {
TestMessage message;
message.set_int32_value(42);
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation,
ProtoUnsetFieldOptions::kSkip));
auto result = activation.FindValue("int32_value", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsInt64());
EXPECT_EQ(value.Int64OrDie(), 42);
result = activation.FindValue("message_value", &arena);
ASSERT_FALSE(result.has_value());
}
TEST(ActivationBindHelperTest, TestBindDefaultFields) {
TestMessage message;
message.set_int32_value(42);
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK(BindProtoToActivation(&message, &arena, &activation,
ProtoUnsetFieldOptions::kBindDefault));
auto result = activation.FindValue("int32_value", &arena);
ASSERT_TRUE(result.has_value());
CelValue value = result.value();
ASSERT_TRUE(value.IsInt64());
EXPECT_EQ(value.Int64OrDie(), 42);
result = activation.FindValue("message_value", &arena);
ASSERT_TRUE(result.has_value());
EXPECT_NE(nullptr, result->MessageOrDie());
EXPECT_THAT(TestMessage::default_instance(),
EqualsProto(*result->MessageOrDie()));
}
TEST(ActivationBindHelperTest, RejectsNullArena) {
TestMessage message;
message.set_bool_value(true);
Activation activation;
ASSERT_EQ(BindProtoToActivation(&message, nullptr, &activation),
absl::InvalidArgumentError(
"arena must not be null for BindProtoToActivation."));
}
}
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/activation_bind_helper.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/activation_bind_helper_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
ecb37921-f9b0-4f3c-870b-c6cfd0c1c1f1 | cpp | tensorflow/tensorflow | remapper | tensorflow/core/grappler/optimizers/remapper.cc | tensorflow/core/grappler/optimizers/remapper_test.cc | #include "tensorflow/core/grappler/optimizers/remapper.h"
#include <algorithm>
#include <cstdlib>
#include <map>
#include <set>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/costs/graph_properties.h"
#include "tensorflow/core/grappler/graph_view.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/constant_folding.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/pattern_utils.h"
#include "tensorflow/core/grappler/utils/symbolic_shapes.h"
#include "tensorflow/core/grappler/utils/topological_sort.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/protobuf/rewriter_config.pb.h"
#include "tensorflow/core/util/env_var.h"
#include "tensorflow/core/util/use_cudnn.h"
#include "tsl/platform/errors.h"
#ifdef INTEL_MKL
#include "tensorflow/core/util/mkl_heuristics.h"
#endif
#include "tensorflow/core/util/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif
namespace tensorflow {
namespace grappler {
namespace {
constexpr char kFusedConv2D[] = "_FusedConv2D";
constexpr char kFusedConv3D[] = "_FusedConv3D";
constexpr char kFusedMatMul[] = "_FusedMatMul";
constexpr char kFusedDepthwiseConv2dNative[] = "_FusedDepthwiseConv2dNative";
constexpr char kFusedBatchNormEx[] = "_FusedBatchNormEx";
constexpr char kFusedBatchNormGradEx[] = "_FusedBatchNormGradEx";
constexpr char kTensorToHashBucket[] = "_TensorToHashBucketFast";
constexpr char kLeakyRelu[] = "LeakyRelu";
constexpr char kMklFusedMish[] = "_MklFusedMish";
constexpr char kRelu[] = "Relu";
constexpr char kRelu6[] = "Relu6";
constexpr char kElu[] = "Elu";
constexpr char kDataFormat[] = "data_format";
constexpr char kIsTraining[] = "is_training";
constexpr char kWidth[] = "width";
constexpr char kFill[] = "fill";
constexpr int kMissingIndex = -1;
struct RemapperContext {
explicit RemapperContext(GrapplerItem* item, Status* status,
RewriterConfig::CpuLayout cpu_layout_conversion,
bool xla_auto_clustering_on,
bool xla_cpu_jit_disable_fusion)
: nodes_to_preserve(item->NodesToPreserve()),
graph_view(&item->graph, status),
graph_properties(*item),
inferred_graph_properties(false),
cpu_layout_conversion(cpu_layout_conversion),
xla_auto_clustering_on(xla_auto_clustering_on),
xla_cpu_jit_disable_fusion(xla_cpu_jit_disable_fusion) {}
std::unordered_set<string> nodes_to_preserve;
utils::MutableGraphView graph_view;
GraphProperties graph_properties;
bool inferred_graph_properties;
RewriterConfig::CpuLayout cpu_layout_conversion;
bool xla_auto_clustering_on;
bool xla_cpu_jit_disable_fusion;
};
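// Each of the structs below records one matched subgraph pattern. The fields
// are node indices into the graph held by RemapperContext::graph_view, and
// kMissingIndex marks a node that is not part of the match.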
struct FusedBatchNorm {
FusedBatchNorm() = default;
explicit FusedBatchNorm(int fused_batch_norm)
: fused_batch_norm(fused_batch_norm) {}
int fused_batch_norm = kMissingIndex;
};
struct FusedBatchNormEx {
FusedBatchNormEx() = default;
int fused_batch_norm = kMissingIndex;
int side_input = kMissingIndex;
int activation = kMissingIndex;
int invalidated = kMissingIndex;
};
struct FusedBatchNormGradEx {
int fused_batch_norm_grad = kMissingIndex;
int activation_grad = kMissingIndex;
int side_input_grad = kMissingIndex;
int fwd_fused_batch_norm = kMissingIndex;
};
struct TensorToHashBucket {
TensorToHashBucket() = default;
explicit TensorToHashBucket(int op1, int op2, int op3)
: pre_as_string(op1), as_string(op2), string_to_hash_bucket(op3) {}
int pre_as_string = kMissingIndex;
int as_string = kMissingIndex;
int string_to_hash_bucket = kMissingIndex;
};
struct PadWithConv3D {
PadWithConv3D() = default;
PadWithConv3D(int contraction_idx, int pad_idx, int padding_const_idx)
: contraction_idx(contraction_idx),
pad_idx(pad_idx),
padding_const_idx(padding_const_idx) {}
int contraction_idx = kMissingIndex;
int pad_idx = kMissingIndex;
int padding_const_idx = kMissingIndex;
};
struct ContractionWithBiasAdd {
ContractionWithBiasAdd() = default;
ContractionWithBiasAdd(int contraction, int bias_add, int bias_port)
: contraction(contraction), bias_add(bias_add), bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int bias_port = 1;
};
struct ContractionWithActivation {
ContractionWithActivation() = default;
ContractionWithActivation(int contraction, int activation)
: contraction(contraction), activation(activation) {}
int contraction = kMissingIndex;
int activation = kMissingIndex;
};
struct ContractionWithBiasAddAndActivation {
ContractionWithBiasAddAndActivation() = default;
ContractionWithBiasAddAndActivation(int contraction, int bias_add,
int activation, int bias_port)
: contraction(contraction),
bias_add(bias_add),
activation(activation),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int activation = kMissingIndex;
int bias_port = 1;
};
struct ContractionWithSqueezeAndBiasAdd {
ContractionWithSqueezeAndBiasAdd() = default;
ContractionWithSqueezeAndBiasAdd(int contraction, int squeeze, int bias_add)
: contraction(contraction), squeeze(squeeze), bias_add(bias_add) {}
int contraction = kMissingIndex;
int squeeze = kMissingIndex;
int bias_add = kMissingIndex;
};
struct ContractionWithBatchNorm {
ContractionWithBatchNorm() = default;
ContractionWithBatchNorm(int contraction, int fused_batch_norm,
float epsilon = 0.0)
: contraction(contraction),
fused_batch_norm(fused_batch_norm),
epsilon(epsilon) {}
int contraction = kMissingIndex;
int fused_batch_norm = kMissingIndex;
float epsilon = 0.0;
};
struct ContractionWithBatchNormAndActivation {
ContractionWithBatchNormAndActivation() = default;
ContractionWithBatchNormAndActivation(int contraction, int fused_batch_norm,
int activation, float epsilon = 0.0)
: contraction(contraction),
fused_batch_norm(fused_batch_norm),
activation(activation),
epsilon(epsilon) {}
int contraction = kMissingIndex;
int fused_batch_norm = kMissingIndex;
int activation = kMissingIndex;
float epsilon = 0.0;
};
struct ContractionWithBiasAddAndAdd {
ContractionWithBiasAddAndAdd() = default;
ContractionWithBiasAddAndAdd(int contraction, int bias_add, int add,
int port_id, int bias_port)
: contraction(contraction),
bias_add(bias_add),
add(add),
port_id(port_id),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int add = kMissingIndex;
int port_id = 0;
int bias_port = 1;
};
struct ContractionWithBiasAndAddActivation {
ContractionWithBiasAndAddActivation() = default;
ContractionWithBiasAndAddActivation(int contraction, int bias_add, int add,
int port_id, int activation,
int bias_port)
: contraction(contraction),
bias_add(bias_add),
add(add),
port_id(port_id),
activation(activation),
bias_port(bias_port) {}
int contraction = kMissingIndex;
int bias_add = kMissingIndex;
int add = kMissingIndex;
int port_id = 0;
int activation = kMissingIndex;
int bias_port = 1;
};
bool IsInPreserveSet(const RemapperContext& ctx, const NodeDef* node) {
return ctx.nodes_to_preserve.count(node->name()) > 0;
}
bool HaveSameDataType(const NodeDef* lhs, const NodeDef* rhs,
const string& type_attr = "T") {
DataType lhs_attr = GetDataTypeFromAttr(*lhs, type_attr);
DataType rhs_attr = GetDataTypeFromAttr(*rhs, type_attr);
return lhs_attr != DT_INVALID && rhs_attr != DT_INVALID &&
lhs_attr == rhs_attr;
}
bool HasDataType(const NodeDef* node, const DataType& expected,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*node, type_attr);
return dtype == expected;
}
bool IsCpuCompatibleDataType(const NodeDef* contraction,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*contraction, type_attr);
bool is_one_dnn_enabled = IsMKLEnabled();
if (is_one_dnn_enabled) {
bool is_supported_matmul = false;
if (IsMatMul(*contraction)) {
is_supported_matmul = (dtype == DT_BFLOAT16)
? contraction->attr().contains("transpose_a") &&
!contraction->attr().at("transpose_a").b()
: true;
}
return ((IsConv2D(*contraction) || IsDepthwiseConv2dNative(*contraction) ||
IsConv3D(*contraction) || IsAnyBatchMatMul(*contraction) ||
is_supported_matmul) &&
IsDataTypeSupportedByOneDNNOnThisCPU(dtype));
}
if (IsConv2D(*contraction)) {
return dtype == DT_FLOAT || dtype == DT_DOUBLE;
} else if (IsMatMul(*contraction)) {
return dtype == DT_FLOAT;
} else {
return false;
}
}
bool IsGpuCompatibleDataType(const NodeDef* contraction,
const string& type_attr = "T") {
DataType dtype = GetDataTypeFromAttr(*contraction, type_attr);
if (IsConv2D(*contraction) || IsMatMul(*contraction)) {
return dtype == DT_FLOAT || dtype == DT_HALF;
} else {
return false;
}
}
bool IsCpuCompatibleDataFormat(const RemapperContext& ctx,
const NodeDef* conv_node) {
const string& data_format = conv_node->attr().at(kDataFormat).s();
if (IsConv2D(*conv_node)) {
return data_format == "NHWC" || (IsMKLEnabled() && data_format == "NCHW") ||
(ctx.cpu_layout_conversion == RewriterConfig::NHWC_TO_NCHW &&
data_format == "NCHW");
} else if (IsConv3D(*conv_node)) {
return data_format == "NDHWC" || (IsMKLEnabled() && data_format == "NCDHW");
} else {
return false;
}
}
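// Reads the TF_USE_CUBLASLT environment variable once (the lambda runs only on
// first use and the result is cached in a function-local static) to decide
// whether the BlasLt (cuBLASLt) matmul fusion path is enabled.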
bool BlasLtMatmulEnabled() {
static bool is_enabled = [] {
bool is_enabled = false;
TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar(
"TF_USE_CUBLASLT", false, &is_enabled));
return is_enabled;
}();
return is_enabled;
}
bool IsGpuCompatibleDataFormat(const RemapperContext& ctx,
const NodeDef* conv2d) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
const string& data_format = conv2d->attr().at(kDataFormat).s();
return data_format == "NHWC" || data_format == "NCHW";
}
bool IsCpuCompatibleConv2D(const RemapperContext& ctx, const NodeDef* conv2d) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
return NodeIsOnCpu(conv2d) && IsCpuCompatibleDataType(conv2d) &&
IsCpuCompatibleDataFormat(ctx, conv2d);
}
bool IsCpuCompatibleConv3D(const RemapperContext& ctx, const NodeDef* conv3d) {
DCHECK(IsConv3D(*conv3d)) << "Expected Conv3D op";
return NodeIsOnCpu(conv3d) && IsCpuCompatibleDataType(conv3d) &&
IsCpuCompatibleDataFormat(ctx, conv3d);
}
bool IsGpuCompatibleConv2D(const RemapperContext& ctx, const NodeDef* conv2d,
const NodeDef* activation) {
DCHECK(IsConv2D(*conv2d)) << "Expected Conv2D op";
if (IsRelu(*activation)) {
return NodeIsOnGpu(conv2d) && IsGpuCompatibleDataType(conv2d) &&
IsGpuCompatibleDataFormat(ctx, conv2d);
} else if (IsRelu6(*activation) || IsElu(*activation) ||
IsLeakyRelu(*activation)) {
DataType dtype = GetDataTypeFromAttr(*conv2d, "T");
const string& data_format = conv2d->attr().at(kDataFormat).s();
return NodeIsOnGpu(conv2d) && dtype == DT_HALF && data_format == "NHWC";
}
return false;
}
bool IsGpuCompatibleMatMul(const RemapperContext& ctx, const NodeDef* matmul,
const NodeDef* activation) {
DCHECK(IsMatMul(*matmul)) << "Expected MatMul op";
if (activation == nullptr || IsRelu(*activation)) {
return BlasLtMatmulEnabled() && NodeIsOnGpu(matmul) &&
IsGpuCompatibleDataType(matmul);
} else if (IsTanh(*activation) || IsSigmoid(*activation)) {
DataType dtype = GetDataTypeFromAttr(*matmul, "T");
return NodeIsOnGpu(matmul) && dtype == DT_HALF;
}
return false;
}
bool IsCpuCompatibleMatMul(const RemapperContext& ctx, const NodeDef* matmul) {
DCHECK(IsMatMul(*matmul)) << "Expected MatMul op";
return NodeIsOnCpu(matmul) && IsCpuCompatibleDataType(matmul);
}
bool IsCpuCompatibleDepthwiseConv2dNative(const NodeDef* dw_conv2d) {
DCHECK(IsDepthwiseConv2dNative(*dw_conv2d))
<< "Expected DepthwiseConv2dNative op";
return NodeIsOnCpu(dw_conv2d) && IsCpuCompatibleDataType(dw_conv2d);
}
template <typename Pattern>
bool IsCpuCompatible(const RemapperContext& ctx, const Pattern& matched) {
if (ctx.xla_cpu_jit_disable_fusion) return false;
const NodeDef& node = ctx.graph_view.graph()->node(matched.contraction);
if (IsConv2D(node)) {
return IsCpuCompatibleConv2D(ctx, &node);
} else if (IsDepthwiseConv2dNative(node)) {
return (IsMKLEnabled() && IsCpuCompatibleDepthwiseConv2dNative(&node));
} else if (IsMatMul(node)) {
return IsCpuCompatibleMatMul(ctx, &node);
} else if (IsConv3D(node)) {
return (IsMKLEnabled() && IsCpuCompatibleConv3D(ctx, &node));
} else {
return false;
}
}
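// Cudnn runtime-compiled fusion requires cuDNN >= 8.4, the cuDNN frontend API,
// and every GPU in the cluster to be Ampere (compute capability >= 8.0) or
// newer. The answer is computed from the first cluster this is called with and
// then cached in a function-local static.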
bool RuntimeFusionEnabled(const Cluster* cluster) {
static bool is_enabled = [&] {
#if CUDNN_VERSION >= 8400
if (!cluster) return false;
auto devices = cluster->GetDevices();
int num_gpus = 0;
int num_ampere = 0;
for (const auto& d : devices) {
if (d.second.type() == "GPU") {
num_gpus++;
auto cc_it = d.second.environment().find("architecture");
if (cc_it != d.second.environment().end()) {
double compute_capability = 0.0;
if (absl::SimpleAtod(cc_it->second, &compute_capability) &&
compute_capability >= 8.0) {
num_ampere++;
}
}
}
}
bool runtime_fusion_enabled = CudnnUseRuntimeFusion() &&
CudnnUseFrontend() && num_gpus > 0 &&
num_gpus == num_ampere;
if (CudnnUseRuntimeFusion() && !runtime_fusion_enabled) {
VLOG(1) << "Enabling Cudnn with runtime compilation requires the "
<< "Cudnn frontend and Ampere GPUs or later, but we got "
<< "Cudnn frontend is "
<< (CudnnUseFrontend() ? "enabled" : "disabled") << " and "
<< num_ampere << " Ampere GPU(s) out of total " << num_gpus
<< " GPU(s)";
}
return runtime_fusion_enabled;
#else
return false;
#endif
}();
return is_enabled;
}
bool IsSupportedActivation(const NodeDef& node, const Cluster* cluster) {
bool is_default_supported =
IsRelu(node) || IsRelu6(node) || IsElu(node) || IsLeakyRelu(node);
bool is_device_specific = (IsMKLEnabled() || RuntimeFusionEnabled(cluster)) &&
(IsTanh(node) || IsSigmoid(node));
return (is_default_supported || is_device_specific);
}
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithBiasAddAndActivation& matched,
const Cluster* cluster) {
#if TENSORFLOW_USE_ROCM
return false;
#endif
if (ctx.xla_auto_clustering_on) return false;
const GraphDef* graph = ctx.graph_view.graph();
const NodeDef& activation_node = graph->node(matched.activation);
if (!IsSupportedActivation(activation_node, cluster)) return false;
const NodeDef& contraction_node = graph->node(matched.contraction);
if (IsConv2D(contraction_node)) {
const std::vector<OpInfo::TensorProperties>& input_props =
ctx.graph_properties.GetInputProperties(contraction_node.name());
const TensorShapeProto& filter_shape =
input_props.size() >= 2 ? input_props[1].shape() : TensorShapeProto();
bool is_spatial_conv = Rank(filter_shape) == 4 &&
IsKnown(filter_shape.dim(0)) &&
IsKnown(filter_shape.dim(1)) &&
filter_shape.dim(0).size() != 1 &&
filter_shape.dim(1).size() != 1;
bool valid_channels = Rank(filter_shape) == 4 &&
IsKnown(filter_shape.dim(2)) &&
IsKnown(filter_shape.dim(3)) &&
filter_shape.dim(2).size() % 2 == 0 &&
filter_shape.dim(3).size() % 2 == 0;
return is_spatial_conv &&
(IsRelu(activation_node) ||
(RuntimeFusionEnabled(cluster) && valid_channels)) &&
IsGpuCompatibleConv2D(ctx, &contraction_node, &activation_node);
} else if (IsMatMul(contraction_node)) {
const std::vector<OpInfo::TensorProperties>& input_props =
ctx.graph_properties.GetInputProperties(contraction_node.name());
const TensorShapeProto& a_shape =
!input_props.empty() ? input_props[0].shape() : TensorShapeProto();
const TensorShapeProto& b_shape =
!input_props.empty() ? input_props[1].shape() : TensorShapeProto();
bool valid_dims = Rank(a_shape) == 2 && Rank(b_shape) == 2 &&
IsKnown(a_shape.dim(1)) &&
IsKnown(b_shape.dim(1)) &&
a_shape.dim(1).size() % 2 == 0 &&
b_shape.dim(1).size() % 2 == 0;
return (IsRelu(activation_node) ||
(RuntimeFusionEnabled(cluster) && valid_dims)) &&
IsGpuCompatibleMatMul(ctx, &contraction_node, &activation_node);
}
return false;
}
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithBiasAdd& matched,
const Cluster* cluster) {
#if TENSORFLOW_USE_ROCM && !TF_HIPBLASLT
return false;
#endif
if (ctx.xla_auto_clustering_on) return false;
const GraphDef* graph = ctx.graph_view.graph();
const NodeDef& contraction_node = graph->node(matched.contraction);
if (!IsMatMul(contraction_node)) return false;
return IsGpuCompatibleMatMul(ctx, &contraction_node, nullptr);
}
bool IsGpuCompatible(const RemapperContext& ctx,
const ContractionWithSqueezeAndBiasAdd& matched,
const Cluster* cluster) {
return false;
}
template <typename Pattern>
bool IsDeviceCompatible(const RemapperContext& ctx, Pattern& matched,
Cluster* cluster = nullptr) {
return IsCpuCompatible(ctx, matched) ||
IsGpuCompatible(ctx, matched, cluster);
}
std::string GetActivationName(const std::string& s) {
if (s == kMklFusedMish) {
return "Mish";
} else {
return s;
}
}
inline bool HasControlFaninOrFanout(const utils::MutableNodeView& node_view) {
return node_view.NumControllingFanins() > 0 ||
node_view.NumControlledFanouts() > 0;
}
inline bool HasAtMostOneFanoutAtPort0(const utils::MutableNodeView& node_view) {
return node_view.GetRegularFanout(0).size() <= 1;
}
inline bool HasAtMostOneDataFanoutAtPort0(
const utils::MutableNodeView& node_view) {
const auto predicate = [](const auto& fanout) -> bool {
const NodeDef* node = fanout.node_view()->node();
return !IsShape(*node) && !IsRank(*node);
};
return absl::c_count_if(node_view.GetRegularFanout(0), predicate) <= 1;
}
bool IsConvOrMatMul(const NodeDef& node) {
return IsConv2D(node) || IsDepthwiseConv2dNative(node) || IsMatMul(node) ||
IsConv3D(node);
}
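// Returns true if this Add node can be treated as a BiasAdd: one input comes
// from a channels-last Conv2D/Conv3D/DepthwiseConv2dNative/MatMul and the
// other broadcasts along the channel dimension only (all leading dimensions of
// size 1 and a matching trailing dimension). On success, bias_port is set to
// the index of the fanin that acts as the bias. Only applies with oneDNN
// enabled and for nodes placed on the CPU.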
bool IsBiasSemanticAdd(const RemapperContext& ctx,
const utils::MutableNodeView& node_view,
int& bias_port) {
if (!IsMKLEnabled()) return false;
const auto* node_def = node_view.node();
if (!NodeIsOnCpu(node_def)) return false;
if (!IsAdd(*node_def) || node_view.NumRegularFanins() != 2) return false;
const auto& props = ctx.graph_properties.GetInputProperties(node_def->name());
if (props.size() < 2) return false;
const auto& regular_fanin_0 = node_view.GetRegularFanin(0);
const auto* node_view_0 = regular_fanin_0.node_view();
const auto* node_def_0 = node_view_0->node();
const auto& regular_fanin_1 = node_view.GetRegularFanin(1);
const auto* node_view_1 = regular_fanin_1.node_view();
const auto* node_def_1 = node_view_1->node();
if (!IsConvOrMatMul(*node_def_0) && !IsConvOrMatMul(*node_def_1))
return false;
auto is_channel_last_format = [](const NodeDef& node) -> bool {
if (node.attr().contains("data_format")) {
const string data_format = node.attr().at("data_format").s();
return (data_format == "NHWC" || data_format == "NDHWC");
}
return true;
};
if (!is_channel_last_format(*node_def_0) ||
!is_channel_last_format(*node_def_1))
return false;
const TensorShapeProto& prot0_shape = props[0].shape();
const TensorShapeProto& prot1_shape = props[1].shape();
if (prot0_shape.unknown_rank() || prot1_shape.unknown_rank() ||
prot0_shape.dim_size() < 1 || prot1_shape.dim_size() < 1 ||
!IsKnown(prot0_shape.dim(prot0_shape.dim_size() - 1)) ||
!IsKnown(prot1_shape.dim(prot1_shape.dim_size() - 1)))
return false;
const auto is_supported_shape =
[&](const TensorShapeProto& shape,
const TensorShapeProto& bcast_shape) -> bool {
    int conv_channel_dim = shape.dim(shape.dim_size() - 1).size();
if (shape.dim_size() == 4 && bcast_shape.dim_size() > 4) return false;
if (shape.dim_size() == 5 && bcast_shape.dim_size() > 5) return false;
if (shape.dim_size() < 2) return false;
if (conv_channel_dim != bcast_shape.dim(bcast_shape.dim_size() - 1).size())
return false;
for (int i = 0; i < bcast_shape.dim_size() - 1; i++) {
if (1 != bcast_shape.dim(i).size()) return false;
}
return true;
};
if (ShapesSymbolicallyEqual(prot0_shape, prot1_shape) ||
!ShapesBroadcastable(prot0_shape, prot1_shape))
return false;
if (IsConvOrMatMul(*node_def_0)) {
bias_port = 1;
return (is_supported_shape(prot0_shape, prot1_shape));
} else if (IsConvOrMatMul(*node_def_1)) {
bias_port = 0;
return (is_supported_shape(prot1_shape, prot0_shape));
}
return false;
}
void AddInputShapesAttr(const RemapperContext& ctx, int node_index) {
auto mutable_node = ctx.graph_view.graph()->mutable_node(node_index);
AttrValue attr_input_shape;
auto tensor_properties =
ctx.graph_properties.GetInputProperties(mutable_node->name());
for (const auto& tensor_property : tensor_properties) {
TensorShapeProto* proto = attr_input_shape.mutable_list()->add_shape();
*proto = tensor_property.shape();
}
if (IsMKLEnabled() && !tensor_properties.empty()) {
(*mutable_node->mutable_attr())["_input_shapes"] =
std::move(attr_input_shape);
}
}
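// Matches contraction (Conv2D/Conv3D/MatMul/DepthwiseConv2dNative) followed by
// BiasAdd, or by an Add that IsBiasSemanticAdd accepts as a bias. The
// contraction must have no control edges, at most one fanout at its first
// output, the same dtype as the bias node, and must not be in the preserve
// set; when check_device_compatible is true the match is also rejected if the
// fusion is not supported on the node's device.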
bool FindContractionWithBias(const RemapperContext& ctx, int node_index,
ContractionWithBiasAdd* matched,
bool check_device_compatible = true) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
int bias_port = 1;
if (!IsBiasAdd(*node_def) && !IsBiasSemanticAdd(ctx, *node_view, bias_port))
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(1 - bias_port);
const auto* contraction_node_view = regular_fanin_0.node_view();
const auto* contraction_node_def = contraction_node_view->node();
bool is_contraction = IsConv2D(*contraction_node_def) ||
(IsConv3D(*contraction_node_def) && IsMKLEnabled()) ||
IsMatMul(*contraction_node_def) ||
IsDepthwiseConv2dNative(*contraction_node_def);
#ifdef DNNL_AARCH64_USE_ACL
if (IsDepthwiseConv2dNative(*contraction_node_def)) is_contraction = false;
#endif
if (!is_contraction || !HaveSameDataType(node_def, contraction_node_def) ||
HasControlFaninOrFanout(*contraction_node_view) ||
!HasAtMostOneFanoutAtPort0(*contraction_node_view) ||
IsInPreserveSet(ctx, contraction_node_def))
return false;
const ContractionWithBiasAdd pattern{contraction_node_view->node_index(),
node_index, bias_port};
if (check_device_compatible && !IsDeviceCompatible(ctx, pattern))
return false;
*matched = pattern;
return true;
}
bool FindFusedConvWithFusedActivation(const RemapperContext& ctx,
int node_index,
ContractionWithActivation* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!NodeIsOnCpu(node_def) && !IsMKLEnabled()) return false;
if (!IsLeakyRelu(*node_def) && !IsMklFusedMish(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* contraction_node_view = regular_fanin_0.node_view();
const auto* contraction_node_def = contraction_node_view->node();
if (!(contraction_node_def->op() == kFusedConv2D ||
contraction_node_def->op() == kFusedConv3D))
return false;
auto contraction_fused_ops_list =
contraction_node_def->attr().at("fused_ops").list().s();
for (auto it = contraction_fused_ops_list.begin();
it != contraction_fused_ops_list.end(); it++) {
if (*it == kLeakyRelu || *it == kMklFusedMish || *it == kRelu ||
*it == kRelu6 || *it == kElu) {
return false;
}
}
const ContractionWithActivation pattern{contraction_node_view->node_index(),
node_view->node_index()};
*matched = pattern;
return true;
}
bool FindContractionWithBiasAndActivation(
const RemapperContext& ctx, Cluster* cluster, int node_index,
ContractionWithBiasAddAndActivation* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!IsSupportedActivation(*node_def, cluster)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* bias_add_node_view = regular_fanin_0.node_view();
const auto* bias_add_node_def = bias_add_node_view->node();
ContractionWithBiasAdd base;
if (!FindContractionWithBias(ctx, bias_add_node_view->node_index(), &base,
false) ||
!HasAtMostOneFanoutAtPort0(*bias_add_node_view) ||
!HaveSameDataType(node_def, bias_add_node_def) ||
IsInPreserveSet(ctx, bias_add_node_def))
return false;
const auto* contraction_node_view =
bias_add_node_view->GetRegularFanin(1 - base.bias_port).node_view();
const auto* contraction_node_def = contraction_node_view->node();
if (!IsMatMul(*contraction_node_def) &&
(IsTanh(*node_def) || IsSigmoid(*node_def)))
return false;
if (!(IsConv2D(*contraction_node_def) || IsMatMul(*contraction_node_def) ||
(IsConv3D(*contraction_node_def) && IsMKLEnabled())) &&
IsLeakyRelu(*node_def))
return false;
const ContractionWithBiasAddAndActivation pattern{
base.contraction, base.bias_add, node_index, base.bias_port};
if (!IsDeviceCompatible(ctx, pattern, cluster)) return false;
*matched = pattern;
return true;
}
bool FindConvWithSqueezeAndBias(const RemapperContext& ctx, int node_index,
ContractionWithSqueezeAndBiasAdd* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!IsBiasAdd(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* squeeze_node_view = regular_fanin_0.node_view();
const auto* squeeze_node_def = squeeze_node_view->node();
if (!IsSqueeze(*squeeze_node_def) ||
!HaveSameDataType(node_def, squeeze_node_def, "T") ||
HasControlFaninOrFanout(*squeeze_node_view) ||
!HasAtMostOneFanoutAtPort0(*squeeze_node_view) ||
IsInPreserveSet(ctx, squeeze_node_def))
return false;
if (squeeze_node_view->NumRegularFanins() < 1) return false;
const auto& squeeze_regular_fanin_0 = squeeze_node_view->GetRegularFanin(0);
const auto* conv_node_view = squeeze_regular_fanin_0.node_view();
const auto* conv_node_def = conv_node_view->node();
if (!(IsConv2D(*conv_node_def) ||
(IsConv3D(*conv_node_def) && IsMKLEnabled())) ||
!HaveSameDataType(node_def, conv_node_def, "T") ||
HasControlFaninOrFanout(*conv_node_view) ||
!HasAtMostOneFanoutAtPort0(*conv_node_view) ||
IsInPreserveSet(ctx, conv_node_def))
return false;
std::vector<int32> dims;
if (!TryGetNodeAttr(*squeeze_node_def, "squeeze_dims", &dims)) return false;
for (auto dim : dims) {
if ((dim == 3 && IsConv2D(*conv_node_def)) ||
(dim == 4 && IsConv3D(*conv_node_def)))
return false;
}
const ContractionWithSqueezeAndBiasAdd pattern{
conv_node_view->node_index(), squeeze_node_view->node_index(),
node_index};
if (!IsDeviceCompatible(ctx, pattern)) return false;
*matched = pattern;
return true;
}
bool FindConv2DWithBatchNorm(const RemapperContext& ctx, int node_index,
ContractionWithBatchNorm* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (!IsFusedBatchNorm(*node_def)) return false;
bool dtypeU_is_float = HasDataType(node_def, DT_FLOAT, "U");
bool dtypeT_is_bf16 = HasDataType(node_def, DT_BFLOAT16, "T");
bool dtypeT_is_mkl_fp16 =
IsMKLEnabled() && HasDataType(node_def, DT_HALF, "T");
if (node_view->GetOp() != "FusedBatchNorm" &&
(!dtypeU_is_float || dtypeT_is_bf16 || dtypeT_is_mkl_fp16)) {
return false;
}
const auto* training_attr = node_view->GetAttr(kIsTraining);
if (training_attr != nullptr && training_attr->b()) return false;
if (HasControlFaninOrFanout(*node_view) ||
!node_view->GetRegularFanout(1).empty() ||
!node_view->GetRegularFanout(2).empty() ||
!node_view->GetRegularFanout(3).empty() ||
!node_view->GetRegularFanout(4).empty())
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* conv2d_node_view = regular_fanin_0.node_view();
const auto* conv2d_node_def = conv2d_node_view->node();
if (NodeIsOnCpu(conv2d_node_def) && ctx.xla_cpu_jit_disable_fusion) {
return false;
}
if (!IsConv2D(*conv2d_node_def) || !NodeIsOnCpu(conv2d_node_def) ||
!HaveSameDataType(node_def, conv2d_node_def) ||
!IsCpuCompatibleDataType(conv2d_node_def) ||
!IsCpuCompatibleDataFormat(ctx, conv2d_node_def) ||
HasControlFaninOrFanout(*conv2d_node_view) ||
!HasAtMostOneFanoutAtPort0(*conv2d_node_view) ||
IsInPreserveSet(ctx, conv2d_node_def))
return false;
matched->contraction = conv2d_node_view->node_index();
matched->fused_batch_norm = node_index;
if (!TryGetNodeAttr(*node_def, "epsilon", &matched->epsilon)) return false;
return true;
}
bool FindConv2DWithBatchNormAndActivation(
const RemapperContext& ctx, int node_index,
ContractionWithBatchNormAndActivation* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!IsSupportedActivation(*node_def, nullptr)) return false;
if (IsSigmoid(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* batch_norm_node_view = regular_fanin_0.node_view();
ContractionWithBatchNorm base;
if (!FindConv2DWithBatchNorm(ctx, batch_norm_node_view->node_index(), &base))
return false;
const auto* fused_batch_norm_node_view =
ctx.graph_view.GetNode(base.fused_batch_norm);
const auto* fused_batch_norm_node_def = fused_batch_norm_node_view->node();
if (!HasAtMostOneFanoutAtPort0(*fused_batch_norm_node_view) ||
!HaveSameDataType(node_def, fused_batch_norm_node_def) ||
IsInPreserveSet(ctx, fused_batch_norm_node_def))
return false;
matched->contraction = base.contraction;
matched->fused_batch_norm = base.fused_batch_norm;
matched->activation = node_index;
matched->epsilon = base.epsilon;
return true;
}
bool FindContractionWithBiasInPort(const RemapperContext& ctx,
const utils::MutableNodeView& add_node_view,
const NodeDef& add_node_def, int port_id,
ContractionWithBiasAdd* base,
const int allowed_fanouts = 1) {
if (add_node_view.NumRegularFanins() < port_id + 1) return false;
const auto& bias_add_node_view =
add_node_view.GetRegularFanin(port_id).node_view();
if (bias_add_node_view == nullptr) return false;
const auto* bias_add_node_def = bias_add_node_view->node();
if (!FindContractionWithBias(ctx, bias_add_node_view->node_index(), base,
false))
return false;
if (bias_add_node_view->GetRegularFanout(0).size() > allowed_fanouts ||
!HaveSameDataType(&add_node_def, bias_add_node_def) ||
IsInPreserveSet(ctx, bias_add_node_def))
return false;
return true;
}
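// Returns true for an Add whose two inputs have symbolically equal shapes,
// i.e. an element-wise addition without broadcasting.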
bool IsAddWithNoBroadcast(const RemapperContext& ctx, const NodeDef& node) {
if (!IsAdd(node)) return false;
const auto& props = ctx.graph_properties.GetInputProperties(node.name());
if (props.size() == 2 &&
ShapesSymbolicallyEqual(props[0].shape(), props[1].shape())) {
return true;
}
return false;
}
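// Matches a Pad feeding a (fused) Conv3D on CPU (oneDNN builds only) so that
// the padding can be absorbed into the convolution.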
bool FindPadWithConv3D(const RemapperContext& ctx, int node_index,
PadWithConv3D* matched) {
if (!IsMKLEnabled()) return false;
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (!NodeIsOnCpu(node_def)) return false;
if (!(IsConv3D(*node_def) || node_def->op() == kFusedConv3D)) return false;
if (!(HasDataType(node_def, DT_FLOAT) || HasDataType(node_def, DT_BFLOAT16) ||
HasDataType(node_def, DT_HALF)))
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* pad_node_view = regular_fanin_0.node_view();
const auto* pad_node_def = pad_node_view->node();
const auto& padding_const = pad_node_view->GetRegularFanin(1);
const auto* padding_const_node_view = padding_const.node_view();
if (!(pad_node_def->op() == "Pad") ||
!HaveSameDataType(node_def, pad_node_def))
return false;
const PadWithConv3D pattern{node_view->node_index(),
pad_node_view->node_index(),
padding_const_node_view->node_index()};
*matched = pattern;
return true;
}
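// Matches a contraction + BiasAdd whose output feeds an AddN or a
// broadcast-free Add; the BiasAdd may arrive on either input port of the
// addition.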
bool FindContractionWithBiasAddAndAdd(const RemapperContext& ctx,
const utils::MutableNodeView& node_view,
ContractionWithBiasAddAndAdd* matched) {
if (HasControlFaninOrFanout(node_view) || node_view.NumRegularFanins() != 2)
return false;
const auto* node_def = node_view.node();
if (!IsAddN(*node_def) && !IsAddWithNoBroadcast(ctx, *node_def)) return false;
if (!NodeIsOnCpu(node_def)) return false;
if (!(HasDataType(node_def, DT_FLOAT) || HasDataType(node_def, DT_BFLOAT16) ||
HasDataType(node_def, DT_HALF)))
return false;
ContractionWithBiasAdd base;
matched->port_id = 0;
if (!FindContractionWithBiasInPort(ctx, node_view, *node_def,
matched->port_id, &base)) {
matched->port_id = 1;
if (!FindContractionWithBiasInPort(ctx, node_view, *node_def,
matched->port_id, &base)) {
return false;
}
}
matched->contraction = base.contraction;
matched->bias_add = base.bias_add;
matched->add = node_view.node_index();
matched->bias_port = base.bias_port;
return true;
}
bool FindContractionWithBiasAddAndAdd(const RemapperContext& ctx,
int node_index,
ContractionWithBiasAddAndAdd* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
return FindContractionWithBiasAddAndAdd(ctx, *node_view, matched);
}
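// Matches the contraction + BiasAdd + Add + activation chain. Tanh and
// Sigmoid are rejected, LeakyRelu is only accepted after Conv2D/Conv3D, and
// Conv3D additionally requires oneDNN.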
bool FindContractionWithBiasAndAddActivation(
const RemapperContext& ctx, int node_index,
ContractionWithBiasAndAddActivation* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (node_def == nullptr) return false;
if (!IsSupportedActivation(*node_def, nullptr)) return false;
if (!NodeIsOnCpu(node_def)) return false;
if (IsTanh(*node_def)) return false;
if (IsSigmoid(*node_def)) return false;
if (!(HasDataType(node_def, DT_FLOAT) || HasDataType(node_def, DT_BFLOAT16) ||
HasDataType(node_def, DT_HALF)))
return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* add_node_view = regular_fanin_0.node_view();
ContractionWithBiasAddAndAdd base;
if (!FindContractionWithBiasAddAndAdd(ctx, *add_node_view, &base)) {
return false;
}
const auto* bias_add_node_view =
add_node_view->GetRegularFanin(base.port_id).node_view();
const auto* contraction_node_view =
bias_add_node_view->GetRegularFanin(0).node_view();
const auto* contraction_node_def = contraction_node_view->node();
if (!(IsConv2D(*contraction_node_def) || IsConv3D(*contraction_node_def)) &&
IsLeakyRelu(*node_def))
return false;
if (IsConv3D(*contraction_node_def) && !IsMKLEnabled()) return false;
const ContractionWithBiasAndAddActivation pattern{
base.contraction, base.bias_add, base.add,
base.port_id, node_index, base.bias_port};
*matched = pattern;
return true;
}
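// Matches a Swish (x * Sigmoid(x)) rooted at a Mul whose input is produced by
// Conv2D + BiasAdd or Conv2D + FusedBatchNorm{,V2,V3} on CPU.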
bool FindConv2DSwish(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern conv2dbiasaddswish_pattern{
"Mul", "mulToswish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "BiasAdd", "biasadd", NodeStatus::kRemove,
{
{ "Conv2D", "conv", NodeStatus::kRemove},
{ "*", "bias", NodeStatus::kRemain}
}
}
}
},
{ "BiasAdd", "biasadd", NodeStatus::kRemove}
}
};
utils::OpTypePattern conv2dbatchnormswish_pattern{
"Mul", "mulToswish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "FusedBatchNorm", "fusebatchnorm", NodeStatus::kRemove,
{
{ "Conv2D", "conv", NodeStatus::kRemove},
{ "*", "scale", NodeStatus::kRemain},
{ "*", "offset", NodeStatus::kRemain},
{ "*", "mean", NodeStatus::kRemain},
{ "*", "var", NodeStatus::kRemain}
}
}
}
},
{ "FusedBatchNorm", "fusebatchnorm", NodeStatus::kRemove}
}
};
utils::OpTypePattern conv2dbatchnormv2swish_pattern{
"Mul", "mulToswish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "FusedBatchNormV2", "fusebatchnorm", NodeStatus::kRemove,
{
{ "Conv2D", "conv", NodeStatus::kRemove},
{ "*", "scale", NodeStatus::kRemain},
{ "*", "offset", NodeStatus::kRemain},
{ "*", "mean", NodeStatus::kRemain},
{ "*", "var", NodeStatus::kRemain}
}
}
}
},
{ "FusedBatchNormV2", "fusebatchnorm", NodeStatus::kRemove}
}
};
utils::OpTypePattern conv2dbatchnormv3swish_pattern{
"Mul", "mulToswish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "FusedBatchNormV3", "fusebatchnorm", NodeStatus::kRemove,
{
{ "Conv2D", "conv", NodeStatus::kRemove},
{ "*", "scale", NodeStatus::kRemain},
{ "*", "offset", NodeStatus::kRemain},
{ "*", "mean", NodeStatus::kRemain},
{ "*", "var", NodeStatus::kRemain}
}
}
}
},
{ "FusedBatchNormV3", "fusebatchnorm", NodeStatus::kRemove}
}
};
auto* mul_node_def = ctx->graph_view.GetNode(node_index)->node();
if (!(HasDataType(mul_node_def, DT_FLOAT) ||
HasDataType(mul_node_def, DT_HALF) ||
HasDataType(mul_node_def, DT_BFLOAT16)))
return false;
if (!NodeIsOnCpu(mul_node_def)) return false;
bool found_op_type_match = false;
bool is_biasadd_pattern = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
conv2dbiasaddswish_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
is_biasadd_pattern = found_op_type_match;
if (!found_op_type_match) {
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
conv2dbatchnormswish_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
}
if (!found_op_type_match) {
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
conv2dbatchnormv2swish_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
}
if (!found_op_type_match) {
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
conv2dbatchnormv3swish_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
}
if (found_op_type_match) {
NodeDef* conv2d_node =
ctx->graph_view.GetNode(matched_nodes_map->at("conv"))->node();
if (!IsCpuCompatibleConv2D(*ctx, conv2d_node)) return false;
if (!is_biasadd_pattern) {
NodeDef* fusedbatchnorm_node =
ctx->graph_view.GetNode(matched_nodes_map->at("fusebatchnorm"))
->node();
bool is_training = true;
if (!TryGetNodeAttr(*fusedbatchnorm_node, kIsTraining, &is_training) ||
is_training)
return false;
if (fusedbatchnorm_node->op() != "FusedBatchNorm" &&
(!HasDataType(fusedbatchnorm_node, DT_FLOAT, "U") ||
(HasDataType(fusedbatchnorm_node, DT_FLOAT, "U") &&
!HasDataType(fusedbatchnorm_node, DT_FLOAT, "T")))) {
return false;
}
}
}
return found_op_type_match;
}
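// Checks that every node named in `values_map` is a single-element Const
// (possibly behind a Cast) whose float/bfloat16/half value is within 1e-2 of
// the expected value.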
inline bool VerifyConstants(RemapperContext* ctx,
std::map<string, int>* nodes_map,
std::map<string, float>* values_map) {
using utils::MutableNodeView;
for (auto it = values_map->begin(); it != values_map->end(); ++it) {
int node_idx = nodes_map->at(it->first);
MutableNodeView* node_view = ctx->graph_view.GetNode(node_idx);
NodeDef* node_def = node_view->node();
Tensor const_tensor;
if (node_def != nullptr && node_def->op() == "Cast") {
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* regular_node_view = regular_fanin_0.node_view();
node_def = regular_node_view->node();
}
if (node_def == nullptr || node_def->op() != "Const" ||
!const_tensor.FromProto(node_def->attr().at("value").tensor()) ||
const_tensor.NumElements() != 1) {
return false;
}
DataType dtype = const_tensor.dtype();
float const_value;
if (dtype == DT_FLOAT) {
const_value = const_tensor.flat<float>()(0);
} else if (dtype == DT_BFLOAT16) {
const_value = static_cast<float>(const_tensor.flat<bfloat16>()(0));
} else if (dtype == DT_HALF) {
const_value = static_cast<float>(const_tensor.flat<Eigen::half>()(0));
} else {
return false;
}
if (std::abs(const_value - it->second) > 1e-2) return false;
}
return true;
}
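// Matches the exact, erf-based GeLU subgraph on top of MatMul + BiasAdd. Two
// patterns cover the two possible operand orderings of the outer Mul nodes.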
bool IsMatchedMatMulBiasAddAndGeluExact(
RemapperContext& ctx, int node_index,
std::map<string, int>* matched_nodes_map = nullptr,
std::set<int>* remove_node_indices = nullptr) {
auto* node_view = ctx.graph_view.GetNode(node_index);
using utils::MatchingDirection;
using utils::NodeStatus;
static utils::OpTypePattern* gelu_exact_pattern = new utils::OpTypePattern
{"Mul", "output", NodeStatus::kReplace,
{
{"Mul", "erf_plus_one_times_one_half", NodeStatus::kRemove,
{
{"Add|AddV2", "erf_plus_one", NodeStatus::kRemove,
{
{"Erf", "erf", NodeStatus::kRemove,
{
{"Mul", "bias_add_x_sqrt_one_half",
NodeStatus::kRemove,
{
{"BiasAdd", "bias_add", NodeStatus::kRemove},
{"Cast|Const", "sqrt_one_half", NodeStatus::kRemain}
}
}
}
},
{"Cast|Const", "one", NodeStatus::kRemain}
}
},
{"Cast|Const", "one_half", NodeStatus::kRemain}
}
},
{"BiasAdd", "bias_add", NodeStatus::kRemove,
{
{"MatMul", "matmul", NodeStatus::kRemove},
{"*", "bias", NodeStatus::kRemain}
}
}
}
};
static utils::OpTypePattern* gelu_exact_pattern2 = new utils::OpTypePattern
{"Mul", "output", NodeStatus::kReplace,
{
{"Add|AddV2", "erf_plus_one", NodeStatus::kRemove,
{
{"Erf", "erf", NodeStatus::kRemove,
{
{"Mul", "bias_add_x_sqrt_one_half", NodeStatus::kRemove,
{
{"BiasAdd", "bias_add", NodeStatus::kRemove},
{"Cast|Const", "sqrt_one_half", NodeStatus::kRemain}
}
}
}
},
{"Cast|Const", "one", NodeStatus::kRemain}
}
},
{"Mul", "erf_plus_one_times_one_half", NodeStatus::kRemove,
{
{"BiasAdd", "bias_add", NodeStatus::kRemove,
{
{"MatMul", "matmul", NodeStatus::kRemove},
{"*", "bias", NodeStatus::kRemain}
}
},
{"Cast|Const", "one_half", NodeStatus::kRemain}
}
}
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx.graph_view));
std::map<string, int> dummy_matched_nodes_map;
std::set<int> dummy_remove_node_indices;
if (!matched_nodes_map) matched_nodes_map = &dummy_matched_nodes_map;
if (!remove_node_indices) remove_node_indices = &dummy_remove_node_indices;
if (graph_matcher.GetMatchedNodes(*gelu_exact_pattern, ctx.nodes_to_preserve,
node_view, matched_nodes_map,
remove_node_indices)) {
return true;
}
matched_nodes_map->clear();
remove_node_indices->clear();
return graph_matcher.GetMatchedNodes(*gelu_exact_pattern2,
ctx.nodes_to_preserve, node_view,
matched_nodes_map, remove_node_indices);
}
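// Matches MatMul + BiasAdd followed by exact (erf) or approximate (tanh)
// GeLU, then validates device placement, data types, transpose attributes,
// and the constant values of the matched subgraph.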
bool FindMatMulBiasAddAndGelu(RemapperContext* ctx, int node_index,
const Cluster* cluster,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices,
bool* is_gelu_approximate) {
if (!IsMKLEnabled() && !BlasLtMatmulEnabled() &&
!RuntimeFusionEnabled(cluster))
return false;
using utils::MatchingDirection;
using utils::NodeStatus;
bool found_gelu_exact = false;
bool found_gelu_approximate = false;
matched_nodes_map->clear();
remove_node_indices->clear();
found_gelu_exact = IsMatchedMatMulBiasAddAndGeluExact(
*ctx, node_index, matched_nodes_map, remove_node_indices);
if (!found_gelu_exact) {
utils::OpTypePattern subgraph_gpu =
{"Mul", "mul", NodeStatus::kRemove,
{
{"Pow", "pow", NodeStatus::kRemove,
{
{"_FusedMatMul", "matmul", NodeStatus::kRemove},
{"Const", "three", NodeStatus::kRemain}
}
},
{"Const", "empirical_const", NodeStatus::kRemain}
}
};
utils::OpTypePattern subgraph_cpu =
{"Mul", "mul", NodeStatus::kRemove,
{
{"Mul", "empirical_const_times_matmul", NodeStatus::kRemove,
{
{"Const", "empirical_const", NodeStatus::kRemain},
{"_FusedMatMul", "matmul", NodeStatus::kRemove}
}
},
{"Square", "square", NodeStatus::kRemove,
{
{"_FusedMatMul", "matmul", NodeStatus::kRemove}
}
}
}
};
utils::MutableNodeView* node_view = ctx->graph_view.GetNode(node_index);
const NodeDef* node_def = node_view->node();
bool root_on_gpu = NodeIsOnGpu(node_def);
utils::OpTypePattern* subgraph_pattern =
root_on_gpu ? &subgraph_gpu : &subgraph_cpu;
utils::OpTypePattern gelu_approximate_pattern =
{"Mul", "output", NodeStatus::kReplace,
{
{"Mul", "tanh_plus_one_times_one_half", NodeStatus::kRemove,
{
{"AddV2", "tanh_plus_one", NodeStatus::kRemove,
{
{"Tanh", "tanh", NodeStatus::kRemove,
{
{"Mul", "matmul_plus_mul_times_square_root_two_over_pi", NodeStatus::kRemove,
{
{"AddV2", "matmul_plus_mul", NodeStatus::kRemove,
{
{"_FusedMatMul", "matmul", NodeStatus::kRemove},
*subgraph_pattern
}
},
{"Const", "square_root_two_over_pi", NodeStatus::kRemain}
}
}
}
},
{"Const", "one", NodeStatus::kRemain}
}
},
{"Const", "one_half", NodeStatus::kRemain}
}
},
{"_FusedMatMul", "matmul", NodeStatus::kRemove}
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_gelu_approximate = graph_matcher.GetMatchedNodes(
gelu_approximate_pattern, ctx->nodes_to_preserve, node_view,
matched_nodes_map, remove_node_indices);
}
if (found_gelu_exact) {
NodeDef* matmul_node =
ctx->graph_view.GetNode(matched_nodes_map->at("matmul"))->node();
if (NodeIsOnCpu(matmul_node) && ctx->xla_cpu_jit_disable_fusion) {
return false;
}
DataType matmul_dtype = GetDataTypeFromAttr(*matmul_node, "T");
bool cpu_ok = IsMKLEnabled() && IsCpuCompatibleMatMul(*ctx, matmul_node);
cpu_ok = cpu_ok && matmul_node->attr().contains("transpose_a") &&
!matmul_node->attr().at("transpose_a").b();
bool gpu_ok = NodeIsOnGpu(matmul_node) && RuntimeFusionEnabled(cluster) &&
matmul_dtype == DT_HALF;
if (!cpu_ok && !gpu_ok) return false;
if (gpu_ok) {
const std::vector<OpInfo::TensorProperties>& input_props =
ctx->graph_properties.GetInputProperties(matmul_node->name());
const TensorShapeProto& a_shape =
!input_props.empty() ? input_props[0].shape() : TensorShapeProto();
const TensorShapeProto& b_shape =
!input_props.empty() ? input_props[1].shape() : TensorShapeProto();
bool valid_dims = Rank(a_shape) == 2 && Rank(b_shape) == 2 &&
IsKnown(a_shape.dim(1)) &&
IsKnown(b_shape.dim(1)) &&
a_shape.dim(1).size() % 2 == 0 &&
b_shape.dim(1).size() % 2 == 0;
if (!valid_dims) return false;
}
std::map<string, float> values_map = {
{"sqrt_one_half", 0.707106}, {"one", 1.0}, {"one_half", 0.5}};
if (!VerifyConstants(ctx, matched_nodes_map, &values_map)) return false;
} else if (found_gelu_approximate) {
NodeDef* matmul_node =
ctx->graph_view.GetNode(matched_nodes_map->at("matmul"))->node();
if (NodeIsOnCpu(matmul_node) && ctx->xla_cpu_jit_disable_fusion) {
return false;
}
if (!IsMKLEnabled() && !NodeIsOnGpu(matmul_node)) return false;
if (NodeIsOnCpu(matmul_node) &&
matmul_node->attr().contains("transpose_a") &&
matmul_node->attr().at("transpose_a").b()) {
return false;
}
auto fused_ops = matmul_node->attr().at("fused_ops").list().s();
if (fused_ops.size() == 1) {
if (fused_ops.at(0) != "BiasAdd") return false;
} else {
return false;
}
std::map<string, float> values_map = {{"square_root_two_over_pi", 0.797884},
{"one", 1.0},
{"one_half", 0.5},
{"empirical_const", 0.044715}};
if (NodeIsOnGpu(matmul_node)) {
values_map["three"] = 3.0;
}
if (!VerifyConstants(ctx, matched_nodes_map, &values_map)) return false;
} else {
return false;
}
  *is_gelu_approximate = found_gelu_approximate;
return (found_gelu_exact || found_gelu_approximate);
}
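// Matches Maximum(alpha * x, x) with a scalar, non-negative alpha constant
// (possibly behind a Cast), which can be rewritten as LeakyRelu.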
bool FindMulAndMaximum(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices, float* alpha) {
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern mulmax_pattern{
"Maximum", "max_to_leakyrelu", NodeStatus::kReplace,
{
{ "Mul", "mul", NodeStatus::kRemove,
{
{ "*", "input", NodeStatus::kRemain},
{ "Const|Cast", "alpha", NodeStatus::kRemain}
}
},
{ "*", "input", NodeStatus::kRemain}
}
};
auto* max_node_def = ctx->graph_view.GetNode(node_index)->node();
if (!HasDataType(max_node_def, DT_HALF) &&
!HasDataType(max_node_def, DT_BFLOAT16) &&
!HasDataType(max_node_def, DT_FLOAT) &&
!HasDataType(max_node_def, DT_DOUBLE))
return false;
if (!NodeIsOnCpu(max_node_def) && !IsMKLEnabled()) return false;
bool found_op_type_match = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
mulmax_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) {
const auto* alpha_node_view =
ctx->graph_view.GetNode(matched_nodes_map->at("alpha"));
const auto* alpha_node_def = alpha_node_view->node();
if (alpha_node_def != nullptr && alpha_node_def->op() == "Cast") {
const auto& regular_fanin_0 = alpha_node_view->GetRegularFanin(0);
const auto* regular_node_view = regular_fanin_0.node_view();
alpha_node_def = regular_node_view->node();
}
Tensor alpha_tensor;
if (alpha_node_def == nullptr || alpha_node_def->op() != "Const" ||
!alpha_tensor.FromProto(alpha_node_def->attr().at("value").tensor()) ||
alpha_tensor.NumElements() != 1) {
return false;
}
DataType dtype = alpha_tensor.dtype();
float alpha_val;
if (dtype == DT_FLOAT) {
alpha_val = alpha_tensor.flat<float>()(0);
} else if (dtype == DT_BFLOAT16) {
alpha_val = static_cast<float>(alpha_tensor.flat<bfloat16>()(0));
} else if (dtype == DT_HALF) {
alpha_val = static_cast<float>(alpha_tensor.flat<Eigen::half>()(0));
} else {
return false;
}
if (alpha_val < 0) return false;
*alpha = alpha_val;
}
return found_op_type_match;
}
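// Matches Mul(x, Sigmoid(x)) (Swish) on CPU with oneDNN; both operands of the
// Mul must refer to the tensor that feeds the Sigmoid.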
bool FindSigmoidAndMul(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern sigmoidmul_pattern{
"Mul", "mul_to_swish", NodeStatus::kReplace,
{
{ "Sigmoid", "sigmoid", NodeStatus::kRemove,
{
{ "*", "input", NodeStatus::kRemain}
}
},
{ "*", "input", NodeStatus::kRemain}
}
};
auto* mul_node_def = ctx->graph_view.GetNode(node_index)->node();
if (!(HasDataType(mul_node_def, DT_FLOAT) ||
HasDataType(mul_node_def, DT_HALF) ||
HasDataType(mul_node_def, DT_BFLOAT16)))
return false;
if (!NodeIsOnCpu(mul_node_def)) return false;
bool found_op_type_match = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
sigmoidmul_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) {
NodeDef* matched_sigmoid_node =
ctx->graph_view.GetNode(matched_nodes_map->at("sigmoid"))->node();
auto in_tensor_sigmoid = matched_sigmoid_node->input(0);
if ((mul_node_def->input(0) != in_tensor_sigmoid) &&
(mul_node_def->input(1) != in_tensor_sigmoid)) {
found_op_type_match = false;
}
}
return found_op_type_match;
}
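// Matches the decomposed normalization subgraph
//   (x - mean(x)) * rsqrt(variance + epsilon) * gamma + beta
// in its two common operand orderings.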
bool IsCommonNormPattern(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern subgraph_pattern =
{"Rsqrt", "rsqrt", NodeStatus::kRemove,
{
{"AddV2|Add", "add", NodeStatus::kRemove,
{
{"Mean", "mean0", NodeStatus::kRemove,
{
{"SquaredDifference", "squareddiff", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Mean", "mean1", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Const", "r_indices1", NodeStatus::kRemain}
}
}
}
},
{"Const", "r_indices0", NodeStatus::kRemain}
}
},
{"Const", "epsilon", NodeStatus::kRemain}
}
}
}
};
utils::OpTypePattern common_norm_pattern =
{"AddV2|Add", "output", NodeStatus::kReplace,
{
{"Mul", "mul0", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Mul", "mul1", NodeStatus::kRemove,
{
subgraph_pattern,
{"Const", "gamma", NodeStatus::kRemain}
}
}
}
},
{"Sub", "sub0", NodeStatus::kRemove,
{
{"Const", "beta", NodeStatus::kRemain},
{"Mul", "mul2", NodeStatus::kRemove,
{
{"Mul", "mul1", NodeStatus::kRemove},
{"Mean", "mean1", NodeStatus::kRemove}
}
},
}
}
}
};
utils::OpTypePattern common_norm_pattern_1 =
{"AddV2|Add", "output", NodeStatus::kReplace,
{
{"Mul", "mul0", NodeStatus::kRemove,
{
{"Mul", "mul1", NodeStatus::kRemove,
{
{"Sub", "sub0", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Mean", "mean1", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Const", "r_indices1", NodeStatus::kRemain}
}
}
}
},
subgraph_pattern
}
},
{"*", "gamma", NodeStatus::kRemain}
}
},
{"*", "beta", NodeStatus::kRemain},
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
bool found_op_type_match =
graph_matcher.GetMatchedNodes(common_norm_pattern, {},
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices) ||
graph_matcher.GetMatchedNodes(common_norm_pattern_1, {},
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
return found_op_type_match;
}
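// Matches layer normalization expressed either as Reshape + FusedBatchNormV3
// + Reshape or as the common decomposed pattern, validates epsilon, the
// reduction axis, and tensor ranks, and records the input/gamma/beta names.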
bool FindMklLayerNorm(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices,
std::vector<string>* input_node_names, float* epsilon) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern layer_norm_pattern =
{"AddV2", "output", NodeStatus::kReplace,
{
{"*", "beta", NodeStatus::kRemain},
{"Mul", "scale", NodeStatus::kRemove,
{
{"Reshape", "post_reshape", NodeStatus::kRemove,
{
{"FusedBatchNormV3", "fused_batch_norm", NodeStatus::kRemove,
{
{"Reshape", "pre_reshape", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"*", "pre_shape", NodeStatus::kRemain}
}
},
{"Fill", "fill_scale", NodeStatus::kRemove,
{
{"*", "dims_fill_scale", NodeStatus::kRemain},
{"Const", "unit_gamma", NodeStatus::kRemain}
}
},
{"Fill", "fill_offset", NodeStatus::kRemove,
{
{"*", "dims_fill_offset", NodeStatus::kRemain},
{"Const", "zero_beta", NodeStatus::kRemain}
}
},
{"Const", "empty", NodeStatus::kRemain},
{"Const", "empty", NodeStatus::kRemain}
}
},
{"*", "post_shape", NodeStatus::kRemain}
}
},
{"*", "gamma", NodeStatus::kRemain}
}
}
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
bool found_op_type_match = false;
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match =
graph_matcher.GetMatchedNodes(layer_norm_pattern, ctx->nodes_to_preserve,
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (!found_op_type_match) {
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = IsCommonNormPattern(
ctx, node_index, matched_nodes_map, remove_node_indices);
}
if (found_op_type_match) {
if (!ctx->inferred_graph_properties) {
Status s = ctx->graph_properties.InferStatically(
true,
false,
true,
true);
if (!s.ok()) return false;
ctx->inferred_graph_properties = true;
}
*epsilon = 0.001;
if (matched_nodes_map->count("fused_batch_norm")) {
NodeDef* fused_batch_norm_node =
ctx->graph_view.GetNode(matched_nodes_map->at("fused_batch_norm"))
->node();
if (fused_batch_norm_node->attr().count("epsilon")) {
*epsilon = fused_batch_norm_node->attr().at("epsilon").f();
}
bool is_training = false;
if (!TryGetNodeAttr(*fused_batch_norm_node, kIsTraining, &is_training) ||
!is_training)
return false;
NodeDef* empty_const_node =
ctx->graph_view.GetNode(matched_nodes_map->at("empty"))->node();
Tensor const_tensor;
if (empty_const_node != nullptr && empty_const_node->op() == "Const" &&
const_tensor.FromProto(
empty_const_node->attr().at("value").tensor())) {
if (const_tensor.NumElements() != 0) return false;
} else {
return false;
}
auto* pre_reshape_node =
ctx->graph_view.GetNode(matched_nodes_map->at("pre_reshape"))->node();
auto* scale_node =
ctx->graph_view.GetNode(matched_nodes_map->at("gamma"))->node();
auto* beta_node =
ctx->graph_view.GetNode(matched_nodes_map->at("beta"))->node();
input_node_names->clear();
input_node_names->resize(3);
input_node_names->at(0) = pre_reshape_node->input(0);
input_node_names->at(1) = scale_node->name();
input_node_names->at(2) = beta_node->name();
} else {
NodeDef* mean1_node =
ctx->graph_view.GetNode(matched_nodes_map->at("mean1"))->node();
bool keep_dims = false;
if (!mean1_node ||
!TryGetNodeAttr(*mean1_node, "keep_dims", &keep_dims) || !keep_dims)
return false;
NodeDef* mean_axis_node =
ctx->graph_view.GetNode(matched_nodes_map->at("r_indices1"))->node();
if (!mean_axis_node) {
VLOG(1) << "Unable to find reduction axis node";
return false;
}
Tensor mean_axis_tensor;
if (!mean_axis_tensor.FromProto(
mean_axis_node->attr().at("value").tensor())) {
return false;
}
DataType dtype = mean_axis_tensor.dtype();
if (dtype != DT_INT32 && dtype != DT_INT64) return false;
int expected_axis_count = 1;
if (mean_axis_tensor.NumElements() != expected_axis_count) return false;
NodeDef* input_node =
ctx->graph_view.GetNode(matched_nodes_map->at("input"))->node();
auto input_node_props =
ctx->graph_properties.GetOutputProperties(input_node->name());
int rank = Rank(input_node_props[0].shape());
if (dtype == DT_INT32) {
if (static_cast<int32>(rank - 1) != mean_axis_tensor.flat<int32>()(0))
return false;
} else {
if (static_cast<int64>(rank - 1) != mean_axis_tensor.flat<int64>()(0))
return false;
}
auto* gamma_node =
ctx->graph_view.GetNode(matched_nodes_map->at("gamma"))->node();
auto* beta_node =
ctx->graph_view.GetNode(matched_nodes_map->at("beta"))->node();
input_node_names->clear();
input_node_names->resize(3);
input_node_names->at(0) = mean1_node->input(0);
input_node_names->at(1) = gamma_node->name();
input_node_names->at(2) = beta_node->name();
}
NodeDef* input_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("input"))->node();
auto input_props =
ctx->graph_properties.GetOutputProperties(input_node_def->name());
NodeDef* output_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("output"))->node();
auto output_props =
ctx->graph_properties.GetOutputProperties(output_node_def->name());
if (ShapesSymbolicallyEqual(input_props[0].shape(),
output_props[0].shape())) {
int rank = Rank(input_props[0].shape());
if (rank < 2 || rank > 3) return false;
} else {
return false;
}
}
return found_op_type_match;
}
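// Matches an inference-mode FusedBatchNorm with DT_FLOAT inputs whose scaling
// factor (scale and variance) or at least four inputs are constant.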
bool FindFusedBatchNorm(const RemapperContext& ctx, int node_index,
FusedBatchNorm* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (ctx.xla_cpu_jit_disable_fusion && NodeIsOnCpu(node_def)) return false;
if (!IsFusedBatchNorm(*node_def)) return false;
if (GetDataTypeFromAttr(*node_def, "T") != DT_FLOAT) return false;
bool is_training = true;
if (!TryGetNodeAttr(*node_def, kIsTraining, &is_training)) return false;
if (is_training) return false;
const auto& props = ctx.graph_properties.GetInputProperties(node_def->name());
bool const_scaling_factor =
props.size() == 5 &&
props[1].has_value() &&
props[4].has_value();
auto const_inputs = std::count_if(
props.begin(), props.end(),
[](const OpInfo::TensorProperties& props) { return props.has_value(); });
bool can_remap = const_scaling_factor || const_inputs >= 4;
if (!can_remap) return false;
if (node_view->GetRegularFanouts().size() > 1) {
return false;
}
matched->fused_batch_norm = node_index;
return true;
}
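// Returns true only when built against cuDNN >= 7.4.2 and the
// TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT environment variable is enabled.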
bool BatchnormSpatialPersistentEnabled() {
#if CUDNN_VERSION >= 7402
static bool is_enabled = [] {
bool is_enabled = false;
TF_CHECK_OK(tensorflow::ReadBoolFromEnvVar(
"TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT",
false, &is_enabled));
return is_enabled;
}();
return is_enabled;
#else
return false;
#endif
}
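// Matches Relu(FusedBatchNorm(...)) or Relu(Add(FusedBatchNorm(...), side
// input)) subject to device, dtype, and data-format constraints; the training
// GPU case additionally requires NHWC half data with channels divisible by 4.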
bool FindFusedBatchNormEx(const RemapperContext& ctx, int node_index,
FusedBatchNormEx* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (!IsRelu(*node_def) || HasControlFaninOrFanout(*node_view)) return false;
const auto valid_batch_norm =
[&](const utils::MutableNodeView& fused_batch_norm) -> bool {
const auto* fused_batch_norm_node_def = fused_batch_norm.node();
if (!IsFusedBatchNorm(*fused_batch_norm_node_def)) return false;
if (!IsMKLEnabled() && !NodeIsOnGpu(fused_batch_norm_node_def))
return false;
DataType t_dtype = GetDataTypeFromAttr(*fused_batch_norm_node_def, "T");
if (NodeIsOnGpu(fused_batch_norm_node_def)) {
if (t_dtype != DT_FLOAT && t_dtype != DT_HALF) return false;
} else {
if (ctx.xla_cpu_jit_disable_fusion) return false;
if (IsMKLEnabled() && !IsDataTypeSupportedByOneDNNOnThisCPU(t_dtype))
return false;
}
bool is_training;
if (!GetNodeAttr(*fused_batch_norm_node_def, kIsTraining, &is_training)
.ok())
return false;
string data_format;
if (!GetNodeAttr(*fused_batch_norm_node_def, kDataFormat, &data_format)
.ok())
return false;
if (data_format != "NHWC" && data_format != "NCHW") return false;
if (is_training && NodeIsOnGpu(fused_batch_norm_node_def)) {
if (data_format != "NHWC") return false;
if (t_dtype != DT_HALF) return false;
const auto& props = ctx.graph_properties.GetInputProperties(
fused_batch_norm_node_def->name());
const bool valid_channel_dim = !props.empty() &&
props[0].shape().dim_size() == 4 &&
props[0].shape().dim(3).size() % 4 == 0;
if (!valid_channel_dim) return false;
if (!BatchnormSpatialPersistentEnabled()) return false;
}
if ((fused_batch_norm_node_def->op() != "FusedBatchNorm") &&
!HasDataType(fused_batch_norm_node_def, DT_FLOAT, "U"))
return false;
if (HasControlFaninOrFanout(fused_batch_norm) ||
!HasAtMostOneDataFanoutAtPort0(fused_batch_norm) ||
IsInPreserveSet(ctx, fused_batch_norm_node_def))
return false;
return true;
};
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* relu_fanin_0_node_view = regular_fanin_0.node_view();
const auto* relu_fanin_0_node_def = relu_fanin_0_node_view->node();
if (valid_batch_norm(*relu_fanin_0_node_view)) {
matched->activation = node_index;
matched->fused_batch_norm = regular_fanin_0.node_index();
return true;
}
if (IsAdd(*relu_fanin_0_node_def)) {
if (IsMKLEnabled() && !NodeIsOnGpu(node_def)) return false;
if (HasControlFaninOrFanout(*relu_fanin_0_node_view) ||
!HasAtMostOneFanoutAtPort0(*relu_fanin_0_node_view) ||
IsInPreserveSet(ctx, relu_fanin_0_node_def))
return false;
const auto& props =
ctx.graph_properties.GetInputProperties(relu_fanin_0_node_def->name());
if (props.size() < 2 ||
!ShapesSymbolicallyEqual(props[0].shape(), props[1].shape()))
return false;
if (relu_fanin_0_node_view->NumRegularFanins() < 2) return false;
const auto& add_regular_fanin_0 =
relu_fanin_0_node_view->GetRegularFanin(0);
const auto& add_regular_fanin_1 =
relu_fanin_0_node_view->GetRegularFanin(1);
if (valid_batch_norm(*add_regular_fanin_0.node_view())) {
matched->activation = node_index;
matched->side_input = add_regular_fanin_1.node_index();
matched->fused_batch_norm = add_regular_fanin_0.node_index();
matched->invalidated = regular_fanin_0.node_index();
return true;
}
if (valid_batch_norm(*add_regular_fanin_1.node_view())) {
matched->activation = node_index;
matched->side_input = add_regular_fanin_0.node_index();
matched->fused_batch_norm = add_regular_fanin_1.node_index();
matched->invalidated = regular_fanin_0.node_index();
return true;
}
}
return false;
}
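// Matches the backward pattern FusedBatchNormGrad(ReluGrad(...)) that mirrors
// a forward match of FindFusedBatchNormEx, optionally capturing the gradient
// of the side input.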
bool FindFusedBatchNormGradEx(const RemapperContext& ctx, int node_index,
FusedBatchNormGradEx* matched) {
const utils::MutableNodeView* node_view = ctx.graph_view.GetNode(node_index);
const auto valid_batch_norm_grad =
[&](const utils::MutableNodeView& fused_batch_norm_grad) -> bool {
const NodeDef* node_def = fused_batch_norm_grad.node();
if (!IsFusedBatchNormGrad(*node_def) ||
HasControlFaninOrFanout(fused_batch_norm_grad))
return false;
if (!NodeIsOnGpu(node_def)) return false;
bool is_training;
if (!GetNodeAttr(*node_def, kIsTraining, &is_training).ok() || !is_training)
return false;
DataType t_dtype = GetDataTypeFromAttr(*node_def, "T");
if (t_dtype != DT_HALF) return false;
string data_format;
if (!GetNodeAttr(*node_def, kDataFormat, &data_format).ok()) return false;
if (data_format != "NHWC") return false;
const auto& props =
ctx.graph_properties.GetInputProperties(node_def->name());
const bool valid_channel_dim = !props.empty() &&
props[0].shape().dim_size() == 4 &&
props[0].shape().dim(3).size() % 4 == 0;
if (!valid_channel_dim) return false;
if (!BatchnormSpatialPersistentEnabled()) return false;
if (node_def->op() != "FusedBatchNorm" &&
!HasDataType(node_def, DT_FLOAT, "U"))
return false;
return true;
};
if (ctx.xla_auto_clustering_on) return false;
if (!valid_batch_norm_grad(*node_view)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const utils::MutableFanoutView& regular_fanin_0 =
node_view->GetRegularFanin(0);
const utils::MutableNodeView* relugrad_node_view =
regular_fanin_0.node_view();
const NodeDef* relugrad_node_def = relugrad_node_view->node();
bool is_relugrad = IsReluGrad(*relugrad_node_def);
if (!is_relugrad || HasControlFaninOrFanout(*relugrad_node_view) ||
IsInPreserveSet(ctx, relugrad_node_def))
return false;
if (relugrad_node_view->NumRegularFanins() < 1) return false;
const utils::MutableFanoutView& fanin_1 =
relugrad_node_view->GetRegularFanin(1);
const utils::MutableNodeView* fwd_node_view = fanin_1.node_view();
FusedBatchNormEx fwd_matched;
FindFusedBatchNormEx(ctx, fwd_node_view->node_index(), &fwd_matched);
bool fwd_bn_act_used = fwd_matched.activation != kMissingIndex &&
fwd_matched.side_input == kMissingIndex;
bool fwd_bn_add_act_used = fwd_matched.activation != kMissingIndex &&
fwd_matched.side_input != kMissingIndex;
if (fwd_bn_act_used && relugrad_node_view->GetRegularFanout(0).size() == 1) {
matched->activation_grad = regular_fanin_0.node_index();
matched->fused_batch_norm_grad = node_index;
matched->fwd_fused_batch_norm = fwd_matched.fused_batch_norm;
return true;
}
if (fwd_bn_add_act_used &&
relugrad_node_view->GetRegularFanout(0).size() == 2) {
const utils::MutableFanoutView& fwd_batch_norm_node =
node_view->GetRegularFanin(5);
if (fwd_matched.fused_batch_norm != fwd_batch_norm_node.node_index()) {
return false;
}
const std::vector<utils::MutableFaninView>& fanouts_at_port_0 =
relugrad_node_view->GetRegularFanouts()[0];
const utils::MutableNodeView* fanout_0_node_view =
ctx.graph_view.GetNode(fanouts_at_port_0[0].node_view()->GetName());
const utils::MutableNodeView* fanout_1_node_view =
ctx.graph_view.GetNode(fanouts_at_port_0[1].node_view()->GetName());
const NodeDef* fanout_0_node_def = fanout_0_node_view->node();
const NodeDef* fanout_1_node_def = fanout_1_node_view->node();
const NodeDef* node_def = node_view->node();
matched->activation_grad = regular_fanin_0.node_index();
matched->fused_batch_norm_grad = node_index;
matched->fwd_fused_batch_norm = fwd_matched.fused_batch_norm;
if (fanout_0_node_def == node_def) {
matched->side_input_grad = fanout_1_node_view->node_index();
return true;
}
if (fanout_1_node_def == node_def) {
matched->side_input_grad = fanout_0_node_view->node_index();
return true;
}
}
return false;
}
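// Matches StringToHashBucketFast(AsString(x)) for an integer tensor x with
// the default width (-1) and an empty fill attribute.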
bool FindTensorToHashBucket(const RemapperContext& ctx, int node_index,
TensorToHashBucket* matched) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
if (!IsStringToHashBucketFast(*node_def) ||
HasControlFaninOrFanout(*node_view)) {
return false;
}
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* as_string_node_view = regular_fanin_0.node_view();
const auto* as_string_node_def = as_string_node_view->node();
bool is_as_string = IsAsString(*as_string_node_def);
if (!is_as_string || HasControlFaninOrFanout(*as_string_node_view) ||
!HasAtMostOneFanoutAtPort0(*as_string_node_view) ||
IsInPreserveSet(ctx, as_string_node_def))
return false;
if (!HasDataType(as_string_node_def, DT_INT8) &&
!HasDataType(as_string_node_def, DT_INT16) &&
!HasDataType(as_string_node_def, DT_INT32) &&
!HasDataType(as_string_node_def, DT_INT64)) {
return false;
}
int width;
if (!GetNodeAttr(*as_string_node_def, kWidth, &width).ok() || width != -1) {
return false;
}
string fill;
if (!GetNodeAttr(*as_string_node_def, kFill, &fill).ok() || !fill.empty()) {
return false;
}
if (as_string_node_view->NumRegularFanins() < 1) return false;
const auto& fanin_0 = as_string_node_view->GetRegularFanin(0);
const auto* pre_node_view = fanin_0.node_view();
const TensorToHashBucket pattern{pre_node_view->node_index(),
as_string_node_view->node_index(),
node_index};
*matched = pattern;
return true;
}
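// Matches the HardSwish subgraph x * (1/6) * Relu6(x + 3) rooted at a Mul and
// verifies the constants 3 and ~1/6.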
bool FindHardSwish(RemapperContext& ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern pattern {"Mul", "output", NodeStatus::kReplace,
{
{"Mul", "mul_one_sixth", NodeStatus::kRemove,
{
{"Const|Cast", "one_sixth", NodeStatus::kRemain},
{"*", "input", NodeStatus::kRemain}
}
},
{"Relu6", "relu6", NodeStatus::kRemove,
{
{"Add|AddV2", "add", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain},
{"Const|Cast", "three", NodeStatus::kRemain}
}
}
}
},
}
};
bool found_match = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx.graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_match = graph_matcher.GetMatchedNodes(
pattern, ctx.nodes_to_preserve, ctx.graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_match) {
std::map<string, float> values_map = {{"three", 3.0},
{"one_sixth", 0.16666}};
if (!VerifyConstants(&ctx, matched_nodes_map, &values_map)) return false;
}
return found_match;
}
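// Matches HardSwish applied to the output of Conv2D/DepthwiseConv2dNative +
// BiasAdd on CPU.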
bool FindContractionWithBiasAddAndHardSwish(
RemapperContext& ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsMKLEnabled()) return false;
const auto* node_view = ctx.graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
if (!FindHardSwish(ctx, node_index, matched_nodes_map, remove_node_indices))
return false;
const auto* add_node_view =
ctx.graph_view.GetNode(matched_nodes_map->at("add"));
const auto* add_node_def = add_node_view->node();
ContractionWithBiasAdd base;
int port_id = 0;
if (!FindContractionWithBiasInPort(ctx, *add_node_view, *add_node_def,
port_id, &base, 2)) {
port_id = 1;
if (!FindContractionWithBiasInPort(ctx, *add_node_view, *add_node_def,
port_id, &base, 2)) {
VLOG(2) << "Contraction + BiasAdd pattern was not found although"
<< " HardSwish pattern was found, so fusion failed.";
return false;
}
}
const auto* bias_node_def = ctx.graph_view.GetNode(base.bias_add)->node();
if (!HaveSameDataType(add_node_def, bias_node_def)) return false;
const auto* contraction_node_view = ctx.graph_view.GetNode(base.contraction);
const auto* contraction_node_def = contraction_node_view->node();
if (!IsConv2D(*contraction_node_def) &&
!IsDepthwiseConv2dNative(*contraction_node_def))
return false;
if (!IsCpuCompatibleConv2D(ctx, contraction_node_def) &&
!IsCpuCompatibleDepthwiseConv2dNative(contraction_node_def))
return false;
matched_nodes_map->insert({"contraction", base.contraction});
matched_nodes_map->insert({"bias_add", base.bias_add});
remove_node_indices->insert(base.contraction);
remove_node_indices->insert(base.bias_add);
return true;
}
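// Matches BatchMatMulV2 scaled by a scalar Mul (either on its output or on
// one of its inputs) and followed by an Add whose addend is rank-4 with
// second dimension equal to 1.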
bool FindFusedBatchMatMul(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices,
std::vector<string>* input_node_names) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
int pattern = 0;
utils::OpTypePattern fusion_pattern1 =
{"Add|AddV2", "output", NodeStatus::kReplace,
{
{"Mul", "mul", NodeStatus::kRemove,
{
{"BatchMatMulV2", "batch_matmul", NodeStatus::kRemove},
{"*", "multiplicand", NodeStatus::kRemain}
}
},
{"*", "addend", NodeStatus::kRemain}
}
};
utils::OpTypePattern fusion_pattern2 =
{"Add|AddV2", "output", NodeStatus::kReplace,
{
{"BatchMatMulV2", "batch_matmul", NodeStatus::kRemove,
{
{"Mul", "mul", NodeStatus::kRemove,
{
{"*", "mul_input0", NodeStatus::kRemain},
{"Const|Cast", "multiplicand", NodeStatus::kRemain}
}
},
{"*", "bmm_input1", NodeStatus::kRemain}
}
},
{"*", "addend", NodeStatus::kRemain}
}
};
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
bool found_op_type_match = false;
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match =
graph_matcher.GetMatchedNodes(fusion_pattern1, ctx->nodes_to_preserve,
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) pattern = 1;
if (!found_op_type_match) {
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match =
graph_matcher.GetMatchedNodes(fusion_pattern2, ctx->nodes_to_preserve,
ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) pattern = 2;
}
if (!found_op_type_match) return false;
if (!ctx->inferred_graph_properties) {
Status s = ctx->graph_properties.InferStatically(
true,
false,
false,
true);
if (!s.ok()) return false;
ctx->inferred_graph_properties = true;
}
NodeDef* multiplicand_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("multiplicand"))->node();
auto multiplicand_props =
ctx->graph_properties.GetOutputProperties(multiplicand_node_def->name());
if (NumCoefficients(multiplicand_props[0].shape()) != 1) return false;
NodeDef* batch_matmul_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("batch_matmul"))->node();
if (!IsCpuCompatibleMatMul(*ctx, batch_matmul_node_def)) return false;
auto batch_matmul_props =
ctx->graph_properties.GetOutputProperties(batch_matmul_node_def->name());
if (Rank(batch_matmul_props[0].shape()) != 4) return false;
NodeDef* addend_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("addend"))->node();
auto addend_props =
ctx->graph_properties.GetOutputProperties(addend_node_def->name());
auto addend_shape = addend_props[0].shape();
if (!(Rank(addend_shape) == 4 && addend_shape.dim(1).size() == 1)) {
return false;
}
input_node_names->clear();
input_node_names->resize(4);
if (pattern == 1) {
input_node_names->at(0) = batch_matmul_node_def->input(0);
input_node_names->at(1) = batch_matmul_node_def->input(1);
input_node_names->at(2) = multiplicand_node_def->name();
input_node_names->at(3) = addend_node_def->name();
} else if (pattern == 2) {
auto* mul_input0_node_def =
ctx->graph_view.GetNode(matched_nodes_map->at("mul_input0"))->node();
input_node_names->at(0) = mul_input0_node_def->name();
input_node_names->at(1) = batch_matmul_node_def->input(1);
input_node_names->at(2) = multiplicand_node_def->name();
input_node_names->at(3) = addend_node_def->name();
}
return found_op_type_match;
}
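// Returns true if the reduction axes are exactly the spatial dimensions of a
// 4D or 5D input, in either channels-last or channels-first layout.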
template <typename T>
bool IsInstanceNormReduction(const TensorShapeProto& input_shape,
const Tensor& reduction_axes_data) {
int input_dims = input_shape.dim_size();
int reduction_axes = reduction_axes_data.NumElements();
if ((input_dims != 4 && input_dims != 5) ||
(reduction_axes + 2) != input_dims) {
return false;
}
if (input_dims == 4) {
return ((reduction_axes_data.flat<T>()(0) == static_cast<T>(1) &&
reduction_axes_data.flat<T>()(1) == static_cast<T>(2)) ||
(reduction_axes_data.flat<T>()(0) == static_cast<T>(2) &&
reduction_axes_data.flat<T>()(1) == static_cast<T>(3)));
} else {
return ((reduction_axes_data.flat<T>()(0) == static_cast<T>(1) &&
reduction_axes_data.flat<T>()(1) == static_cast<T>(2) &&
reduction_axes_data.flat<T>()(2) == static_cast<T>(3)) ||
(reduction_axes_data.flat<T>()(0) == static_cast<T>(2) &&
reduction_axes_data.flat<T>()(1) == static_cast<T>(3) &&
reduction_axes_data.flat<T>()(2) == static_cast<T>(4)));
}
}
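// Refines IsCommonNormPattern into instance normalization: keep_dims mean,
// float/half input of known rank, constant gamma/beta of equal shape, and
// reduction over the spatial axes only.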
bool FindInstanceNorm(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsCommonNormPattern(ctx, node_index, matched_nodes_map,
remove_node_indices)) {
return false;
}
if (!ctx->inferred_graph_properties) {
Status s = ctx->graph_properties.InferStatically(
true,
false,
false,
true);
if (!s.ok()) return false;
ctx->inferred_graph_properties = true;
}
NodeDef* mean1_node =
ctx->graph_view.GetNode(matched_nodes_map->at("mean1"))->node();
bool keep_dims = false;
if (!mean1_node || !TryGetNodeAttr(*mean1_node, "keep_dims", &keep_dims) ||
!keep_dims) {
return false;
}
const auto& input_props =
ctx->graph_properties.GetInputProperties(mean1_node->name());
const TensorShapeProto& input_shape = input_props[0].shape();
if (input_shape.unknown_rank()) return false;
DataType dtype = GetDataTypeFromAttr(*mean1_node, "T");
if (dtype != DT_FLOAT && dtype != DT_HALF) return false;
NodeDef* gamma_node =
ctx->graph_view.GetNode(matched_nodes_map->at("gamma"))->node();
NodeDef* beta_node =
ctx->graph_view.GetNode(matched_nodes_map->at("beta"))->node();
if (!gamma_node || !beta_node) {
VLOG(2) << "Unexpected error to retrieve gamma or beta node";
return false;
}
Tensor gamma_tensor, beta_tensor;
if (gamma_node->op() != "Const" ||
!gamma_tensor.FromProto(gamma_node->attr().at("value").tensor()) ||
beta_node->op() != "Const" ||
!beta_tensor.FromProto(beta_node->attr().at("value").tensor())) {
return false;
}
if (!gamma_tensor.IsSameSize(beta_tensor)) return false;
NodeDef* mean_axes_node =
ctx->graph_view.GetNode(matched_nodes_map->at("r_indices1"))->node();
if (!mean_axes_node) {
VLOG(2) << "Unexpected error to retrieve reduction axes node";
return false;
}
Tensor mean_axes_tensor;
if (!mean_axes_tensor.FromProto(
mean_axes_node->attr().at("value").tensor())) {
return false;
}
dtype = mean_axes_tensor.dtype();
if (dtype != DT_INT32 && dtype != DT_INT64) return false;
return (dtype == DT_INT32)
? IsInstanceNormReduction<int32>(input_shape, mean_axes_tensor)
: IsInstanceNormReduction<int64>(input_shape, mean_axes_tensor);
}
bool FindInstanceNormWithActivation(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
const auto* node_view = ctx->graph_view.GetNode(node_index);
if (HasControlFaninOrFanout(*node_view)) return false;
const auto* node_def = node_view->node();
if (!IsLeakyRelu(*node_def) && !IsRelu(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& regular_fanin_0 = node_view->GetRegularFanin(0);
const auto* base_node_view = regular_fanin_0.node_view();
int base_node_idx = base_node_view->node_index();
if (!FindInstanceNorm(ctx, base_node_idx, matched_nodes_map,
remove_node_indices))
return false;
remove_node_indices->insert(matched_nodes_map->at("output"));
matched_nodes_map->insert(std::pair<string, int>("activation", node_index));
return true;
}
void CopyConv2DAttributes(const NodeDef& conv2d, NodeDef* fused_conv2d,
const NodeDef* activation = nullptr) {
DCHECK(IsConv2D(conv2d)) << "Input node must be a Conv2D";
auto* attr = fused_conv2d->mutable_attr();
auto& src_attr = conv2d.attr();
(*attr)["T"] = src_attr.at("T");
int num_args = fused_conv2d->input_size() - 2;
for (int i = 0; i < num_args; ++i) {
(*attr)["TArgs"].mutable_list()->add_type(src_attr.at("T").type());
}
(*attr)["num_args"].set_i(num_args);
(*attr)["num_host_args"].set_i(0);
(*attr)["strides"] = src_attr.at("strides");
(*attr)["padding"] = src_attr.at("padding");
(*attr)["explicit_paddings"] = src_attr.at("explicit_paddings");
(*attr)["dilations"] = src_attr.at("dilations");
(*attr)["data_format"] = src_attr.at("data_format");
(*attr)["use_cudnn_on_gpu"] = src_attr.at("use_cudnn_on_gpu");
if (IsMKLEnabled() && src_attr.find("_input_shapes") != src_attr.end()) {
(*attr)["_input_shapes"] = src_attr.at("_input_shapes");
}
if (activation != nullptr && IsLeakyRelu(*activation)) {
auto& activation_attr = activation->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
}
void CopyConv3DAttributes(const NodeDef& conv3d, NodeDef* fused_conv3d,
const NodeDef* activation = nullptr) {
DCHECK(IsConv3D(conv3d)) << "Input node must be a Conv3D";
auto* attr = fused_conv3d->mutable_attr();
auto& src_attr = conv3d.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["strides"] = src_attr.at("strides");
(*attr)["padding"] = src_attr.at("padding");
(*attr)["dilations"] = src_attr.at("dilations");
(*attr)["data_format"] = src_attr.at("data_format");
if (activation != nullptr && IsLeakyRelu(*activation)) {
auto& activation_attr = activation->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
}
void CopyDepthwiseConv2dNativeAttributes(const NodeDef& dw_conv2d,
NodeDef* fused_dw_conv2d,
const NodeDef* activation = nullptr) {
DCHECK(IsDepthwiseConv2dNative(dw_conv2d))
<< "Input node must be a DepthwiseConv2dNative";
auto* attr = fused_dw_conv2d->mutable_attr();
auto& src_attr = dw_conv2d.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["strides"] = src_attr.at("strides");
(*attr)["padding"] = src_attr.at("padding");
(*attr)["dilations"] = src_attr.at("dilations");
(*attr)["data_format"] = src_attr.at("data_format");
if (activation != nullptr && IsLeakyRelu(*activation)) {
auto& activation_attr = activation->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
}
void CopyFusedBatchNormAttributes(const NodeDef& fused_batch_norm,
NodeDef* fused_batch_norm_ex) {
DCHECK(IsFusedBatchNorm(fused_batch_norm))
<< "Input node must be a FusedBatchNorm";
auto* attr = fused_batch_norm_ex->mutable_attr();
  auto& src_attr = fused_batch_norm.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["is_training"] = src_attr.at("is_training");
(*attr)["data_format"] = src_attr.at("data_format");
(*attr)["epsilon"] = src_attr.at("epsilon");
(*attr)["exponential_avg_factor"] = src_attr.at("exponential_avg_factor");
if (fused_batch_norm.op() != "FusedBatchNorm") {
SetAttrValue(src_attr.at("U"), &(*attr)["U"]);
} else {
if (!IsMKLEnabled())
SetAttrValue(src_attr.at("T"), &(*attr)["U"]);
else
SetAttrValue(DT_FLOAT, &(*attr)["U"]);
}
}
void CopyFusedBatchNormGradAttributes(const NodeDef& fused_batch_norm_grad,
NodeDef* fused_batch_norm_grad_ex) {
DCHECK(IsFusedBatchNormGrad(fused_batch_norm_grad))
<< "Input node must be a FusedBatchNormGrad";
auto* attr = fused_batch_norm_grad_ex->mutable_attr();
  auto& src_attr = fused_batch_norm_grad.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["is_training"] = src_attr.at("is_training");
(*attr)["data_format"] = src_attr.at("data_format");
(*attr)["epsilon"] = src_attr.at("epsilon");
if (fused_batch_norm_grad.op() != "FusedBatchNormGrad") {
SetAttrValue(src_attr.at("U"), &(*attr)["U"]);
} else {
SetAttrValue(DT_FLOAT, &(*attr)["U"]);
}
}
void CopyMatMulAttributes(const NodeDef& matmul, NodeDef* fused_matmul,
const NodeDef* activation = nullptr) {
DCHECK(IsMatMul(matmul)) << "Input node must be a MatMul";
auto* attr = fused_matmul->mutable_attr();
auto& src_attr = matmul.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["transpose_a"] = src_attr.at("transpose_a");
(*attr)["transpose_b"] = src_attr.at("transpose_b");
if (activation != nullptr && IsLeakyRelu(*activation)) {
auto& activation_attr = activation->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
if (IsMKLEnabled()) {
auto input_shapes = src_attr.find("_input_shapes");
if (input_shapes != src_attr.end()) {
(*attr)["_input_shapes"] = input_shapes->second;
}
}
}
void CopyBatchMatMulAttributes(const NodeDef& batchmatmul,
NodeDef* fused_batch_matmul) {
DCHECK(IsAnyBatchMatMul(batchmatmul)) << "Input node must be a BatchMatMul";
auto* attr = fused_batch_matmul->mutable_attr();
auto& src_attr = batchmatmul.attr();
(*attr)["T"] = src_attr.at("T");
(*attr)["adj_x"] = src_attr.at("adj_x");
(*attr)["adj_y"] = src_attr.at("adj_y");
if (IsMKLEnabled()) {
auto input_shapes = src_attr.find("_input_shapes");
if (input_shapes != src_attr.end()) {
(*attr)["_input_shapes"] = input_shapes->second;
}
}
}
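// Sets the fused_ops, num_args, and epsilon attributes common to all fused
// contraction ops.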
void SetFusedOpAttributes(NodeDef* fused,
const absl::Span<const absl::string_view> fused_ops,
int num_args = 1, float epsilon = 0.0) {
auto* attr = fused->mutable_attr();
SetAttrValue(fused_ops, &(*attr)["fused_ops"]);
SetAttrValue(num_args, &(*attr)["num_args"]);
SetAttrValue(epsilon, &(*attr)["epsilon"]);
}
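// Rewrites the matched contraction + BiasAdd pair into a single fused op that
// takes the contraction inputs plus the bias and is named after the BiasAdd
// node.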
Status AddFusedContractionNode(RemapperContext* ctx,
const ContractionWithBiasAdd& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
DCHECK(IsDeviceCompatible(*ctx, matched)) << "Unsupported fusion pattern";
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& bias_add = graph->node(matched.bias_add);
VLOG(2) << "Fuse " << contraction.op()
<< " with BiasAdd: " << " bias_add=" << bias_add.name()
<< " contraction=" << contraction.name();
NodeDef fused_op;
fused_op.set_name(bias_add.name());
fused_op.set_device(contraction.device());
fused_op.add_input(contraction.input(0));
fused_op.add_input(contraction.input(1));
fused_op.add_input(bias_add.input(matched.bias_port));
if (IsConv2D(contraction)) {
fused_op.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_op);
} else if (IsDepthwiseConv2dNative(contraction)) {
fused_op.set_op(kFusedDepthwiseConv2dNative);
CopyDepthwiseConv2dNativeAttributes(contraction, &fused_op);
} else if (IsMatMul(contraction)) {
fused_op.set_op(kFusedMatMul);
AddInputShapesAttr(*ctx, matched.contraction);
CopyMatMulAttributes(contraction, &fused_op);
} else if (IsConv3D(contraction)) {
fused_op.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &fused_op);
}
SetFusedOpAttributes(&fused_op, {"BiasAdd"});
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.bias_add] = true;
(*nodes_to_delete)[matched.contraction] = true;
return absl::OkStatus();
}
Status AddFusedContractionNode(RemapperContext* ctx,
const ContractionWithActivation& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& activation = graph->node(matched.activation);
VLOG(2) << "Fuse " << contraction.op() << " and " << activation.op() << ":"
<< " activation=" << activation.name()
<< " contraction=" << contraction.name();
NodeDef fused_op;
fused_op = contraction;
auto* attr = fused_op.mutable_attr();
auto contraction_fused_ops_list =
contraction.attr().at("fused_ops").list().s();
std::vector<std::string> fused_items;
for (auto it = contraction_fused_ops_list.begin();
it != contraction_fused_ops_list.end(); it++) {
fused_items.push_back(*it);
}
fused_items.push_back(GetActivationName(activation.op()));
SetAttrValue(fused_items, &(*attr)["fused_ops"]);
if (IsLeakyRelu(activation)) {
auto& activation_attr = activation.attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
fused_op.set_name(activation.name());
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*nodes_to_delete)[matched.contraction] = true;
(*invalidated_nodes)[matched.activation] = true;
return absl::OkStatus();
}
Status AddFusedContractionNode(
RemapperContext* ctx, const ContractionWithBiasAddAndActivation& matched,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete) {
DCHECK(IsDeviceCompatible(*ctx, matched)) << "Unsupported fusion pattern";
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& bias_add = graph->node(matched.bias_add);
const NodeDef& activation = graph->node(matched.activation);
VLOG(2) << "Fuse " << contraction.op() << " with BiasAdd and "
<< activation.op() << ":" << " activation=" << activation.name()
<< " bias_add=" << bias_add.name()
<< " contraction=" << contraction.name();
NodeDef fused_op;
fused_op.set_name(activation.name());
fused_op.set_device(contraction.device());
fused_op.add_input(contraction.input(0));
fused_op.add_input(contraction.input(1));
fused_op.add_input(bias_add.input(matched.bias_port));
if (IsConv2D(contraction)) {
fused_op.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_op, &activation);
} else if (IsDepthwiseConv2dNative(contraction)) {
fused_op.set_op(kFusedDepthwiseConv2dNative);
CopyDepthwiseConv2dNativeAttributes(contraction, &fused_op);
} else if (IsMatMul(contraction)) {
fused_op.set_op(kFusedMatMul);
AddInputShapesAttr(*ctx, matched.contraction);
CopyMatMulAttributes(contraction, &fused_op, &activation);
} else if (IsConv3D(contraction)) {
fused_op.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &fused_op, &activation);
}
SetFusedOpAttributes(&fused_op, {"BiasAdd", activation.op()});
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*nodes_to_delete)[matched.contraction] = true;
(*nodes_to_delete)[matched.bias_add] = true;
(*invalidated_nodes)[matched.activation] = true;
return absl::OkStatus();
}
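// Handles the Conv{2D,3D} -> Squeeze -> BiasAdd pattern: the bias is folded
// into the convolution, and the Squeeze is renamed to the BiasAdd's name and
// re-pointed at the fused convolution.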
Status AddFusedConvNode(RemapperContext* ctx,
const ContractionWithSqueezeAndBiasAdd& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
DCHECK(IsDeviceCompatible(*ctx, matched)) << "Unsupported fusion pattern";
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& bias_add = graph->node(matched.bias_add);
const NodeDef& squeeze = graph->node(matched.squeeze);
VLOG(2) << "Fuse Conv2D/3D with Squeeze and BiasAdd: " << " bias_add="
<< bias_add.name() << " squeeze=" << squeeze.name()
<< " conv=" << contraction.name();
NodeDef fused_conv;
fused_conv.set_name(contraction.name());
fused_conv.set_device(contraction.device());
fused_conv.add_input(contraction.input(0));
fused_conv.add_input(contraction.input(1));
fused_conv.add_input(bias_add.input(1));
if (IsConv2D(contraction)) {
fused_conv.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_conv);
} else if (IsConv3D(contraction)) {
fused_conv.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &fused_conv);
}
SetFusedOpAttributes(&fused_conv, {"BiasAdd"});
NodeDef remapped_squeeze = squeeze;
remapped_squeeze.set_name(bias_add.name());
remapped_squeeze.set_input(0, contraction.name());
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_conv), &status);
TF_RETURN_IF_ERROR(status);
mutation->AddNode(std::move(remapped_squeeze), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.contraction] = true;
(*invalidated_nodes)[matched.bias_add] = true;
(*nodes_to_delete)[matched.squeeze] = true;
return absl::OkStatus();
}
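// Folds an inference-mode FusedBatchNorm into _FusedConv2D, passing
// scale/offset/mean/variance as four extra inputs and recording epsilon.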
Status AddFusedConv2DNode(RemapperContext* ctx,
const ContractionWithBatchNorm& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
DCHECK(IsConv2D(contraction)) << "Only Conv2D supported for now";
const NodeDef& fused_batch_norm = graph->node(matched.fused_batch_norm);
VLOG(2) << "Fuse Conv2D with BatchNorm: batch_norm="
<< fused_batch_norm.name() << " conv2d=" << contraction.name();
NodeDef fused_conv2d;
fused_conv2d.set_name(fused_batch_norm.name());
fused_conv2d.set_op(kFusedConv2D);
fused_conv2d.set_device(contraction.device());
fused_conv2d.add_input(contraction.input(0));
fused_conv2d.add_input(contraction.input(1));
fused_conv2d.add_input(fused_batch_norm.input(1));
fused_conv2d.add_input(fused_batch_norm.input(2));
fused_conv2d.add_input(fused_batch_norm.input(3));
fused_conv2d.add_input(fused_batch_norm.input(4));
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_conv2d);
  SetFusedOpAttributes(&fused_conv2d, {"FusedBatchNorm"},
                       /*num_args=*/4, /*epsilon=*/matched.epsilon);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_conv2d), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.fused_batch_norm] = true;
(*nodes_to_delete)[matched.contraction] = true;
return absl::OkStatus();
}
Status AddFusedConv2DNode(RemapperContext* ctx,
const ContractionWithBatchNormAndActivation& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
DCHECK(IsConv2D(contraction)) << "Only Conv2D supported for now";
const NodeDef& activation = graph->node(matched.activation);
const NodeDef& fused_batch_norm = graph->node(matched.fused_batch_norm);
VLOG(2) << "Fuse Conv2D with BatchNorm and " << activation.op()
<< ": activation=" << activation.name()
<< " batch_norm=" << fused_batch_norm.name()
<< " conv2d=" << contraction.name();
NodeDef fused_conv2d;
fused_conv2d.set_name(activation.name());
fused_conv2d.set_op(kFusedConv2D);
fused_conv2d.set_device(contraction.device());
fused_conv2d.add_input(contraction.input(0));
fused_conv2d.add_input(contraction.input(1));
fused_conv2d.add_input(fused_batch_norm.input(1));
fused_conv2d.add_input(fused_batch_norm.input(2));
fused_conv2d.add_input(fused_batch_norm.input(3));
fused_conv2d.add_input(fused_batch_norm.input(4));
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_conv2d, &activation);
  SetFusedOpAttributes(&fused_conv2d, {"FusedBatchNorm", activation.op()},
                       /*num_args=*/4, /*epsilon=*/matched.epsilon);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_conv2d), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.activation] = true;
(*nodes_to_delete)[matched.contraction] = true;
(*nodes_to_delete)[matched.fused_batch_norm] = true;
return absl::OkStatus();
}
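// Fuses contraction + BiasAdd + Add (residual-style side input) into one
// node; the other operand of the Add becomes an extra input and num_args is 2.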
Status AddFusedContractionNode(RemapperContext* ctx,
const ContractionWithBiasAddAndAdd& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
const NodeDef& bias_add = graph->node(matched.bias_add);
DCHECK(IsConv2D(contraction) || IsMatMul(contraction) ||
IsConv3D(contraction));
NodeDef contraction_node;
const NodeDef& add = graph->node(matched.add);
contraction_node.set_name(add.name());
contraction_node.set_device(contraction.device());
  contraction_node.add_input(contraction.input(0));
  contraction_node.add_input(contraction.input(1));
contraction_node.add_input(bias_add.input(matched.bias_port));
contraction_node.add_input(add.input(1 - matched.port_id));
if (IsConv2D(contraction)) {
contraction_node.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &contraction_node);
} else if (IsMatMul(contraction)) {
AddInputShapesAttr(*ctx, matched.contraction);
contraction_node.set_op(kFusedMatMul);
CopyMatMulAttributes(contraction, &contraction_node);
} else if (IsConv3D(contraction)) {
contraction_node.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &contraction_node);
}
SetFusedOpAttributes(&contraction_node, {"BiasAdd", "Add"}, 2);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(contraction_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.add] = true;
(*nodes_to_delete)[matched.contraction] = true;
(*nodes_to_delete)[matched.bias_add] = true;
return absl::OkStatus();
}
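// Folds a preceding Pad into _FusedConv3D by storing the constant padding
// sizes in the padding_list attribute; bails out if the paddings are not
// constant.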
Status AddFusedConv3DNode(RemapperContext* ctx, const PadWithConv3D& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction_idx);
const NodeDef& pad_node_def = graph->node(matched.pad_idx);
const NodeDef& padding_const_node_def =
graph->node(matched.padding_const_idx);
VLOG(2) << "Fuse " << pad_node_def.op()
<< " with contraction: " << " contraction=" << contraction.name();
NodeDef fused_node;
fused_node = contraction;
fused_node.set_input(0, pad_node_def.input(0));
fused_node.set_op(kFusedConv3D);
auto* attr = fused_node.mutable_attr();
if (!attr->contains("num_args")) {
SetAttrValue(0, &(*attr)["num_args"]);
}
Tensor const_tensor;
if (padding_const_node_def.op() == "Const" &&
const_tensor.FromProto(
padding_const_node_def.attr().at("value").tensor())) {
    auto const_value = const_tensor.flat<int32>();
    std::vector<int32> paddings;
    paddings.reserve(const_value.size());
    for (int i = 0; i < const_value.size(); ++i) {
      paddings.push_back(const_value(i));
    }
    SetAttrValue(paddings, &(*attr)["padding_list"]);
} else {
VLOG(2) << "Pad fusion with " << contraction.op() << " is invalidated, "
<< "it requires padding dim sizes to be constant.";
return absl::OkStatus();
}
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.contraction_idx] = true;
(*nodes_to_delete)[matched.pad_idx] = true;
return absl::OkStatus();
}
Status AddFusedContractionNode(
RemapperContext* ctx, const ContractionWithBiasAndAddActivation& matched,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& contraction = graph->node(matched.contraction);
DCHECK(IsConv2D(contraction) || IsConv3D(contraction));
const NodeDef& activation = graph->node(matched.activation);
NodeDef fused_conv;
fused_conv.set_name(activation.name());
fused_conv.set_device(contraction.device());
fused_conv.add_input(contraction.input(0));
fused_conv.add_input(contraction.input(1));
const NodeDef& bias_add = graph->node(matched.bias_add);
fused_conv.add_input(bias_add.input(matched.bias_port));
const NodeDef& add = graph->node(matched.add);
fused_conv.add_input(add.input(1 - matched.port_id));
if (IsConv2D(contraction)) {
fused_conv.set_op(kFusedConv2D);
AddInputShapesAttr(*ctx, matched.contraction);
CopyConv2DAttributes(contraction, &fused_conv);
} else if (IsConv3D(contraction)) {
fused_conv.set_op(kFusedConv3D);
CopyConv3DAttributes(contraction, &fused_conv);
}
SetFusedOpAttributes(&fused_conv, {"BiasAdd", "Add", activation.op()}, 2);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_conv), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.activation] = true;
(*nodes_to_delete)[matched.add] = true;
(*nodes_to_delete)[matched.bias_add] = true;
(*nodes_to_delete)[matched.contraction] = true;
return absl::OkStatus();
}
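// Rewrites a contraction + BiasAdd + hard-swish subgraph into a single fused
// Conv2D/DepthwiseConv2dNative with fused_ops = {BiasAdd, _FusedHardSwish}.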
Status FuseContractionWithBiasAddAndHardSwish(
RemapperContext* ctx, std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices, std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map->at("output"))->node();
auto* contraction_node =
ctx->graph_view.GetNode(matched_nodes_map->at("contraction"))->node();
auto* bias_add_node =
ctx->graph_view.GetNode(matched_nodes_map->at("bias_add"))->node();
bool is_conv2d = IsConv2D(*contraction_node);
NodeDef fused_node;
fused_node.set_name(output_node->name());
fused_node.set_op(is_conv2d ? kFusedConv2D : kFusedDepthwiseConv2dNative);
fused_node.set_device(contraction_node->device());
fused_node.add_input(contraction_node->input(0));
fused_node.add_input(contraction_node->input(1));
fused_node.add_input(bias_add_node->input(1));
if (is_conv2d) {
CopyConv2DAttributes(*contraction_node, &fused_node);
} else {
CopyDepthwiseConv2dNativeAttributes(*contraction_node, &fused_node);
}
SetFusedOpAttributes(&fused_node, {"BiasAdd", "_FusedHardSwish"});
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map->at("output")] = true;
for (const auto& node_idx : *remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
Status FuseConv2DSwish(RemapperContext* ctx,
const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const NodeDef* mul =
ctx->graph_view.GetNode(matched_nodes_map.at("mulToswish"))->node();
const NodeDef* conv2d =
ctx->graph_view.GetNode(matched_nodes_map.at("conv"))->node();
NodeDef fused_op;
fused_op.set_name(mul->name());
fused_op.set_op(kFusedConv2D);
fused_op.set_device(mul->device());
fused_op.add_input(conv2d->input(0));
fused_op.add_input(conv2d->input(1));
if (matched_nodes_map.find("biasadd") != matched_nodes_map.end()) {
auto* bias_add_node =
ctx->graph_view.GetNode(matched_nodes_map.at("biasadd"))->node();
fused_op.add_input(bias_add_node->input(1));
SetFusedOpAttributes(&fused_op, {"BiasAdd", "_MklSwish"});
} else {
auto* fusebatchnorm_node =
ctx->graph_view.GetNode(matched_nodes_map.at("fusebatchnorm"))->node();
fused_op.add_input(fusebatchnorm_node->input(1));
fused_op.add_input(fusebatchnorm_node->input(2));
fused_op.add_input(fusebatchnorm_node->input(3));
fused_op.add_input(fusebatchnorm_node->input(4));
float epsilon;
TF_CHECK_OK(GetNodeAttr(*fusebatchnorm_node, "epsilon", &epsilon));
    SetFusedOpAttributes(&fused_op, {"FusedBatchNorm", "_MklSwish"},
                         /*num_args=*/4, /*epsilon=*/epsilon);
}
AddInputShapesAttr(*ctx, matched_nodes_map.at("conv"));
CopyConv2DAttributes(*conv2d, &fused_op);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("mulToswish")] = true;
for (const auto& node_index : remove_node_indices) {
(*nodes_to_delete)[node_index] = true;
}
return absl::OkStatus();
}
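// Collapses MatMul + BiasAdd + Gelu (exact or approximate) into a single
// _FusedMatMul node named after the pattern's output.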
Status AddFusedMatMulBiasAddAndGelu(
RemapperContext* ctx, const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete,
bool is_gelu_approximate) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map.at("output"))->node();
auto* matmul_node =
ctx->graph_view.GetNode(matched_nodes_map.at("matmul"))->node();
NodeDef fused_node;
fused_node.set_name(output_node->name());
fused_node.set_op("_FusedMatMul");
fused_node.set_device(matmul_node->device());
fused_node.add_input(matmul_node->input(0));
fused_node.add_input(matmul_node->input(1));
if (is_gelu_approximate) {
fused_node.add_input(matmul_node->input(2));
} else {
auto* bias_add_node =
ctx->graph_view.GetNode(matched_nodes_map.at("bias_add"))->node();
fused_node.add_input(bias_add_node->input(1));
}
CopyMatMulAttributes(*matmul_node, &fused_node);
if (is_gelu_approximate)
SetFusedOpAttributes(&fused_node, {"BiasAdd", "GeluApproximate"});
else
SetFusedOpAttributes(&fused_node, {"BiasAdd", "GeluExact"});
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("output")] = true;
for (const auto& node_idx : remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
Status AddMklLayerNorm(RemapperContext* ctx,
const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
const std::vector<string>& input_node_names,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete,
const float epsilon) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map.at("output"))->node();
NodeDef fused_node;
fused_node.set_name(output_node->name());
fused_node.set_op("_MklLayerNorm");
fused_node.set_device(output_node->device());
for (const auto& name : input_node_names) fused_node.add_input(name);
auto* attr = fused_node.mutable_attr();
auto& src_attr = output_node->attr();
(*attr)["T"] = src_attr.at("T");
SetAttrValue(epsilon, &(*attr)["epsilon"]);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("output")] = true;
for (const auto& node_idx : remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
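// Rewrites the Maximum(x, alpha * x) idiom as a single LeakyRelu node,
// carrying over any control dependencies attached to the alpha constant.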
Status ReplaceMulMaximumWithLeakyRelu(
RemapperContext* ctx, const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete,
float alpha) {
const NodeDef* maximum =
ctx->graph_view.GetNode(matched_nodes_map.at("max_to_leakyrelu"))->node();
const NodeDef* input =
ctx->graph_view.GetNode(matched_nodes_map.at("input"))->node();
const auto* alpha_node_view =
ctx->graph_view.GetNode(matched_nodes_map.at("alpha"));
NodeDef fused_op;
fused_op.set_name(maximum->name());
fused_op.set_op("LeakyRelu");
fused_op.set_device(maximum->device());
fused_op.add_input(input->name());
if (alpha_node_view->NumControllingFanins() > 0) {
const auto& control_fanins = alpha_node_view->GetControllingFanins();
for (int i = 0; i < alpha_node_view->NumControllingFanins(); i++) {
const auto* control_node_view = control_fanins[i].node_view();
*fused_op.add_input() =
AsControlDependency(control_node_view->node()->name());
}
}
auto* attr = fused_op.mutable_attr();
(*attr)["T"] = maximum->attr().at("T");
SetAttrValue(alpha, &(*attr)["alpha"]);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("max_to_leakyrelu")] = true;
for (const auto& node_index : remove_node_indices) {
(*nodes_to_delete)[node_index] = true;
}
return absl::OkStatus();
}
Status ReplaceSigmoidMulWithSwish(
RemapperContext* ctx, const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete) {
const NodeDef* mul =
ctx->graph_view.GetNode(matched_nodes_map.at("mul_to_swish"))->node();
const NodeDef* sigmoid =
ctx->graph_view.GetNode(matched_nodes_map.at("sigmoid"))->node();
NodeDef fused_op;
fused_op.set_name(mul->name());
fused_op.set_op("_MklSwish");
fused_op.set_device(mul->device());
fused_op.add_input(sigmoid->input(0));
auto* attr = fused_op.mutable_attr();
(*attr)["T"] = mul->attr().at("T");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("mul_to_swish")] = true;
for (const auto& node_index : remove_node_indices) {
(*nodes_to_delete)[node_index] = true;
}
return absl::OkStatus();
}
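// Replaces FusedBatchNorm + activation (and an optional side input) with
// _FusedBatchNormEx, leaving an Identity node under the activation's name so
// existing consumers keep a valid input.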
Status AddFusedBatchNormExNode(RemapperContext* ctx,
const FusedBatchNormEx& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& fused_batch_norm = graph->node(matched.fused_batch_norm);
const NodeDef& activation = graph->node(matched.activation);
VLOG(2) << "Fuse " << activation.op()
<< " with FusedBatchNorm:" << " activation=" << activation.name()
<< " side_input="
<< (matched.side_input != kMissingIndex
? graph->node(matched.side_input).name()
: "<none>")
<< " invalidated="
<< (matched.invalidated != kMissingIndex
? graph->node(matched.invalidated).name()
: "<none>")
<< " fused_batch_norm=" << fused_batch_norm.name();
NodeDef fused_op;
fused_op.set_op(kFusedBatchNormEx);
fused_op.set_name(fused_batch_norm.name());
fused_op.set_device(fused_batch_norm.device());
fused_op.add_input(fused_batch_norm.input(0));
fused_op.add_input(fused_batch_norm.input(1));
fused_op.add_input(fused_batch_norm.input(2));
fused_op.add_input(fused_batch_norm.input(3));
fused_op.add_input(fused_batch_norm.input(4));
CopyFusedBatchNormAttributes(fused_batch_norm, &fused_op);
auto* attrs = fused_op.mutable_attr();
SetAttrValue(activation.op(), &(*attrs)["activation_mode"]);
if (matched.side_input != kMissingIndex) {
SetAttrValue(1, &(*attrs)["num_side_inputs"]);
const NodeDef& side_input = graph->node(matched.side_input);
fused_op.add_input(side_input.name());
} else {
SetAttrValue(0, &(*attrs)["num_side_inputs"]);
}
NodeDef identity_op;
identity_op.set_op("Identity");
identity_op.set_name(activation.name());
identity_op.set_device(fused_batch_norm.device());
identity_op.add_input(fused_batch_norm.name());
(*identity_op.mutable_attr())["T"] = attrs->at("T");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
mutation->AddNode(std::move(identity_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.fused_batch_norm] = true;
(*invalidated_nodes)[matched.activation] = true;
if (matched.side_input != kMissingIndex) {
(*nodes_to_delete)[matched.invalidated] = true;
}
return absl::OkStatus();
}
Status AddFusedBatchNormGradExNode(RemapperContext* ctx,
const FusedBatchNormGradEx& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& fused_batch_norm_grad =
graph->node(matched.fused_batch_norm_grad);
const NodeDef& activation_grad = graph->node(matched.activation_grad);
const NodeDef& fwd_fused_batch_norm =
graph->node(matched.fwd_fused_batch_norm);
VLOG(2) << "Fuse FusedBatchNormGrad with " << activation_grad.op() << ": "
<< " fused_batch_norm_grad=" << fused_batch_norm_grad.name()
<< " side_input="
<< (matched.side_input_grad != kMissingIndex
? graph->node(matched.side_input_grad).name()
: "<none>")
<< " activation=" << activation_grad.name()
<< " corresponding FusedBatchNorm=" << fwd_fused_batch_norm.name();
NodeDef fused_op;
fused_op.set_op(kFusedBatchNormGradEx);
fused_op.set_name(fused_batch_norm_grad.name());
fused_op.set_device(fused_batch_norm_grad.device());
fused_op.add_input(activation_grad.input(0));
fused_op.add_input(fused_batch_norm_grad.input(1));
fused_op.add_input(fused_batch_norm_grad.input(2));
fused_op.add_input(fused_batch_norm_grad.input(3));
fused_op.add_input(fused_batch_norm_grad.input(4));
fused_op.add_input(fused_batch_norm_grad.input(5));
fused_op.add_input(fwd_fused_batch_norm.input(2));
fused_op.add_input(activation_grad.input(1));
CopyFusedBatchNormGradAttributes(fused_batch_norm_grad, &fused_op);
auto* attrs = fused_op.mutable_attr();
SetAttrValue("Relu", &(*attrs)["activation_mode"]);
if (matched.side_input_grad != kMissingIndex) {
SetAttrValue(1, &(*attrs)["num_side_inputs"]);
} else {
SetAttrValue(0, &(*attrs)["num_side_inputs"]);
}
NodeDef identity_op;
identity_op.set_op("Identity");
identity_op.set_name(activation_grad.name());
identity_op.set_device(fused_batch_norm_grad.device());
identity_op.add_input(absl::StrCat(fused_batch_norm_grad.name(), ":5"));
(*identity_op.mutable_attr())["T"] = attrs->at("T");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
if (matched.side_input_grad != kMissingIndex) {
mutation->AddNode(std::move(identity_op), &status);
TF_RETURN_IF_ERROR(status);
}
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.fused_batch_norm_grad] = true;
if (matched.side_input_grad != kMissingIndex) {
(*invalidated_nodes)[matched.activation_grad] = true;
} else {
(*nodes_to_delete)[matched.activation_grad] = true;
}
return absl::OkStatus();
}
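// Decomposes an inference-mode FusedBatchNorm into primitive ops:
//   r = x * (scale * rsqrt(variance + epsilon))
//       + (offset - mean * scale * rsqrt(variance + epsilon)).
// For NCHW/NCDHW inputs the per-channel parameters are first reshaped so they
// broadcast correctly against the data layout.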
Status AddBatchNormNodes(RemapperContext* ctx, const FusedBatchNorm& matched) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& fused_node = graph->node(matched.fused_batch_norm);
VLOG(2) << "Optimizing fused batch norm node "
<< SummarizeNodeDef(fused_node);
const string& x = fused_node.input(0);
string scale = fused_node.input(1);
string offset = fused_node.input(2);
string mean = fused_node.input(3);
string variance = fused_node.input(4);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
string x_format = fused_node.attr().at(kDataFormat).s();
if (x_format == "NCHW" || x_format == "NCDHW") {
NodeDef new_shape;
const string new_shape_name =
AddPrefixToNodeName(x_format + "Shape", fused_node.name());
new_shape.set_name(new_shape_name);
new_shape.set_op("Const");
new_shape.set_device(fused_node.device());
*new_shape.add_input() = AsControlDependency(scale);
(*new_shape.mutable_attr())["dtype"].set_type(DT_INT32);
if (x_format == "NCHW") {
Tensor t(DT_INT32, {4});
t.flat<int32>()(0) = 1;
t.flat<int32>()(1) = -1;
t.flat<int32>()(2) = 1;
t.flat<int32>()(3) = 1;
t.AsProtoTensorContent(
(*new_shape.mutable_attr())["value"].mutable_tensor());
} else {
Tensor t(DT_INT32, {5});
t.flat<int32>()(0) = 1;
t.flat<int32>()(1) = -1;
t.flat<int32>()(2) = 1;
t.flat<int32>()(3) = 1;
t.flat<int32>()(4) = 1;
t.AsProtoTensorContent(
(*new_shape.mutable_attr())["value"].mutable_tensor());
}
mutation->AddNode(std::move(new_shape), &status);
TF_RETURN_IF_ERROR(status);
NodeDef reshaped_scale;
reshaped_scale.set_name(
AddPrefixToNodeName(x_format + "ShapedScale", fused_node.name()));
reshaped_scale.set_op("Reshape");
reshaped_scale.set_device(fused_node.device());
*reshaped_scale.add_input() = scale;
*reshaped_scale.add_input() = new_shape_name;
(*reshaped_scale.mutable_attr())["T"] = fused_node.attr().at("T");
(*reshaped_scale.mutable_attr())["Tshape"].set_type(DT_INT32);
scale = reshaped_scale.name();
mutation->AddNode(std::move(reshaped_scale), &status);
TF_RETURN_IF_ERROR(status);
NodeDef reshaped_offset;
reshaped_offset.set_name(
AddPrefixToNodeName(x_format + "ShapedOffset", fused_node.name()));
reshaped_offset.set_op("Reshape");
reshaped_offset.set_device(fused_node.device());
*reshaped_offset.add_input() = offset;
*reshaped_offset.add_input() = new_shape_name;
(*reshaped_offset.mutable_attr())["T"] = fused_node.attr().at("T");
(*reshaped_offset.mutable_attr())["Tshape"].set_type(DT_INT32);
offset = reshaped_offset.name();
mutation->AddNode(std::move(reshaped_offset), &status);
TF_RETURN_IF_ERROR(status);
NodeDef reshaped_mean;
reshaped_mean.set_name(
AddPrefixToNodeName(x_format + "ShapedMean", fused_node.name()));
reshaped_mean.set_op("Reshape");
reshaped_mean.set_device(fused_node.device());
*reshaped_mean.add_input() = mean;
*reshaped_mean.add_input() = new_shape_name;
(*reshaped_mean.mutable_attr())["T"] = fused_node.attr().at("T");
(*reshaped_mean.mutable_attr())["Tshape"].set_type(DT_INT32);
mean = reshaped_mean.name();
mutation->AddNode(std::move(reshaped_mean), &status);
TF_RETURN_IF_ERROR(status);
NodeDef reshaped_variance;
reshaped_variance.set_name(
AddPrefixToNodeName(x_format + "ShapedVariance", fused_node.name()));
reshaped_variance.set_op("Reshape");
reshaped_variance.set_device(fused_node.device());
*reshaped_variance.add_input() = variance;
*reshaped_variance.add_input() = new_shape_name;
(*reshaped_variance.mutable_attr())["T"] = fused_node.attr().at("T");
(*reshaped_variance.mutable_attr())["Tshape"].set_type(DT_INT32);
variance = reshaped_variance.name();
mutation->AddNode(std::move(reshaped_variance), &status);
TF_RETURN_IF_ERROR(status);
}
float epsilon = 0.0f;
if (fused_node.attr().count("epsilon")) {
epsilon = fused_node.attr().at("epsilon").f();
}
DataType dtype = fused_node.attr().at("T").type();
Tensor value(dtype, TensorShape());
value.scalar<float>()() = epsilon;
NodeDef variance_epsilon;
const string variance_epsilon_name =
AddPrefixToNodeName("Const", fused_node.name());
TF_RETURN_IF_ERROR(ConstantFolding::CreateNodeDef(
variance_epsilon_name, TensorValue(&value), &variance_epsilon));
variance_epsilon.set_device(fused_node.device());
mutation->AddNode(std::move(variance_epsilon), &status);
TF_RETURN_IF_ERROR(status);
NodeDef variance_plus_epsilon;
const string variance_plus_epsilon_name =
AddPrefixToNodeName("VarPlusEpsilon", fused_node.name());
variance_plus_epsilon.set_name(variance_plus_epsilon_name);
variance_plus_epsilon.set_op("Add");
(*variance_plus_epsilon.mutable_attr())["T"].set_type(dtype);
variance_plus_epsilon.set_device(fused_node.device());
*variance_plus_epsilon.add_input() = variance;
*variance_plus_epsilon.add_input() = variance_epsilon_name;
mutation->AddNode(std::move(variance_plus_epsilon), &status);
TF_RETURN_IF_ERROR(status);
NodeDef inv;
const string inv_name = AddPrefixToNodeName("Inv", fused_node.name());
inv.set_name(inv_name);
inv.set_op("Rsqrt");
inv.set_device(fused_node.device());
(*inv.mutable_attr())["T"].set_type(dtype);
*inv.add_input() = variance_plus_epsilon_name;
mutation->AddNode(std::move(inv), &status);
TF_RETURN_IF_ERROR(status);
NodeDef scaled;
const string scaled_name = AddPrefixToNodeName("Scaled", fused_node.name());
scaled.set_name(scaled_name);
scaled.set_op("Mul");
scaled.set_device(fused_node.device());
(*scaled.mutable_attr())["T"].set_type(dtype);
*scaled.add_input() = inv_name;
*scaled.add_input() = scale;
mutation->AddNode(std::move(scaled), &status);
TF_RETURN_IF_ERROR(status);
NodeDef a;
const string a_name = AddPrefixToNodeName("Mul", fused_node.name());
a.set_name(a_name);
a.set_op("Mul");
a.set_device(fused_node.device());
(*a.mutable_attr())["T"].set_type(dtype);
*a.add_input() = x;
*a.add_input() = scaled_name;
mutation->AddNode(std::move(a), &status);
TF_RETURN_IF_ERROR(status);
NodeDef b;
const string b_name = AddPrefixToNodeName("Mul2", fused_node.name());
b.set_name(b_name);
b.set_op("Mul");
b.set_device(fused_node.device());
(*b.mutable_attr())["T"].set_type(dtype);
*b.add_input() = mean;
*b.add_input() = scaled_name;
mutation->AddNode(std::move(b), &status);
TF_RETURN_IF_ERROR(status);
NodeDef c;
const string c_name = AddPrefixToNodeName("Offset", fused_node.name());
c.set_name(c_name);
c.set_op("Sub");
c.set_device(fused_node.device());
(*c.mutable_attr())["T"].set_type(dtype);
*c.add_input() = offset;
*c.add_input() = b_name;
mutation->AddNode(std::move(c), &status);
TF_RETURN_IF_ERROR(status);
NodeDef r;
r.set_name(fused_node.name());
r.set_op("Add");
r.set_device(fused_node.device());
(*r.mutable_attr())["T"].set_type(dtype);
*r.add_input() = a_name;
*r.add_input() = c_name;
mutation->AddNode(std::move(r), &status);
TF_RETURN_IF_ERROR(status);
return mutation->Apply();
}
Status AddTensorToHashBucketNode(RemapperContext* ctx,
const TensorToHashBucket& matched,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
const GraphDef* graph = ctx->graph_view.graph();
const NodeDef& pre_as_string = graph->node(matched.pre_as_string);
const NodeDef& as_string = graph->node(matched.as_string);
const NodeDef& string_to_hash_bucket =
graph->node(matched.string_to_hash_bucket);
VLOG(2) << "Fuse AsString with StringToHashBucketFast:" << " as_string="
<< as_string.name()
<< " string_to_hash_bucket=" << string_to_hash_bucket.name()
<< " on device=" << pre_as_string.device();
NodeDef fused_op;
fused_op.set_name(string_to_hash_bucket.name());
fused_op.set_device(pre_as_string.device());
fused_op.add_input(as_string.input(0));
fused_op.set_op(kTensorToHashBucket);
auto* attr = fused_op.mutable_attr();
auto& src_attr0 = as_string.attr();
auto& src_attr1 = string_to_hash_bucket.attr();
(*attr)["T"] = src_attr0.at("T");
(*attr)["num_buckets"] = src_attr1.at("num_buckets");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_op), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched.string_to_hash_bucket] = true;
(*nodes_to_delete)[matched.as_string] = true;
return absl::OkStatus();
}
Status AddFusedBatchMatMul(RemapperContext* ctx,
const std::map<string, int>& matched_nodes_map,
const std::set<int>& remove_node_indices,
const std::vector<string>& input_node_names,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map.at("output"))->node();
auto* batch_matmul_node =
ctx->graph_view.GetNode(matched_nodes_map.at("batch_matmul"))->node();
NodeDef fused_node;
fused_node.set_name(output_node->name());
fused_node.set_op("_MklFusedBatchMatMulV2");
fused_node.set_device(batch_matmul_node->device());
for (const auto& name : input_node_names) fused_node.add_input(name);
CopyBatchMatMulAttributes(*batch_matmul_node, &fused_node);
SetFusedOpAttributes(&fused_node, {"Mul", "Add"}, 2);
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map.at("output")] = true;
for (const auto& node_idx : remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
template <typename T, typename U>
std::vector<U> GetTensorValues(const Tensor& tensor) {
std::vector<U> result_vector;
int item_count = tensor.flat<T>().size();
result_vector.reserve(item_count);
for (int i = 0; i < item_count; i++) {
    result_vector.push_back(static_cast<U>(tensor.flat<T>()(i)));
}
return result_vector;
}
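// Builds a _MklFusedInstanceNorm node from the matched subgraph, reading the
// reduction axes and epsilon from their Const inputs and optionally fusing a
// trailing Relu/LeakyRelu activation.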
Status AddMklFusedInstanceNorm(RemapperContext* ctx,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices,
std::vector<bool>* invalidated_nodes,
std::vector<bool>* nodes_to_delete,
bool fuse_activation) {
auto* output_node =
ctx->graph_view.GetNode(matched_nodes_map->at("output"))->node();
auto* input_node =
ctx->graph_view.GetNode(matched_nodes_map->at("input"))->node();
auto* gamma_node =
ctx->graph_view.GetNode(matched_nodes_map->at("gamma"))->node();
auto* beta_node =
ctx->graph_view.GetNode(matched_nodes_map->at("beta"))->node();
auto* epsilon_node =
ctx->graph_view.GetNode(matched_nodes_map->at("epsilon"))->node();
auto* mean_axes_node =
ctx->graph_view.GetNode(matched_nodes_map->at("r_indices1"))->node();
if (!mean_axes_node || mean_axes_node->op() != "Const") {
VLOG(2) << "Mean reduction axes node is not valid, abort fusion";
return absl::OkStatus();
}
DataType dtype;
Tensor mean_axes_tensor;
if (!mean_axes_tensor.FromProto(
mean_axes_node->attr().at("value").tensor())) {
VLOG(2) << "Unable to get mean reduction axes, abort fusion";
return absl::OkStatus();
}
dtype = mean_axes_tensor.dtype();
if (dtype != DT_INT32 && dtype != DT_INT64) {
VLOG(2) << "Unexpected mean reduction axes data type, abort fusion";
return absl::OkStatus();
}
std::vector<int> reduction_axes =
(dtype == DT_INT32) ? GetTensorValues<int32, int>(mean_axes_tensor)
: GetTensorValues<int64, int>(mean_axes_tensor);
NodeDef* activation_node = nullptr;
if (fuse_activation) {
activation_node =
ctx->graph_view.GetNode(matched_nodes_map->at("activation"))->node();
if (!activation_node) {
VLOG(2) << "Error to retrieve activation node, abort fusion";
return absl::OkStatus();
}
if (!IsLeakyRelu(*activation_node) && !IsRelu(*activation_node)) {
VLOG(2) << "Unsupported activation node, abort fusion";
return absl::OkStatus();
}
}
NodeDef fused_node;
fused_node.set_op("_MklFusedInstanceNorm");
fused_node.set_device(output_node->device());
fused_node.add_input(input_node->name());
fused_node.add_input(gamma_node->name());
fused_node.add_input(beta_node->name());
auto* attr = fused_node.mutable_attr();
auto& src_attr = output_node->attr();
(*attr)["T"] = src_attr.at("T");
Tensor epsilon_tensor;
float epsilon_value = 0.0001;
if (epsilon_node != nullptr && epsilon_node->op() == "Const" &&
epsilon_tensor.FromProto(epsilon_node->attr().at("value").tensor())) {
dtype = epsilon_tensor.dtype();
if (dtype == DT_BFLOAT16) {
epsilon_value = static_cast<float>(epsilon_tensor.flat<bfloat16>()(0));
} else if (dtype == DT_HALF) {
epsilon_value = static_cast<float>(epsilon_tensor.flat<Eigen::half>()(0));
} else if (dtype == DT_FLOAT) {
epsilon_value = epsilon_tensor.flat<float>()(0);
}
SetAttrValue(epsilon_value, &(*attr)["epsilon"]);
}
SetAttrValue(reduction_axes, &(*attr)["reduction_axes"]);
if (fuse_activation) {
fused_node.set_name(activation_node->name());
string activation_op = activation_node->op();
absl::string_view fused_items[] = {activation_op};
SetAttrValue(absl::Span<absl::string_view>(fused_items),
&(*attr)["fused_ops"]);
if (activation_op == "LeakyRelu") {
auto& activation_attr = activation_node->attr();
(*attr)["leakyrelu_alpha"] = activation_attr.at("alpha");
}
} else {
fused_node.set_name(output_node->name());
}
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
if (fuse_activation) {
(*invalidated_nodes)[matched_nodes_map->at("activation")] = true;
} else {
(*invalidated_nodes)[matched_nodes_map->at("output")] = true;
}
for (const auto& node_idx : *remove_node_indices) {
(*nodes_to_delete)[node_idx] = true;
}
return absl::OkStatus();
}
bool IsContractionWithAdd(const RemapperContext& ctx, int node_index) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
auto is_supported_add_input = [](const auto* node_view) -> bool {
if (IsConvOrMatMul(*node_view->node())) return true;
if (IsBiasAdd(*node_view->node()) || IsAdd(*node_view->node())) {
if (node_view->NumRegularFanins() < 2) return false;
const auto& bias_add_fanin_0 = node_view->GetRegularFanin(0);
const auto& bias_add_fanin_1 = node_view->GetRegularFanin(1);
return IsConvOrMatMul(*bias_add_fanin_0.node_view()->node()) ||
IsConvOrMatMul(*bias_add_fanin_1.node_view()->node());
}
return false;
};
auto is_supported_add = [&](const auto* node_view) -> bool {
const auto* node_def = node_view->node();
if (IsAdd(*node_def)) {
if (node_view->NumRegularFanins() < 2) return false;
const auto& add_fanin_0 = node_view->GetRegularFanin(0);
const auto& add_fanin_1 = node_view->GetRegularFanin(1);
return is_supported_add_input(add_fanin_0.node_view()) ||
is_supported_add_input(add_fanin_1.node_view());
}
return false;
};
if (is_supported_add(node_view)) {
return true;
}
if (IsSupportedActivation(*node_view->node(), nullptr)) {
for (int i = 0; i < node_view->NumRegularFanins(); i++) {
const auto& fanin_i = node_view->GetRegularFanin(i);
if (is_supported_add(fanin_i.node_view())) return true;
}
}
return false;
}
bool FindSoftplusAndTanhAndMul(RemapperContext* ctx, int node_index,
std::map<string, int>* matched_nodes_map,
std::set<int>* remove_node_indices) {
if (!IsMKLEnabled()) return false;
using utils::MatchingDirection;
using utils::NodeStatus;
utils::OpTypePattern softplustanhmul_pattern {
"Mul", "mul_to_mish", NodeStatus::kReplace,
{
{
"Tanh", "tanh", NodeStatus::kRemove,
{
{
"Softplus", "softplus", NodeStatus::kRemove,
{
{"*", "input", NodeStatus::kRemain}
}
}
}
},
{"*", "input", NodeStatus::kRemain}
}
};
auto* mul_node_def = ctx->graph_view.GetNode(node_index)->node();
if (!HasDataType(mul_node_def, DT_FLOAT) &&
!HasDataType(mul_node_def, DT_HALF) &&
!HasDataType(mul_node_def, DT_BFLOAT16))
return false;
if (!NodeIsOnCpu(mul_node_def)) return false;
bool found_op_type_match = false;
utils::SubGraphMatcher<MatchingDirection::kFollowInputs> graph_matcher(
&(ctx->graph_view));
matched_nodes_map->clear();
remove_node_indices->clear();
found_op_type_match = graph_matcher.GetMatchedNodes(
softplustanhmul_pattern, {}, ctx->graph_view.GetNode(node_index),
matched_nodes_map, remove_node_indices);
if (found_op_type_match) {
NodeDef* matched_softplus_node =
ctx->graph_view.GetNode(matched_nodes_map->at("softplus"))->node();
auto in_tensor_softplus = matched_softplus_node->input(0);
if ((mul_node_def->input(0) != in_tensor_softplus) &&
(mul_node_def->input(1) != in_tensor_softplus)) {
found_op_type_match = false;
}
}
return found_op_type_match;
}
Status ReplaceSoftplusTanhAndMulWithMish(
RemapperContext* ctx, const std::map<string, int>* matched_nodes_map,
const std::set<int>* remove_node_indices,
std::vector<bool>* invalidated_nodes, std::vector<bool>* nodes_to_delete) {
auto* old_mul_node =
ctx->graph_view.GetNode(matched_nodes_map->at("mul_to_mish"))->node();
auto* softplus_node =
ctx->graph_view.GetNode(matched_nodes_map->at("softplus"))->node();
NodeDef fused_node;
fused_node.set_name(old_mul_node->name());
fused_node.set_op("_MklFusedMish");
fused_node.set_device(old_mul_node->device());
fused_node.add_input(softplus_node->input(0));
auto* fused_node_attr = fused_node.mutable_attr();
(*fused_node_attr)["T"] = old_mul_node->attr().at("T");
utils::Mutation* mutation = ctx->graph_view.GetMutationBuilder();
Status status;
mutation->AddNode(std::move(fused_node), &status);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(mutation->Apply());
(*invalidated_nodes)[matched_nodes_map->at("mul_to_mish")] = true;
for (const auto& node_index : *remove_node_indices) {
(*nodes_to_delete)[node_index] = true;
}
return absl::OkStatus();
}
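// Determines whether this node participates in a fusion pattern that needs
// statically inferred shapes, so Optimize() only runs shape inference when a
// candidate is actually present.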
bool RequiresInferredShapes(const RemapperContext& ctx, int node_index,
const Cluster* cluster) {
const auto* node_view = ctx.graph_view.GetNode(node_index);
const auto* node_def = node_view->node();
const auto is_batch_norm_candidate = [&]() -> bool {
if (!IsFusedBatchNorm(*node_def)) return false;
if (GetDataTypeFromAttr(*node_def, "T") != DT_FLOAT) return false;
bool is_training = true;
if (!TryGetNodeAttr(*node_def, kIsTraining, &is_training)) return false;
if (is_training) return false;
return true;
};
const auto is_act_biasadd_conv_candidate = [&]() -> bool {
if (!IsSupportedActivation(*node_def, cluster)) return false;
if (!RuntimeFusionEnabled(cluster) && !IsRelu(*node_def)) return false;
const auto is_compatible_dtype = [&](const NodeDef& node) -> bool {
bool fp16_only =
IsRelu6(*node_def) || IsElu(*node_def) || IsLeakyRelu(*node_def);
DataType dtype = GetDataTypeFromAttr(node, "T");
return dtype == DT_HALF || (!fp16_only && dtype == DT_FLOAT);
};
if (!is_compatible_dtype(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& relu_fanin_0 = node_view->GetRegularFanin(0);
const auto* relu_fanin_0_node_view = relu_fanin_0.node_view();
const auto* relu_fanin_0_node_def = relu_fanin_0_node_view->node();
if (!IsBiasAdd(*relu_fanin_0_node_def) && !IsAdd(*relu_fanin_0_node_def))
return false;
if (!is_compatible_dtype(*relu_fanin_0_node_def)) return false;
if (relu_fanin_0_node_view->NumRegularFanins() < 1) return false;
const auto& biasadd_fanin_0 = relu_fanin_0_node_view->GetRegularFanin(0);
const auto* biasadd_fanin_0_node_def = biasadd_fanin_0.node_view()->node();
if (!IsConv2D(*biasadd_fanin_0_node_def) &&
!IsConv3D(*biasadd_fanin_0_node_def))
return false;
if (!is_compatible_dtype(*biasadd_fanin_0_node_def)) return false;
return true;
};
const auto is_batch_norm_fusion_candidate = [&]() -> bool {
if (!IsRelu(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& relu_fanin_0 = node_view->GetRegularFanin(0);
const auto* relu_fanin_0_node_view = relu_fanin_0.node_view();
const auto* relu_fanin_0_node_def = relu_fanin_0_node_view->node();
if (IsFusedBatchNorm(*relu_fanin_0_node_def)) {
return true;
} else if (IsAdd(*relu_fanin_0_node_def)) {
if (relu_fanin_0_node_view->NumRegularFanins() < 2) return false;
const auto& add_regular_fanin_0 =
relu_fanin_0_node_view->GetRegularFanin(0);
if (IsFusedBatchNorm(*add_regular_fanin_0.node_view()->node()))
return true;
const auto& add_regular_fanin_1 =
relu_fanin_0_node_view->GetRegularFanin(1);
if (IsFusedBatchNorm(*add_regular_fanin_1.node_view()->node()))
return true;
}
return false;
};
const auto is_batch_norm_grad_fusion_candidate = [&]() -> bool {
if (!IsFusedBatchNormGrad(*node_def)) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& bn_fanin_0 = node_view->GetRegularFanin(0);
const auto* bn_fanin_0_node_view = bn_fanin_0.node_view();
const auto* bn_fanin_0_node_def = bn_fanin_0_node_view->node();
if (IsReluGrad(*bn_fanin_0_node_def)) {
return true;
}
return false;
};
const auto is_matmul_gelu_exact_fusion_candidate = [&]() -> bool {
if (!RuntimeFusionEnabled(cluster)) return false;
DataType node_dtype = GetDataTypeFromAttr(*node_def, "T");
if (node_dtype != DT_HALF) return false;
return IsMatchedMatMulBiasAddAndGeluExact(const_cast<RemapperContext&>(ctx),
node_index);
};
const auto is_act_biasadd_matmul_candidate = [&]() -> bool {
if (!IsTanh(*node_def) && !IsSigmoid(*node_def)) return false;
if (!RuntimeFusionEnabled(cluster)) return false;
DataType act_dtype = GetDataTypeFromAttr(*node_def, "T");
if (act_dtype != DT_HALF) return false;
if (node_view->NumRegularFanins() < 1) return false;
const auto& relu_fanin_0 = node_view->GetRegularFanin(0);
const auto* relu_fanin_0_node_view = relu_fanin_0.node_view();
const auto* relu_fanin_0_node_def = relu_fanin_0_node_view->node();
if (!IsBiasAdd(*relu_fanin_0_node_def) && !IsAdd(*relu_fanin_0_node_def)) {
return false;
}
DataType biasadd_dtype = GetDataTypeFromAttr(*relu_fanin_0_node_def, "T");
if (biasadd_dtype != DT_HALF) return false;
if (relu_fanin_0_node_view->NumRegularFanins() < 1) return false;
const auto& biasadd_fanin_0 = relu_fanin_0_node_view->GetRegularFanin(0);
const auto* biasadd_fanin_0_node_def = biasadd_fanin_0.node_view()->node();
if (!IsMatMul(*biasadd_fanin_0_node_def)) return false;
DataType matmul_dtype = GetDataTypeFromAttr(*biasadd_fanin_0_node_def, "T");
if (matmul_dtype != DT_HALF) return false;
return true;
};
if (IsMKLEnabled())
return is_batch_norm_candidate() || is_batch_norm_fusion_candidate() ||
IsContractionWithAdd(ctx, node_index) ||
is_act_biasadd_conv_candidate() || IsBiasAdd(*node_def) ||
IsTranspose(*node_def);
return is_act_biasadd_conv_candidate() || is_batch_norm_candidate() ||
is_batch_norm_fusion_candidate() ||
is_batch_norm_grad_fusion_candidate() ||
is_matmul_gelu_exact_fusion_candidate() ||
is_act_biasadd_matmul_candidate();
}
inline bool IsXlaCpuGlobalJitOn() {
std::vector<string> tf_xla_flags;
const std::string tf_xla_cpu_global_jit = "--tf_xla_cpu_global_jit";
TF_CHECK_OK(ReadStringsFromEnvVar("TF_XLA_FLAGS", "", &tf_xla_flags));
return std::find(tf_xla_flags.begin(), tf_xla_flags.end(),
tf_xla_cpu_global_jit) != tf_xla_flags.end();
}
}
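// Main driver: walks the graph in reverse topological order, attempts each
// fusion pattern in turn, then removes the nodes that were folded away.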
Status Remapper::Optimize(Cluster* cluster, const GrapplerItem& item,
GraphDef* optimized_graph) {
GrapplerItem mutable_item = item;
Status status;
bool xla_cpu_jit_disable_fusion =
xla_auto_clustering_on_ && IsXlaCpuGlobalJitOn();
#ifdef DNNL_AARCH64_USE_ACL
xla_cpu_jit_disable_fusion = false;
#endif
RemapperContext ctx(&mutable_item, &status, cpu_layout_conversion_,
xla_auto_clustering_on_, xla_cpu_jit_disable_fusion);
TF_RETURN_IF_ERROR(status);
TF_RETURN_IF_ERROR(
ctx.graph_view.SortTopologically(false, {}));
const int num_nodes = item.graph.node_size();
std::vector<bool> invalidated_nodes(num_nodes);
std::vector<bool> nodes_to_delete(num_nodes);
bool allow_non_differentiable_rewrites =
item.optimization_options().allow_non_differentiable_rewrites;
for (int i = num_nodes - 1; i >= 0; --i) {
if (invalidated_nodes[i] || nodes_to_delete[i]) {
continue;
}
if (!ctx.inferred_graph_properties &&
RequiresInferredShapes(ctx, i, cluster)) {
const bool assume_valid_feeds = opt_level_ == RewriterConfig::AGGRESSIVE;
TF_RETURN_IF_ERROR(ctx.graph_properties.InferStatically(
assume_valid_feeds,
false,
true,
false));
ctx.inferred_graph_properties = true;
}
ContractionWithBiasAddAndAdd contract_with_bias_and_add;
ContractionWithActivation contract_with_activation;
ContractionWithBiasAndAddActivation contract_with_bias_and_add_activation;
if (IsConv2D(ctx.graph_view.graph()->node(i)) ||
IsFusedBatchNorm(ctx.graph_view.graph()->node(i)) ||
IsDepthwiseConv2dNative(ctx.graph_view.graph()->node(i)) ||
IsBiasAdd(ctx.graph_view.graph()->node(i)) ||
IsTranspose(ctx.graph_view.graph()->node(i)) ||
IsSigmoid(ctx.graph_view.graph()->node(i)) ||
IsMatMul(ctx.graph_view.graph()->node(i))) {
AddInputShapesAttr(ctx, i);
}
if (IsMKLEnabled() && !ctx.xla_cpu_jit_disable_fusion) {
const auto* node_view = ctx.graph_view.GetNode(i);
const auto* node_def = node_view->node();
const string& type_attr = "T";
DataType dtype = GetDataTypeFromAttr(*node_def, type_attr);
if ((dtype == DT_BFLOAT16 || dtype == DT_HALF) &&
!IsDataTypeSupportedByOneDNNOnThisCPU(dtype))
continue;
if (FindContractionWithBiasAndAddActivation(
ctx, i, &contract_with_bias_and_add_activation)) {
TF_RETURN_IF_ERROR(
AddFusedContractionNode(&ctx, contract_with_bias_and_add_activation,
&invalidated_nodes, &nodes_to_delete));
continue;
}
if (FindFusedConvWithFusedActivation(ctx, i, &contract_with_activation)) {
TF_RETURN_IF_ERROR(
AddFusedContractionNode(&ctx, contract_with_activation,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#ifndef DNNL_AARCH64_USE_ACL
if (FindContractionWithBiasAddAndAdd(ctx, i,
&contract_with_bias_and_add)) {
TF_RETURN_IF_ERROR(
AddFusedContractionNode(&ctx, contract_with_bias_and_add,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#endif
PadWithConv3D pad_with_conv3d;
if (FindPadWithConv3D(ctx, i, &pad_with_conv3d)) {
TF_RETURN_IF_ERROR(AddFusedConv3DNode(
&ctx, pad_with_conv3d, &invalidated_nodes, &nodes_to_delete));
continue;
}
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
std::vector<string> input_node_names;
if (FindContractionWithBiasAddAndHardSwish(ctx, i, &matched_nodes_map,
&remove_node_indices)) {
TF_RETURN_IF_ERROR(FuseContractionWithBiasAddAndHardSwish(
&ctx, &matched_nodes_map, &remove_node_indices, &invalidated_nodes,
&nodes_to_delete));
continue;
}
matched_nodes_map.clear();
remove_node_indices.clear();
if (FindSoftplusAndTanhAndMul(&ctx, i, &matched_nodes_map,
&remove_node_indices)) {
TF_RETURN_IF_ERROR(ReplaceSoftplusTanhAndMulWithMish(
&ctx, &matched_nodes_map, &remove_node_indices, &invalidated_nodes,
&nodes_to_delete));
continue;
}
matched_nodes_map.clear();
remove_node_indices.clear();
input_node_names.clear();
if (FindFusedBatchMatMul(&ctx, i, &matched_nodes_map,
&remove_node_indices, &input_node_names)) {
TF_RETURN_IF_ERROR(AddFusedBatchMatMul(
&ctx, matched_nodes_map, remove_node_indices, input_node_names,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#ifndef DNNL_AARCH64_USE_ACL
std::map<string, int> fusedconv2dSwish_matched_nodes_map;
std::set<int> fusedconv2dSwish_remove_node_indices;
if (FindConv2DSwish(&ctx, i, &fusedconv2dSwish_matched_nodes_map,
&fusedconv2dSwish_remove_node_indices)) {
TF_RETURN_IF_ERROR(
FuseConv2DSwish(&ctx, fusedconv2dSwish_matched_nodes_map,
fusedconv2dSwish_remove_node_indices,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#endif
std::map<string, int> mulmax_matched_nodes_map;
std::set<int> mulmax_remove_node_indices;
float alpha;
if (FindMulAndMaximum(&ctx, i, &mulmax_matched_nodes_map,
&mulmax_remove_node_indices, &alpha)) {
TF_RETURN_IF_ERROR(ReplaceMulMaximumWithLeakyRelu(
&ctx, mulmax_matched_nodes_map, mulmax_remove_node_indices,
&invalidated_nodes, &nodes_to_delete, alpha));
continue;
}
std::map<string, int> sigmoidmul_matched_nodes_map;
std::set<int> sigmoidmul_remove_node_indices;
if (FindSigmoidAndMul(&ctx, i, &sigmoidmul_matched_nodes_map,
&sigmoidmul_remove_node_indices)) {
bool replace = true;
#ifdef DNNL_AARCH64_USE_ACL
const int sigmoid_idx = sigmoidmul_matched_nodes_map.at("sigmoid");
AddInputShapesAttr(ctx, sigmoid_idx);
const NodeDef* sigmoid = ctx.graph_view.GetNode(sigmoid_idx)->node();
const int intra_op_parallelism_threads =
item.optimization_options().intra_op_parallelism_threads;
double total_mflops =
CalculateNodeMFlops(AttrSlice(*sigmoid), "Sigmoid");
double thr =
FindRewriteThreshold("Sigmoid", intra_op_parallelism_threads);
if (total_mflops != -1 && total_mflops < thr) {
replace = false;
}
#endif
if (replace) {
TF_RETURN_IF_ERROR(
ReplaceSigmoidMulWithSwish(&ctx, sigmoidmul_matched_nodes_map,
sigmoidmul_remove_node_indices,
&invalidated_nodes, &nodes_to_delete));
continue;
}
}
matched_nodes_map.clear();
remove_node_indices.clear();
input_node_names.clear();
float epsilon = 0.001;
if (FindMklLayerNorm(&ctx, i, &matched_nodes_map, &remove_node_indices,
&input_node_names, &epsilon)) {
TF_RETURN_IF_ERROR(AddMklLayerNorm(
&ctx, matched_nodes_map, remove_node_indices, input_node_names,
&invalidated_nodes, &nodes_to_delete, epsilon));
continue;
}
matched_nodes_map.clear();
remove_node_indices.clear();
if (FindInstanceNormWithActivation(&ctx, i, &matched_nodes_map,
&remove_node_indices)) {
TF_RETURN_IF_ERROR(AddMklFusedInstanceNorm(
&ctx, &matched_nodes_map, &remove_node_indices, &invalidated_nodes,
&nodes_to_delete, true));
continue;
}
matched_nodes_map.clear();
remove_node_indices.clear();
if (FindInstanceNorm(&ctx, i, &matched_nodes_map, &remove_node_indices)) {
TF_RETURN_IF_ERROR(AddMklFusedInstanceNorm(
&ctx, &matched_nodes_map, &remove_node_indices, &invalidated_nodes,
&nodes_to_delete, false));
continue;
}
}
std::map<string, int> matched_nodes_map;
std::set<int> remove_node_indices;
bool is_gelu_approximate = false;
if (FindMatMulBiasAddAndGelu(&ctx, i, cluster, &matched_nodes_map,
&remove_node_indices, &is_gelu_approximate)) {
TF_RETURN_IF_ERROR(AddFusedMatMulBiasAddAndGelu(
&ctx, matched_nodes_map, remove_node_indices, &invalidated_nodes,
&nodes_to_delete, is_gelu_approximate));
continue;
}
ContractionWithBiasAdd contract_with_bias;
if (allow_non_differentiable_rewrites &&
FindContractionWithBias(ctx, i, &contract_with_bias)) {
TF_RETURN_IF_ERROR(AddFusedContractionNode(
&ctx, contract_with_bias, &invalidated_nodes, &nodes_to_delete));
continue;
}
ContractionWithBiasAddAndActivation contract_with_bias_and_activation;
if (allow_non_differentiable_rewrites &&
FindContractionWithBiasAndActivation(
ctx, cluster, i, &contract_with_bias_and_activation)) {
TF_RETURN_IF_ERROR(
AddFusedContractionNode(&ctx, contract_with_bias_and_activation,
&invalidated_nodes, &nodes_to_delete));
continue;
}
ContractionWithSqueezeAndBiasAdd contract_with_squeeze_and_bias;
if (allow_non_differentiable_rewrites &&
FindConvWithSqueezeAndBias(ctx, i, &contract_with_squeeze_and_bias)) {
TF_RETURN_IF_ERROR(AddFusedConvNode(&ctx, contract_with_squeeze_and_bias,
&invalidated_nodes,
&nodes_to_delete));
continue;
}
#ifndef DNNL_AARCH64_USE_ACL
ContractionWithBatchNorm contract_with_batch_norm;
if (allow_non_differentiable_rewrites &&
FindConv2DWithBatchNorm(ctx, i, &contract_with_batch_norm)) {
TF_RETURN_IF_ERROR(AddFusedConv2DNode(&ctx, contract_with_batch_norm,
&invalidated_nodes,
&nodes_to_delete));
continue;
}
ContractionWithBatchNormAndActivation
contract_with_batch_norm_and_activation;
if (allow_non_differentiable_rewrites &&
FindConv2DWithBatchNormAndActivation(
ctx, i, &contract_with_batch_norm_and_activation)) {
TF_RETURN_IF_ERROR(
AddFusedConv2DNode(&ctx, contract_with_batch_norm_and_activation,
&invalidated_nodes, &nodes_to_delete));
continue;
}
#endif
FusedBatchNormEx fused_batch_norm_ex;
if (allow_non_differentiable_rewrites &&
FindFusedBatchNormEx(ctx, i, &fused_batch_norm_ex)) {
TF_RETURN_IF_ERROR(AddFusedBatchNormExNode(
&ctx, fused_batch_norm_ex, &invalidated_nodes, &nodes_to_delete));
continue;
}
FusedBatchNormGradEx fused_batch_norm_grad_ex;
if (allow_non_differentiable_rewrites &&
FindFusedBatchNormGradEx(ctx, i, &fused_batch_norm_grad_ex)) {
TF_RETURN_IF_ERROR(
AddFusedBatchNormGradExNode(&ctx, fused_batch_norm_grad_ex,
&invalidated_nodes, &nodes_to_delete));
continue;
}
TensorToHashBucket tensor_to_hash_bucket;
if (allow_non_differentiable_rewrites &&
FindTensorToHashBucket(ctx, i, &tensor_to_hash_bucket)) {
TF_RETURN_IF_ERROR(AddTensorToHashBucketNode(
&ctx, tensor_to_hash_bucket, &invalidated_nodes, &nodes_to_delete));
continue;
}
FusedBatchNorm fused_batch_norm;
if (FindFusedBatchNorm(ctx, i, &fused_batch_norm)) {
TF_RETURN_IF_ERROR(AddBatchNormNodes(&ctx, fused_batch_norm));
continue;
}
}
utils::Mutation* mutation = ctx.graph_view.GetMutationBuilder();
for (int i = 0; i < num_nodes; ++i) {
if (nodes_to_delete[i]) {
mutation->RemoveNode(ctx.graph_view.GetNode(i));
}
}
TF_RETURN_IF_ERROR(mutation->Apply());
*optimized_graph = std::move(mutable_item.graph);
return absl::OkStatus();
}
}
}
#include "tensorflow/core/grappler/optimizers/remapper.h"
#include "tensorflow/cc/ops/nn_ops_internal.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/grappler/devices.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/utils/graph_view.h"
#include "tensorflow/core/grappler/utils/grappler_test.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/util/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cudnn/cudnn.h"
#endif
namespace tensorflow {
namespace grappler {
class RemapperTest : public GrapplerTest {
protected:
void SetUp() override {
    setenv("TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT", "1", 1);
    setenv("TF_USE_CUBLASLT", "1", 1);
}
};
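// Inference-mode FusedBatchNorm: the rewrite must preserve numerical results.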
TEST_F(RemapperTest, FusedBatchNorm) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output dflt = ops::Const(s.WithOpName("dflt"), {3.14f, 2.7f}, {2, 1, 1, 1});
Output x = ops::PlaceholderWithDefault(s.WithOpName("x"), dflt, {2, 1, 1, 1});
Output scale = ops::Const(s.WithOpName("scale"), {0.3f}, {1});
Output offset = ops::Const(s.WithOpName("offset"), {0.123f}, {1});
Output mean = ops::Const(s.WithOpName("mean"), {7.3f}, {1});
Output variance = ops::Const(s.WithOpName("variance"), {0.57f}, {1});
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
ops::FusedBatchNorm bn(s.WithOpName("batch_norm"), x, scale, offset, mean,
variance, attr);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
item.fetch = {"batch_norm"};
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(RemapperTest, FusedBatchNormNCHW) {
#if !(GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
GTEST_SKIP() << "Neither CUDA nor ROCm is enabled";
#endif
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output dflt =
ops::Const(s.WithOpName("dflt"), {3.14f, 2.7f, 1.0f, 2.0f, 3.0f, 100.0f},
{1, 3, 1, 2});
Output x = ops::PlaceholderWithDefault(s.WithOpName("x"), dflt, {1, 3, 1, 2});
Output scale = ops::Const(s.WithOpName("scale"), {0.3f, 7.0f, 123.0f}, {3});
Output offset =
ops::Const(s.WithOpName("offset"), {0.123f, 2.1f, 0.55f}, {3});
Output mean = ops::Const(s.WithOpName("mean"), {7.3f, 8.3f, 3.1f}, {3});
Output variance =
ops::Const(s.WithOpName("variance"), {0.57f, 1.0f, 2.0f}, {3});
ops::FusedBatchNorm::Attrs attr;
attr = attr.IsTraining(false);
attr = attr.DataFormat("NCHW");
ops::FusedBatchNorm bn(s.WithOpName("batch_norm").WithDevice("/device:GPU:0"),
x, scale, offset, mean, variance, attr);
GrapplerItem item;
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
item.fetch = {"batch_norm"};
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-3);
}
}
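// FusedBatchNormV3 + Relu should fuse into _FusedBatchNormEx with
// activation_mode Relu when placed on GPU.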
TEST_F(RemapperTest, FuseBatchNormWithRelu) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
for (bool is_training : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
#if !defined(GOOGLE_CUDA) || !(CUDNN_VERSION >= 7402)
if (is_training) {
LOG(INFO) << "Skip FuseBatchNormWithRelu"
<< "[is_training=" << is_training << "] "
<< "test. It requires CUDNN_VERSION >= 7402.";
continue;
}
#endif
#if !defined(GOOGLE_CUDA)
if (!is_training) {
LOG(INFO) << "Skip FuseBatchNormWithRelu"
<< "[is_training=" << is_training << "]";
continue;
}
#endif
const int num_channels = 24;
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto relu = ops::Relu(s.WithOpName("relu"), fbn.y);
auto fetch = ops::Identity(s.WithOpName("fetch"), relu);
auto input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"scale", scale_t},
{"offset", offset_t},
{"mean", mean_t},
{"var", var_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 5);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 2);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
}
}
}
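// Backward pass: FusedBatchNormGradV3 + ReluGrad fuses into
// _FusedBatchNormGradEx (requires CUDA with cuDNN >= 7402).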
#if defined(GOOGLE_CUDA) && CUDNN_VERSION >= 7402
TEST_F(RemapperTest, FuseBatchNormGradWithReluGrad) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
bool is_training = true;
const int num_channels = 24;
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto relu = ops::Relu(s.WithOpName("relu"), fbn.y);
auto output_grad =
Placeholder(s.WithOpName("output_grad"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto output_grad_cast =
ops::Cast(s.WithOpName("output_grad_cast"), output_grad, DT_HALF);
auto relu_grad = ops::internal::ReluGrad(s.WithOpName("relu_grad"),
output_grad_cast, relu);
auto fbn_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_grad"), relu_grad, input_cast, scale,
fbn.reserve_space_1, fbn.reserve_space_2, fbn.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fetch0 = ops::Identity(s.WithOpName("fetch0"), fbn_grad.x_backprop);
auto fetch1 = ops::Identity(s.WithOpName("fetch1"), fbn_grad.scale_backprop);
auto fetch2 = ops::Identity(s.WithOpName("fetch2"), fbn_grad.offset_backprop);
auto input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto output_grad_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch0", "fetch1", "fetch2"};
item.feed = {{"input", input_t}, {"scale", scale_t},
{"offset", offset_t}, {"mean", mean_t},
{"var", var_t}, {"output_grad", output_grad_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 5);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
if (node.name() == "fused_batch_norm_grad") {
EXPECT_EQ(node.op(), "_FusedBatchNormGradEx");
ASSERT_EQ(node.input_size(), 8);
EXPECT_EQ(node.input(0), "output_grad_cast");
EXPECT_EQ(node.input(1), "input_cast");
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "fused_batch_norm:3");
EXPECT_EQ(node.input(4), "fused_batch_norm:4");
EXPECT_EQ(node.input(5), "fused_batch_norm:5");
EXPECT_EQ(node.input(6), "offset");
EXPECT_EQ(node.input(7), "relu");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 0);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 3);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 3);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 3);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
test::ExpectClose(tensors[1], tensors_expected[1], 1e-2, 1e-2);
test::ExpectClose(tensors[2], tensors_expected[2], 1e-2, 1e-2);
}
}
#endif
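// Forward fusion with a side input: FusedBatchNormV3 + Add + Relu becomes
// _FusedBatchNormEx with num_side_inputs = 1.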
TEST_F(RemapperTest, FuseBatchNormWithAddAndRelu) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
for (bool is_training : {true, false}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
#if !defined(GOOGLE_CUDA) || !(CUDNN_VERSION >= 7402)
if (is_training) {
LOG(INFO) << "Skip FuseBatchNormWithAddAndRelu"
<< "[is_training=" << is_training << "] "
<< "test. It requires CUDNN_VERSION >= 7402.";
continue;
}
#endif
#if !defined(GOOGLE_CUDA)
if (!is_training) {
LOG(INFO) << "Skip FuseBatchNormWithAddAndRelu"
<< "[is_training=" << is_training << "]";
continue;
}
#endif
const int num_channels = 24;
TensorShape input_shape({2, 8, 8, num_channels});
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
auto side_input = Placeholder(s.WithOpName("side_input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto side_input_cast =
ops::Cast(s.WithOpName("side_input_cast"), side_input, DT_HALF);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto add = ops::Add(s.WithOpName("add"), fbn.y, side_input_cast);
auto relu = ops::Relu(s.WithOpName("relu"), add);
auto fetch = ops::Identity(s.WithOpName("fetch"), relu);
auto input_t = GenerateRandomTensor<DT_FLOAT>(input_shape);
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(is_training ? empty_shape
: channel_shape);
auto side_input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"scale", scale_t},
{"offset", offset_t}, {"mean", mean_t},
{"var", var_t}, {"side_input", side_input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
EXPECT_EQ(node.input(5), "side_input_cast");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 2);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
}
}
}
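// Backward counterpart of the Add + Relu fusion; relu_grad is rewired to an
// extra output of _FusedBatchNormGradEx.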
#if defined(GOOGLE_CUDA) && CUDNN_VERSION >= 7402
TEST_F(RemapperTest, FuseBatchNormGradWithAddAndReluGrad) {
if (IsMKLEnabled()) GTEST_SKIP() << "Fusion not available with oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
bool is_training = true;
const int num_channels = 24;
TensorShape input_shape({2, 8, 8, num_channels});
TensorShape channel_shape({num_channels});
TensorShape empty_shape({0});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto input_cast = ops::Cast(s.WithOpName("input_cast"), input, DT_HALF);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT);
auto var = Placeholder(s.WithOpName("var"), DT_FLOAT);
auto side_input = Placeholder(s.WithOpName("side_input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto side_input_cast =
ops::Cast(s.WithOpName("side_input_cast"), side_input, DT_HALF);
float epsilon = 0.1f;
auto fbn = ops::FusedBatchNormV3(
s.WithOpName("fused_batch_norm"), input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fbn_side_input =
ops::FusedBatchNormV3(s.WithOpName("fused_batch_norm_side_input"),
side_input_cast, scale, offset, mean, var,
ops::FusedBatchNormV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto add = ops::Add(s.WithOpName("add"), fbn.y, fbn_side_input.y);
auto relu = ops::Relu(s.WithOpName("relu"), add);
auto output_grad =
Placeholder(s.WithOpName("output_grad"), DT_FLOAT,
ops::Placeholder::Shape({2, 8, 8, num_channels}));
auto output_grad_cast =
ops::Cast(s.WithOpName("output_grad_cast"), output_grad, DT_HALF);
auto relu_grad = ops::internal::ReluGrad(s.WithOpName("relu_grad"),
output_grad_cast, relu);
auto fbn_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_grad"), relu_grad, input_cast, scale,
fbn.reserve_space_1, fbn.reserve_space_2, fbn.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fbn_side_input_grad = ops::FusedBatchNormGradV3(
s.WithOpName("fused_batch_norm_side_input_grad"), relu_grad,
side_input_cast, scale, fbn_side_input.reserve_space_1,
fbn_side_input.reserve_space_2, fbn_side_input.reserve_space_3,
ops::FusedBatchNormGradV3::IsTraining(is_training)
.Epsilon(epsilon)
.DataFormat("NHWC"));
auto fetch0 = ops::Identity(s.WithOpName("fetch0"), fbn_grad.x_backprop);
auto fetch1 = ops::Identity(s.WithOpName("fetch1"), fbn_grad.scale_backprop);
auto fetch2 = ops::Identity(s.WithOpName("fetch2"), fbn_grad.offset_backprop);
auto fetch3 =
ops::Identity(s.WithOpName("fetch3"), fbn_side_input_grad.x_backprop);
auto fetch4 =
ops::Identity(s.WithOpName("fetch4"), fbn_side_input_grad.scale_backprop);
auto fetch5 = ops::Identity(s.WithOpName("fetch5"),
fbn_side_input_grad.offset_backprop);
auto input_t = GenerateRandomTensor<DT_FLOAT>(input_shape);
auto scale_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto offset_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto mean_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto var_t = GenerateRandomTensor<DT_FLOAT>(channel_shape);
auto side_input_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
auto output_grad_t = GenerateRandomTensor<DT_FLOAT>({2, 8, 8, num_channels});
GrapplerItem item;
item.fetch = {"fetch0", "fetch1", "fetch2", "fetch3", "fetch4", "fetch5"};
item.feed = {{"input", input_t},
{"scale", scale_t},
{"offset", offset_t},
{"mean", mean_t},
{"var", var_t},
{"side_input", side_input_t},
{"output_grad", output_grad_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "relu") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm");
found++;
}
if (node.name() == "fused_batch_norm") {
EXPECT_EQ(node.op(), "_FusedBatchNormEx");
ASSERT_EQ(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input_cast");
EXPECT_EQ(node.input(1), "scale");
EXPECT_EQ(node.input(2), "offset");
EXPECT_EQ(node.input(3), "mean");
EXPECT_EQ(node.input(4), "var");
EXPECT_EQ(node.input(5), "fused_batch_norm_side_input");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
if (node.name() == "relu_grad") {
EXPECT_EQ(node.op(), "Identity");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "fused_batch_norm_grad:5");
found++;
}
if (node.name() == "fused_batch_norm_grad") {
EXPECT_EQ(node.op(), "_FusedBatchNormGradEx");
ASSERT_EQ(node.input_size(), 8);
EXPECT_EQ(node.input(0), "output_grad_cast");
EXPECT_EQ(node.input(1), "input_cast");
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "fused_batch_norm:3");
EXPECT_EQ(node.input(4), "fused_batch_norm:4");
EXPECT_EQ(node.input(5), "fused_batch_norm:5");
EXPECT_EQ(node.input(6), "offset");
EXPECT_EQ(node.input(7), "relu");
auto attr = node.attr();
EXPECT_EQ(attr["num_side_inputs"].i(), 1);
EXPECT_EQ(attr["activation_mode"].s(), "Relu");
found++;
}
}
EXPECT_EQ(found, 4);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 6);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 6);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
test::ExpectClose(tensors[1], tensors_expected[1], 1e-2, 1e-2);
test::ExpectClose(tensors[2], tensors_expected[2], 1e-2, 1e-2);
test::ExpectClose(tensors[3], tensors_expected[3], 1e-2, 1e-2);
test::ExpectClose(tensors[4], tensors_expected[4], 1e-2, 1e-2);
test::ExpectClose(tensors[5], tensors_expected[5], 1e-2, 1e-2);
}
}
#endif
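// Conv2D/Conv3D + BiasAdd -> _FusedConv2D/_FusedConv3D with fused_ops {BiasAdd}.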
class RemapperFuseConvWithBias : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = ops::Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFuseConvWithBias, Conv2D_F32) { RunTest<2, DT_FLOAT>(); }
TEST_F(RemapperFuseConvWithBias, Conv3D_F32) { RunTest<3, DT_FLOAT>(); }
TEST_F(RemapperFuseConvWithBias, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv2DWithBias with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithBias, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv3DWithBias with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
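// Conv + BiasAdd + {Relu, Relu6, Elu, LeakyRelu}: the activation is appended
// to fused_ops, with leakyrelu_alpha carried as an attribute.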
class RemapperFuseConvWithBiasAndActivation : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
input_shape = Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
float leakyrelu_alpha = 0.5;
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], activation);
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFuseConvWithBiasAndActivation, Conv2D_F32) {
RunTest<2, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithBiasAndActivation, Conv3D_F32) {
RunTest<3, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithBiasAndActivation, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv2DWithBiasAndActivation with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithBiasAndActivation, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConv3DWithBiasAndActivation with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
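// Conv + BiasAdd + Add + activation (oneDNN only); fused_ops becomes
// {BiasAdd, Add, <activation>}.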
class RemapperFuseConvWithBiasAndAddActivation : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = Placeholder::Shape({128});
auto add_shape = ops::Placeholder::Shape({8, 32, 32, 128});
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 3, 128});
auto bias_t = GenerateRandomTensor<DT_FLOAT>({128});
auto add_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 128});
float leakyrelu_alpha = 0.5;
std::vector<int> strides = {1, 1, 1, 1};
if (dim == 3) {
input_shape = Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = Placeholder::Shape({128});
add_shape = ops::Placeholder::Shape({8, 4, 32, 32, 128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 128});
bias_t = GenerateRandomTensor<DT_FLOAT>({128});
add_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 128});
}
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto input_add =
Placeholder(s.WithOpName("input_add"), DT_FLOAT, add_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto add = ops::Add(s.WithOpName("add_op"), input_add, bias_add);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(fetch,
ops::internal::LeakyRelu(activate, add, attr));
}
return ops::Identity(fetch, bias);
}();
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto add = ops::Add(s.WithOpName("add_op"), input_add, bias_add);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(fetch,
ops::internal::LeakyRelu(activate, add, attr));
}
return ops::Identity(fetch, bias);
}();
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"filter", filter_t},
{"bias", bias_t},
{"input_add", add_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 2);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 3);
EXPECT_EQ("BiasAdd", fused_ops[0]);
EXPECT_EQ("Add", fused_ops[1]);
EXPECT_EQ(activation, fused_ops[2]);
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 0, 1e-6);
}
}
};
TEST_F(RemapperFuseConvWithBiasAndAddActivation, Conv2D_F32) {
RunTest<2, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithBiasAndAddActivation, Conv3D_F32) {
RunTest<3, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithBiasAndAddActivation, Conv2D_BF16) {
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithBiasAndAddActivation, Conv3D_BF16) {
RunTest<3, DT_BFLOAT16>();
}
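// Conv + Squeeze + BiasAdd: the bias folds into the conv, and the node that
// was bias_add becomes the Squeeze of the fused conv.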
class RemapperFuseConvWithSqueezeAndBias : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
using ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 1, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 1, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
input_shape = ops::Placeholder::Shape({8, 4, 32, 1, 3});
filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = ops::Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 1, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), conv,
ops::Squeeze::Attrs().Axis({2}));
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), squeeze, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto squeeze = ops::Squeeze(s.WithOpName("squeeze"), conv,
ops::Squeeze::Attrs().Axis({3}));
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), squeeze, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "conv") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
} else if (node.name() == "bias_add") {
EXPECT_EQ(node.op(), "Squeeze");
ASSERT_GE(node.input_size(), 1);
EXPECT_EQ(node.input(0), "conv");
found++;
}
}
EXPECT_EQ(found, 2);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFuseConvWithSqueezeAndBias, Conv2D_FP32) {
RunTest<2, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithSqueezeAndBias, Conv3D_FP32) {
RunTest<3, DT_FLOAT>();
}
TEST_F(RemapperFuseConvWithSqueezeAndBias, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConvWithSqueezeAndBias with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseConvWithSqueezeAndBias, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseConvWithSqueezeAndBias with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
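// Pad feeding Conv2D + BiasAdd: only the BiasAdd fuses; the explicit Pad stays
// as the fused conv's input.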
TEST_F(RemapperTest, FusePadPrecededConv2DWithBias) {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 224, 224, 3});
auto filter_shape = ops::Placeholder::Shape({7, 7, 3, 64});
auto paddings_shape = ops::Placeholder::Shape({4, 2});
auto bias_shape = ops::Placeholder::Shape({64});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias_in = Placeholder(s.WithOpName("bias_in"), DT_FLOAT, bias_shape);
std::vector<int> strides = {1, 2, 2, 1};
auto padding_const =
ops::Const(s.WithOpName("padding"), {0, 0, 3, 3, 3, 3, 0, 0}, {4, 2});
auto pad = ops::Pad(s.WithOpName("pad"), input, padding_const);
auto conv = ops::Conv2D(s.WithOpName("conv"), pad, filter, strides, "VALID");
auto bias = ops::BiasAdd(s.WithOpName("bias"), conv, bias_in);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias);
auto input_t = GenerateTensorWithSetRandom<DT_FLOAT>({8, 224, 224, 3});
auto filter_t = GenerateTensorWithSetRandom<DT_FLOAT>({7, 7, 3, 64});
auto bias_t = GenerateTensorWithSetRandom<DT_FLOAT>({64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias_in", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "pad");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.input(2), "bias_in");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
#ifdef INTEL_MKL
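// oneDNN-only tests: an Add with a bias-like constant operand is fused into
// the contraction as if it were a BiasAdd.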
TEST_F(RemapperTest, FuseConv3DWithBias) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 6});
auto add_shape = ops::Placeholder::Shape({6});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "VALID");
auto add_const = ops::Const(s.WithOpName("add_const"),
{1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f}, {6});
auto add = ops::Add(s.WithOpName("b_add"), add_const, conv);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 6});
auto add_t = GenerateRandomTensor<DT_FLOAT>({6});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "b_add") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "add_const");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(RemapperTest, FuseConv3DWithAdd) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 6});
auto add_shape = ops::Placeholder::Shape({1, 1, 1, 1, 6});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto a_placeholder =
Placeholder(s.WithOpName("add_placeholder"), DT_FLOAT, add_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "VALID");
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {1, 1, 1, 1, 6});
auto add = ops::Add(s.WithOpName("add"), add_const, conv);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 6});
auto add_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 1, 6});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "add_const");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(RemapperTest, FuseConv2DWithAdd) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 6});
auto add_shape = ops::Placeholder::Shape({1, 1, 6});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto a_placeholder =
Placeholder(s.WithOpName("add_placeholder"), DT_FLOAT, add_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "VALID");
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {1, 1, 6});
auto add = ops::Add(s.WithOpName("add"), add_const, conv);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 3, 6});
auto add_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 6});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "add_const");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
TEST_F(RemapperTest, FuseMatmulWithAdd) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto lhs_shape = ops::Placeholder::Shape({8, 32});
auto rhs_shape = ops::Placeholder::Shape({32, 64});
auto lhs = Placeholder(s.WithOpName("lhs"), DT_FLOAT, lhs_shape);
auto rhs = Placeholder(s.WithOpName("rhs"), DT_FLOAT, rhs_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), lhs, rhs);
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {1, 64});
auto add = ops::Add(s.WithOpName("add"), matmul, add_const);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto lhs_t = GenerateTensorWithSetRandom<DT_FLOAT>({8, 32});
auto rhs_t = GenerateTensorWithSetRandom<DT_FLOAT>({32, 64});
auto add_t = GenerateTensorWithSetRandom<DT_FLOAT>({1, 64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"lhs", lhs_t}, {"rhs", rhs_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add") {
EXPECT_EQ(node.op(), "_FusedMatMul");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "lhs");
EXPECT_EQ(node.input(1), "rhs");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "add_const");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(1, found);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
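// Softplus -> Tanh -> Mul with the pre-activation input (Mish) collapses into
// a single _MklFusedMish node.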
class RemapperFuseSoftplusTanhMul : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto softplus = ops::Softplus(s.WithOpName("softplus"), bias_add);
auto tanh = ops::Tanh(s.WithOpName("tanh"), softplus);
auto mul = ops::Mul(s.WithOpName("mul"), tanh, bias_add);
auto fetch = ops::Identity(s.WithOpName("fetch"), mul);
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "mul") {
EXPECT_EQ(node.op(), "_MklFusedMish");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "bias_add");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16) {
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
} else {
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFuseSoftplusTanhMul, FP32) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFuseSoftplusTanhMul, BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Test only applicable to oneDNN.";
RunTest<DT_BFLOAT16>();
}
#endif
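// A hand-built layer-normalization subgraph (mean, variance, rsqrt, scale,
// shift) fuses into a single _MklLayerNorm node.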
TEST_F(RemapperTest, FuseMklLayerNorm) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TensorShape input_shape = TensorShape({2, 4});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT,
ops::Placeholder::Shape(input_shape));
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {2, 4});
auto add = ops::Add(s.WithOpName("b_add"), add_const, input);
auto r_indices = ops::Const(s.WithOpName("r_indices"), {1}, {1});
ops::Mean::Attrs attrs;
attrs = attrs.KeepDims(true);
auto mean = ops::Mean(s.WithOpName("mean"), add, r_indices, attrs);
auto s_diff = ops::SquaredDifference(s.WithOpName("s_diff"), mean, add);
auto variance = ops::Mean(s.WithOpName("variance"), s_diff, r_indices, attrs);
auto e_const = ops::Const(s.WithOpName("e_const"), {0.001f}, {});
auto add_1 = ops::Add(s.WithOpName("add_1"), e_const, variance);
auto rsqrt = ops::Rsqrt(s.WithOpName("rsqrt"), add_1);
auto g_const = ops::Const(s.WithOpName("g_const"), 1.0f, {4});
auto mul = ops::Mul(s.WithOpName("mul"), rsqrt, g_const);
auto mul_1 = ops::Mul(s.WithOpName("mul_1"), mul, add);
auto mul_2 = ops::Mul(s.WithOpName("mul_2"), mul, mean);
auto b_const = ops::Const(s.WithOpName("b_const"), 0.0f, {4});
auto sub = ops::Sub(s.WithOpName("sub"), b_const, mul_2);
auto add_2 = ops::Add(s.WithOpName("add_2"), mul_1, sub);
auto fetch = ops::Identity(s.WithOpName("fetch"), add_2);
auto input_t = GenerateTensorWithSetRandom<DT_FLOAT>({2, 4});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add_2") {
EXPECT_EQ(node.op(), "_MklLayerNorm");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "b_add");
EXPECT_EQ(node.input(1), "g_const");
EXPECT_EQ(node.input(2), "b_const");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-4);
}
class FuseMklLayerNormPattern : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
TensorShape input_shape = TensorShape({2, 4});
auto input = Placeholder(s.WithOpName("input"), DTYPE,
ops::Placeholder::Shape(input_shape));
auto add_const = ops::Const(s.WithOpName("add_const"), 1.0f, {2, 4});
auto add = ops::Add(s.WithOpName("b_add"), add_const, input);
auto r_indices = ops::Const(s.WithOpName("r_indices"), {1}, {1});
ops::Mean::Attrs attrs;
attrs = attrs.KeepDims(true);
auto mean = ops::Mean(s.WithOpName("mean"), add, r_indices, attrs);
auto sub = ops::Sub(s.WithOpName("sub"), add, mean);
auto s_diff = ops::SquaredDifference(s.WithOpName("s_diff"), mean, add);
auto variance =
ops::Mean(s.WithOpName("variance"), s_diff, r_indices, attrs);
auto e_const = ops::Const(s.WithOpName("e_const"), {0.001f}, {});
auto add_1 = ops::AddV2(s.WithOpName("add_1"), e_const, variance);
auto rsqrt = ops::Rsqrt(s.WithOpName("rsqrt"), add_1);
auto mul = ops::Mul(s.WithOpName("mul"), sub, rsqrt);
auto g_const = ops::Const(s.WithOpName("g_const"), 1.0f, {4});
auto mul_1 = ops::Mul(s.WithOpName("mul_1"), g_const, mul);
auto b_const = ops::Const(s.WithOpName("b_const"), 0.0f, {4});
auto add_2 = ops::AddV2(s.WithOpName("add_2"), mul_1, b_const);
auto fetch = ops::Identity(s.WithOpName("fetch"), add_2);
auto input_t = GenerateTensorWithSetRandom<DTYPE>({2, 4});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add_2") {
EXPECT_EQ(node.op(), "_MklLayerNorm");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "b_add");
EXPECT_EQ(node.input(1), "g_const");
EXPECT_EQ(node.input(2), "b_const");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-4);
}
};
TEST_F(FuseMklLayerNormPattern, F32) { RunTest<DT_FLOAT>(); }
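// Tests that AsString followed by StringToHashBucketFast is fused into a
// single _TensorToHashBucketFast node that keeps the num_buckets attribute.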
class RemapperTensorToHashBucketTest : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
int num_buckets = 100;
auto to_string = ops::AsString(s.WithOpName("to_string"), input);
auto to_bucket = ops::StringToHashBucketFast(s.WithOpName("to_bucket"),
to_string, num_buckets);
auto fetch = ops::Identity(s.WithOpName("fetch"), to_bucket);
auto input_t = GenerateRandomTensor<DTYPE>({8, 32, 32, 3});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
const string input_device =
GetNumAvailableGPUs() > 0 ? "/device:GPU:0" : "/device:CPU:0";
for (int i = 0; i < item.graph.node_size(); ++i) {
if (item.graph.node(i).name() == "input") {
item.graph.mutable_node(i)->set_device(input_device);
} else {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "to_bucket") {
EXPECT_EQ(node.op(), "_TensorToHashBucketFast");
ASSERT_GE(node.input_size(), 1);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.attr().at("num_buckets").i(), num_buckets);
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorEqual<int64_t>(tensors[0], tensors_expected[0]);
}
};
TEST_F(RemapperTensorToHashBucketTest, I8) { RunTest<DT_INT8>(); }
TEST_F(RemapperTensorToHashBucketTest, I16) { RunTest<DT_INT16>(); }
TEST_F(RemapperTensorToHashBucketTest, I32) { RunTest<DT_INT32>(); }
TEST_F(RemapperTensorToHashBucketTest, I64) { RunTest<DT_INT64>(); }
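// Tests that MatMul followed by BiasAdd is rewritten into _FusedMatMul with
// "BiasAdd" recorded in the fused_ops attribute.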
class RemapperFuseMatMulWithBiasTest : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto lhs_shape = ops::Placeholder::Shape({8, 32});
auto rhs_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto lhs = Placeholder(s.WithOpName("lhs"), DTYPE, lhs_shape);
auto rhs = Placeholder(s.WithOpName("rhs"), DTYPE, rhs_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), lhs, rhs);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
auto lhs_t = GenerateTensorWithSetRandom<DTYPE>({8, 32});
auto rhs_t = GenerateTensorWithSetRandom<DTYPE>({32, 64});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"lhs", lhs_t}, {"rhs", rhs_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
const string device =
GetNumAvailableGPUs() > 0 && (DTYPE == DT_HALF || DTYPE == DT_FLOAT)
? "/device:GPU:0"
: "/device:CPU:0";
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device(device);
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
EXPECT_EQ(node.op(), "_FusedMatMul");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "lhs");
EXPECT_EQ(node.input(1), "rhs");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(1, found);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16 || DTYPE == DT_HALF)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFuseMatMulWithBiasTest, F16) {
bool skip_test = false;
#if !defined(GOOGLE_CUDA) || !TF_HIPBLASLT
skip_test = true;
#endif
if (skip_test || GetNumAvailableGPUs() == 0) {
GTEST_SKIP() << "Skipping FuseMatMulWithBias with half, which is only "
"supported in CUDA.";
}
RunTest<DT_HALF>();
}
TEST_F(RemapperFuseMatMulWithBiasTest, F32) {
bool skip_test = false;
#if !defined(GOOGLE_CUDA)
skip_test = true;
#endif
if (skip_test || GetNumAvailableGPUs() == 0) {
GTEST_SKIP() << "Skipping FuseMatMulWithBias with float, which is only "
"supported in CUDA.";
}
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFuseMatMulWithBiasTest, Bf16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseMatMulWithBias with bfloat16.";
RunTest<DT_BFLOAT16>();
}
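// Checks Conv2D + BiasAdd + Relu fusion into _FusedConv2D with all nodes
// placed on GPU; skipped without CUDA, and numeric results are compared only
// when a GPU is actually available.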
TEST_F(RemapperTest, DISABLED_FuseConv2DWithBiasAndActivationOnGPU) {
#if !(GOOGLE_CUDA)
GTEST_SKIP() << "No CUDA, skip FuseConv2DWithBiasAndActivation on GPU";
#endif
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = Placeholder::Shape({3, 3, 3, 128});
auto bias_shape = Placeholder::Shape({128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv = ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
return ops::Identity(fetch, ops::Relu(activate, bias_add));
}();
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({3, 3, 3, 128});
auto bias_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:GPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], "Relu");
found++;
}
}
EXPECT_EQ(found, 1);
if (GetNumAvailableGPUs() > 0) {
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
}
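// Tests MatMul + BiasAdd followed by an activation (Relu, Relu6, Elu,
// LeakyRelu, plus Tanh on non-CUDA builds) fusing into _FusedMatMul, and
// verifies the fused_ops list and the leakyrelu_alpha attribute.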
class RemapperFuseMatMulWithBiasAndActivationTest : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
std::vector<string> activations = {"Relu", "Relu6", "Elu", "LeakyRelu"};
#if !defined(GOOGLE_CUDA)
activations.push_back("Tanh");
#endif
for (const string& activation : activations) {
if (DTYPE == DT_HALF && activation != "Relu") continue;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto lhs_shape = ops::Placeholder::Shape({8, 32});
auto rhs_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto lhs = Placeholder(s.WithOpName("lhs"), DTYPE, lhs_shape);
auto rhs = Placeholder(s.WithOpName("rhs"), DTYPE, rhs_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), lhs, rhs);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
float leakyrelu_alpha = 0.5;
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
#if !defined(GOOGLE_CUDA)
} else if (activation == "Tanh") {
return ops::Identity(fetch, ops::Tanh(activate, bias_add));
#endif
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
auto lhs_t = GenerateTensorWithSetRandom<DTYPE>({8, 32});
auto rhs_t = GenerateTensorWithSetRandom<DTYPE>({32, 64});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"lhs", lhs_t}, {"rhs", rhs_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
const string device = GetNumAvailableGPUs() > 0 &&
(DTYPE == DT_HALF || DTYPE == DT_FLOAT) &&
activation == "Relu"
? "/device:GPU:0"
: "/device:CPU:0";
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device(device);
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
EXPECT_EQ(node.op(), "_FusedMatMul");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "lhs");
EXPECT_EQ(node.input(1), "rhs");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], activation);
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(1, found);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16 || DTYPE == DT_HALF)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFuseMatMulWithBiasAndActivationTest, F16) {
bool skip_test = false;
#if !defined(GOOGLE_CUDA) || !TF_HIPBLASLT
skip_test = true;
#endif
if (skip_test || GetNumAvailableGPUs() == 0) {
GTEST_SKIP() << "Skipping FuseMatMulWithBiasAndActivationTest with half, "
"which is only supported in CUDA.";
}
RunTest<DT_HALF>();
}
TEST_F(RemapperFuseMatMulWithBiasAndActivationTest, F32) {
bool skip_test = false;
#if !defined(GOOGLE_CUDA)
skip_test = true;
#endif
if (skip_test || GetNumAvailableGPUs() == 0) {
GTEST_SKIP() << "Skipping FuseMatMulWithBiasAndActivationTest with float, "
"which is only supported in CUDA.";
}
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFuseMatMulWithBiasAndActivationTest, Bf16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"FuseMatMulWithBiasAndActivation with bfloat16.";
RunTest<DT_BFLOAT16>();
}
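// Tests that Conv2D (with explicit padding) followed by an inference-mode
// FusedBatchNorm is rewritten into _FusedConv2D carrying scale, offset, mean
// and variance as extra arguments.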
TEST_F(RemapperTest, FuseConv2DWithBatchNorm) {
#ifdef DNNL_AARCH64_USE_ACL
GTEST_SKIP() << "Skipping test due to different behaviour on AARCH64";
#endif
using ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto scale_shape = ops::Placeholder::Shape({128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT, scale_shape);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT, scale_shape);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT, scale_shape);
auto variance = Placeholder(s.WithOpName("variance"), DT_FLOAT, scale_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv = ops::Conv2D(
s.WithOpName("conv"), input, filter, strides, "EXPLICIT",
ops::Conv2D::Attrs().ExplicitPaddings({0, 0, 1, 2, 3, 4, 0, 0}));
ops::FusedBatchNorm::Attrs attrs;
attrs = attrs.IsTraining(false);
auto batch_norm = ops::FusedBatchNorm(s.WithOpName("batch_norm"), conv, scale,
offset, mean, variance, attrs);
auto fetch = ops::Identity(s.WithOpName("fetch"), batch_norm.y);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 3, 128});
auto scale_t = GenerateRandomTensor<DT_FLOAT>({128});
auto offset_t = GenerateRandomTensor<DT_FLOAT>({128});
auto mean_t = GenerateRandomTensor<DT_FLOAT>({128});
auto variance_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t},
{"scale", scale_t}, {"offset", offset_t},
{"mean", mean_t}, {"variance", variance_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "batch_norm") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 4);
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "offset");
EXPECT_EQ(node.input(4), "mean");
EXPECT_EQ(node.input(5), "variance");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "FusedBatchNorm");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6, 1e-4);
}
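// Same Conv2D + FusedBatchNorm pattern as above, with Relu, Relu6, Elu or
// LeakyRelu fused on top; fused_ops must be {FusedBatchNorm, <activation>}.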
TEST_F(RemapperTest, FuseConv2DWithBatchNormAndActivation) {
#ifdef DNNL_AARCH64_USE_ACL
GTEST_SKIP() << "Skipping test due to different behaviour on AARCH64";
#endif
using ops::Placeholder;
for (const string& activation : {"Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto scale_shape = ops::Placeholder::Shape({128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto scale = Placeholder(s.WithOpName("scale"), DT_FLOAT, scale_shape);
auto offset = Placeholder(s.WithOpName("offset"), DT_FLOAT, scale_shape);
auto mean = Placeholder(s.WithOpName("mean"), DT_FLOAT, scale_shape);
auto variance =
Placeholder(s.WithOpName("variance"), DT_FLOAT, scale_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
ops::FusedBatchNorm::Attrs attrs;
attrs = attrs.IsTraining(false);
auto batch_norm = ops::FusedBatchNorm(s.WithOpName("batch_norm"), conv,
scale, offset, mean, variance, attrs);
float leakyrelu_alpha = 0.5;
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, batch_norm.y));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, batch_norm.y));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, batch_norm.y));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, batch_norm.y, attr));
}
return ops::Identity(fetch, batch_norm.y);
}();
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 3, 128});
auto scale_t = GenerateRandomTensor<DT_FLOAT>({128});
auto offset_t = GenerateRandomTensor<DT_FLOAT>({128});
auto mean_t = GenerateRandomTensor<DT_FLOAT>({128});
auto variance_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t},
{"scale", scale_t}, {"offset", offset_t},
{"mean", mean_t}, {"variance", variance_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 6);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 4);
EXPECT_EQ(node.input(2), "scale");
EXPECT_EQ(node.input(3), "offset");
EXPECT_EQ(node.input(4), "mean");
EXPECT_EQ(node.input(5), "variance");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "FusedBatchNorm");
EXPECT_EQ(fused_ops[1], activation);
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6, 1e-4);
}
}
#ifdef INTEL_MKL
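// Tests Conv3D + BiasAdd + AddN(residual, bias_add) fusing into a single
// _FusedConv3D with fused_ops {BiasAdd, Add} (oneDNN only).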
TEST_F(RemapperTest, FuseConv3DWithBiasAndAddN) {
#ifdef DNNL_AARCH64_USE_ACL
GTEST_SKIP() << "Skipping test due to different behaviour on AARCH64";
#endif
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
auto add_shape = ops::Placeholder::Shape({8, 4, 32, 32, 128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto input_add = Placeholder(s.WithOpName("input_add"), DT_FLOAT, add_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto conv = ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto add = ops::AddN(s.WithOpName("add_op"),
std::initializer_list<Input>{input_add, bias_add});
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 128});
auto add_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 128});
auto bias_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"filter", filter_t},
{"bias", bias_t},
{"input_add", add_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add_op") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 2);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], "Add");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 0, 1e-6);
}
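// Variant of the previous test that uses a binary Add instead of AddN.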
TEST_F(RemapperTest, FuseConv3DWithBiasAndAdd) {
#ifdef DNNL_AARCH64_USE_ACL
GTEST_SKIP() << "Skipping test due to different behaviour on AARCH64";
#endif
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
auto add_shape = ops::Placeholder::Shape({8, 4, 32, 32, 128});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
auto input_add = Placeholder(s.WithOpName("input_add"), DT_FLOAT, add_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto conv = ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
auto add = ops::Add(s.WithOpName("add_op"), input_add, bias_add);
auto fetch = ops::Identity(s.WithOpName("fetch"), add);
auto input_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 3});
auto filter_t = GenerateRandomTensor<DT_FLOAT>({1, 1, 1, 3, 128});
auto add_t = GenerateRandomTensor<DT_FLOAT>({8, 4, 32, 32, 128});
auto bias_t = GenerateRandomTensor<DT_FLOAT>({128});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t},
{"filter", filter_t},
{"bias", bias_t},
{"input_add", add_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "add_op") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 2);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], "Add");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectClose(tensors[0], tensors_expected[0], 0, 1e-6);
}
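// Tests that an Add of a rank-1 tensor with a Conv2D output (semantically a
// BiasAdd) is fused into _FusedConv2D, both for that standalone Add and for
// the regular BiasAdd after the second convolution.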
TEST_F(RemapperTest, FuseConv2DWithSemanticAdd) {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 6});
auto filter_shape_1 = ops::Placeholder::Shape({1, 1, 6, 6});
auto semanticadd_shape = ops::Placeholder::Shape({6});
auto bias_shape = ops::Placeholder::Shape({6});
auto input = Placeholder(s.WithOpName("input"), DT_FLOAT, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DT_FLOAT, filter_shape);
auto filter_1 =
Placeholder(s.WithOpName("filter_1"), DT_FLOAT, filter_shape_1);
auto semanticadd =
Placeholder(s.WithOpName("semanticadd"), DT_FLOAT, semanticadd_shape);
auto bias = Placeholder(s.WithOpName("bias"), DT_FLOAT, bias_shape);
std::vector<int> strides = {1, 1, 1, 1};
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "VALID");
auto add = ops::Add(s.WithOpName("add"), semanticadd, conv);
auto conv_1 =
ops::Conv2D(s.WithOpName("conv_1"), add, filter_1, strides, "VALID");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv_1, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
auto input_tensor = GenerateRandomTensor<DT_FLOAT>(
TensorShape(input_shape.shape_.dim_sizes()));
auto filter_tensor = GenerateRandomTensor<DT_FLOAT>(
TensorShape(filter_shape.shape_.dim_sizes()));
auto filter_tensor_1 = GenerateRandomTensor<DT_FLOAT>(
TensorShape(filter_shape_1.shape_.dim_sizes()));
auto semanticadd_tensor = GenerateRandomTensor<DT_FLOAT>(
TensorShape(semanticadd_shape.shape_.dim_sizes()));
auto bias_tensor = GenerateRandomTensor<DT_FLOAT>(
TensorShape(bias_shape.shape_.dim_sizes()));
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_tensor},
{"filter", filter_tensor},
{"filter_1", filter_tensor_1},
{"semanticadd", semanticadd_tensor},
{"bias", bias_tensor}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "add");
EXPECT_EQ(node.input(1), "filter_1");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
if (node.name() == "add") {
EXPECT_EQ(node.op(), "_FusedConv2D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "semanticadd");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 1);
EXPECT_EQ(fused_ops[0], "BiasAdd");
found++;
}
}
EXPECT_EQ(found, 2);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
test::ExpectTensorNear<float>(tensors[0], tensors_expected[0], 1e-6);
}
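// Tests that a Pad feeding a Conv3D is absorbed into _FusedConv3D, which then
// reads the original (unpadded) input directly (oneDNN only).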
class RemapperFusePadConv3D : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to MKL.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 6});
auto paddings_shape = ops::Placeholder::Shape({5, 2});
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
std::vector<int> strides = {1, 1, 1, 1, 1};
auto padding_const = ops::Const(s.WithOpName("padding"),
{0, 0, 1, 1, 1, 1, 1, 1, 0, 0}, {5, 2});
auto pad = ops::Pad(s.WithOpName("pad"), input, padding_const);
auto conv =
ops::Conv3D(s.WithOpName("conv"), pad, filter, strides, "VALID");
auto fetch = ops::Identity(s.WithOpName("fetch"), conv);
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 6});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::AGGRESSIVE);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "conv") {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 2);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
};
TEST_F(RemapperFusePadConv3D, Conv3D_FP32) {
if (!IsMKLEnabled())
GTEST_SKIP()
<< "Pad fusion with Conv3D is only enabled with oneDNN, skipping "
"RemapperFusePadConv3D with FP32.";
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFusePadConv3D, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperFusePadConv3D with bfloat16.";
RunTest<DT_BFLOAT16>();
}
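// Tests folding a Pad into a _FusedConv3D that already carries BiasAdd and an
// optional activation; the remapper is run twice so the fused convolution is
// formed first and the Pad is absorbed in the second pass.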
class RemapperFusePadWithFusedConv3D : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"", "Relu", "Relu6", "Elu", "LeakyRelu"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
auto paddings_shape = ops::Placeholder::Shape({5, 2});
auto strides = {1, 1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
auto padding_const = ops::Const(s.WithOpName("padding"),
{0, 0, 1, 1, 1, 1, 1, 1, 0, 0}, {5, 2});
auto pad = ops::Pad(s.WithOpName("pad"), input, padding_const);
auto conv =
ops::Conv3D(s.WithOpName("conv"), pad, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
float leakyrelu_alpha = 0.5;
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
if (activation == "Relu") {
return ops::Identity(fetch, ops::Relu(activate, bias_add));
} else if (activation == "Relu6") {
return ops::Identity(fetch, ops::Relu6(activate, bias_add));
} else if (activation == "Elu") {
return ops::Identity(fetch, ops::Elu(activate, bias_add));
} else if (activation == "LeakyRelu") {
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}
return ops::Identity(fetch, bias);
}();
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output_1;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output_1));
item.graph = std::move(output_1);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
string fused_node_name;
std::vector<string> expected_fused_ops = {"BiasAdd"};
if (activation.empty()) {
fused_node_name = "bias_add";
} else {
fused_node_name = "activation";
expected_fused_ops.push_back(activation);
}
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == fused_node_name) {
EXPECT_EQ(node.op(), "_FusedConv3D");
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), expected_fused_ops.size());
for (int i = 0; i < fused_ops.size(); ++i) {
EXPECT_EQ(fused_ops[i], expected_fused_ops[i]);
}
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFusePadWithFusedConv3D, FusedConv3D_FP32) {
if (!IsMKLEnabled())
GTEST_SKIP()
<< "Pad fusion with FusedConv3D is only enabled with oneDNN, skipping "
"RemapperFusePadWithFusedConv3D with FP32.";
RunTest<DT_FLOAT>();
}
TEST_F(RemapperFusePadWithFusedConv3D, FusedConv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperFusePadWithFusedConv3D with bfloat16.";
RunTest<DT_BFLOAT16>();
}
#endif
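// Tests that Maximum(Mul(input, alpha), input) is rewritten into a single
// LeakyRelu node (oneDNN only).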
class RemapperLeakyReluTest : public GrapplerTest {
protected:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto max_shape = ops::Placeholder::Shape({64, 64});
auto input = Placeholder(s.WithOpName("input"), DTYPE, max_shape);
float epsilon = 0.3f;
typedef typename EnumToDataType<DTYPE>::Type CType;
auto leakyrelu_alpha = ops::Const<CType>(s.WithOpName("alpha"), epsilon);
auto mul = ops::Mul(s.WithOpName("Mul"), input, leakyrelu_alpha);
auto max = ops::Maximum(s.WithOpName("Maximum"), mul, input);
auto fetch = ops::Identity(s.WithOpName("fetch"), max);
auto max_t = GenerateTensorWithSetRandom<DTYPE>({64, 64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", max_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "Maximum") {
EXPECT_EQ(node.op(), "LeakyRelu");
ASSERT_EQ(node.input_size(), 1);
EXPECT_EQ(node.input(0), "input");
++found;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
float atol = 1e-6, rtol = 1e-6;
if (DTYPE == DT_BFLOAT16) {
atol = 1e-2;
rtol = 1e-2;
}
test::ExpectClose(tensors[0], tensors_expected[0], atol, rtol);
}
};
TEST_F(RemapperLeakyReluTest, F32) { RunTest<DT_FLOAT>(); }
TEST_F(RemapperLeakyReluTest, BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperLeakyRelu with bfloat16.";
RunTest<DT_BFLOAT16>();
}
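// Tests fusing LeakyRelu and Mish (Softplus -> Tanh -> Mul) activations into
// an already fused _FusedConv2D/_FusedConv3D with BiasAdd; the remapper runs
// twice so the activation lands on the previously fused convolution.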
class RemapperFuseFusedConvWithFusedActivation : public RemapperTest {
public:
template <int dim, DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
for (const string& activation : {"LeakyRelu", "Mish"}) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input_shape = ops::Placeholder::Shape({8, 32, 32, 3});
auto filter_shape = ops::Placeholder::Shape({1, 1, 3, 128});
auto bias_shape = ops::Placeholder::Shape({128});
std::vector<int> strides = {1, 1, 1, 1};
auto input_t = GenerateTensorWithSetRandom<DTYPE>({8, 32, 32, 3});
auto filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 3, 128});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
if (dim == 3) {
input_shape = ops::Placeholder::Shape({8, 4, 32, 32, 3});
filter_shape = ops::Placeholder::Shape({1, 1, 1, 3, 128});
bias_shape = ops::Placeholder::Shape({128});
strides = {1, 1, 1, 1, 1};
input_t = GenerateTensorWithSetRandom<DTYPE>({8, 4, 32, 32, 3});
filter_t = GenerateTensorWithSetRandom<DTYPE>({1, 1, 1, 3, 128});
bias_t = GenerateTensorWithSetRandom<DTYPE>({128});
}
auto input = Placeholder(s.WithOpName("input"), DTYPE, input_shape);
auto filter = Placeholder(s.WithOpName("filter"), DTYPE, filter_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
float leakyrelu_alpha = 0.5;
if (dim == 2) {
auto conv =
ops::Conv2D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
if (activation == "LeakyRelu") {
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}();
} else if (activation == "Mish") {
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
auto softplus = ops::Softplus(s.WithOpName("softplus"), bias_add);
auto tanh = ops::Tanh(s.WithOpName("tanh"), softplus);
return ops::Identity(fetch, ops::Mul(activate, bias_add, tanh));
}();
}
} else if (dim == 3) {
auto conv =
ops::Conv3D(s.WithOpName("conv"), input, filter, strides, "SAME");
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), conv, bias);
if (activation == "LeakyRelu") {
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
auto attr = ops::internal::LeakyRelu::Alpha(leakyrelu_alpha);
return ops::Identity(
fetch, ops::internal::LeakyRelu(activate, bias_add, attr));
}();
} else if (activation == "Mish") {
ops::Identity fetch = [&]() -> ops::Identity {
auto activate = s.WithOpName("activation");
auto fetch = s.WithOpName("fetch");
auto softplus = ops::Softplus(s.WithOpName("softplus"), bias_add);
auto tanh = ops::Tanh(s.WithOpName("tanh"), softplus);
return ops::Identity(fetch, ops::Mul(activate, bias_add, tanh));
}();
}
}
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input", input_t}, {"filter", filter_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output_1;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output_1));
item.graph = std::move(output_1);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "activation") {
if (dim == 2) {
EXPECT_EQ(node.op(), "_FusedConv2D");
} else if (dim == 3) {
EXPECT_EQ(node.op(), "_FusedConv3D");
}
ASSERT_GE(node.input_size(), 3);
EXPECT_EQ(node.input(0), "input");
EXPECT_EQ(node.input(1), "filter");
EXPECT_EQ(node.attr().at("num_args").i(), 1);
EXPECT_EQ(node.input(2), "bias");
const auto fused_ops = node.attr().at("fused_ops").list().s();
ASSERT_EQ(fused_ops.size(), 2);
EXPECT_EQ(fused_ops[0], "BiasAdd");
EXPECT_EQ(fused_ops[1], activation);
if (activation == "LeakyRelu") {
EXPECT_EQ(node.attr().at("leakyrelu_alpha").f(), leakyrelu_alpha);
}
found++;
}
}
EXPECT_EQ(found, 1);
auto tensors_expected = EvaluateNodes(item.graph, item.fetch, item.feed);
ASSERT_EQ(tensors_expected.size(), 1);
auto tensors = EvaluateNodes(output, item.fetch, item.feed);
ASSERT_EQ(tensors.size(), 1);
if (DTYPE == DT_BFLOAT16)
test::ExpectClose(tensors[0], tensors_expected[0], 1e-2, 1e-2);
else
test::ExpectClose(tensors[0], tensors_expected[0], 1e-6);
}
}
};
TEST_F(RemapperFuseFusedConvWithFusedActivation, Conv2D_F32) {
RunTest<2, DT_FLOAT>();
}
TEST_F(RemapperFuseFusedConvWithFusedActivation, Conv2D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperFuseFusedConvWithFusedActivation with bfloat16.";
RunTest<2, DT_BFLOAT16>();
}
TEST_F(RemapperFuseFusedConvWithFusedActivation, Conv3D_F32) {
RunTest<3, DT_FLOAT>();
}
TEST_F(RemapperFuseFusedConvWithFusedActivation, Conv3D_BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperFuseFusedConvWithFusedActivation with bfloat16.";
RunTest<3, DT_BFLOAT16>();
}
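// Tests that control dependencies attached to the alpha constant of the
// LeakyRelu pattern are carried over to the rewritten LeakyRelu node.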
class RemapperControlDependencyPatternMatcher : public RemapperTest {
public:
template <DataType DTYPE>
void RunTest() {
if (!IsMKLEnabled()) GTEST_SKIP() << "Test only applicable to oneDNN.";
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto input0_shape = ops::Placeholder::Shape({1});
auto input0 = Placeholder(s.WithOpName("input_0"), DTYPE, input0_shape);
auto input0_t = GenerateTensorWithSetRandom<DTYPE>({1});
auto input1_shape = ops::Placeholder::Shape({1});
auto input1 = Placeholder(s.WithOpName("input_1"), DTYPE, input1_shape);
auto input1_t = GenerateTensorWithSetRandom<DTYPE>({1});
auto add0 = ops::Add(s.WithOpName("add_0"), input0, input1);
auto add1 = ops::Add(s.WithOpName("add_1"), input0, input1);
float leakyrelu_alpha = 0.18;
typedef typename EnumToDataType<DTYPE>::Type CType;
auto const1 = ops::Const<CType>(
s.WithOpName("alpha").WithControlDependencies(
std::vector<Operation>{add0.operation, add1.operation}),
leakyrelu_alpha);
auto sub = ops::Subtract(s.WithOpName("sub_0"), input0, input1);
auto mul = ops::Mul(s.WithOpName("mul_0"), const1, sub);
auto max = ops::Maximum(s.WithOpName("max_0"), mul, sub);
auto softplus = ops::Softplus(s.WithOpName("softplus"), max);
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"input_0", input0_t}, {"input_1", input1_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device("/device:CPU:0");
}
Remapper optimizer(RewriterConfig::ON);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
Status status;
utils::MutableGraphView graph_view(&output, &status);
const int num_nodes = output.node_size();
int found = 0;
for (int i = 0; i < num_nodes; i++) {
auto* node = graph_view.GetNode(i)->node();
if (node->name() == "max_0") {
EXPECT_EQ(node->op(), "LeakyRelu");
EXPECT_EQ(node->attr().at("alpha").f(), leakyrelu_alpha);
ASSERT_EQ(node->input_size(), 3);
EXPECT_EQ(node->input(0), "sub_0");
auto* node_view = graph_view.GetNode(i);
EXPECT_EQ(node_view->NumControllingFanins(), 2);
if (node->input(1).compare("^add_0")) {
if (node->input(2).compare("^add_1")) found++;
} else if (node->input(1).compare("^add_1")) {
if (node->input(2).compare("^add_0")) found++;
}
}
}
EXPECT_EQ(found, 1);
}
};
TEST_F(RemapperControlDependencyPatternMatcher, F32) { RunTest<DT_FLOAT>(); }
TEST_F(RemapperControlDependencyPatternMatcher, BF16) {
if (!IsMKLEnabled() || !IsDataTypeSupportedByOneDNNOnThisCPU(DT_BFLOAT16))
GTEST_SKIP() << "Intel oneDNN with bfloat16 is not supported, skipping "
"RemapperControlDependencyPatternMatcher with bfloat16.";
RunTest<DT_BFLOAT16>();
}
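// Tests that MatMul + BiasAdd stays unfused when the XLA CPU global JIT flag
// is set; both the MatMul and the BiasAdd nodes must survive the remapper.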
class XlaCpuJitDisableFusionTest : public RemapperTest {
protected:
void SetUp() override {
setenv("TF_XLA_FLAGS", "--tf_xla_cpu_global_jit", 1);
}
template <DataType DTYPE>
void RunTest() {
using ::tensorflow::ops::Placeholder;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto lhs_shape = ops::Placeholder::Shape({8, 32});
auto rhs_shape = ops::Placeholder::Shape({32, 64});
auto bias_shape = ops::Placeholder::Shape({64});
auto lhs = Placeholder(s.WithOpName("lhs"), DTYPE, lhs_shape);
auto rhs = Placeholder(s.WithOpName("rhs"), DTYPE, rhs_shape);
auto bias = Placeholder(s.WithOpName("bias"), DTYPE, bias_shape);
auto matmul = ops::MatMul(s.WithOpName("matmul"), lhs, rhs);
auto bias_add = ops::BiasAdd(s.WithOpName("bias_add"), matmul, bias);
auto fetch = ops::Identity(s.WithOpName("fetch"), bias_add);
auto lhs_t = GenerateTensorWithSetRandom<DTYPE>({8, 32});
auto rhs_t = GenerateTensorWithSetRandom<DTYPE>({32, 64});
auto bias_t = GenerateTensorWithSetRandom<DTYPE>({64});
GrapplerItem item;
item.fetch = {"fetch"};
item.feed = {{"lhs", lhs_t}, {"rhs", rhs_t}, {"bias", bias_t}};
TF_ASSERT_OK(s.ToGraphDef(&item.graph));
const string device = "/device:CPU:0";
for (int i = 0; i < item.graph.node_size(); ++i) {
item.graph.mutable_node(i)->set_device(device);
}
Remapper optimizer(RewriterConfig::ON, RewriterConfig::NO_CONVERSION_ON_CPU,
true);
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
int found = 0;
for (const NodeDef& node : output.node()) {
if (node.name() == "bias_add") {
EXPECT_EQ(node.op(), "BiasAdd");
found++;
} else if (node.name() == "matmul") {
EXPECT_EQ(node.op(), "MatMul");
found++;
}
}
EXPECT_EQ(2, found);
}
};
#if !(DNNL_AARCH64_USE_ACL || GOOGLE_CUDA || TENSORFLOW_USE_ROCM)
TEST_F(XlaCpuJitDisableFusionTest, MatMulWithBias) { RunTest<DT_FLOAT>(); }
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/remapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/remapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dc782095-8db1-4f12-bb84-d81e11395814 | cpp | tensorflow/tensorflow | parse_annotation | third_party/xla/xla/tsl/profiler/utils/parse_annotation.cc | third_party/xla/xla/tsl/profiler/utils/parse_annotation_test.cc | #include "xla/tsl/profiler/utils/parse_annotation.h"
#include <stack>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/ascii.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
namespace tsl {
namespace profiler {
namespace {
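// Splits an annotation of the form "name#metadata#" into its name and
// metadata parts; annotations without metadata get an empty second part.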
std::vector<absl::string_view> SplitNameAndMetadata(
absl::string_view annotation) {
std::vector<absl::string_view> parts;
if (!HasMetadata(annotation)) {
parts.emplace_back(annotation);
} else {
annotation.remove_suffix(1);
parts = absl::StrSplit(annotation, '#');
if (parts.size() > 2) {
parts.resize(2);
}
}
while (parts.size() < 2) {
parts.emplace_back();
}
return parts;
}
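// Splits metadata on top-level commas only: commas inside quotes, braces,
// parentheses or brackets are kept as part of the current pair.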
std::vector<absl::string_view> SplitPairs(absl::string_view metadata) {
std::vector<absl::string_view> key_value_pairs;
std::stack<char> quotes;
size_t start = 0, end = 0;
for (; end < metadata.size(); ++end) {
char ch = metadata[end];
switch (ch) {
case '\"':
case '\'':
if (quotes.empty() || quotes.top() != ch) {
quotes.push(ch);
} else {
quotes.pop();
}
break;
case '{':
case '(':
case '[':
quotes.push(ch);
break;
case '}':
if (!quotes.empty() && quotes.top() == '{') {
quotes.pop();
}
break;
case ')':
if (!quotes.empty() && quotes.top() == '(') {
quotes.pop();
}
break;
case ']':
if (!quotes.empty() && quotes.top() == '[') {
quotes.pop();
}
break;
case ',':
if (quotes.empty()) {
if (end - start > 1) {
key_value_pairs.emplace_back(metadata.data() + start, end - start);
}
start = end + 1;
}
break;
}
}
if (end - start > 1) {
key_value_pairs.emplace_back(metadata.data() + start, end - start);
}
return key_value_pairs;
}
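// Parses "key=value" pairs from the metadata, trimming ASCII whitespace and
// discarding pairs with an empty key or value.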
std::vector<std::pair<absl::string_view, absl::string_view>> ParseMetadata(
absl::string_view metadata) {
std::vector<std::pair<absl::string_view, absl::string_view>> key_values;
for (absl::string_view pair : SplitPairs(metadata)) {
std::vector<absl::string_view> parts =
absl::StrSplit(pair, absl::MaxSplits('=', 1));
if (parts.size() == 2) {
absl::string_view key = absl::StripAsciiWhitespace(parts[0]);
absl::string_view value = absl::StripAsciiWhitespace(parts[1]);
if (!key.empty() && !value.empty()) {
key_values.push_back({key, value});
}
}
}
return key_values;
}
}
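// Parses a single annotation into its name and key/value metadata.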
Annotation ParseAnnotation(absl::string_view annotation) {
Annotation result;
std::vector<absl::string_view> parts = SplitNameAndMetadata(annotation);
if (!parts.empty()) {
result.name = absl::StripAsciiWhitespace(parts[0]);
for (const auto& key_value : ParseMetadata(parts[1])) {
result.metadata.push_back({key_value.first, key_value.second});
}
}
return result;
}
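// Splits a "::"-delimited annotation stack and parses each level in turn.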
std::vector<Annotation> ParseAnnotationStack(
absl::string_view annotation_stack) {
std::vector<Annotation> annotations;
const std::string kAnnotationDelimiter = "::";
for (absl::string_view annotation : absl::StrSplit(
annotation_stack, kAnnotationDelimiter, absl::SkipEmpty())) {
annotations.emplace_back(ParseAnnotation(annotation));
}
return annotations;
}
}
} | #include "xla/tsl/profiler/utils/parse_annotation.h"
#include <vector>
#include "absl/strings/string_view.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace profiler {
namespace {
TEST(ParseAnnotationStackTest, EmptyAnnotationStackTest) {
std::vector<Annotation> annotations = ParseAnnotationStack("");
ASSERT_TRUE(annotations.empty());
}
TEST(ParseAnnotationStackTest, SingleAnnotationStackTest) {
std::vector<Annotation> annotations = ParseAnnotationStack("name");
ASSERT_FALSE(annotations.empty());
EXPECT_EQ(annotations.back().name, "name");
EXPECT_TRUE(annotations.back().metadata.empty());
}
TEST(ParseAnnotationStackTest, MultiLevelAnnotationStackTest) {
std::vector<Annotation> annotations = ParseAnnotationStack("outer::inner");
ASSERT_EQ(annotations.size(), 2);
EXPECT_EQ(annotations.front().name, "outer");
EXPECT_TRUE(annotations.front().metadata.empty());
EXPECT_EQ(annotations.back().name, "inner");
EXPECT_TRUE(annotations.back().metadata.empty());
}
TEST(ParseAnnotationTest, EmptyAnnotationTest) {
Annotation annotation = ParseAnnotation("");
EXPECT_TRUE(annotation.name.empty());
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, SimpleNameTest) {
Annotation annotation = ParseAnnotation("name");
EXPECT_EQ(annotation.name, "name");
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, SimpleNameWithWhitespaceTest) {
Annotation annotation = ParseAnnotation("name ");
EXPECT_EQ(annotation.name, "name");
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, EmptyMetadataTest) {
Annotation annotation = ParseAnnotation("name#");
EXPECT_EQ(annotation.name, "name");
EXPECT_TRUE(annotation.metadata.empty());
annotation = ParseAnnotation("name1##");
EXPECT_EQ(annotation.name, "name1");
EXPECT_TRUE(annotation.metadata.empty());
annotation = ParseAnnotation("name2###");
EXPECT_EQ(annotation.name, "name2");
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, SingleMetadataTest) {
Annotation annotation = ParseAnnotation("name#key=value#");
EXPECT_EQ(annotation.name, "name");
ASSERT_EQ(annotation.metadata.size(), 1);
EXPECT_EQ(annotation.metadata.at(0).key, "key");
EXPECT_EQ(annotation.metadata.at(0).value, "value");
}
TEST(ParseAnnotationTest, MultipleMetadataTest) {
Annotation annotation = ParseAnnotation("name#k1=v1,k2=v2,k3=v3#");
EXPECT_EQ(annotation.name, "name");
ASSERT_EQ(annotation.metadata.size(), 3);
EXPECT_EQ(annotation.metadata.at(0).key, "k1");
EXPECT_EQ(annotation.metadata.at(0).value, "v1");
EXPECT_EQ(annotation.metadata.at(1).key, "k2");
EXPECT_EQ(annotation.metadata.at(1).value, "v2");
EXPECT_EQ(annotation.metadata.at(2).key, "k3");
EXPECT_EQ(annotation.metadata.at(2).value, "v3");
}
TEST(ParseAnnotationTest, MultipleMetadataWithWhitespaceTest) {
Annotation annotation = ParseAnnotation("name # k1 = v1, ,k2=v2 #");
EXPECT_EQ(annotation.name, "name");
ASSERT_EQ(annotation.metadata.size(), 2);
EXPECT_EQ(annotation.metadata.at(0).key, "k1");
EXPECT_EQ(annotation.metadata.at(0).value, "v1");
EXPECT_EQ(annotation.metadata.at(1).key, "k2");
EXPECT_EQ(annotation.metadata.at(1).value, "v2");
}
TEST(ParseAnnotationTest, KeyValueSeparatorTest) {
Annotation annotation = ParseAnnotation("name#=v1,k2=,k3==v3,k4=v4=#");
EXPECT_EQ(annotation.name, "name");
ASSERT_EQ(annotation.metadata.size(), 2);
EXPECT_EQ(annotation.metadata.at(0).key, "k3");
EXPECT_EQ(annotation.metadata.at(0).value, "=v3");
EXPECT_EQ(annotation.metadata.at(1).key, "k4");
EXPECT_EQ(annotation.metadata.at(1).value, "v4=");
}
TEST(ParseAnnotationTest, ExtraMetadataSeparatorTest) {
Annotation annotation = ParseAnnotation("name##k1=v1#");
EXPECT_EQ(annotation.name, "name");
EXPECT_TRUE(annotation.metadata.empty());
}
TEST(ParseAnnotationTest, QuotedMetadata) {
Annotation annotation = ParseAnnotation(
"name#k1=(v11,v12),k2=[v21,v22,v23],k3={v31,v32}, k4=\"v41,v42\","
"(k51,k52)='v51,v52'#");
EXPECT_EQ(annotation.metadata.at(0).key, "k1");
EXPECT_EQ(annotation.metadata.at(0).value, "(v11,v12)");
EXPECT_EQ(annotation.metadata.at(1).key, "k2");
EXPECT_EQ(annotation.metadata.at(1).value, "[v21,v22,v23]");
EXPECT_EQ(annotation.metadata.at(2).key, "k3");
EXPECT_EQ(annotation.metadata.at(2).value, "{v31,v32}");
EXPECT_EQ(annotation.metadata.at(3).key, "k4");
EXPECT_EQ(annotation.metadata.at(3).value, "\"v41,v42\"");
EXPECT_EQ(annotation.metadata.at(4).key, "(k51,k52)");
EXPECT_EQ(annotation.metadata.at(4).value, "'v51,v52'");
}
TEST(ParseAnnotationTest, UnmatchedQuotedMetadata) {
Annotation annotation = ParseAnnotation("name#k1=v1,k2=(v2,k3=v3#");
EXPECT_EQ(annotation.metadata.at(0).key, "k1");
EXPECT_EQ(annotation.metadata.at(0).value, "v1");
EXPECT_EQ(annotation.metadata.at(1).key, "k2");
EXPECT_EQ(annotation.metadata.at(1).value, "(v2,k3=v3");
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/parse_annotation.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/profiler/utils/parse_annotation_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
43da607d-bf25-4e1f-836a-cb8c6ddb47bb | cpp | tensorflow/tensorflow | symbolic_tiled_hlo_instruction | third_party/xla/xla/service/gpu/model/symbolic_tiled_hlo_instruction.cc | third_party/xla/xla/service/gpu/model/symbolic_tiled_hlo_instruction_test.cc | #include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include <cstdint>
#include <sstream>
#include <string>
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "xla/service/gpu/model/affine_map_evaluator.h"
#include "xla/service/gpu/model/symbolic_tile.h"
namespace xla {
namespace gpu {
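// Tile offsets, sizes and strides are obtained by evaluating the symbolic
// tile's offset, size and stride affine maps at the given tile parameters.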
llvm::SmallVector<int64_t> SymbolicTiledHloInstruction::TileOffsets(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateAffineMap(symbolic_tile().offset_map(),
tile_parameters);
}
llvm::SmallVector<int64_t> SymbolicTiledHloInstruction::TileSizes(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateAffineMap(symbolic_tile().size_map(),
tile_parameters);
}
llvm::SmallVector<int64_t> SymbolicTiledHloInstruction::TileStrides(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateAffineMap(symbolic_tile().stride_map(),
tile_parameters);
}
std::string SymbolicTiledHloInstruction::ToString() const {
std::stringstream ss;
ss << "\thlo: " << hlo_->ToString() << "\n";
ss << "\t" << symbolic_tile().ToString() << "\n";
ss << "\tindexing map: " << indexing_map_ << "\n";
return ss.str();
}
}
} | #include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include <cstdint>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using SymbolicTiledHloInstructionTest = HloTestBase;
TEST_F(SymbolicTiledHloInstructionTest, TransposeTileSizesAreSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
fused_computation {
p0 = f32[16,32] parameter(0)
p1 = f32[32,16] parameter(1)
transpose = f32[32,16] transpose(p0), dimensions={1,0}
ROOT subtract = f32[32,16] subtract(transpose, p1)
}
ENTRY main {
p0 = f32[16,32] parameter(0)
p1 = f32[32,16] parameter(1)
ROOT root = f32[32,16] fusion(p0, p1), kind=kLoop, calls=fused_computation
}
)"));
mlir::MLIRContext mlir_ctx;
auto fusion = module->entry_computation()->root_instruction();
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(fusion);
auto output_to_input_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_ctx);
HloInstruction* subtract = fusion->fused_expression_root();
HloInstruction* p0 = subtract->mutable_operand(0)->mutable_operand(0);
HloInstruction* p1 = subtract->mutable_operand(1);
IndexingMap p0_indexing =
*output_to_input_indexing[fusion->operand(0)].begin();
std::optional<SymbolicTile> p0_symbolic_tile =
SymbolicTile::FromIndexingMap(p0_indexing);
ASSERT_TRUE(p0_symbolic_tile.has_value());
SymbolicTiledHloInstruction tiled_p0(p0, p0_indexing);
tiled_p0.set_symbolic_tile(*p0_symbolic_tile);
ASSERT_TRUE(p0_symbolic_tile.has_value());
IndexingMap p1_indexing =
*output_to_input_indexing[fusion->operand(1)].begin();
std::optional<SymbolicTile> p1_symbolic_tile =
SymbolicTile::FromIndexingMap(p1_indexing);
ASSERT_TRUE(p1_symbolic_tile.has_value());
SymbolicTiledHloInstruction tiled_p1(p1, p1_indexing);
tiled_p1.set_symbolic_tile(*p1_symbolic_tile);
std::vector<int64_t> output_tile_sizes = {8, 4};
auto p0_tile_sizes = tiled_p0.TileSizes(output_tile_sizes);
EXPECT_THAT(tiled_p0.TileSizes(output_tile_sizes), ElementsAre(4, 8));
EXPECT_THAT(tiled_p1.TileSizes(output_tile_sizes), ElementsAre(8, 4));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/symbolic_tiled_hlo_instruction.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/symbolic_tiled_hlo_instruction_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd4944f1-a2be-4199-8289-233b06c073b9 | cpp | google/tensorstore | gcs_testbench | tensorstore/kvstore/gcs/gcs_testbench.cc | tensorstore/kvstore/gcs_http/gcs_testbench_test.cc | #include "tensorstore/kvstore/gcs/gcs_testbench.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/flags/flag.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/channel.h"
#include "grpcpp/client_context.h"
#include "grpcpp/create_channel.h"
#include "grpcpp/security/credentials.h"
#include "grpcpp/support/status.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/http/curl_transport.h"
#include "tensorstore/internal/http/http_request.h"
#include "tensorstore/internal/http/http_transport.h"
#include "tensorstore/internal/http/transport_test_utils.h"
#include "tensorstore/internal/os/subprocess.h"
#include "tensorstore/proto/parse_text_proto_or_die.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "google/storage/v2/storage.grpc.pb.h"
#include "google/storage/v2/storage.pb.h"
ABSL_FLAG(std::string, testbench_binary, "",
"Path to the gcs storage-testbench rest_server");
namespace gcs_testbench {
using ::google::storage::v2::Storage;
using ::tensorstore::internal::GrpcStatusToAbslStatus;
using ::tensorstore::internal::SpawnSubprocess;
using ::tensorstore::internal::Subprocess;
using ::tensorstore::internal::SubprocessOptions;
using ::tensorstore::internal_http::GetDefaultHttpTransport;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::tensorstore::internal_http::IssueRequestOptions;
using ::tensorstore::transport_test_utils::TryPickUnusedPort;
StorageTestbench::StorageTestbench() = default;
std::string StorageTestbench::http_address() {
return absl::StrFormat("localhost:%d", http_port);
}
std::string StorageTestbench::grpc_address() {
return absl::StrFormat("localhost:%d", grpc_port);
}
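// Starts the testbench child process on a free HTTP port, then polls its
// HTTP endpoint until the gRPC port is reported (restarting the child if it
// exits early) or the 30-second deadline expires.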
void StorageTestbench::SpawnProcess() {
if (running) return;
const auto start_child = [&] {
http_port = TryPickUnusedPort().value_or(0);
ABSL_CHECK(http_port > 0);
ABSL_LOG(INFO) << "Spawning testbench: http:
{
SubprocessOptions options{absl::GetFlag(FLAGS_testbench_binary),
{absl::StrFormat("--port=%d", http_port)}};
TENSORSTORE_CHECK_OK_AND_ASSIGN(child, SpawnSubprocess(options));
}
};
start_child();
for (auto deadline = absl::Now() + absl::Seconds(30);;) {
absl::SleepFor(absl::Milliseconds(200));
if (!absl::IsUnavailable(child->Join(false).status())) {
start_child();
}
auto result =
GetDefaultHttpTransport()
->IssueRequest(
HttpRequestBuilder(
"GET", absl::StrFormat("http:
http_port))
.BuildRequest(),
IssueRequestOptions()
.SetRequestTimeout(absl::Seconds(15))
.SetConnectTimeout(absl::Seconds(15)))
.result();
if (result.ok()) {
if (result->status_code != 200) {
ABSL_LOG(ERROR) << "Failed to start grpc server: " << *result;
} else if (!absl::SimpleAtoi(result->payload.Flatten(), &grpc_port)) {
ABSL_LOG(ERROR) << "Unexpected response from start_grpc: " << *result;
} else {
break;
}
} else {
ABSL_LOG(ERROR) << "Failed to start grpc server: " << result.status();
}
if (absl::Now() < deadline && absl::IsUnavailable(result.status())) {
continue;
}
ABSL_LOG(FATAL) << "Failed to start testbench: " << result.status();
}
running = true;
}
StorageTestbench::~StorageTestbench() {
if (child) {
child->Kill().IgnoreError();
auto join_result = child->Join();
if (!join_result.ok()) {
ABSL_LOG(ERROR) << "Joining storage_testbench subprocess failed: "
<< join_result.status();
}
}
}
absl::Status StorageTestbench::CreateBucket(std::string grpc_endpoint,
std::string bucket) {
google::storage::v2::CreateBucketRequest bucket_request =
tensorstore::ParseTextProtoOrDie(R"pb(
parent: 'projects/12345'
bucket: { location: 'US' storage_class: 'STANDARD' }
bucket_id: 'bucket'
predefined_acl: 'publicReadWrite'
predefined_default_object_acl: 'publicReadWrite'
)pb");
bucket_request.set_bucket_id(bucket);
google::storage::v2::Bucket bucket_response;
std::shared_ptr<grpc::Channel> channel = grpc::CreateChannel(
grpc_endpoint, grpc::InsecureChannelCredentials());
if (!channel->WaitForConnected(
absl::ToChronoTime(absl::Now() + absl::Milliseconds(100)))) {
ABSL_LOG(WARNING) << "Failed to connect to grpc endpoint after 100ms: "
<< grpc_endpoint;
}
auto stub = Storage::NewStub(std::move(channel));
grpc::ClientContext client_context;
grpc::Status status =
stub->CreateBucket(&client_context, bucket_request, &bucket_response);
return GrpcStatusToAbslStatus(status);
}
} | #include "tensorstore/kvstore/gcs/gcs_testbench.h"
#include <stddef.h>
#include <cstring>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/base/call_once.h"
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "tensorstore/internal/env.h"
#include "tensorstore/internal/thread/thread.h"
#include "tensorstore/kvstore/generation.h"
#include "tensorstore/kvstore/kvstore.h"
#include "tensorstore/kvstore/operations.h"
#include "tensorstore/kvstore/test_util.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace kvstore = ::tensorstore::kvstore;
using ::gcs_testbench::StorageTestbench;
using ::tensorstore::KvStore;
using ::tensorstore::StorageGeneration;
namespace {
StorageTestbench& GetTestBench() {
static absl::NoDestructor<StorageTestbench> testbench;
static absl::once_flag init_once;
absl::call_once(init_once, [&]() {
testbench->SpawnProcess();
static std::string http_address = testbench->http_address();
::tensorstore::internal::SetEnv("TENSORSTORE_GCS_HTTP_URL",
http_address.c_str());
::tensorstore::internal::SetEnv("GOOGLE_AUTH_TOKEN_FOR_TESTING", "abc");
ABSL_LOG(INFO) << "Using " << http_address;
ABSL_LOG(INFO) << "Creating bucket: "
<< StorageTestbench::CreateBucket(testbench->grpc_address(),
"test_bucket");
});
return *testbench;
}
class GcsTestbenchTest : public testing::Test {
public:
tensorstore::KvStore OpenStore(std::string path = "") {
GetTestBench();
return kvstore::Open(
{{"driver", "gcs"}, {"bucket", "test_bucket"}, {"path", path}})
.value();
}
};
TEST_F(GcsTestbenchTest, Basic) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueReadWriteOps(store);
}
TEST_F(GcsTestbenchTest, DeletePrefix) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueStoreDeletePrefix(store);
}
TEST_F(GcsTestbenchTest, DeleteRange) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueStoreDeleteRange(store);
}
TEST_F(GcsTestbenchTest, DeleteRangeToEnd) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueStoreDeleteRangeToEnd(store);
}
TEST_F(GcsTestbenchTest, DeleteRangeFromBeginning) {
auto store = OpenStore();
tensorstore::internal::TestKeyValueStoreDeleteRangeFromBeginning(store);
}
TEST_F(GcsTestbenchTest, List) {
auto store = OpenStore("list/");
tensorstore::internal::TestKeyValueStoreList(store);
}
TEST_F(GcsTestbenchTest, CancellationDoesNotCrash) {
auto store = OpenStore("cancellation/");
static constexpr size_t kCount = 1000;
std::vector<std::string> keys;
keys.reserve(kCount);
for (size_t i = 0; i < kCount; ++i) {
keys.push_back(absl::StrCat(i));
}
absl::Cord value("xyzzyx");
std::vector<tensorstore::AnyFuture> futures;
futures.reserve(kCount * 2);
for (const auto& key : keys) {
futures.push_back(kvstore::Write(store, key, value));
}
for (const auto& key : keys) {
futures.push_back(kvstore::Read(store, key));
}
futures = {};
for (const auto& key : keys) {
futures.push_back(kvstore::Delete(store, key));
}
for (auto& future : futures) {
future.Wait();
}
}
TEST_F(GcsTestbenchTest, ConcurrentWrites) {
tensorstore::internal::TestConcurrentWritesOptions options;
auto store = OpenStore("concurrent_writes/");
options.get_store = [&] { return store; };
options.num_iterations = 0x3f;
tensorstore::internal::TestConcurrentWrites(options);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs/gcs_testbench.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/gcs_http/gcs_testbench_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
e8e2de40-a21c-4b51-aef3-3f6259a0dd1a | cpp | google/quiche | quic_generic_session | quiche/quic/core/quic_generic_session.cc | quiche/quic/core/quic_generic_session_test.cc | #include "quiche/quic/core/quic_generic_session.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/web_transport_stream_adapter.h"
#include "quiche/quic/core/quic_crypto_client_stream.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_stream_priority.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/common/simple_buffer_allocator.h"
#include "quiche/web_transport/web_transport.h"
namespace quic {
namespace {
class NoOpProofHandler : public QuicCryptoClientStream::ProofHandler {
public:
void OnProofValid(const QuicCryptoClientConfig::CachedState&) override {}
void OnProofVerifyDetailsAvailable(const ProofVerifyDetails&) override {}
};
class NoOpServerCryptoHelper : public QuicCryptoServerStreamBase::Helper {
public:
bool CanAcceptClientHello(const CryptoHandshakeMessage& ,
const QuicSocketAddress& ,
const QuicSocketAddress& ,
const QuicSocketAddress& ,
std::string* ) const override {
return true;
}
};
}
ParsedQuicVersionVector GetQuicVersionsForGenericSession() {
return {ParsedQuicVersion::RFCv1()};
}
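// A QuicStream that is exposed as a webtransport::Stream by forwarding its
// data events to a WebTransportStreamAdapter.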
class QUICHE_EXPORT QuicGenericStream : public QuicStream {
public:
QuicGenericStream(QuicStreamId id, QuicSession* session)
: QuicStream(id, session, false,
QuicUtils::GetStreamType(
id, session->connection()->perspective(),
session->IsIncomingStream(id), session->version())),
adapter_(session, this, sequencer(), std::nullopt) {
adapter_.SetPriority(webtransport::StreamPriority{0, 0});
}
WebTransportStreamAdapter* adapter() { return &adapter_; }
void OnDataAvailable() override { adapter_.OnDataAvailable(); }
void OnCanWriteNewData() override { adapter_.OnCanWriteNewData(); }
private:
WebTransportStreamAdapter adapter_;
};
QuicGenericSessionBase::QuicGenericSessionBase(
QuicConnection* connection, bool owns_connection, Visitor* owner,
const QuicConfig& config, std::string alpn, WebTransportVisitor* visitor,
bool owns_visitor,
std::unique_ptr<QuicDatagramQueue::Observer> datagram_observer)
: QuicSession(connection, owner, config, GetQuicVersionsForGenericSession(),
0,
std::move(datagram_observer),
QuicPriorityType::kWebTransport),
alpn_(std::move(alpn)),
visitor_(visitor),
owns_connection_(owns_connection),
owns_visitor_(owns_visitor) {}
QuicGenericSessionBase::~QuicGenericSessionBase() {
if (owns_connection_) {
DeleteConnection();
}
if (owns_visitor_) {
delete visitor_;
visitor_ = nullptr;
}
}
QuicStream* QuicGenericSessionBase::CreateIncomingStream(QuicStreamId id) {
QUIC_DVLOG(1) << "Creating incoming QuicGenricStream " << id;
QuicGenericStream* stream = CreateStream(id);
if (stream->type() == BIDIRECTIONAL) {
incoming_bidirectional_streams_.push_back(id);
visitor_->OnIncomingBidirectionalStreamAvailable();
} else {
incoming_unidirectional_streams_.push_back(id);
visitor_->OnIncomingUnidirectionalStreamAvailable();
}
return stream;
}
void QuicGenericSessionBase::OnTlsHandshakeComplete() {
QuicSession::OnTlsHandshakeComplete();
visitor_->OnSessionReady();
}
webtransport::Stream*
QuicGenericSessionBase::AcceptIncomingBidirectionalStream() {
while (!incoming_bidirectional_streams_.empty()) {
webtransport::Stream* stream =
GetStreamById(incoming_bidirectional_streams_.front());
incoming_bidirectional_streams_.pop_front();
if (stream != nullptr) {
return stream;
}
}
return nullptr;
}
webtransport::Stream*
QuicGenericSessionBase::AcceptIncomingUnidirectionalStream() {
while (!incoming_unidirectional_streams_.empty()) {
webtransport::Stream* stream =
GetStreamById(incoming_unidirectional_streams_.front());
incoming_unidirectional_streams_.pop_front();
if (stream != nullptr) {
return stream;
}
}
return nullptr;
}
webtransport::Stream*
QuicGenericSessionBase::OpenOutgoingBidirectionalStream() {
if (!CanOpenNextOutgoingBidirectionalStream()) {
QUIC_BUG(QuicGenericSessionBase_flow_control_violation_bidi)
<< "Attempted to open a stream in violation of flow control";
return nullptr;
}
return CreateStream(GetNextOutgoingBidirectionalStreamId())->adapter();
}
webtransport::Stream*
QuicGenericSessionBase::OpenOutgoingUnidirectionalStream() {
if (!CanOpenNextOutgoingUnidirectionalStream()) {
QUIC_BUG(QuicGenericSessionBase_flow_control_violation_unidi)
<< "Attempted to open a stream in violation of flow control";
return nullptr;
}
return CreateStream(GetNextOutgoingUnidirectionalStreamId())->adapter();
}
QuicGenericStream* QuicGenericSessionBase::CreateStream(QuicStreamId id) {
auto stream = std::make_unique<QuicGenericStream>(id, this);
QuicGenericStream* stream_ptr = stream.get();
ActivateStream(std::move(stream));
return stream_ptr;
}
void QuicGenericSessionBase::OnMessageReceived(absl::string_view message) {
visitor_->OnDatagramReceived(message);
}
void QuicGenericSessionBase::OnCanCreateNewOutgoingStream(bool unidirectional) {
if (unidirectional) {
visitor_->OnCanCreateNewOutgoingUnidirectionalStream();
} else {
visitor_->OnCanCreateNewOutgoingBidirectionalStream();
}
}
webtransport::Stream* QuicGenericSessionBase::GetStreamById(
webtransport::StreamId id) {
QuicStream* stream = GetActiveStream(id);
if (stream == nullptr) {
return nullptr;
}
return static_cast<QuicGenericStream*>(stream)->adapter();
}
webtransport::DatagramStatus QuicGenericSessionBase::SendOrQueueDatagram(
absl::string_view datagram) {
quiche::QuicheBuffer buffer = quiche::QuicheBuffer::Copy(
quiche::SimpleBufferAllocator::Get(), datagram);
return MessageStatusToWebTransportStatus(
datagram_queue()->SendOrQueueDatagram(
quiche::QuicheMemSlice(std::move(buffer))));
}
void QuicGenericSessionBase::OnConnectionClosed(
const QuicConnectionCloseFrame& frame, ConnectionCloseSource source) {
QuicSession::OnConnectionClosed(frame, source);
visitor_->OnSessionClosed(static_cast<webtransport::SessionErrorCode>(
frame.transport_close_frame_type),
frame.error_details);
}
QuicGenericClientSession::QuicGenericClientSession(
QuicConnection* connection, bool owns_connection, Visitor* owner,
const QuicConfig& config, std::string host, uint16_t port, std::string alpn,
webtransport::SessionVisitor* visitor, bool owns_visitor,
std::unique_ptr<QuicDatagramQueue::Observer> datagram_observer,
QuicCryptoClientConfig* crypto_config)
: QuicGenericSessionBase(connection, owns_connection, owner, config,
std::move(alpn), visitor, owns_visitor,
std::move(datagram_observer)) {
static NoOpProofHandler* handler = new NoOpProofHandler();
crypto_stream_ = std::make_unique<QuicCryptoClientStream>(
QuicServerId(std::move(host), port), this,
crypto_config->proof_verifier()->CreateDefaultContext(), crypto_config,
handler, false);
}
QuicGenericClientSession::QuicGenericClientSession(
QuicConnection* connection, bool owns_connection, Visitor* owner,
const QuicConfig& config, std::string host, uint16_t port, std::string alpn,
CreateWebTransportSessionVisitorCallback create_visitor_callback,
std::unique_ptr<QuicDatagramQueue::Observer> datagram_observer,
QuicCryptoClientConfig* crypto_config)
: QuicGenericClientSession(
connection, owns_connection, owner, config, std::move(host), port,
std::move(alpn), std::move(create_visitor_callback)(*this).release(),
true, std::move(datagram_observer), crypto_config) {}
QuicGenericServerSession::QuicGenericServerSession(
QuicConnection* connection, bool owns_connection, Visitor* owner,
const QuicConfig& config, std::string alpn,
webtransport::SessionVisitor* visitor, bool owns_visitor,
std::unique_ptr<QuicDatagramQueue::Observer> datagram_observer,
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache)
: QuicGenericSessionBase(connection, owns_connection, owner, config,
std::move(alpn), visitor, owns_visitor,
std::move(datagram_observer)) {
static NoOpServerCryptoHelper* helper = new NoOpServerCryptoHelper();
crypto_stream_ = CreateCryptoServerStream(
crypto_config, compressed_certs_cache, this, helper);
}
QuicGenericServerSession::QuicGenericServerSession(
QuicConnection* connection, bool owns_connection, Visitor* owner,
const QuicConfig& config, std::string alpn,
CreateWebTransportSessionVisitorCallback create_visitor_callback,
std::unique_ptr<QuicDatagramQueue::Observer> datagram_observer,
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache)
: QuicGenericServerSession(
connection, owns_connection, owner, config, std::move(alpn),
std::move(create_visitor_callback)(*this).release(),
true, std::move(datagram_observer), crypto_config,
compressed_certs_cache) {}
} | #include "quiche/quic/core/quic_generic_session.h"
#include <cstddef>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/core/crypto/quic_compressed_certs_cache.h"
#include "quiche/quic/core/crypto/quic_crypto_client_config.h"
#include "quiche/quic/core/crypto/quic_crypto_server_config.h"
#include "quiche/quic/core/crypto/quic_random.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_datagram_queue.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/web_transport_interface.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/quic_session_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simulator/simulator.h"
#include "quiche/quic/test_tools/simulator/test_harness.h"
#include "quiche/quic/test_tools/web_transport_test_tools.h"
#include "quiche/quic/tools/web_transport_test_visitors.h"
#include "quiche/common/quiche_stream.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
#include "quiche/web_transport/web_transport.h"
namespace quic::test {
namespace {
enum ServerType { kDiscardServer, kEchoServer };
using quiche::test::StatusIs;
using simulator::Simulator;
using testing::_;
using testing::Assign;
using testing::AtMost;
using testing::Eq;
class CountingDatagramObserver : public QuicDatagramQueue::Observer {
public:
CountingDatagramObserver(int& total) : total_(total) {}
  void OnDatagramProcessed(std::optional<MessageStatus>) override { ++total_; }
private:
int& total_;
};
class ClientEndpoint : public simulator::QuicEndpointWithConnection {
public:
ClientEndpoint(Simulator* simulator, const std::string& name,
const std::string& peer_name, const QuicConfig& config)
: QuicEndpointWithConnection(simulator, name, peer_name,
Perspective::IS_CLIENT,
GetQuicVersionsForGenericSession()),
crypto_config_(crypto_test_utils::ProofVerifierForTesting()),
session_(connection_.get(), false, nullptr, config, "test.example.com",
443, "example_alpn", &visitor_, false,
std::make_unique<CountingDatagramObserver>(
total_datagrams_processed_),
&crypto_config_) {
session_.Initialize();
session_.connection()->sent_packet_manager().SetSendAlgorithm(
CongestionControlType::kBBRv2);
EXPECT_CALL(visitor_, OnSessionReady())
.Times(AtMost(1))
.WillOnce(Assign(&session_ready_, true));
}
QuicGenericClientSession* session() { return &session_; }
MockWebTransportSessionVisitor* visitor() { return &visitor_; }
bool session_ready() const { return session_ready_; }
int total_datagrams_processed() const { return total_datagrams_processed_; }
private:
QuicCryptoClientConfig crypto_config_;
MockWebTransportSessionVisitor visitor_;
QuicGenericClientSession session_;
bool session_ready_ = false;
int total_datagrams_processed_ = 0;
};
class ServerEndpoint : public simulator::QuicEndpointWithConnection {
public:
ServerEndpoint(Simulator* simulator, const std::string& name,
const std::string& peer_name, const QuicConfig& config,
ServerType type)
: QuicEndpointWithConnection(simulator, name, peer_name,
Perspective::IS_SERVER,
GetQuicVersionsForGenericSession()),
crypto_config_(QuicCryptoServerConfig::TESTING,
QuicRandom::GetInstance(),
crypto_test_utils::ProofSourceForTesting(),
KeyExchangeSource::Default()),
compressed_certs_cache_(
QuicCompressedCertsCache::kQuicCompressedCertsCacheSize),
session_(connection_.get(), false, nullptr, config, "example_alpn",
type == kEchoServer
? static_cast<webtransport::SessionVisitor*>(
new EchoWebTransportSessionVisitor(
&session_,
false))
: static_cast<webtransport::SessionVisitor*>(
new DiscardWebTransportSessionVisitor(&session_)),
true,
nullptr, &crypto_config_,
&compressed_certs_cache_) {
session_.Initialize();
session_.connection()->sent_packet_manager().SetSendAlgorithm(
CongestionControlType::kBBRv2);
}
QuicGenericServerSession* session() { return &session_; }
private:
QuicCryptoServerConfig crypto_config_;
QuicCompressedCertsCache compressed_certs_cache_;
QuicGenericServerSession session_;
};
class QuicGenericSessionTest : public QuicTest {
public:
void CreateDefaultEndpoints(ServerType server_type) {
client_ = std::make_unique<ClientEndpoint>(
&test_harness_.simulator(), "Client", "Server", client_config_);
server_ =
std::make_unique<ServerEndpoint>(&test_harness_.simulator(), "Server",
"Client", server_config_, server_type);
test_harness_.set_client(client_.get());
test_harness_.set_server(server_.get());
}
void WireUpEndpoints() { test_harness_.WireUpEndpoints(); }
void RunHandshake() {
client_->session()->CryptoConnect();
bool result = test_harness_.RunUntilWithDefaultTimeout([this]() {
return client_->session_ready() ||
client_->session()->error() != QUIC_NO_ERROR;
});
EXPECT_TRUE(result);
}
protected:
QuicConfig client_config_ = DefaultQuicConfig();
QuicConfig server_config_ = DefaultQuicConfig();
simulator::TestHarness test_harness_;
std::unique_ptr<ClientEndpoint> client_;
std::unique_ptr<ServerEndpoint> server_;
};
TEST_F(QuicGenericSessionTest, SuccessfulHandshake) {
CreateDefaultEndpoints(kDiscardServer);
WireUpEndpoints();
RunHandshake();
EXPECT_TRUE(client_->session_ready());
}
TEST_F(QuicGenericSessionTest, SendOutgoingStreams) {
CreateDefaultEndpoints(kDiscardServer);
WireUpEndpoints();
RunHandshake();
std::vector<webtransport::Stream*> streams;
for (int i = 0; i < 10; i++) {
webtransport::Stream* stream =
client_->session()->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream->Write("test"));
streams.push_back(stream);
}
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout([this]() {
return QuicSessionPeer::GetNumOpenDynamicStreams(server_->session()) == 10;
}));
for (webtransport::Stream* stream : streams) {
ASSERT_TRUE(stream->SendFin());
}
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout([this]() {
return QuicSessionPeer::GetNumOpenDynamicStreams(server_->session()) == 0;
}));
}
TEST_F(QuicGenericSessionTest, EchoBidirectionalStreams) {
CreateDefaultEndpoints(kEchoServer);
WireUpEndpoints();
RunHandshake();
webtransport::Stream* stream =
client_->session()->OpenOutgoingBidirectionalStream();
EXPECT_TRUE(stream->Write("Hello!"));
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout(
[stream]() { return stream->ReadableBytes() == strlen("Hello!"); }));
std::string received;
WebTransportStream::ReadResult result = stream->Read(&received);
EXPECT_EQ(result.bytes_read, strlen("Hello!"));
EXPECT_FALSE(result.fin);
EXPECT_EQ(received, "Hello!");
EXPECT_TRUE(stream->SendFin());
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout([this]() {
return QuicSessionPeer::GetNumOpenDynamicStreams(server_->session()) == 0;
}));
}
TEST_F(QuicGenericSessionTest, EchoUnidirectionalStreams) {
CreateDefaultEndpoints(kEchoServer);
WireUpEndpoints();
RunHandshake();
webtransport::Stream* stream1 =
client_->session()->OpenOutgoingUnidirectionalStream();
EXPECT_TRUE(stream1->Write("Stream One"));
webtransport::Stream* stream2 =
client_->session()->OpenOutgoingUnidirectionalStream();
EXPECT_TRUE(stream2->Write("Stream Two"));
EXPECT_TRUE(stream2->SendFin());
bool stream_received = false;
EXPECT_CALL(*client_->visitor(), OnIncomingUnidirectionalStreamAvailable())
.Times(2)
.WillRepeatedly(Assign(&stream_received, true));
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout(
[&stream_received]() { return stream_received; }));
webtransport::Stream* reply =
client_->session()->AcceptIncomingUnidirectionalStream();
ASSERT_TRUE(reply != nullptr);
std::string buffer;
WebTransportStream::ReadResult result = reply->Read(&buffer);
EXPECT_GT(result.bytes_read, 0u);
EXPECT_TRUE(result.fin);
EXPECT_EQ(buffer, "Stream Two");
stream_received = false;
buffer = "";
EXPECT_TRUE(stream1->SendFin());
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout(
[&stream_received]() { return stream_received; }));
reply = client_->session()->AcceptIncomingUnidirectionalStream();
ASSERT_TRUE(reply != nullptr);
result = reply->Read(&buffer);
EXPECT_GT(result.bytes_read, 0u);
EXPECT_TRUE(result.fin);
EXPECT_EQ(buffer, "Stream One");
}
TEST_F(QuicGenericSessionTest, EchoStreamsUsingPeekApi) {
CreateDefaultEndpoints(kEchoServer);
WireUpEndpoints();
RunHandshake();
webtransport::Stream* stream1 =
client_->session()->OpenOutgoingBidirectionalStream();
EXPECT_TRUE(stream1->Write("Stream One"));
webtransport::Stream* stream2 =
client_->session()->OpenOutgoingUnidirectionalStream();
EXPECT_TRUE(stream2->Write("Stream Two"));
EXPECT_TRUE(stream2->SendFin());
bool stream_received_unidi = false;
EXPECT_CALL(*client_->visitor(), OnIncomingUnidirectionalStreamAvailable())
.WillOnce(Assign(&stream_received_unidi, true));
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout(
[&]() { return stream_received_unidi; }));
webtransport::Stream* reply =
client_->session()->AcceptIncomingUnidirectionalStream();
ASSERT_TRUE(reply != nullptr);
std::string buffer;
quiche::ReadStream::PeekResult peek_result = reply->PeekNextReadableRegion();
EXPECT_EQ(peek_result.peeked_data, "Stream Two");
EXPECT_EQ(peek_result.fin_next, false);
EXPECT_EQ(peek_result.all_data_received, true);
bool fin_received =
quiche::ProcessAllReadableRegions(*reply, [&](absl::string_view chunk) {
buffer.append(chunk.data(), chunk.size());
return true;
});
EXPECT_TRUE(fin_received);
EXPECT_EQ(buffer, "Stream Two");
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout(
[&]() { return stream1->PeekNextReadableRegion().has_data(); }));
peek_result = stream1->PeekNextReadableRegion();
EXPECT_EQ(peek_result.peeked_data, "Stream One");
EXPECT_EQ(peek_result.fin_next, false);
EXPECT_EQ(peek_result.all_data_received, false);
fin_received = stream1->SkipBytes(strlen("Stream One"));
EXPECT_FALSE(fin_received);
peek_result = stream1->PeekNextReadableRegion();
EXPECT_EQ(peek_result.peeked_data, "");
EXPECT_EQ(peek_result.fin_next, false);
EXPECT_EQ(peek_result.all_data_received, false);
EXPECT_TRUE(stream1->SendFin());
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout(
[&]() { return stream1->PeekNextReadableRegion().all_data_received; }));
peek_result = stream1->PeekNextReadableRegion();
EXPECT_EQ(peek_result.peeked_data, "");
EXPECT_EQ(peek_result.fin_next, true);
EXPECT_EQ(peek_result.all_data_received, true);
webtransport::StreamId id = stream1->GetStreamId();
EXPECT_TRUE(client_->session()->GetStreamById(id) != nullptr);
fin_received = stream1->SkipBytes(0);
EXPECT_TRUE(fin_received);
EXPECT_TRUE(client_->session()->GetStreamById(id) == nullptr);
}
TEST_F(QuicGenericSessionTest, EchoDatagram) {
CreateDefaultEndpoints(kEchoServer);
WireUpEndpoints();
RunHandshake();
client_->session()->SendOrQueueDatagram("test");
bool datagram_received = false;
EXPECT_CALL(*client_->visitor(), OnDatagramReceived(Eq("test")))
.WillOnce(Assign(&datagram_received, true));
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout(
[&datagram_received]() { return datagram_received; }));
}
TEST_F(QuicGenericSessionTest, EchoALotOfDatagrams) {
CreateDefaultEndpoints(kEchoServer);
WireUpEndpoints();
RunHandshake();
client_->session()->SetDatagramMaxTimeInQueue(
(10000 * simulator::TestHarness::kRtt).ToAbsl());
for (int i = 0; i < 1000; i++) {
client_->session()->SendOrQueueDatagram(std::string(
client_->session()->GetGuaranteedLargestMessagePayload(), 'a'));
}
size_t received = 0;
EXPECT_CALL(*client_->visitor(), OnDatagramReceived(_))
.WillRepeatedly(
[&received](absl::string_view ) { received++; });
ASSERT_TRUE(test_harness_.simulator().RunUntilOrTimeout(
[this]() { return client_->total_datagrams_processed() >= 1000; },
3 * simulator::TestHarness::kServerBandwidth.TransferTime(
1000 * kMaxOutgoingPacketSize)));
test_harness_.simulator().RunFor(2 * simulator::TestHarness::kRtt);
EXPECT_GT(received, 500u);
EXPECT_LT(received, 1000u);
}
TEST_F(QuicGenericSessionTest, OutgoingStreamFlowControlBlocked) {
server_config_.SetMaxUnidirectionalStreamsToSend(4);
CreateDefaultEndpoints(kDiscardServer);
WireUpEndpoints();
RunHandshake();
webtransport::Stream* stream;
for (int i = 0; i <= 3; i++) {
ASSERT_TRUE(client_->session()->CanOpenNextOutgoingUnidirectionalStream());
stream = client_->session()->OpenOutgoingUnidirectionalStream();
ASSERT_TRUE(stream != nullptr);
ASSERT_TRUE(stream->SendFin());
}
EXPECT_FALSE(client_->session()->CanOpenNextOutgoingUnidirectionalStream());
bool can_create_new_stream = false;
EXPECT_CALL(*client_->visitor(), OnCanCreateNewOutgoingUnidirectionalStream())
.WillOnce(Assign(&can_create_new_stream, true));
ASSERT_TRUE(test_harness_.RunUntilWithDefaultTimeout(
[&can_create_new_stream]() { return can_create_new_stream; }));
EXPECT_TRUE(client_->session()->CanOpenNextOutgoingUnidirectionalStream());
}
TEST_F(QuicGenericSessionTest, ExpireDatagrams) {
CreateDefaultEndpoints(kEchoServer);
WireUpEndpoints();
RunHandshake();
client_->session()->SetDatagramMaxTimeInQueue(
(0.2 * simulator::TestHarness::kRtt).ToAbsl());
for (int i = 0; i < 1000; i++) {
client_->session()->SendOrQueueDatagram(std::string(
client_->session()->GetGuaranteedLargestMessagePayload(), 'a'));
}
size_t received = 0;
EXPECT_CALL(*client_->visitor(), OnDatagramReceived(_))
.WillRepeatedly(
[&received](absl::string_view ) { received++; });
ASSERT_TRUE(test_harness_.simulator().RunUntilOrTimeout(
[this]() { return client_->total_datagrams_processed() >= 1000; },
3 * simulator::TestHarness::kServerBandwidth.TransferTime(
1000 * kMaxOutgoingPacketSize)));
test_harness_.simulator().RunFor(2 * simulator::TestHarness::kRtt);
EXPECT_LT(received, 500);
EXPECT_EQ(received + client_->session()->GetDatagramStats().expired_outgoing,
1000);
}
TEST_F(QuicGenericSessionTest, LoseDatagrams) {
CreateDefaultEndpoints(kEchoServer);
test_harness_.WireUpEndpointsWithLoss(4);
RunHandshake();
client_->session()->SetDatagramMaxTimeInQueue(
(10000 * simulator::TestHarness::kRtt).ToAbsl());
for (int i = 0; i < 1000; i++) {
client_->session()->SendOrQueueDatagram(std::string(
client_->session()->GetGuaranteedLargestMessagePayload(), 'a'));
}
size_t received = 0;
EXPECT_CALL(*client_->visitor(), OnDatagramReceived(_))
.WillRepeatedly(
[&received](absl::string_view ) { received++; });
ASSERT_TRUE(test_harness_.simulator().RunUntilOrTimeout(
[this]() { return client_->total_datagrams_processed() >= 1000; },
4 * simulator::TestHarness::kServerBandwidth.TransferTime(
1000 * kMaxOutgoingPacketSize)));
test_harness_.simulator().RunFor(16 * simulator::TestHarness::kRtt);
QuicPacketCount client_lost =
client_->session()->GetDatagramStats().lost_outgoing;
QuicPacketCount server_lost =
server_->session()->GetDatagramStats().lost_outgoing;
EXPECT_LT(received, 800u);
EXPECT_GT(client_lost, 100u);
EXPECT_GT(server_lost, 100u);
EXPECT_EQ(received + client_lost + server_lost, 1000u);
}
TEST_F(QuicGenericSessionTest, WriteWhenBufferFull) {
CreateDefaultEndpoints(kEchoServer);
WireUpEndpoints();
RunHandshake();
const std::string buffer(64 * 1024 + 1, 'q');
webtransport::Stream* stream =
client_->session()->OpenOutgoingBidirectionalStream();
ASSERT_TRUE(stream != nullptr);
ASSERT_TRUE(stream->CanWrite());
absl::Status status = quiche::WriteIntoStream(*stream, buffer);
QUICHE_EXPECT_OK(status);
EXPECT_FALSE(stream->CanWrite());
status = quiche::WriteIntoStream(*stream, buffer);
EXPECT_THAT(status, StatusIs(absl::StatusCode::kUnavailable));
quiche::StreamWriteOptions options;
options.set_buffer_unconditionally(true);
options.set_send_fin(true);
status = quiche::WriteIntoStream(*stream, buffer, options);
QUICHE_EXPECT_OK(status);
EXPECT_FALSE(stream->CanWrite());
QuicByteCount total_received = 0;
for (;;) {
test_harness_.RunUntilWithDefaultTimeout(
[&] { return stream->PeekNextReadableRegion().has_data(); });
quiche::ReadStream::PeekResult result = stream->PeekNextReadableRegion();
total_received += result.peeked_data.size();
bool fin_consumed = stream->SkipBytes(result.peeked_data.size());
if (fin_consumed) {
break;
}
}
EXPECT_EQ(total_received, 128u * 1024u + 2);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_generic_session.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_generic_session_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
e0e8be79-3ac2-41fa-9213-6eda04617265 | cpp | tensorflow/tensorflow | file_util | tensorflow/lite/delegates/xnnpack/file_util.cc | tensorflow/lite/delegates/xnnpack/file_util_test.cc | #include "tensorflow/lite/delegates/xnnpack/file_util.h"
#include <fcntl.h>
#if defined(_MSC_VER)
#include <io.h>
#define F_OK 0
#else
#include <unistd.h>
#endif
#if defined(__linux__) || defined(__ANDROID__)
#ifndef TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED
#include <sys/syscall.h>
#ifdef SYS_memfd_create
#define TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED 1
#endif
#endif
#endif
#include <cstdio>
#if !TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
#endif
namespace tflite {
namespace xnnpack {
FileDescriptor FileDescriptor::Duplicate() const {
if (!IsValid()) {
return FileDescriptor(-1);
}
return FileDescriptor(dup(fd_));
}
void FileDescriptor::Reset(int new_fd) {
if (fd_ == new_fd) {
return;
}
if (IsValid()) {
close(fd_);
}
fd_ = new_fd;
}
off_t FileDescriptor::GetPos() const { return lseek(fd_, 0, SEEK_CUR); }
off_t FileDescriptor::SetPos(off_t position) const {
return lseek(fd_, position, SEEK_SET);
}
off_t FileDescriptor::SetPosFromEnd(off_t offset) const {
return lseek(fd_, offset, SEEK_END);
}
off_t FileDescriptor::MovePos(off_t offset) const {
return lseek(fd_, offset, SEEK_CUR);
}
FileDescriptor FileDescriptor::Open(const char* path, int flags, mode_t mode) {
return FileDescriptor(open(path, flags, mode));
}
void FileDescriptor::Close() { Reset(-1); }
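// Reads up to `count` bytes into `dst`, looping over short reads; stops early
// at end-of-file and returns false only if a read error occurs.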
bool FileDescriptor::Read(void* dst, size_t count) const {
char* dst_it = reinterpret_cast<char*>(dst);
while (count > 0) {
const auto bytes = read(fd_, dst_it, count);
if (bytes == -1) {
return false;
} else if (bytes == 0) {
break;
}
count -= bytes;
dst_it += bytes;
}
return true;
}
bool FileDescriptor::Write(const void* src, size_t count) const {
const char* src_it = reinterpret_cast<const char*>(src);
while (count > 0) {
const auto bytes = write(fd_, src_it, count);
if (bytes == -1) {
return false;
}
count -= bytes;
src_it += bytes;
}
return true;
}
bool InMemoryFileDescriptorAvailable() {
#if TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED
const int test_fd = syscall(SYS_memfd_create, "test fd", 0);
if (test_fd != -1) {
close(test_fd);
return true;
}
#endif
return false;
}
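// Creates an anonymous in-memory file via the memfd_create syscall where it
// is available; otherwise logs an error and returns an invalid descriptor.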
FileDescriptor CreateInMemoryFileDescriptor(const char* path) {
#ifdef TFLITE_XNNPACK_IN_MEMORY_FILE_ENABLED
return FileDescriptor(
syscall(SYS_memfd_create, "XNNPack in-memory weight cache", 0));
#else
TFLITE_LOG_PROD(tflite::TFLITE_LOG_ERROR,
"XNNPack weight cache: in-memory cache is not enabled for "
"this build.");
return FileDescriptor(-1);
#endif
}
}
} | #include "tensorflow/lite/delegates/xnnpack/file_util.h"
#include <fcntl.h>
#include <string>
#include <type_traits>
#include <utility>
#include <gtest/gtest.h>
namespace tflite::xnnpack {
namespace {
TEST(FileDescriptorTest, DefaultConstructedIsInvalid) {
FileDescriptor fd;
EXPECT_FALSE(fd.IsValid());
}
TEST(FileDescriptorTest, ConstructAndRelease) {
const int kFd = 53;
FileDescriptor fd(kFd);
EXPECT_TRUE(fd.IsValid());
EXPECT_EQ(fd.Value(), kFd);
FileDescriptor fd2(std::move(fd));
EXPECT_FALSE(fd.IsValid());
EXPECT_TRUE(fd2.IsValid());
EXPECT_EQ(fd2.Value(), kFd);
EXPECT_EQ(fd2.Release(), kFd);
EXPECT_FALSE(fd2.IsValid());
EXPECT_FALSE(std::is_copy_constructible_v<FileDescriptor>);
}
TEST(FileDescriptorTest, OpenWriteRewindAndReadWorks) {
const std::string tmp_file = testing::TempDir() + __FUNCTION__;
FileDescriptor fd =
FileDescriptor::Open(tmp_file.c_str(), O_CREAT | O_TRUNC | O_RDWR, 0644);
ASSERT_TRUE(fd.IsValid());
const std::string src_data = "The quick brown fox jumps over the lazy dog.";
EXPECT_TRUE(fd.Write(src_data.data(), src_data.size()));
EXPECT_EQ(fd.SetPos(0), 0);
std::string dst_data(src_data.size(), ' ');
EXPECT_TRUE(fd.Read(dst_data.data(), src_data.size()));
EXPECT_EQ(dst_data, src_data);
}
TEST(FileDescriptorTest, WriteFailureReturnsFalse) {
const std::string tmp_file = testing::TempDir() + __FUNCTION__;
FileDescriptor fd = FileDescriptor::Open(tmp_file.c_str(),
O_CREAT | O_TRUNC | O_RDONLY, 0644);
ASSERT_TRUE(fd.IsValid());
const std::string src_data = "The quick brown fox jumps over the lazy dog.";
EXPECT_FALSE(fd.Write(src_data.data(), src_data.size()));
}
TEST(FileDescriptorTest, ReadFailureReturnsFalse) {
const std::string tmp_file = testing::TempDir() + __FUNCTION__;
FileDescriptor fd = FileDescriptor::Open(tmp_file.c_str(),
O_CREAT | O_TRUNC | O_WRONLY, 0644);
ASSERT_TRUE(fd.IsValid());
std::string dst_data(5, ' ');
EXPECT_FALSE(fd.Read(dst_data.data(), dst_data.size()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/file_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/file_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
239cfaaf-48f0-4bbb-8672-546770e99723 | cpp | tensorflow/tensorflow | thread_safe_buffer | tensorflow/core/data/service/thread_safe_buffer.h | tensorflow/core/data/service/thread_safe_buffer_test.cc | #ifndef TENSORFLOW_CORE_DATA_SERVICE_THREAD_SAFE_BUFFER_H_
#define TENSORFLOW_CORE_DATA_SERVICE_THREAD_SAFE_BUFFER_H_
#include <deque>
#include <utility>
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/statusor.h"
namespace tensorflow {
namespace data {
template <class T>
class ThreadSafeBuffer final {
public:
explicit ThreadSafeBuffer(size_t buffer_size);
StatusOr<T> Pop();
Status Push(StatusOr<T> value);
void Cancel(Status status);
bool Empty() const;
private:
const size_t buffer_size_;
mutable mutex mu_;
condition_variable ready_to_pop_;
condition_variable ready_to_push_;
std::deque<StatusOr<T>> results_ TF_GUARDED_BY(mu_);
Status status_ TF_GUARDED_BY(mu_) = absl::OkStatus();
ThreadSafeBuffer(const ThreadSafeBuffer&) = delete;
void operator=(const ThreadSafeBuffer&) = delete;
};
template <class T>
ThreadSafeBuffer<T>::ThreadSafeBuffer(size_t buffer_size)
: buffer_size_(buffer_size) {
DCHECK_GT(buffer_size, 0)
<< "ThreadSafeBuffer must have a positive buffer size. Got "
<< buffer_size << ".";
}
template <class T>
bool ThreadSafeBuffer<T>::Empty() const {
tf_shared_lock l(mu_);
return results_.empty();
}
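// Blocks until an element is available or the buffer has been cancelled (in
// which case the cancellation status is returned), pops the front element and
// wakes one waiting producer.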
template <class T>
StatusOr<T> ThreadSafeBuffer<T>::Pop() {
mutex_lock l(mu_);
while (status_.ok() && results_.empty()) {
ready_to_pop_.wait(l);
}
if (!status_.ok()) {
return status_;
}
StatusOr<T> result = std::move(results_.front());
results_.pop_front();
ready_to_push_.notify_one();
return result;
}
template <class T>
Status ThreadSafeBuffer<T>::Push(StatusOr<T> value) {
mutex_lock l(mu_);
while (status_.ok() && results_.size() >= buffer_size_) {
ready_to_push_.wait(l);
}
if (!status_.ok()) {
return status_;
}
results_.push_back(std::move(value));
ready_to_pop_.notify_one();
return absl::OkStatus();
}
template <class T>
void ThreadSafeBuffer<T>::Cancel(Status status) {
DCHECK(!status.ok())
<< "Cancelling ThreadSafeBuffer requires a non-OK status. Got " << status;
mutex_lock l(mu_);
status_ = std::move(status);
ready_to_push_.notify_all();
ready_to_pop_.notify_all();
}
}
}
#endif | #include "tensorflow/core/data/service/thread_safe_buffer.h"
#include <memory>
#include <tuple>
#include <vector>
#include "absl/strings/str_cat.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status_matchers.h"
#include "tensorflow/core/platform/statusor.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/error_codes.pb.h"
namespace tensorflow {
namespace data {
namespace {
using ::tensorflow::testing::IsOk;
using ::tensorflow::testing::StatusIs;
using ::testing::UnorderedElementsAreArray;
class ThreadSafeBufferTest
: public ::testing::Test,
public ::testing::WithParamInterface<std::tuple<size_t, size_t>> {
protected:
size_t GetBufferSize() const { return std::get<0>(GetParam()); }
size_t GetNumOfElements() const { return std::get<1>(GetParam()); }
};
std::vector<int> GetRange(const size_t range) {
std::vector<int> result;
for (int i = 0; i < range; ++i) {
result.push_back(i);
}
return result;
}
INSTANTIATE_TEST_SUITE_P(VaryingBufferAndInputSizes, ThreadSafeBufferTest,
::testing::Values(std::make_tuple(1, 2),
std::make_tuple(2, 10),
std::make_tuple(10, 2)));
TEST_P(ThreadSafeBufferTest, OneReaderAndOneWriter) {
ThreadSafeBuffer<int> buffer(GetBufferSize());
auto thread = absl::WrapUnique(Env::Default()->StartThread(
{}, "writer_thread", [this, &buffer]() {
for (int i = 0; i < GetNumOfElements(); ++i) {
ASSERT_THAT(buffer.Push(i), IsOk());
}
}));
for (size_t i = 0; i < GetNumOfElements(); ++i) {
TF_ASSERT_OK_AND_ASSIGN(int next, buffer.Pop());
EXPECT_EQ(next, i);
}
}
TEST_P(ThreadSafeBufferTest, OneReaderAndMultipleWriters) {
ThreadSafeBuffer<int> buffer(GetBufferSize());
std::vector<std::unique_ptr<Thread>> threads;
for (int i = 0; i < GetNumOfElements(); ++i) {
threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("writer_thread_", i),
[&buffer, i] { ASSERT_THAT(buffer.Push(i), IsOk()); })));
}
std::vector<int> results;
for (int i = 0; i < GetNumOfElements(); ++i) {
TF_ASSERT_OK_AND_ASSIGN(int next, buffer.Pop());
results.push_back(next);
}
EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(GetNumOfElements())));
}
TEST_P(ThreadSafeBufferTest, MultipleReadersAndOneWriter) {
ThreadSafeBuffer<int> buffer(GetBufferSize());
mutex mu;
std::vector<int> results;
std::vector<std::unique_ptr<Thread>> threads;
for (int i = 0; i < GetNumOfElements(); ++i) {
threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("reader_thread_", i),
[&buffer, &mu, &results]() {
TF_ASSERT_OK_AND_ASSIGN(int next, buffer.Pop());
mutex_lock l(mu);
results.push_back(next);
})));
}
for (int i = 0; i < GetNumOfElements(); ++i) {
ASSERT_THAT(buffer.Push(i), IsOk());
}
threads.clear();
EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(GetNumOfElements())));
}
TEST_P(ThreadSafeBufferTest, MultipleReadersAndWriters) {
ThreadSafeBuffer<int> buffer(GetBufferSize());
mutex mu;
std::vector<int> results;
std::vector<std::unique_ptr<Thread>> threads;
for (int i = 0; i < GetNumOfElements(); ++i) {
threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("reader_thread_", i),
[&buffer, &mu, &results]() {
TF_ASSERT_OK_AND_ASSIGN(int next, buffer.Pop());
mutex_lock l(mu);
results.push_back(next);
})));
}
for (int i = 0; i < GetNumOfElements(); ++i) {
threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("writer_thread_", i),
[&buffer, i]() { ASSERT_THAT(buffer.Push(i), IsOk()); })));
}
threads.clear();
EXPECT_THAT(results, UnorderedElementsAreArray(GetRange(GetNumOfElements())));
}
TEST_P(ThreadSafeBufferTest, BlockReaderWhenBufferIsEmpty) {
ThreadSafeBuffer<Tensor> buffer(GetBufferSize());
auto thread = absl::WrapUnique(Env::Default()->StartThread(
{}, "reader_thread", [&buffer]() {
TF_ASSERT_OK_AND_ASSIGN(Tensor tensor, buffer.Pop());
test::ExpectEqual(tensor, Tensor("Test tensor"));
}));
Env::Default()->SleepForMicroseconds(10000);
ASSERT_THAT(buffer.Push(Tensor("Test tensor")), IsOk());
}
TEST_P(ThreadSafeBufferTest, BlockWriterWhenBufferIsFull) {
ThreadSafeBuffer<Tensor> buffer(GetBufferSize());
for (int i = 0; i < GetBufferSize(); ++i) {
ASSERT_THAT(buffer.Push(Tensor("Test tensor")), IsOk());
}
uint64 push_time = 0;
auto thread = absl::WrapUnique(Env::Default()->StartThread(
{}, "writer_thread", [&buffer, &push_time]() {
ASSERT_THAT(buffer.Push(Tensor("Test tensor")), IsOk());
push_time = Env::Default()->NowMicros();
}));
Env::Default()->SleepForMicroseconds(10000);
uint64 pop_time = Env::Default()->NowMicros();
ASSERT_THAT(buffer.Pop(), IsOk());
thread.reset();
EXPECT_LE(pop_time, push_time);
}
TEST_P(ThreadSafeBufferTest, CancelReaders) {
ThreadSafeBuffer<int> buffer(GetBufferSize());
std::vector<std::unique_ptr<Thread>> threads;
for (int i = 0; i < GetNumOfElements(); ++i) {
threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("reader_thread_", i),
[&buffer]() { EXPECT_THAT(buffer.Pop(), StatusIs(error::ABORTED)); })));
}
buffer.Cancel(errors::Aborted("Aborted"));
}
TEST_P(ThreadSafeBufferTest, CancelWriters) {
ThreadSafeBuffer<Tensor> buffer(GetBufferSize());
for (int i = 0; i < GetBufferSize(); ++i) {
ASSERT_THAT(buffer.Push(Tensor("Test tensor")), IsOk());
}
std::vector<std::unique_ptr<Thread>> threads;
for (int i = 0; i < GetNumOfElements(); ++i) {
threads.push_back(absl::WrapUnique(Env::Default()->StartThread(
{}, absl::StrCat("writer_thread_", i),
[&buffer]() {
for (int i = 0; i < 100; ++i) {
EXPECT_THAT(buffer.Push(Tensor("Test tensor")),
StatusIs(error::CANCELLED));
}
})));
}
buffer.Cancel(errors::Cancelled("Cancelled"));
}
TEST_P(ThreadSafeBufferTest, CancelMultipleTimes) {
ThreadSafeBuffer<Tensor> buffer(GetBufferSize());
buffer.Cancel(errors::Unknown("Unknown"));
EXPECT_THAT(buffer.Push(Tensor("Test tensor")), StatusIs(error::UNKNOWN));
buffer.Cancel(errors::DeadlineExceeded("Deadline exceeded"));
EXPECT_THAT(buffer.Pop(), StatusIs(error::DEADLINE_EXCEEDED));
buffer.Cancel(errors::ResourceExhausted("Resource exhausted"));
EXPECT_THAT(buffer.Push(Tensor("Test tensor")),
StatusIs(error::RESOURCE_EXHAUSTED));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/thread_safe_buffer.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/data/service/thread_safe_buffer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
4f18d1c7-9e2a-451b-b785-37047394c51e | cpp | tensorflow/tensorflow | inputstream_interface | third_party/xla/xla/tsl/lib/io/inputstream_interface.cc | third_party/xla/xla/tsl/lib/io/inputstream_interface_test.cc | #include "xla/tsl/lib/io/inputstream_interface.h"
#include "tsl/platform/errors.h"
namespace tsl {
namespace io {
static constexpr int64_t kMaxSkipSize = 8 * 1024 * 1024;
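// Skips forward by reading and discarding at most kMaxSkipSize bytes at a
// time so the temporary buffer stays bounded.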
absl::Status InputStreamInterface::SkipNBytes(int64_t bytes_to_skip) {
if (bytes_to_skip < 0) {
return errors::InvalidArgument("Can't skip a negative number of bytes");
}
tstring unused;
while (bytes_to_skip > 0) {
int64_t bytes_to_read = std::min<int64_t>(kMaxSkipSize, bytes_to_skip);
TF_RETURN_IF_ERROR(ReadNBytes(bytes_to_read, &unused));
bytes_to_skip -= bytes_to_read;
}
return absl::OkStatus();
}
}
} | #include "xla/tsl/lib/io/inputstream_interface.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace io {
namespace {
class TestStringStream : public InputStreamInterface {
public:
explicit TestStringStream(const string& content) : content_(content) {}
absl::Status ReadNBytes(int64_t bytes_to_read, tstring* result) override {
result->clear();
if (pos_ + bytes_to_read > content_.size()) {
return errors::OutOfRange("limit reached");
}
*result = content_.substr(pos_, bytes_to_read);
pos_ += bytes_to_read;
return absl::OkStatus();
}
int64_t Tell() const override { return pos_; }
absl::Status Reset() override {
pos_ = 0;
return absl::OkStatus();
}
private:
string content_;
int64_t pos_ = 0;
};
TEST(InputStreamInterface, Basic) {
TestStringStream ss("This is a test string");
tstring res;
TF_ASSERT_OK(ss.ReadNBytes(4, &res));
EXPECT_EQ("This", res);
TF_ASSERT_OK(ss.SkipNBytes(6));
TF_ASSERT_OK(ss.ReadNBytes(11, &res));
EXPECT_EQ("test string", res);
EXPECT_TRUE(errors::IsOutOfRange(ss.SkipNBytes(1)));
TF_ASSERT_OK(ss.Reset());
TF_ASSERT_OK(ss.ReadNBytes(4, &res));
EXPECT_EQ("This", res);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/inputstream_interface.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/lib/io/inputstream_interface_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ee933ba6-047b-4c1a-ac4f-ae4936556cca | cpp | google/quiche | quic_write_blocked_list | quiche/quic/core/quic_write_blocked_list.cc | quiche/quic/core/quic_write_blocked_list_test.cc | #include "quiche/quic/core/quic_write_blocked_list.h"
#include <algorithm>
#include <cstring>
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
QuicWriteBlockedList::QuicWriteBlockedList()
: last_priority_popped_(0),
respect_incremental_(
GetQuicReloadableFlag(quic_priority_respect_incremental)),
disable_batch_write_(GetQuicReloadableFlag(quic_disable_batch_write)) {
memset(batch_write_stream_id_, 0, sizeof(batch_write_stream_id_));
memset(bytes_left_for_batch_write_, 0, sizeof(bytes_left_for_batch_write_));
}
bool QuicWriteBlockedList::ShouldYield(QuicStreamId id) const {
for (const auto& stream : static_stream_collection_) {
if (stream.id == id) {
return false;
}
if (stream.is_blocked) {
return true;
}
}
return priority_write_scheduler_.ShouldYield(id);
}
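// Pops the next stream to write: blocked static streams are served first in
// registration order, then the highest-urgency ready data stream, updating
// the per-urgency batch-write bookkeeping.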
QuicStreamId QuicWriteBlockedList::PopFront() {
QuicStreamId static_stream_id;
if (static_stream_collection_.UnblockFirstBlocked(&static_stream_id)) {
return static_stream_id;
}
const auto [id, priority] =
priority_write_scheduler_.PopNextReadyStreamAndPriority();
const spdy::SpdyPriority urgency = priority.urgency;
const bool incremental = priority.incremental;
last_priority_popped_ = urgency;
if (disable_batch_write_) {
QUIC_RELOADABLE_FLAG_COUNT_N(quic_disable_batch_write, 1, 3);
if (!respect_incremental_ || !incremental) {
batch_write_stream_id_[urgency] = id;
}
return id;
}
if (!priority_write_scheduler_.HasReadyStreams()) {
batch_write_stream_id_[urgency] = 0;
} else if (batch_write_stream_id_[urgency] != id) {
batch_write_stream_id_[urgency] = id;
bytes_left_for_batch_write_[urgency] = 16000;
}
return id;
}
void QuicWriteBlockedList::RegisterStream(QuicStreamId stream_id,
bool is_static_stream,
const QuicStreamPriority& priority) {
QUICHE_DCHECK(!priority_write_scheduler_.StreamRegistered(stream_id))
<< "stream " << stream_id << " already registered";
if (is_static_stream) {
static_stream_collection_.Register(stream_id);
return;
}
priority_write_scheduler_.RegisterStream(stream_id, priority.http());
}
void QuicWriteBlockedList::UnregisterStream(QuicStreamId stream_id) {
if (static_stream_collection_.Unregister(stream_id)) {
return;
}
priority_write_scheduler_.UnregisterStream(stream_id);
}
void QuicWriteBlockedList::UpdateStreamPriority(
QuicStreamId stream_id, const QuicStreamPriority& new_priority) {
QUICHE_DCHECK(!static_stream_collection_.IsRegistered(stream_id));
priority_write_scheduler_.UpdateStreamPriority(stream_id,
new_priority.http());
}
void QuicWriteBlockedList::UpdateBytesForStream(QuicStreamId stream_id,
size_t bytes) {
if (disable_batch_write_) {
QUIC_RELOADABLE_FLAG_COUNT_N(quic_disable_batch_write, 2, 3);
return;
}
if (batch_write_stream_id_[last_priority_popped_] == stream_id) {
bytes_left_for_batch_write_[last_priority_popped_] -=
std::min(bytes_left_for_batch_write_[last_priority_popped_], bytes);
}
}
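// Marks a stream as write-blocked again. The stream holding the batch-write
// slot for the last popped urgency may be re-queued at the front so its data
// is not interleaved with other streams of the same urgency.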
void QuicWriteBlockedList::AddStream(QuicStreamId stream_id) {
if (static_stream_collection_.SetBlocked(stream_id)) {
return;
}
if (respect_incremental_) {
QUIC_RELOADABLE_FLAG_COUNT(quic_priority_respect_incremental);
if (!priority_write_scheduler_.GetStreamPriority(stream_id).incremental) {
const bool push_front =
stream_id == batch_write_stream_id_[last_priority_popped_];
priority_write_scheduler_.MarkStreamReady(stream_id, push_front);
return;
}
}
if (disable_batch_write_) {
QUIC_RELOADABLE_FLAG_COUNT_N(quic_disable_batch_write, 3, 3);
priority_write_scheduler_.MarkStreamReady(stream_id,
false);
return;
}
const bool push_front =
stream_id == batch_write_stream_id_[last_priority_popped_] &&
bytes_left_for_batch_write_[last_priority_popped_] > 0;
priority_write_scheduler_.MarkStreamReady(stream_id, push_front);
}
bool QuicWriteBlockedList::IsStreamBlocked(QuicStreamId stream_id) const {
for (const auto& stream : static_stream_collection_) {
if (stream.id == stream_id) {
return stream.is_blocked;
}
}
return priority_write_scheduler_.IsStreamReady(stream_id);
}
void QuicWriteBlockedList::StaticStreamCollection::Register(QuicStreamId id) {
QUICHE_DCHECK(!IsRegistered(id));
streams_.push_back({id, false});
}
bool QuicWriteBlockedList::StaticStreamCollection::IsRegistered(
QuicStreamId id) const {
for (const auto& stream : streams_) {
if (stream.id == id) {
return true;
}
}
return false;
}
bool QuicWriteBlockedList::StaticStreamCollection::Unregister(QuicStreamId id) {
for (auto it = streams_.begin(); it != streams_.end(); ++it) {
if (it->id == id) {
if (it->is_blocked) {
--num_blocked_;
}
streams_.erase(it);
return true;
}
}
return false;
}
bool QuicWriteBlockedList::StaticStreamCollection::SetBlocked(QuicStreamId id) {
for (auto& stream : streams_) {
if (stream.id == id) {
if (!stream.is_blocked) {
stream.is_blocked = true;
++num_blocked_;
}
return true;
}
}
return false;
}
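// Unblocks the first blocked static stream in registration order and returns
// its ID through |id|; returns false if no static stream is blocked.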
bool QuicWriteBlockedList::StaticStreamCollection::UnblockFirstBlocked(
QuicStreamId* id) {
for (auto& stream : streams_) {
if (stream.is_blocked) {
--num_blocked_;
stream.is_blocked = false;
*id = stream.id;
return true;
}
}
return false;
}
} | #include "quiche/quic/core/quic_write_blocked_list.h"
#include <optional>
#include <tuple>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
using spdy::kV3HighestPriority;
using spdy::kV3LowestPriority;
namespace quic {
namespace test {
namespace {
constexpr bool kStatic = true;
constexpr bool kNotStatic = false;
constexpr bool kIncremental = true;
constexpr bool kNotIncremental = false;
class QuicWriteBlockedListTest : public QuicTest {
protected:
void SetUp() override {
write_blocked_list_.emplace();
}
bool HasWriteBlockedDataStreams() const {
return write_blocked_list_->HasWriteBlockedDataStreams();
}
bool HasWriteBlockedSpecialStream() const {
return write_blocked_list_->HasWriteBlockedSpecialStream();
}
size_t NumBlockedSpecialStreams() const {
return write_blocked_list_->NumBlockedSpecialStreams();
}
size_t NumBlockedStreams() const {
return write_blocked_list_->NumBlockedStreams();
}
bool ShouldYield(QuicStreamId id) const {
return write_blocked_list_->ShouldYield(id);
}
QuicStreamPriority GetPriorityOfStream(QuicStreamId id) const {
return write_blocked_list_->GetPriorityOfStream(id);
}
QuicStreamId PopFront() { return write_blocked_list_->PopFront(); }
void RegisterStream(QuicStreamId stream_id, bool is_static_stream,
const HttpStreamPriority& priority) {
write_blocked_list_->RegisterStream(stream_id, is_static_stream,
QuicStreamPriority(priority));
}
void UnregisterStream(QuicStreamId stream_id) {
write_blocked_list_->UnregisterStream(stream_id);
}
void UpdateStreamPriority(QuicStreamId stream_id,
const HttpStreamPriority& new_priority) {
write_blocked_list_->UpdateStreamPriority(stream_id,
QuicStreamPriority(new_priority));
}
void UpdateBytesForStream(QuicStreamId stream_id, size_t bytes) {
write_blocked_list_->UpdateBytesForStream(stream_id, bytes);
}
void AddStream(QuicStreamId stream_id) {
write_blocked_list_->AddStream(stream_id);
}
bool IsStreamBlocked(QuicStreamId stream_id) const {
return write_blocked_list_->IsStreamBlocked(stream_id);
}
private:
std::optional<QuicWriteBlockedList> write_blocked_list_;
};
TEST_F(QuicWriteBlockedListTest, PriorityOrder) {
RegisterStream(40, kNotStatic, {kV3LowestPriority, kNotIncremental});
RegisterStream(23, kNotStatic, {kV3HighestPriority, kIncremental});
RegisterStream(17, kNotStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(1, kStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(3, kStatic, {kV3HighestPriority, kNotIncremental});
EXPECT_EQ(kV3LowestPriority, GetPriorityOfStream(40).http().urgency);
EXPECT_EQ(kNotIncremental, GetPriorityOfStream(40).http().incremental);
EXPECT_EQ(kV3HighestPriority, GetPriorityOfStream(23).http().urgency);
EXPECT_EQ(kIncremental, GetPriorityOfStream(23).http().incremental);
EXPECT_EQ(kV3HighestPriority, GetPriorityOfStream(17).http().urgency);
EXPECT_EQ(kNotIncremental, GetPriorityOfStream(17).http().incremental);
AddStream(40);
EXPECT_TRUE(IsStreamBlocked(40));
AddStream(23);
EXPECT_TRUE(IsStreamBlocked(23));
AddStream(17);
EXPECT_TRUE(IsStreamBlocked(17));
AddStream(3);
EXPECT_TRUE(IsStreamBlocked(3));
AddStream(1);
EXPECT_TRUE(IsStreamBlocked(1));
EXPECT_EQ(5u, NumBlockedStreams());
EXPECT_TRUE(HasWriteBlockedSpecialStream());
EXPECT_EQ(2u, NumBlockedSpecialStreams());
EXPECT_TRUE(HasWriteBlockedDataStreams());
EXPECT_EQ(1u, PopFront());
EXPECT_EQ(1u, NumBlockedSpecialStreams());
EXPECT_FALSE(IsStreamBlocked(1));
EXPECT_EQ(3u, PopFront());
EXPECT_EQ(0u, NumBlockedSpecialStreams());
EXPECT_FALSE(IsStreamBlocked(3));
EXPECT_EQ(23u, PopFront());
EXPECT_FALSE(IsStreamBlocked(23));
EXPECT_EQ(17u, PopFront());
EXPECT_FALSE(IsStreamBlocked(17));
EXPECT_EQ(40u, PopFront());
EXPECT_FALSE(IsStreamBlocked(40));
EXPECT_EQ(0u, NumBlockedStreams());
EXPECT_FALSE(HasWriteBlockedSpecialStream());
EXPECT_FALSE(HasWriteBlockedDataStreams());
}
TEST_F(QuicWriteBlockedListTest, SingleStaticStream) {
RegisterStream(5, kStatic, {kV3HighestPriority, kNotIncremental});
AddStream(5);
EXPECT_EQ(1u, NumBlockedStreams());
EXPECT_TRUE(HasWriteBlockedSpecialStream());
EXPECT_EQ(5u, PopFront());
EXPECT_EQ(0u, NumBlockedStreams());
EXPECT_FALSE(HasWriteBlockedSpecialStream());
}
TEST_F(QuicWriteBlockedListTest, StaticStreamsComeFirst) {
RegisterStream(5, kNotStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(3, kStatic, {kV3LowestPriority, kNotIncremental});
AddStream(5);
AddStream(3);
EXPECT_EQ(2u, NumBlockedStreams());
EXPECT_TRUE(HasWriteBlockedSpecialStream());
EXPECT_TRUE(HasWriteBlockedDataStreams());
EXPECT_EQ(3u, PopFront());
EXPECT_EQ(5u, PopFront());
EXPECT_EQ(0u, NumBlockedStreams());
EXPECT_FALSE(HasWriteBlockedSpecialStream());
EXPECT_FALSE(HasWriteBlockedDataStreams());
}
TEST_F(QuicWriteBlockedListTest, NoDuplicateEntries) {
const QuicStreamId kBlockedId = 5;
RegisterStream(kBlockedId, kNotStatic, {kV3HighestPriority, kNotIncremental});
AddStream(kBlockedId);
AddStream(kBlockedId);
AddStream(kBlockedId);
EXPECT_EQ(1u, NumBlockedStreams());
EXPECT_TRUE(HasWriteBlockedDataStreams());
EXPECT_EQ(kBlockedId, PopFront());
EXPECT_EQ(0u, NumBlockedStreams());
EXPECT_FALSE(HasWriteBlockedDataStreams());
}
TEST_F(QuicWriteBlockedListTest, IncrementalStreamsRoundRobin) {
const QuicStreamId id1 = 5;
const QuicStreamId id2 = 7;
const QuicStreamId id3 = 9;
RegisterStream(id1, kNotStatic, {kV3LowestPriority, kIncremental});
RegisterStream(id2, kNotStatic, {kV3LowestPriority, kIncremental});
RegisterStream(id3, kNotStatic, {kV3LowestPriority, kIncremental});
AddStream(id1);
AddStream(id2);
AddStream(id3);
EXPECT_EQ(id1, PopFront());
const size_t kLargeWriteSize = 1000 * 1000 * 1000;
UpdateBytesForStream(id1, kLargeWriteSize);
AddStream(id1);
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, kLargeWriteSize);
EXPECT_EQ(id3, PopFront());
UpdateBytesForStream(id3, kLargeWriteSize);
AddStream(id3);
AddStream(id2);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
EXPECT_EQ(id3, PopFront());
UpdateBytesForStream(id3, kLargeWriteSize);
AddStream(id3);
EXPECT_EQ(id2, PopFront());
EXPECT_EQ(id3, PopFront());
}
class QuicWriteBlockedListParameterizedTest
: public QuicWriteBlockedListTest,
public ::testing::WithParamInterface<std::tuple<bool, bool>> {
protected:
QuicWriteBlockedListParameterizedTest()
: priority_respect_incremental_(std::get<0>(GetParam())),
disable_batch_write_(std::get<1>(GetParam())) {
SetQuicReloadableFlag(quic_priority_respect_incremental,
priority_respect_incremental_);
SetQuicReloadableFlag(quic_disable_batch_write, disable_batch_write_);
}
const bool priority_respect_incremental_;
const bool disable_batch_write_;
};
INSTANTIATE_TEST_SUITE_P(
BatchWrite, QuicWriteBlockedListParameterizedTest,
::testing::Combine(::testing::Bool(), ::testing::Bool()),
[](const testing::TestParamInfo<
QuicWriteBlockedListParameterizedTest::ParamType>& info) {
return absl::StrCat(std::get<0>(info.param) ? "RespectIncrementalTrue"
: "RespectIncrementalFalse",
std::get<1>(info.param) ? "DisableBatchWriteTrue"
: "DisableBatchWriteFalse");
});
TEST_P(QuicWriteBlockedListParameterizedTest, BatchingWrites) {
if (disable_batch_write_) {
return;
}
const QuicStreamId id1 = 5;
const QuicStreamId id2 = 7;
const QuicStreamId id3 = 9;
RegisterStream(id1, kNotStatic, {kV3LowestPriority, kIncremental});
RegisterStream(id2, kNotStatic, {kV3LowestPriority, kIncremental});
RegisterStream(id3, kNotStatic, {kV3HighestPriority, kIncremental});
AddStream(id1);
AddStream(id2);
EXPECT_EQ(2u, NumBlockedStreams());
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, 15999);
AddStream(id1);
EXPECT_EQ(2u, NumBlockedStreams());
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, 1);
AddStream(id1);
EXPECT_EQ(2u, NumBlockedStreams());
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, 15999);
AddStream(id2);
EXPECT_EQ(2u, NumBlockedStreams());
AddStream(id3);
EXPECT_EQ(id3, PopFront());
UpdateBytesForStream(id3, 20000);
AddStream(id3);
EXPECT_EQ(id3, PopFront());
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, 1);
AddStream(id2);
EXPECT_EQ(2u, NumBlockedStreams());
EXPECT_EQ(id1, PopFront());
}
TEST_P(QuicWriteBlockedListParameterizedTest, RoundRobin) {
if (!disable_batch_write_) {
return;
}
const QuicStreamId id1 = 5;
const QuicStreamId id2 = 7;
const QuicStreamId id3 = 9;
RegisterStream(id1, kNotStatic, {kV3LowestPriority, kIncremental});
RegisterStream(id2, kNotStatic, {kV3LowestPriority, kIncremental});
RegisterStream(id3, kNotStatic, {kV3LowestPriority, kIncremental});
AddStream(id1);
AddStream(id2);
AddStream(id3);
EXPECT_EQ(id1, PopFront());
AddStream(id1);
EXPECT_EQ(id2, PopFront());
EXPECT_EQ(id3, PopFront());
AddStream(id3);
AddStream(id2);
EXPECT_EQ(id1, PopFront());
EXPECT_EQ(id3, PopFront());
AddStream(id3);
EXPECT_EQ(id2, PopFront());
EXPECT_EQ(id3, PopFront());
}
TEST_P(QuicWriteBlockedListParameterizedTest,
NonIncrementalStreamsKeepWriting) {
if (!priority_respect_incremental_) {
return;
}
const QuicStreamId id1 = 1;
const QuicStreamId id2 = 2;
const QuicStreamId id3 = 3;
const QuicStreamId id4 = 4;
RegisterStream(id1, kNotStatic, {kV3LowestPriority, kNotIncremental});
RegisterStream(id2, kNotStatic, {kV3LowestPriority, kNotIncremental});
RegisterStream(id3, kNotStatic, {kV3LowestPriority, kNotIncremental});
RegisterStream(id4, kNotStatic, {kV3HighestPriority, kNotIncremental});
AddStream(id1);
AddStream(id2);
AddStream(id3);
EXPECT_EQ(id1, PopFront());
const size_t kLargeWriteSize = 1000 * 1000 * 1000;
UpdateBytesForStream(id1, kLargeWriteSize);
AddStream(id1);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
AddStream(id1);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
AddStream(id1);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
AddStream(id1);
AddStream(id4);
EXPECT_EQ(id4, PopFront());
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, kLargeWriteSize);
AddStream(id2);
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, kLargeWriteSize);
AddStream(id2);
AddStream(id1);
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, kLargeWriteSize);
EXPECT_EQ(id3, PopFront());
UpdateBytesForStream(id2, kLargeWriteSize);
AddStream(id3);
EXPECT_EQ(id3, PopFront());
UpdateBytesForStream(id2, kLargeWriteSize);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
}
TEST_P(QuicWriteBlockedListParameterizedTest,
IncrementalAndNonIncrementalStreams) {
if (!priority_respect_incremental_) {
return;
}
const QuicStreamId id1 = 1;
const QuicStreamId id2 = 2;
RegisterStream(id1, kNotStatic, {kV3LowestPriority, kNotIncremental});
RegisterStream(id2, kNotStatic, {kV3LowestPriority, kIncremental});
AddStream(id1);
AddStream(id2);
EXPECT_EQ(id1, PopFront());
const size_t kSmallWriteSize = 1000;
UpdateBytesForStream(id1, kSmallWriteSize);
AddStream(id1);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kSmallWriteSize);
AddStream(id1);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kSmallWriteSize);
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, kSmallWriteSize);
AddStream(id2);
AddStream(id1);
if (!disable_batch_write_) {
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, kSmallWriteSize);
AddStream(id2);
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, kSmallWriteSize);
}
EXPECT_EQ(id1, PopFront());
const size_t kLargeWriteSize = 1000 * 1000 * 1000;
UpdateBytesForStream(id1, kLargeWriteSize);
AddStream(id1);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
AddStream(id1);
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
AddStream(id2);
AddStream(id1);
if (!disable_batch_write_) {
EXPECT_EQ(id2, PopFront());
UpdateBytesForStream(id2, kLargeWriteSize);
AddStream(id2);
}
EXPECT_EQ(id1, PopFront());
UpdateBytesForStream(id1, kLargeWriteSize);
}
TEST_F(QuicWriteBlockedListTest, Ceding) {
RegisterStream(15, kNotStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(16, kNotStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(5, kNotStatic, {5, kNotIncremental});
RegisterStream(4, kNotStatic, {5, kNotIncremental});
RegisterStream(7, kNotStatic, {7, kNotIncremental});
RegisterStream(1, kStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(3, kStatic, {kV3HighestPriority, kNotIncremental});
EXPECT_FALSE(ShouldYield(5));
AddStream(5);
EXPECT_FALSE(ShouldYield(5));
EXPECT_TRUE(ShouldYield(4));
EXPECT_TRUE(ShouldYield(7));
EXPECT_FALSE(ShouldYield(15));
EXPECT_FALSE(ShouldYield(3));
EXPECT_FALSE(ShouldYield(1));
AddStream(15);
EXPECT_TRUE(ShouldYield(16));
EXPECT_FALSE(ShouldYield(3));
EXPECT_FALSE(ShouldYield(1));
AddStream(3);
EXPECT_TRUE(ShouldYield(16));
EXPECT_TRUE(ShouldYield(15));
EXPECT_FALSE(ShouldYield(3));
EXPECT_FALSE(ShouldYield(1));
AddStream(1);
EXPECT_TRUE(ShouldYield(16));
EXPECT_TRUE(ShouldYield(15));
EXPECT_TRUE(ShouldYield(3));
EXPECT_FALSE(ShouldYield(1));
}
TEST_F(QuicWriteBlockedListTest, UnregisterStream) {
RegisterStream(40, kNotStatic, {kV3LowestPriority, kNotIncremental});
RegisterStream(23, kNotStatic, {6, kNotIncremental});
RegisterStream(12, kNotStatic, {3, kNotIncremental});
RegisterStream(17, kNotStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(1, kStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(3, kStatic, {kV3HighestPriority, kNotIncremental});
AddStream(40);
AddStream(23);
AddStream(12);
AddStream(17);
AddStream(1);
AddStream(3);
UnregisterStream(23);
UnregisterStream(1);
EXPECT_EQ(3u, PopFront());
EXPECT_EQ(17u, PopFront());
EXPECT_EQ(12u, PopFront());
EXPECT_EQ(40u, PopFront());
}
TEST_F(QuicWriteBlockedListTest, UnregisterNotRegisteredStream) {
EXPECT_QUICHE_BUG(UnregisterStream(1), "Stream 1 not registered");
RegisterStream(2, kNotStatic, {kV3HighestPriority, kIncremental});
UnregisterStream(2);
EXPECT_QUICHE_BUG(UnregisterStream(2), "Stream 2 not registered");
}
TEST_F(QuicWriteBlockedListTest, UpdateStreamPriority) {
RegisterStream(40, kNotStatic, {kV3LowestPriority, kNotIncremental});
RegisterStream(23, kNotStatic, {6, kIncremental});
RegisterStream(17, kNotStatic, {kV3HighestPriority, kNotIncremental});
RegisterStream(1, kStatic, {2, kNotIncremental});
RegisterStream(3, kStatic, {kV3HighestPriority, kNotIncremental});
EXPECT_EQ(kV3LowestPriority, GetPriorityOfStream(40).http().urgency);
EXPECT_EQ(kNotIncremental, GetPriorityOfStream(40).http().incremental);
EXPECT_EQ(6, GetPriorityOfStream(23).http().urgency);
EXPECT_EQ(kIncremental, GetPriorityOfStream(23).http().incremental);
EXPECT_EQ(kV3HighestPriority, GetPriorityOfStream(17).http().urgency);
EXPECT_EQ(kNotIncremental, GetPriorityOfStream(17).http().incremental);
UpdateStreamPriority(40, {3, kIncremental});
UpdateStreamPriority(23, {kV3HighestPriority, kNotIncremental});
UpdateStreamPriority(17, {5, kNotIncremental});
EXPECT_EQ(3, GetPriorityOfStream(40).http().urgency);
EXPECT_EQ(kIncremental, GetPriorityOfStream(40).http().incremental);
EXPECT_EQ(kV3HighestPriority, GetPriorityOfStream(23).http().urgency);
EXPECT_EQ(kNotIncremental, GetPriorityOfStream(23).http().incremental);
EXPECT_EQ(5, GetPriorityOfStream(17).http().urgency);
EXPECT_EQ(kNotIncremental, GetPriorityOfStream(17).http().incremental);
AddStream(40);
AddStream(23);
AddStream(17);
AddStream(1);
AddStream(3);
EXPECT_EQ(1u, PopFront());
EXPECT_EQ(3u, PopFront());
EXPECT_EQ(23u, PopFront());
EXPECT_EQ(40u, PopFront());
EXPECT_EQ(17u, PopFront());
}
TEST_F(QuicWriteBlockedListTest, UpdateStaticStreamPriority) {
RegisterStream(2, kStatic, {kV3LowestPriority, kNotIncremental});
EXPECT_QUICHE_DEBUG_DEATH(
UpdateStreamPriority(2, {kV3HighestPriority, kNotIncremental}),
"IsRegistered");
}
TEST_F(QuicWriteBlockedListTest, UpdateStreamPrioritySameUrgency) {
RegisterStream(1, kNotStatic, {6, kNotIncremental});
RegisterStream(2, kNotStatic, {6, kNotIncremental});
AddStream(1);
AddStream(2);
EXPECT_EQ(1u, PopFront());
EXPECT_EQ(2u, PopFront());
RegisterStream(3, kNotStatic, {6, kNotIncremental});
RegisterStream(4, kNotStatic, {6, kNotIncremental});
EXPECT_EQ(6, GetPriorityOfStream(3).http().urgency);
EXPECT_EQ(kNotIncremental, GetPriorityOfStream(3).http().incremental);
UpdateStreamPriority(3, {6, kIncremental});
EXPECT_EQ(6, GetPriorityOfStream(3).http().urgency);
EXPECT_EQ(kIncremental, GetPriorityOfStream(3).http().incremental);
AddStream(3);
AddStream(4);
EXPECT_EQ(3u, PopFront());
EXPECT_EQ(4u, PopFront());
RegisterStream(5, kNotStatic, {6, kIncremental});
RegisterStream(6, kNotStatic, {6, kIncremental});
EXPECT_EQ(6, GetPriorityOfStream(6).http().urgency);
EXPECT_EQ(kIncremental, GetPriorityOfStream(6).http().incremental);
UpdateStreamPriority(6, {6, kNotIncremental});
EXPECT_EQ(6, GetPriorityOfStream(6).http().urgency);
EXPECT_EQ(kNotIncremental, GetPriorityOfStream(6).http().incremental);
AddStream(5);
AddStream(6);
EXPECT_EQ(5u, PopFront());
EXPECT_EQ(6u, PopFront());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_write_blocked_list.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_write_blocked_list_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
5a602f40-4697-4fe5-b527-76ff68a9646e | cpp | google/quiche | web_transport_priority_scheduler | quiche/web_transport/web_transport_priority_scheduler.cc | quiche/web_transport/web_transport_priority_scheduler_test.cc | #include "quiche/web_transport/web_transport_priority_scheduler.h"
#include <optional>
#include <utility>
#include "absl/cleanup/cleanup.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "quiche/common/quiche_status_utils.h"
#include "quiche/web_transport/web_transport.h"
namespace webtransport {
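// Registers |stream_id| under its send group, creating the per-group scheduler
// and the top-level group entry the first time that group is seen; the cleanup
// object removes the placeholder map entry again if any later step fails.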
absl::Status PriorityScheduler::Register(StreamId stream_id,
const StreamPriority& priority) {
auto [it, success] = stream_to_group_map_.insert({stream_id, nullptr});
if (!success) {
return absl::AlreadyExistsError("Provided stream ID already registered");
}
auto cleanup_nullptr_map_entry =
absl::MakeCleanup([&] { stream_to_group_map_.erase(stream_id); });
auto [scheduler_it, scheduler_created] =
per_group_schedulers_.try_emplace(priority.send_group_id);
if (scheduler_created) {
QUICHE_RETURN_IF_ERROR(active_groups_.Register(priority.send_group_id, {}));
}
PerGroupScheduler& scheduler = scheduler_it->second;
QUICHE_RETURN_IF_ERROR(scheduler.Register(stream_id, priority.send_order));
it->second = &*scheduler_it;
std::move(cleanup_nullptr_map_entry).Cancel();
return absl::OkStatus();
}
absl::Status PriorityScheduler::Unregister(StreamId stream_id) {
auto it = stream_to_group_map_.find(stream_id);
if (it == stream_to_group_map_.end()) {
return absl::NotFoundError("Stream ID not registered");
}
SendGroupId group_id = it->second->first;
PerGroupScheduler* group_scheduler = &it->second->second;
stream_to_group_map_.erase(it);
QUICHE_RETURN_IF_ERROR(group_scheduler->Unregister(stream_id));
if (!group_scheduler->HasRegistered()) {
per_group_schedulers_.erase(group_id);
QUICHE_RETURN_IF_ERROR(active_groups_.Unregister(group_id));
}
return absl::OkStatus();
}
absl::Status PriorityScheduler::UpdateSendOrder(StreamId stream_id,
SendOrder new_send_order) {
PerGroupScheduler* scheduler = SchedulerForStream(stream_id);
if (scheduler == nullptr) {
return absl::NotFoundError("Stream ID not registered");
}
return scheduler->UpdatePriority(stream_id, new_send_order);
}
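// Changing a stream's send group is implemented as unregister + re-register,
// preserving the stream's send order and re-scheduling it if it was scheduled.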
absl::Status PriorityScheduler::UpdateSendGroup(StreamId stream_id,
SendGroupId new_send_group) {
PerGroupScheduler* scheduler = SchedulerForStream(stream_id);
if (scheduler == nullptr) {
return absl::NotFoundError("Stream ID not registered");
}
bool is_scheduled = scheduler->IsScheduled(stream_id);
std::optional<SendOrder> send_order = scheduler->GetPriorityFor(stream_id);
if (!send_order.has_value()) {
return absl::InternalError(
"Stream registered at the top level scheduler, but not at the "
"per-group one");
}
QUICHE_RETURN_IF_ERROR(Unregister(stream_id));
QUICHE_RETURN_IF_ERROR(
Register(stream_id, StreamPriority{new_send_group, *send_order}));
if (is_scheduled) {
QUICHE_RETURN_IF_ERROR(Schedule(stream_id));
}
return absl::OkStatus();
}
std::optional<StreamPriority> PriorityScheduler::GetPriorityFor(
StreamId stream_id) const {
auto it = stream_to_group_map_.find(stream_id);
if (it == stream_to_group_map_.end()) {
return std::nullopt;
}
const auto& [group_id, group_scheduler] = *it->second;
std::optional<SendOrder> send_order =
group_scheduler.GetPriorityFor(stream_id);
if (!send_order.has_value()) {
return std::nullopt;
}
return StreamPriority{group_id, *send_order};
}
absl::StatusOr<bool> PriorityScheduler::ShouldYield(StreamId stream_id) const {
auto it = stream_to_group_map_.find(stream_id);
if (it == stream_to_group_map_.end()) {
return absl::NotFoundError("Stream ID not registered");
}
const auto& [group_id, group_scheduler] = *it->second;
absl::StatusOr<bool> per_group_result = active_groups_.ShouldYield(group_id);
QUICHE_RETURN_IF_ERROR(per_group_result.status());
if (*per_group_result) {
return true;
}
return group_scheduler.ShouldYield(stream_id);
}
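// Pops the next stream to write: the top-level scheduler selects the next
// active send group, the per-group scheduler selects a stream within it, and
// the group is re-queued while it still has scheduled streams.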
absl::StatusOr<StreamId> PriorityScheduler::PopFront() {
absl::StatusOr<SendGroupId> group_id = active_groups_.PopFront();
QUICHE_RETURN_IF_ERROR(group_id.status());
auto it = per_group_schedulers_.find(*group_id);
if (it == per_group_schedulers_.end()) {
return absl::InternalError(
"Scheduled a group with no per-group scheduler attached");
}
PerGroupScheduler& scheduler = it->second;
absl::StatusOr<StreamId> result = scheduler.PopFront();
if (!result.ok()) {
return absl::InternalError("Inactive group found in top-level schedule");
}
if (scheduler.HasScheduled()) {
QUICHE_RETURN_IF_ERROR(active_groups_.Schedule(*group_id));
}
return result;
}
absl::Status PriorityScheduler::Schedule(StreamId stream_id) {
auto it = stream_to_group_map_.find(stream_id);
if (it == stream_to_group_map_.end()) {
return absl::NotFoundError("Stream ID not registered");
}
auto& [group_id, group_scheduler] = *it->second;
QUICHE_RETURN_IF_ERROR(active_groups_.Schedule(group_id));
return group_scheduler.Schedule(stream_id);
}
bool PriorityScheduler::IsScheduled(StreamId stream_id) const {
const PerGroupScheduler* scheduler = SchedulerForStream(stream_id);
if (scheduler == nullptr) {
return false;
}
return scheduler->IsScheduled(stream_id);
}
} | #include "quiche/web_transport/web_transport_priority_scheduler.h"
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
#include "quiche/web_transport/web_transport.h"
namespace webtransport {
namespace {
using ::quiche::test::IsOkAndHolds;
using ::quiche::test::StatusIs;
using ::testing::ElementsAre;
void ScheduleIds(PriorityScheduler& scheduler, absl::Span<const StreamId> ids) {
for (StreamId id : ids) {
QUICHE_EXPECT_OK(scheduler.Schedule(id));
}
}
std::vector<StreamId> PopAll(PriorityScheduler& scheduler) {
std::vector<StreamId> result;
result.reserve(scheduler.NumScheduled());
for (;;) {
absl::StatusOr<StreamId> id = scheduler.PopFront();
if (!id.ok()) {
EXPECT_THAT(id, StatusIs(absl::StatusCode::kNotFound));
break;
}
result.push_back(*id);
}
return result;
}
TEST(WebTransportSchedulerTest, Register) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(2, StreamPriority{1, 0}));
QUICHE_EXPECT_OK(scheduler.Register(3, StreamPriority{1, 0}));
QUICHE_EXPECT_OK(scheduler.Register(4, StreamPriority{0, 0}));
EXPECT_THAT(scheduler.Register(4, StreamPriority{0, 0}),
StatusIs(absl::StatusCode::kAlreadyExists));
EXPECT_THAT(scheduler.Register(4, StreamPriority{1, 0}),
StatusIs(absl::StatusCode::kAlreadyExists));
}
TEST(WebTransportSchedulerTest, Unregister) {
PriorityScheduler scheduler;
EXPECT_FALSE(scheduler.HasRegistered());
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
EXPECT_TRUE(scheduler.HasRegistered());
QUICHE_EXPECT_OK(scheduler.Unregister(1));
EXPECT_TRUE(scheduler.HasRegistered());
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
ScheduleIds(scheduler, {0, 1});
QUICHE_EXPECT_OK(scheduler.Unregister(0));
QUICHE_EXPECT_OK(scheduler.Unregister(1));
EXPECT_FALSE(scheduler.HasRegistered());
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
EXPECT_TRUE(scheduler.HasRegistered());
EXPECT_FALSE(scheduler.HasScheduled());
}
TEST(WebTransportSchedulerTest, UpdatePriority) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 10}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 20}));
EXPECT_EQ(scheduler.GetPriorityFor(0), (StreamPriority{0, 10}));
EXPECT_EQ(scheduler.GetPriorityFor(1), (StreamPriority{0, 20}));
QUICHE_EXPECT_OK(scheduler.UpdateSendGroup(0, 1));
QUICHE_EXPECT_OK(scheduler.UpdateSendOrder(1, 40));
EXPECT_EQ(scheduler.GetPriorityFor(0), (StreamPriority{1, 10}));
EXPECT_EQ(scheduler.GetPriorityFor(1), (StreamPriority{0, 40}));
EXPECT_THAT(scheduler.UpdateSendGroup(1000, 1),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_THAT(scheduler.UpdateSendOrder(1000, 1),
StatusIs(absl::StatusCode::kNotFound));
EXPECT_EQ(scheduler.GetPriorityFor(1000), std::nullopt);
}
TEST(WebTransportSchedulerTest, Schedule) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
EXPECT_FALSE(scheduler.IsScheduled(0));
EXPECT_FALSE(scheduler.IsScheduled(1));
EXPECT_FALSE(scheduler.IsScheduled(1000));
QUICHE_EXPECT_OK(scheduler.Schedule(0));
EXPECT_TRUE(scheduler.IsScheduled(0));
EXPECT_FALSE(scheduler.IsScheduled(1));
QUICHE_EXPECT_OK(scheduler.Schedule(1));
EXPECT_TRUE(scheduler.IsScheduled(0));
EXPECT_TRUE(scheduler.IsScheduled(1));
EXPECT_THAT(scheduler.Schedule(0), StatusIs(absl::StatusCode::kOk));
EXPECT_THAT(scheduler.Schedule(2), StatusIs(absl::StatusCode::kNotFound));
}
TEST(WebTransportSchedulerTest, SamePriority) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(2, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(3, StreamPriority{0, 0}));
ScheduleIds(scheduler, {0, 1, 2, 3});
EXPECT_EQ(scheduler.NumScheduled(), 4);
EXPECT_THAT(PopAll(scheduler), ElementsAre(0, 1, 2, 3));
ScheduleIds(scheduler, {3, 1, 2});
EXPECT_THAT(PopAll(scheduler), ElementsAre(3, 1, 2));
}
TEST(WebTransportSchedulerTest, SingleBucketOrdered) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 1}));
QUICHE_EXPECT_OK(scheduler.Register(2, StreamPriority{0, 2}));
QUICHE_EXPECT_OK(scheduler.Register(3, StreamPriority{0, 3}));
ScheduleIds(scheduler, {0, 1, 2, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(3, 2, 1, 0));
ScheduleIds(scheduler, {3, 1, 2, 0});
EXPECT_THAT(PopAll(scheduler), ElementsAre(3, 2, 1, 0));
}
TEST(WebTransportSchedulerTest, EveryStreamInItsOwnBucket) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{1, 1}));
QUICHE_EXPECT_OK(scheduler.Register(2, StreamPriority{2, 2}));
QUICHE_EXPECT_OK(scheduler.Register(3, StreamPriority{3, 3}));
ScheduleIds(scheduler, {0, 1, 2, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(0, 1, 2, 3));
ScheduleIds(scheduler, {3, 1, 2});
EXPECT_THAT(PopAll(scheduler), ElementsAre(3, 1, 2));
}
TEST(WebTransportSchedulerTest, TwoBucketsNoSendOrder) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(2, StreamPriority{1, 0}));
QUICHE_EXPECT_OK(scheduler.Register(3, StreamPriority{1, 0}));
ScheduleIds(scheduler, {0, 1, 2, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(0, 2, 1, 3));
ScheduleIds(scheduler, {0, 2, 1, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(0, 2, 1, 3));
ScheduleIds(scheduler, {3, 2, 1, 0});
EXPECT_THAT(PopAll(scheduler), ElementsAre(3, 1, 2, 0));
ScheduleIds(scheduler, {0, 2});
EXPECT_THAT(scheduler.PopFront(), IsOkAndHolds(0));
ScheduleIds(scheduler, {1, 3, 0});
EXPECT_THAT(PopAll(scheduler), ElementsAre(2, 1, 3, 0));
}
TEST(WebTransportSchedulerTest, TwoBucketsWithSendOrder) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 10}));
QUICHE_EXPECT_OK(scheduler.Register(2, StreamPriority{1, 20}));
QUICHE_EXPECT_OK(scheduler.Register(3, StreamPriority{1, 30}));
ScheduleIds(scheduler, {0, 1, 2, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(1, 3, 0, 2));
ScheduleIds(scheduler, {3, 2, 1, 0});
EXPECT_THAT(PopAll(scheduler), ElementsAre(3, 1, 2, 0));
}
TEST(WebTransportSchedulerTest, ShouldYield) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(2, StreamPriority{0, 10}));
QUICHE_EXPECT_OK(scheduler.Register(3, StreamPriority{1, 0}));
EXPECT_THAT(scheduler.ShouldYield(0), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(1), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(2), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(3), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(4), StatusIs(absl::StatusCode::kNotFound));
QUICHE_EXPECT_OK(scheduler.Schedule(0));
EXPECT_THAT(scheduler.ShouldYield(0), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(1), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(2), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(3), IsOkAndHolds(true));
PopAll(scheduler);
QUICHE_EXPECT_OK(scheduler.Schedule(2));
EXPECT_THAT(scheduler.ShouldYield(0), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(1), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(2), IsOkAndHolds(false));
EXPECT_THAT(scheduler.ShouldYield(3), IsOkAndHolds(true));
PopAll(scheduler);
QUICHE_EXPECT_OK(scheduler.Schedule(3));
EXPECT_THAT(scheduler.ShouldYield(0), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(1), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(2), IsOkAndHolds(true));
EXPECT_THAT(scheduler.ShouldYield(3), IsOkAndHolds(false));
PopAll(scheduler);
}
TEST(WebTransportSchedulerTest, UpdatePriorityWhileScheduled) {
PriorityScheduler scheduler;
QUICHE_EXPECT_OK(scheduler.Register(0, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(1, StreamPriority{0, 0}));
QUICHE_EXPECT_OK(scheduler.Register(2, StreamPriority{1, 0}));
QUICHE_EXPECT_OK(scheduler.Register(3, StreamPriority{1, 0}));
ScheduleIds(scheduler, {0, 1, 2, 3});
EXPECT_THAT(PopAll(scheduler), ElementsAre(0, 2, 1, 3));
ScheduleIds(scheduler, {0, 1, 2, 3});
QUICHE_EXPECT_OK(scheduler.UpdateSendOrder(1, 10));
EXPECT_THAT(PopAll(scheduler), ElementsAre(1, 2, 0, 3));
ScheduleIds(scheduler, {0, 1, 2, 3});
QUICHE_EXPECT_OK(scheduler.UpdateSendGroup(1, 1));
EXPECT_THAT(PopAll(scheduler), ElementsAre(0, 1, 2, 3));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/web_transport/web_transport_priority_scheduler.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/web_transport/web_transport_priority_scheduler_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
90e26224-4975-4dca-8430-82008ab8aee2 | cpp | google/libphonenumber | area_code_map | cpp/src/phonenumbers/geocoding/area_code_map.cc | cpp/test/phonenumbers/geocoding/area_code_map_test.cc | #include "phonenumbers/geocoding/area_code_map.h"
#include <cstddef>
#include "phonenumbers/geocoding/default_map_storage.h"
#include "phonenumbers/phonenumber.pb.h"
#include "phonenumbers/phonenumberutil.h"
#include "phonenumbers/stringutil.h"
namespace i18n {
namespace phonenumbers {
AreaCodeMap::AreaCodeMap()
: phone_util_(*PhoneNumberUtil::GetInstance()) {
}
AreaCodeMap::~AreaCodeMap() {
}
void AreaCodeMap::ReadAreaCodeMap(const PrefixDescriptions* descriptions) {
DefaultMapStorage* storage = new DefaultMapStorage();
storage->ReadFromMap(descriptions);
storage_.reset(storage);
}
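// Returns the description attached to the longest matching prefix of |number|,
// trying the possible prefix lengths from longest to shortest, or NULL when
// nothing matches.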
const char* AreaCodeMap::Lookup(const PhoneNumber& number) const {
const int entries = storage_->GetNumOfEntries();
if (!entries) {
return NULL;
}
string national_number;
phone_util_.GetNationalSignificantNumber(number, &national_number);
int64 phone_prefix;
safe_strto64(SimpleItoa(number.country_code()) + national_number,
&phone_prefix);
const int* const lengths = storage_->GetPossibleLengths();
const int lengths_size = storage_->GetPossibleLengthsSize();
int current_index = entries - 1;
for (int lengths_index = lengths_size - 1; lengths_index >= 0;
--lengths_index) {
const int possible_length = lengths[lengths_index];
string phone_prefix_str = SimpleItoa(phone_prefix);
if (static_cast<int>(phone_prefix_str.length()) > possible_length) {
safe_strto64(phone_prefix_str.substr(0, possible_length), &phone_prefix);
}
current_index = BinarySearch(0, current_index, phone_prefix);
if (current_index < 0) {
return NULL;
}
const int32 current_prefix = storage_->GetPrefix(current_index);
if (phone_prefix == current_prefix) {
return storage_->GetDescription(current_index);
}
}
return NULL;
}
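// Binary search over prefixes [start, end]. Returns the index of |value| when
// present, otherwise the index of the largest prefix smaller than |value|
// (which can fall below |start| when every prefix in the range is larger).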
int AreaCodeMap::BinarySearch(int start, int end, int64 value) const {
int current = 0;
while (start <= end) {
current = (start + end) / 2;
int32 current_value = storage_->GetPrefix(current);
if (current_value == value) {
return current;
} else if (current_value > value) {
--current;
end = current;
} else {
start = current + 1;
}
}
return current;
}
}
} | #include "phonenumbers/geocoding/area_code_map.h"
#include <cstddef>
#include <vector>
#include <gtest/gtest.h>
#include "phonenumbers/geocoding/geocoding_data.h"
#include "phonenumbers/phonenumber.pb.h"
namespace i18n {
namespace phonenumbers {
namespace {
void MakeCodeMap(const PrefixDescriptions* descriptions,
scoped_ptr<AreaCodeMap>* code_map) {
scoped_ptr<AreaCodeMap> cm(new AreaCodeMap());
cm->ReadAreaCodeMap(descriptions);
code_map->swap(cm);
}
const int32 prefix_1_us_prefixes[] = {
1212,
1480,
1650,
1907,
1201664,
1480893,
1501372,
1626308,
1650345,
1867993,
1972480,
};
const char* prefix_1_us_descriptions[] = {
"New York",
"Arizona",
"California",
"Alaska",
"Westwood, NJ",
"Phoenix, AZ",
"Little Rock, AR",
"Alhambra, CA",
"San Mateo, CA",
"Dawson, YT",
"Richardson, TX",
};
const int32 prefix_1_us_lengths[] = {
4, 7,
};
const PrefixDescriptions prefix_1_us = {
prefix_1_us_prefixes,
sizeof(prefix_1_us_prefixes) / sizeof(*prefix_1_us_prefixes),
prefix_1_us_descriptions,
prefix_1_us_lengths,
sizeof(prefix_1_us_lengths) / sizeof(*prefix_1_us_lengths),
};
const int32 prefix_39_it_prefixes[] = {
3902,
3906,
39010,
390131,
390321,
390975,
};
const char* prefix_39_it_descriptions[] = {
"Milan",
"Rome",
"Genoa",
"Alessandria",
"Novara",
"Potenza",
};
const int32 prefix_39_it_lengths[] = {
4, 5, 6,
};
const PrefixDescriptions prefix_39_it = {
prefix_39_it_prefixes,
sizeof(prefix_39_it_prefixes) / sizeof(*prefix_39_it_prefixes),
prefix_39_it_descriptions,
prefix_39_it_lengths,
sizeof(prefix_39_it_lengths) / sizeof(*prefix_39_it_lengths),
};
void MakeCodeMapUS(scoped_ptr<AreaCodeMap>* code_map) {
MakeCodeMap(&prefix_1_us, code_map);
}
void MakeCodeMapIT(scoped_ptr<AreaCodeMap>* code_map) {
MakeCodeMap(&prefix_39_it, code_map);
}
PhoneNumber MakePhoneNumber(int32 country_code, uint64 national_number) {
PhoneNumber number;
number.set_country_code(country_code);
number.set_national_number(national_number);
return number;
}
}
class AreaCodeMapTest : public testing::Test {
protected:
virtual void SetUp() {
MakeCodeMapUS(&map_US_);
MakeCodeMapIT(&map_IT_);
}
scoped_ptr<AreaCodeMap> map_US_;
scoped_ptr<AreaCodeMap> map_IT_;
};
TEST_F(AreaCodeMapTest, TestLookupInvalidNumberUS) {
EXPECT_STREQ("New York", map_US_->Lookup(MakePhoneNumber(1, 2121234567L)));
}
TEST_F(AreaCodeMapTest, TestLookupNumberNJ) {
EXPECT_STREQ("Westwood, NJ",
map_US_->Lookup(MakePhoneNumber(1, 2016641234L)));
}
TEST_F(AreaCodeMapTest, TestLookupNumberNY) {
EXPECT_STREQ("New York", map_US_->Lookup(MakePhoneNumber(1, 2126641234L)));
}
TEST_F(AreaCodeMapTest, TestLookupNumberCA1) {
EXPECT_STREQ("San Mateo, CA",
map_US_->Lookup(MakePhoneNumber(1, 6503451234LL)));
}
TEST_F(AreaCodeMapTest, TestLookupNumberCA2) {
EXPECT_STREQ("California", map_US_->Lookup(MakePhoneNumber(1, 6502531234LL)));
}
TEST_F(AreaCodeMapTest, TestLookupNumberTX) {
EXPECT_STREQ("Richardson, TX",
map_US_->Lookup(MakePhoneNumber(1, 9724801234LL)));
}
TEST_F(AreaCodeMapTest, TestLookupNumberNotFoundTX) {
EXPECT_STREQ(NULL, map_US_->Lookup(MakePhoneNumber(1, 9724811234LL)));
}
TEST_F(AreaCodeMapTest, TestLookupNumberCH) {
EXPECT_STREQ(NULL, map_US_->Lookup(MakePhoneNumber(41, 446681300L)));
}
TEST_F(AreaCodeMapTest, TestLookupNumberIT) {
PhoneNumber number = MakePhoneNumber(39, 212345678L);
number.set_italian_leading_zero(true);
EXPECT_STREQ("Milan", map_IT_->Lookup(number));
number.set_national_number(612345678L);
EXPECT_STREQ("Rome", map_IT_->Lookup(number));
number.set_national_number(3211234L);
EXPECT_STREQ("Novara", map_IT_->Lookup(number));
number.set_national_number(321123456L);
number.set_italian_leading_zero(false);
EXPECT_STREQ(NULL, map_IT_->Lookup(number));
number.set_national_number(321123L);
number.set_italian_leading_zero(true);
EXPECT_STREQ("Novara", map_IT_->Lookup(number));
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/geocoding/area_code_map.cc | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/geocoding/area_code_map_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
4c628d76-0061-41ef-ac7a-3ab387fdc529 | cpp | tensorflow/tensorflow | benchmark_tflite_model | tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc | tensorflow/lite/tools/benchmark/benchmark_tflite_model_test.cc | #include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <functional>
#include <iostream>
#include <memory>
#include <random>
#include <sstream>
#include <string>
#include <string_view>
#include <unordered_set>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "ruy/profiler/profiler.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/register.h"
#include "tensorflow/lite/core/model.h"
#include "tensorflow/lite/core/model_builder.h"
#include "tensorflow/lite/core/signature_runner.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/op_resolver.h"
#include "tensorflow/lite/optional_debug_tools.h"
#include "tensorflow/lite/profiling/model_runtime_info.h"
#include "tensorflow/lite/profiling/profile_summary_formatter.h"
#include "tensorflow/lite/string_util.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/benchmark/benchmark_utils.h"
#include "tensorflow/lite/tools/benchmark/profiling_listener.h"
#include "tensorflow/lite/tools/delegates/delegate_provider.h"
#include "tensorflow/lite/tools/logging.h"
#include "tensorflow/lite/tools/model_loader.h"
#include "tensorflow/lite/tools/utils.h"
void RegisterSelectedOps(::tflite::MutableOpResolver* resolver);
void ABSL_ATTRIBUTE_WEAK
RegisterSelectedOps(::tflite::MutableOpResolver* resolver) {}
namespace tflite {
namespace benchmark {
namespace {
using utils::InputTensorData;
using utils::VoidUniquePtr;
#if defined(TFLITE_PROFILING_ENABLED)
constexpr bool kOpProfilingEnabledDefault = true;
#else
constexpr bool kOpProfilingEnabledDefault = false;
#endif
constexpr char kOpProfilingOutputModeStdout[] = "stdout";
constexpr char kOpProfilingOutputModeCsv[] = "csv";
constexpr char kOpProfilingOutputModeProto[] = "proto";
const char* kOpProfilingOutputModes[] = {kOpProfilingOutputModeStdout,
kOpProfilingOutputModeCsv,
kOpProfilingOutputModeProto};
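// Copies |tensor|'s contents into the tensorflow::Example feature keyed by the
// tensor's name: float tensors populate a float_list, integer tensors an
// int64_list; unsupported element types (or a tensor without dims) fail.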
TfLiteStatus MaybeSetFeatureValuesFromTensor(const TfLiteTensor& tensor,
tensorflow::Example& example) {
if (tensor.dims == nullptr) {
return kTfLiteError;
}
int total_elements = 1;
for (int i = 0; i < tensor.dims->size; i++) {
total_elements *= tensor.dims->data[i];
}
tensorflow::Feature& feature =
(*example.mutable_features()->mutable_feature())[tensor.name];
switch (tensor.type) {
case kTfLiteFloat32:
case kTfLiteFloat64:
feature.mutable_float_list()->mutable_value()->Resize(total_elements, 0);
return utils::TfLiteTensorToFloat32Array(
tensor,
absl::MakeSpan(
feature.mutable_float_list()->mutable_value()->mutable_data(),
feature.float_list().value_size()));
case kTfLiteUInt8:
case kTfLiteInt8:
case kTfLiteUInt16:
case kTfLiteInt16:
case kTfLiteInt32:
case kTfLiteUInt32:
case kTfLiteUInt64:
case kTfLiteInt64:
feature.mutable_int64_list()->mutable_value()->Resize(total_elements, 0);
return utils::TfLiteTensorToInt64Array(
tensor,
absl::MakeSpan(
feature.mutable_int64_list()->mutable_value()->mutable_data(),
feature.int64_list().value_size()));
default:
return kTfLiteError;
}
}
class RuyProfileListener : public BenchmarkListener {
public:
void OnBenchmarkStart(const BenchmarkParams& params) override;
void OnBenchmarkEnd(const BenchmarkResults& results) override;
private:
std::unique_ptr<ruy::profiler::ScopeProfile> ruy_profile_;
};
void RuyProfileListener::OnBenchmarkStart(const BenchmarkParams& params) {
ruy_profile_ = std::make_unique<ruy::profiler::ScopeProfile>();
}
void RuyProfileListener::OnBenchmarkEnd(const BenchmarkResults& results) {
ruy_profile_ = nullptr;
}
class InterpreterStatePrinter : public BenchmarkListener {
public:
explicit InterpreterStatePrinter(Interpreter* interpreter)
: interpreter_(interpreter) {}
void OnBenchmarkStart(const BenchmarkParams& params) override {
params_ = ¶ms;
if (params_->Get<bool>("print_preinvoke_state")) {
TFLITE_LOG(INFO) << "\n====Printing out TfLite interpreter pre-invoke "
"state begins====";
tflite::PrintInterpreterState(
interpreter_, params_->Get<int32_t>("tensor_name_display_length"),
params_->Get<int32_t>("tensor_type_display_length"),
params_->Get<int32_t>("alloc_type_display_length"));
TFLITE_LOG(INFO) << "====Printing out TfLite interpreter pre-invoke "
"state ends====\n";
}
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
if (params_->Get<bool>("print_postinvoke_state")) {
TFLITE_LOG(INFO) << "\n====Printing out TfLite interpreter post-invoke "
"state begins====";
tflite::PrintInterpreterState(
interpreter_, params_->Get<int32_t>("tensor_name_display_length"),
params_->Get<int32_t>("tensor_type_display_length"),
params_->Get<int32_t>("alloc_type_display_length"));
TFLITE_LOG(INFO) << "====Printing out TfLite interpreter post-invoke "
"state ends====\n";
}
}
private:
Interpreter* const interpreter_ = nullptr;
const BenchmarkParams* params_ = nullptr;
};
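// Saves the benchmark outputs once the run finishes: raw output-tensor bytes
// to --output_filepath and/or a serialized tensorflow::Example proto to
// --output_proto_filepath, whenever those flags are non-empty.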
class OutputSaver : public BenchmarkListener {
public:
explicit OutputSaver(BenchmarkInterpreterRunner* runner)
: interpreter_runner_(runner) {}
void OnBenchmarkStart(const BenchmarkParams& params) override {
params_ = ¶ms;
}
void OnBenchmarkEnd(const BenchmarkResults& results) override {
const std::string path = params_->Get<std::string>("output_filepath");
if (!path.empty()) {
std::ofstream ofs(path, std::ofstream::out);
if (ofs.good()) {
for (int i = 0; i < interpreter_runner_->outputs().size(); i++) {
int tensor_index = interpreter_runner_->outputs()[i];
ofs.write(interpreter_runner_->tensor(tensor_index)->data.raw,
interpreter_runner_->tensor(tensor_index)->bytes);
}
ofs.close();
}
}
const std::string output_proto_path =
params_->Get<std::string>("output_proto_filepath");
if (!output_proto_path.empty()) {
tensorflow::Example example;
for (int i = 0; i < interpreter_runner_->outputs().size(); i++) {
const int tensor_index = interpreter_runner_->outputs()[i];
const TfLiteTensor& tensor =
*(interpreter_runner_->tensor(tensor_index));
MaybeSetFeatureValuesFromTensor(tensor, example);
}
std::ofstream ofs(output_proto_path, std::ios::out | std::ios::binary);
if (ofs.good()) {
example.SerializeToOstream(&ofs);
ofs.close();
}
}
}
private:
BenchmarkInterpreterRunner* const interpreter_runner_;
const BenchmarkParams* params_ = nullptr;
};
class ModelRuntimeInfoListener : public BenchmarkListener {
public:
explicit ModelRuntimeInfoListener(Interpreter* interpreter)
: interpreter_(interpreter) {}
void OnBenchmarkStart(const BenchmarkParams& params) override {
const std::string output_file_path =
params.Get<std::string>("model_runtime_info_output_file");
const auto status =
profiling::GenerateModelRuntimeInfo(*interpreter_, output_file_path);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to generate model runtime info: " << status;
}
}
private:
Interpreter* const interpreter_ = nullptr;
};
std::vector<std::string> Split(const std::string& str, const char delim) {
if (str.empty()) {
return {};
}
return absl::StrSplit(str, delim);
}
int GetNumElements(const TfLiteIntArray* dim_array) {
int num_elements = 1;
for (size_t i = 0; i < dim_array->size; i++) {
num_elements *= dim_array->data[i];
}
return num_elements;
}
void FillRandomString(tflite::DynamicBuffer* buffer,
const TfLiteIntArray* dim_array,
const std::function<std::string()>& random_func) {
int num_elements = GetNumElements(dim_array);
for (int i = 0; i < num_elements; ++i) {
auto str = random_func();
buffer->AddString(str.data(), str.length());
}
}
int FindLayerInfoIndex(std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info,
const std::string& input_name,
const string& names_string) {
for (int i = 0; i < info->size(); ++i) {
if (info->at(i).name == input_name) {
return i;
}
}
TFLITE_LOG(FATAL) << "Cannot find the corresponding input_layer name("
<< input_name << ") in --input_layer as " << names_string;
return -1;
}
TfLiteStatus PopulateInputValueRanges(
const std::string& names_string, const std::string& value_ranges_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
std::vector<std::string> value_ranges = Split(value_ranges_string, ':');
for (const auto& val : value_ranges) {
std::vector<std::string> name_range = Split(val, ',');
if (name_range.size() != 3) {
TFLITE_LOG(ERROR) << "Wrong input value range item specified: " << val;
return kTfLiteError;
}
int layer_info_idx = FindLayerInfoIndex(info, name_range[0], names_string);
int low, high;
bool has_low = absl::SimpleAtoi(name_range[1], &low);
bool has_high = absl::SimpleAtoi(name_range[2], &high);
if (!has_low || !has_high || low > high) {
TFLITE_LOG(ERROR)
<< "Wrong low and high value of the input value range specified: "
<< val;
return kTfLiteError;
}
info->at(layer_info_idx).has_value_range = true;
info->at(layer_info_idx).low = low;
info->at(layer_info_idx).high = high;
}
return kTfLiteOk;
}
TfLiteStatus PopulateInputValueFiles(
const std::string& names_string, const std::string& value_files_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
std::vector<std::string> value_files = Split(value_files_string, ',');
for (const auto& val : value_files) {
std::pair<std::string, std::string> name_file_pair;
TfLiteStatus status = SplitInputLayerNameAndValueFile(val, name_file_pair);
if (status != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Wrong input value file item specified: " << val;
TFLITE_LOG(ERROR) << status;
return status;
}
int layer_info_idx =
FindLayerInfoIndex(info, name_file_pair.first, names_string);
if (info->at(layer_info_idx).has_value_range) {
TFLITE_LOG(WARN)
<< "The input_name:" << info->at(layer_info_idx).name
<< " appears both in input_layer_value_files and "
"input_layer_value_range. The input_layer_value_range of the "
"input_name will be ignored.";
}
info->at(layer_info_idx).input_file_path = name_file_pair.second;
}
return kTfLiteOk;
}
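// Parses the --input_layer, --input_layer_shape, --input_layer_value_range and
// --input_layer_value_files flags into InputLayerInfo entries; the number of
// names must match the number of shapes, and shapes may not contain -1.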
TfLiteStatus PopulateInputLayerInfo(
const std::string& names_string, const std::string& shapes_string,
const std::string& value_ranges_string,
const std::string& value_files_string,
std::vector<BenchmarkTfLiteModel::InputLayerInfo>* info) {
info->clear();
std::vector<std::string> names = Split(names_string, ',');
std::vector<std::string> shapes = Split(shapes_string, ':');
if (names.size() != shapes.size()) {
TFLITE_LOG(ERROR)
<< "The number of items in --input_layer_shape (" << shapes_string
<< ", with " << shapes.size()
<< " items) must match the number of items in --input_layer ("
<< names_string << ", with " << names.size()
<< " items). For example --input_layer=input1,input2 "
"--input_layer_shape=1,224,224,4:1,20";
return kTfLiteError;
}
for (int i = 0; i < names.size(); ++i) {
info->push_back(BenchmarkTfLiteModel::InputLayerInfo());
BenchmarkTfLiteModel::InputLayerInfo& input = info->back();
input.name = names[i];
TFLITE_TOOLS_CHECK(util::SplitAndParse(shapes[i], ',', &input.shape))
<< "Incorrect size string specified: " << shapes[i];
for (int dim : input.shape) {
if (dim == -1) {
TFLITE_LOG(ERROR)
<< "Any unknown sizes in the shapes (-1's) must be replaced"
<< " with the size you want to benchmark with.";
return kTfLiteError;
}
}
}
TF_LITE_ENSURE_STATUS(
PopulateInputValueRanges(names_string, value_ranges_string, info));
TF_LITE_ENSURE_STATUS(
PopulateInputValueFiles(names_string, value_files_string, info));
return kTfLiteOk;
}
std::shared_ptr<profiling::ProfileSummaryFormatter>
CreateProfileSummaryFormatter(const std::string& output_mode) {
if (output_mode == kOpProfilingOutputModeCsv) {
return std::make_shared<profiling::ProfileSummaryCSVFormatter>();
} else if (output_mode == kOpProfilingOutputModeProto) {
return std::make_shared<profiling::ProfileSummaryProtoFormatter>();
} else {
return std::make_shared<profiling::ProfileSummaryDefaultFormatter>();
}
}
}
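// Splits a "name:file" item at its single unescaped ':' delimiter; "::" is
// treated as an escaped ':' and is unescaped in both the returned name and the
// file path. Fails if there is no delimiter or more than one.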
TfLiteStatus SplitInputLayerNameAndValueFile(
const std::string& name_and_value_file,
std::pair<std::string, std::string>& name_file_pair) {
int delim_index = -1;
for (int i = 0; i < name_and_value_file.length() - 1; ++i) {
if (name_and_value_file[i] == ':') {
if (name_and_value_file[i + 1] == ':') {
++i;
} else {
if (delim_index == -1) {
delim_index = i;
} else {
TFLITE_LOG(ERROR)
<< name_and_value_file << " contains more than one delimiter.";
return kTfLiteError;
}
}
}
}
if (delim_index == -1) {
TFLITE_LOG(ERROR) << name_and_value_file
<< " doesn't contain any delimiter.";
return kTfLiteError;
}
name_file_pair.first = absl::StrReplaceAll(
name_and_value_file.substr(0, delim_index), {{"::", ":"}});
name_file_pair.second = absl::StrReplaceAll(
name_and_value_file.substr(delim_index + 1), {{"::", ":"}});
return kTfLiteOk;
}
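// Creates the runner used to drive inference. When a signature key is given,
// the runner is bound to that signature's SignatureRunner and subgraph;
// without one, it drives the interpreter's primary subgraph directly.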
std::pair<TfLiteStatus, std::unique_ptr<BenchmarkInterpreterRunner>>
BenchmarkInterpreterRunner::Create(tflite::Interpreter* const interpreter,
std::string signature_key) {
if (!signature_key.empty()) {
const std::vector<const std::string*>& keys = interpreter->signature_keys();
bool found = std::any_of(
keys.begin(), keys.end(),
[&signature_key](const auto& k) { return *k == signature_key; });
if (keys.size() > 1 && (signature_key.empty() || !found)) {
TFLITE_LOG(ERROR)
<< "Signature not specified or incorrect for graph with multiple "
"signatures. Pass one of the following to the flag "
"\"--signature_to_run_for\"";
for (const std::string* k : keys) {
TFLITE_LOG(ERROR) << " #> Signature key: " << *k;
}
return {kTfLiteError, nullptr};
} else if (keys.size() == 1 && signature_key.empty()) {
signature_key = *keys[0];
}
if (!signature_key.empty() && !keys.empty()) {
TFLITE_LOG(INFO) << "Using signature: " << signature_key;
auto signature_runner =
interpreter->GetSignatureRunner(signature_key.c_str());
if (signature_runner == nullptr) {
return {kTfLiteError, nullptr};
} else {
int subgraph_index =
interpreter->GetSubgraphIndexFromSignature(signature_key.c_str());
return {kTfLiteOk, std::make_unique<BenchmarkInterpreterRunner>(
interpreter, signature_runner,
interpreter->subgraph(subgraph_index))};
}
}
}
return {kTfLiteOk, std::make_unique<BenchmarkInterpreterRunner>(
interpreter, nullptr, nullptr)};
}
TfLiteStatus BenchmarkInterpreterRunner::AllocateTensors() {
if (signature_runner_ != nullptr) {
return signature_runner_->AllocateTensors();
} else {
return interpreter_->AllocateTensors();
}
}
TfLiteStatus BenchmarkInterpreterRunner::Invoke() {
if (signature_runner_ != nullptr) {
return signature_runner_->Invoke();
} else {
return interpreter_->Invoke();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::execution_plan() const {
if (signature_runner_ != nullptr) {
return subgraph_->execution_plan();
} else {
return interpreter_->execution_plan();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::inputs() const {
if (signature_runner_ != nullptr) {
return subgraph_->inputs();
} else {
return interpreter_->inputs();
}
}
const std::vector<int>& BenchmarkInterpreterRunner::outputs() const {
if (signature_runner_ != nullptr) {
return subgraph_->outputs();
} else {
return interpreter_->outputs();
}
}
TfLiteTensor* BenchmarkInterpreterRunner::tensor(int tensor_index) {
if (signature_runner_ != nullptr) {
return subgraph_->tensor(tensor_index);
} else {
return interpreter_->tensor(tensor_index);
}
}
const std::pair<TfLiteNode, TfLiteRegistration>*
BenchmarkInterpreterRunner::node_and_registration(int node_index) const {
if (signature_runner_ != nullptr) {
return subgraph_->node_and_registration(node_index);
} else {
return interpreter_->node_and_registration(node_index);
}
}
TfLiteStatus BenchmarkInterpreterRunner::ResizeInputTensor(
int tensor_index, const std::vector<int>& new_size) {
if (signature_runner_ != nullptr) {
return subgraph_->ResizeInputTensor(tensor_index, new_size);
} else {
return interpreter_->ResizeInputTensor(tensor_index, new_size);
}
}
BenchmarkParams BenchmarkTfLiteModel::DefaultParams() {
BenchmarkParams default_params = BenchmarkModel::DefaultParams();
default_params.AddParam("graph", BenchmarkParam::Create<std::string>(""));
default_params.AddParam("signature_to_run_for",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("list_signatures",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("input_layer",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_shape",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_value_range",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("input_layer_value_files",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("allow_fp16", BenchmarkParam::Create<bool>(false));
default_params.AddParam("require_full_delegation",
BenchmarkParam::Create<bool>(false));
default_params.AddParam(
"enable_op_profiling",
BenchmarkParam::Create<bool>(kOpProfilingEnabledDefault));
default_params.AddParam(
"op_profiling_output_mode",
BenchmarkParam::Create<std::string>(kOpProfilingOutputModeStdout));
default_params.AddParam("op_profiling_output_file",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("max_profiling_buffer_entries",
BenchmarkParam::Create<int32_t>(1024));
default_params.AddParam("allow_dynamic_profiling_buffer_increase",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("profiling_output_csv_file",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("export_model_runtime_info",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("model_runtime_info_output_file",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("print_preinvoke_state",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("print_postinvoke_state",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("release_dynamic_tensors",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("optimize_memory_for_large_tensors",
BenchmarkParam::Create<int32_t>(0));
default_params.AddParam("disable_delegate_clustering",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("enable_builtin_cast_constant_cache",
BenchmarkParam::Create<bool>(false));
default_params.AddParam("output_filepath",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("output_proto_filepath",
BenchmarkParam::Create<std::string>(""));
default_params.AddParam("tensor_name_display_length",
BenchmarkParam::Create<int32_t>(25));
default_params.AddParam("tensor_type_display_length",
BenchmarkParam::Create<int32_t>(15));
default_params.AddParam("alloc_type_display_length",
BenchmarkParam::Create<int32_t>(18));
tools::ProvidedDelegateList delegate_providers(&default_params);
delegate_providers.AddAllDelegateParams();
return default_params;
}
BenchmarkTfLiteModel::BenchmarkTfLiteModel(BenchmarkParams params)
: BenchmarkModel(std::move(params)),
random_engine_(std::random_device()()) {
AddListener(&log_output_);
}
void BenchmarkTfLiteModel::CleanUp() {
inputs_data_.clear();
}
BenchmarkTfLiteModel::~BenchmarkTfLiteModel() {
CleanUp();
interpreter_runner_.reset();
interpreter_.reset();
}
std::vector<Flag> BenchmarkTfLiteModel::GetFlags() {
std::vector<Flag> flags = BenchmarkModel::GetFlags();
std::vector<Flag> specific_flags = {
CreateFlag<std::string>("graph", ¶ms_, "graph file name"),
CreateFlag<std::string>("input_layer", ¶ms_, "input layer names"),
CreateFlag<std::string>("input_layer_shape", ¶ms_,
"input layer shape"),
CreateFlag<std::string>(
"input_layer_value_range", ¶ms_,
"A map-like string representing value range for *integer* input "
"layers. Each item is separated by ':', and the item value consists "
"of input layer name and integer-only range values (both low and "
"high are inclusive) separated by ',', e.g. input1,1,2:input2,0,254"),
CreateFlag<std::string>(
"input_layer_value_files", ¶ms_,
"A map-like string representing value file. Each item is separated "
"by ',', and the item value consists "
"of input layer name and value file path separated by ':', e.g. "
"input1:file_path1,input2:file_path2. In case the input layer name "
"contains ':' e.g. \"input:0\", escape it with \"\\:\". If the "
"input_name appears both in input_layer_value_range and "
"input_layer_value_files, input_layer_value_range of the input_name "
"will be ignored. The file format is binary and it should be array "
"format or null separated strings format."),
CreateFlag<bool>("allow_fp16", ¶ms_, "allow fp16"),
CreateFlag<bool>("require_full_delegation", ¶ms_,
"require delegate to run the entire graph"),
CreateFlag<bool>("enable_op_profiling", ¶ms_, "enable op profiling"),
CreateFlag<std::string>(
"op_profiling_output_mode", ¶ms_,
"Output mode for op profiling results. Supported values are: "
"'stdout', 'csv' and 'proto'."),
CreateFlag<std::string>("op_profiling_output_file", ¶ms_,
"Output file for op profiling results."),
CreateFlag<int32_t>("max_profiling_buffer_entries", ¶ms_,
"max initial profiling buffer entries"),
CreateFlag<bool>("allow_dynamic_profiling_buffer_increase", ¶ms_,
"allow dynamic increase on profiling buffer entries"),
CreateFlag<std::string>("profiling_output_csv_file", ¶ms_,
"[DEPRECATED: Use op_profiling_output_file and "
"op_profiling_output_mode instead] File path to "
"export profile data as CSV, if not set "
"prints to stdout."),
CreateFlag<bool>("export_model_runtime_info", ¶ms_,
"Enable Model Runtime Info Export"),
CreateFlag<std::string>("model_runtime_info_output_file", ¶ms_,
"Proto File to export model runtime info to"),
CreateFlag<bool>(
"print_preinvoke_state", ¶ms_,
"print out the interpreter internals just before calling Invoke. The "
"internals will include allocated memory size of each tensor etc."),
CreateFlag<bool>(
"print_postinvoke_state", ¶ms_,
"print out the interpreter internals just before benchmark completes "
"(i.e. after all repeated Invoke calls complete). The internals will "
"include allocated memory size of each tensor etc."),
CreateFlag<bool>("release_dynamic_tensors", ¶ms_,
"Ensure dynamic tensor's memory is released when they "
"are not used."),
CreateFlag<int32_t>(
"optimize_memory_for_large_tensors", ¶ms_,
"Optimize memory usage for large tensors with sacrificing latency."),
CreateFlag<bool>("disable_delegate_clustering", ¶ms_,
"Disable delegate clustering."),
CreateFlag<bool>(
"enable_builtin_cast_constant_cache", ¶ms_,
"Cache the output of the builtin cast operation when its input "
"is a constant tensor."),
CreateFlag<std::string>(
"output_filepath", ¶ms_,
"File path to export outputs layer as binary data."),
CreateFlag<std::string>(
"output_proto_filepath", ¶ms_,
"File path to export outputs layer as tf example proto."),
CreateFlag<int32_t>(
"tensor_name_display_length", ¶ms_,
"The number of characters to show for the tensor's name when "
"printing the interpeter's state, defaults to 25."),
CreateFlag<int32_t>(
"tensor_type_display_length", ¶ms_,
"The number of characters to show for the tensor's type when "
"printing the interpeter's state, defaults to 15."),
CreateFlag<int32_t>(
"alloc_type_display_length", ¶ms_,
"The number of characters to show for the tensor's allocation type "
"when printing the interpeter's state, defaults to 18."),
CreateFlag<std::string>(
"signature_to_run_for", ¶ms_,
"If the model contains multiple signatures, use this flag to specify "
"the signature to benchmark. If multiple signatures are present and "
"this flag is not specified, the benchmark will throw an error. If "
"only one signature is present and this flag is not specified, the "
"default signature will be used."),
CreateFlag<bool>("list_signatures", ¶ms_,
"Displays all signatures present in the model and then "
"terminates the program.")};
flags.insert(flags.end(), specific_flags.begin(), specific_flags.end());
tools::ProvidedDelegateList delegate_providers(¶ms_);
delegate_providers.AppendCmdlineFlags(flags);
return flags;
}
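// Illustrative invocation exercising the flags defined above (the binary
// name, model path, and flag values here are hypothetical):
//
//   benchmark_model --graph=/tmp/model.tflite --num_threads=4 \
//     --enable_op_profiling=true --signature_to_run_for=serving_default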
void BenchmarkTfLiteModel::LogParams() {
BenchmarkModel::LogParams();
const bool verbose = params_.Get<bool>("verbose");
LOG_BENCHMARK_PARAM(std::string, "graph", "Graph", true);
LOG_BENCHMARK_PARAM(std::string, "signature_to_run_for", "Signature to run",
true);
LOG_BENCHMARK_PARAM(bool, "list_signatures",
"List signatures from the provided model", false);
LOG_BENCHMARK_PARAM(std::string, "input_layer", "Input layers", verbose);
LOG_BENCHMARK_PARAM(std::string, "input_layer_shape", "Input shapes",
verbose);
LOG_BENCHMARK_PARAM(std::string, "input_layer_value_range",
"Input value ranges", verbose);
LOG_BENCHMARK_PARAM(std::string, "input_layer_value_files",
"Input value files", verbose);
LOG_BENCHMARK_PARAM(bool, "allow_fp16", "Allow fp16", verbose);
LOG_BENCHMARK_PARAM(bool, "require_full_delegation",
"Require full delegation", verbose);
LOG_BENCHMARK_PARAM(bool, "enable_op_profiling", "Enable op profiling",
verbose);
LOG_BENCHMARK_PARAM(std::string, "op_profiling_output_mode",
"Op profiling output mode.", verbose);
LOG_BENCHMARK_PARAM(std::string, "op_profiling_output_file",
"Op profiling output file.", verbose);
LOG_BENCHMARK_PARAM(int32_t, "max_profiling_buffer_entries",
"Max initial profiling buffer entries", verbose);
LOG_BENCHMARK_PARAM(bool, "allow_dynamic_profiling_buffer_increase",
"Allow dynamic increase on profiling buffer entries",
verbose);
LOG_BENCHMARK_PARAM(std::string, "profiling_output_csv_file",
"CSV File to export profiling data to", verbose);
LOG_BENCHMARK_PARAM(bool, "export_model_runtime_info",
"Enable Model Runtime Info Export", verbose);
LOG_BENCHMARK_PARAM(std::string, "model_runtime_info_output_file",
"Proto File to export model runtime info to", verbose);
LOG_BENCHMARK_PARAM(bool, "print_preinvoke_state",
"Print pre-invoke interpreter state", verbose);
LOG_BENCHMARK_PARAM(bool, "print_postinvoke_state",
"Print post-invoke interpreter state", verbose);
LOG_BENCHMARK_PARAM(bool, "release_dynamic_tensors",
"Release dynamic tensor memory", verbose);
LOG_BENCHMARK_PARAM(int32_t, "optimize_memory_for_large_tensors",
"Optimize memory usage for large tensors", verbose);
LOG_BENCHMARK_PARAM(bool, "disable_delegate_clustering",
"Disable delegate clustering", verbose);
LOG_BENCHMARK_PARAM(bool, "enable_builtin_cast_constant_cache",
"Constant CAST output cache", verbose);
LOG_BENCHMARK_PARAM(std::string, "output_filepath",
"File path to export outputs layer to", verbose);
LOG_BENCHMARK_PARAM(std::string, "output_proto_filepath",
"File path to export outputs layer as tf example to",
verbose);
LOG_BENCHMARK_PARAM(int32_t, "tensor_name_display_length",
"Tensor name display length", verbose);
LOG_BENCHMARK_PARAM(int32_t, "tensor_type_display_length",
"Tensor type display length", verbose);
LOG_BENCHMARK_PARAM(int32_t, "alloc_type_display_length",
"Tensor allocation type display length", verbose);
for (const auto& delegate_provider :
tools::GetRegisteredDelegateProviders()) {
delegate_provider->LogParams(params_, verbose);
}
}
TfLiteStatus BenchmarkTfLiteModel::ValidateParams() {
TF_LITE_ENSURE_STATUS(BenchmarkModel::ValidateParams());
if (params_.Get<std::string>("graph").empty()) {
TFLITE_LOG(ERROR)
<< "Please specify the name of your TF Lite input file with --graph";
return kTfLiteError;
}
if (params_.Get<bool>("enable_op_profiling")) {
bool found =
std::find(std::begin(kOpProfilingOutputModes),
std::end(kOpProfilingOutputModes),
params_.Get<std::string>("op_profiling_output_mode")) !=
std::end(kOpProfilingOutputModes);
if (!found) {
TFLITE_LOG(ERROR) << "Output mode"
<< params_.Get<std::string>("op_profiling_output_mode")
<< " is not supported. Supported values are: 'stdout', "
"'csv' and 'proto'.";
return kTfLiteError;
}
if (!params_.Get<std::string>("profiling_output_csv_file").empty()) {
params_.Set<std::string>("op_profiling_output_mode",
kOpProfilingOutputModeCsv);
params_.Set<std::string>(
"op_profiling_output_file",
params_.Get<std::string>("profiling_output_csv_file"));
}
}
return PopulateInputLayerInfo(
params_.Get<std::string>("input_layer"),
params_.Get<std::string>("input_layer_shape"),
params_.Get<std::string>("input_layer_value_range"),
params_.Get<std::string>("input_layer_value_files"), &inputs_);
}
uint64_t BenchmarkTfLiteModel::ComputeInputBytes() {
TFLITE_TOOLS_CHECK(interpreter_runner_);
uint64_t total_input_bytes = 0;
for (int input : interpreter_runner_->inputs()) {
auto* t = interpreter_runner_->tensor(input);
total_input_bytes += t->bytes;
}
return total_input_bytes;
}
int64_t BenchmarkTfLiteModel::MayGetModelFileSize() {
std::string fd_or_graph_path = params_.Get<std::string>("graph");
std::vector<absl::string_view> parts = absl::StrSplit(fd_or_graph_path, ':');
if (!parts.empty() && parts[0] == "fd") {
int64_t model_size = -1;
if (parts.size() != 4 || !absl::SimpleAtoi(parts[3], &model_size)) {
TFLITE_LOG(ERROR) << "Failed to parse model file size: "
<< fd_or_graph_path;
}
return model_size;
}
std::ifstream in_file(fd_or_graph_path, std::ios::binary | std::ios::ate);
return in_file.tellg();
}
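// Reads `input_file_path` into an InputTensorData buffer. String tensors
// accept either a single serialized .pb blob or '\0'-separated strings whose
// count must match the tensor's element count; for all other types the file
// size must equal the tensor's byte size.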
InputTensorData BenchmarkTfLiteModel::LoadInputTensorData(
const TfLiteTensor& t, const std::string& input_file_path) {
std::ifstream value_file(input_file_path, std::ios::binary);
if (!value_file.good()) {
TFLITE_LOG(FATAL) << "Failed to read the input_layer_value_file:"
<< input_file_path;
}
InputTensorData t_data;
if (t.type == kTfLiteString) {
t_data.data = VoidUniquePtr(
static_cast<void*>(new tflite::DynamicBuffer()),
[](void* ptr) { delete static_cast<DynamicBuffer*>(ptr); });
if (input_file_path.size() > 3 &&
input_file_path.substr(input_file_path.size() - 3) == ".pb") {
std::stringstream buffer;
buffer << value_file.rdbuf();
static_cast<DynamicBuffer*>(t_data.data.get())
->AddString(buffer.str().data(), buffer.str().length());
TFLITE_LOG(INFO) << "Read " << buffer.str().length()
<< " bytes data from " << input_file_path << ".";
} else {
std::string line;
size_t num_line = 0;
while (std::getline(value_file, line, '\0')) {
num_line++;
static_cast<DynamicBuffer*>(t_data.data.get())
->AddString(line.data(), line.length());
}
int num_elements = GetNumElements(t.dims);
      if (num_line != static_cast<size_t>(num_elements)) {
TFLITE_LOG(FATAL)
<< "The number of string in the input_layer_value_file("
<< input_file_path << ") is " << num_line << ". It should be "
<< num_elements << ".";
}
}
} else {
value_file.seekg(0, std::ios_base::end);
if (value_file.tellg() != t.bytes) {
TFLITE_LOG(FATAL) << "The size of " << input_file_path << " is "
<< value_file.tellg() << " bytes. It should be "
<< t.bytes << " bytes.";
}
t_data.bytes = t.bytes;
t_data.data =
VoidUniquePtr(static_cast<void*>(new char[t.bytes]),
[](void* ptr) { delete[] static_cast<char*>(ptr); });
value_file.clear();
value_file.seekg(0, std::ios_base::beg);
value_file.read(static_cast<char*>(t_data.data.get()), t.bytes);
}
return t_data;
}
InputTensorData BenchmarkTfLiteModel::CreateRandomTensorData(
const TfLiteTensor& t, const InputLayerInfo* layer_info) {
float low_range = 0;
float high_range = 0;
if (layer_info && layer_info->has_value_range) {
low_range = layer_info->low;
high_range = layer_info->high;
} else {
utils::GetDataRangesForType(t.type, &low_range, &high_range);
}
return utils::CreateRandomTensorData(t, low_range, high_range);
}
TfLiteStatus BenchmarkTfLiteModel::PrepareInputData() {
CleanUp();
const std::vector<int>& runner_inputs = interpreter_runner_->inputs();
for (int i = 0; i < runner_inputs.size(); ++i) {
int tensor_index = runner_inputs[i];
const TfLiteTensor& t = *(interpreter_runner_->tensor(tensor_index));
const InputLayerInfo* input_layer_info = nullptr;
if (!inputs_.empty()) input_layer_info = &inputs_[i];
InputTensorData t_data;
if (input_layer_info && !input_layer_info->input_file_path.empty()) {
t_data = LoadInputTensorData(t, input_layer_info->input_file_path);
} else {
t_data = CreateRandomTensorData(t, input_layer_info);
}
inputs_data_.push_back(std::move(t_data));
}
return kTfLiteOk;
}
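// Copies the data prepared by PrepareInputData back into the input tensors
// before each run. String tensors without prepared data are filled with a
// fixed placeholder string.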
TfLiteStatus BenchmarkTfLiteModel::ResetInputsAndOutputs() {
const std::vector<int>& runner_inputs = interpreter_runner_->inputs();
for (int j = 0; j < runner_inputs.size(); ++j) {
int i = runner_inputs[j];
TfLiteTensor* t = interpreter_runner_->tensor(i);
if (t->type == kTfLiteString) {
if (inputs_data_[j].data) {
static_cast<DynamicBuffer*>(inputs_data_[j].data.get())
->WriteToTensor(t, nullptr);
} else {
tflite::DynamicBuffer buffer;
FillRandomString(&buffer, t->dims, []() {
return "we're have some friends over saturday to hang out in "
"the "
"yard";
});
buffer.WriteToTensor(t, nullptr);
}
} else {
std::memcpy(t->data.raw, inputs_data_[j].data.get(),
inputs_data_[j].bytes);
}
}
return kTfLiteOk;
}
TfLiteStatus BenchmarkTfLiteModel::InitInterpreter() {
auto resolver = GetOpResolver();
const int32_t num_threads = params_.Get<int32_t>("num_threads");
const bool use_caching = params_.Get<bool>("use_caching");
InterpreterOptions options;
options.SetEnsureDynamicTensorsAreReleased(
params_.Get<bool>("release_dynamic_tensors"));
options.OptimizeMemoryForLargeTensors(
params_.Get<int32_t>("optimize_memory_for_large_tensors"));
options.SetDisableDelegateClustering(
params_.Get<bool>("disable_delegate_clustering"));
options.SetCacheConstantCastOp(
params_.Get<bool>("enable_builtin_cast_constant_cache"));
tflite::InterpreterBuilder builder(*model_, *resolver, &options);
if (builder.SetNumThreads(num_threads) != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to set thread number";
return kTfLiteError;
}
builder(&interpreter_);
if (!interpreter_) {
TFLITE_LOG(ERROR) << "Failed to initialize the interpreter";
return kTfLiteError;
}
if (use_caching) {
external_context_ = std::make_unique<tflite::ExternalCpuBackendContext>();
std::unique_ptr<tflite::CpuBackendContext> cpu_backend_context(
new tflite::CpuBackendContext());
cpu_backend_context->SetUseCaching(true);
cpu_backend_context->SetMaxNumThreads(num_threads);
external_context_->set_internal_backend_context(
std::move(cpu_backend_context));
interpreter_->SetExternalContext(kTfLiteCpuBackendContext,
external_context_.get());
}
return kTfLiteOk;
}
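// Initialization sequence: load the model, build the interpreter, optionally
// just list signatures and exit, size the profiling buffer to cover all
// nodes, attach listeners, create the runner for the requested signature,
// resize inputs, apply delegates (enforcing --require_full_delegation when
// set), and finally allocate tensors.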
TfLiteStatus BenchmarkTfLiteModel::Init() {
TF_LITE_ENSURE_STATUS(LoadModel());
TF_LITE_ENSURE_STATUS(InitInterpreter());
if (params_.Get<bool>("list_signatures")) {
const std::vector<const std::string*>& keys =
interpreter_->signature_keys();
TFLITE_LOG(INFO) << "The Model contains " << keys.size()
<< " signature key(s).";
if (!keys.empty()) {
TFLITE_LOG(INFO) << "They are listed below: ";
}
for (const std::string* key : keys) {
TFLITE_LOG(INFO) << "-> Signature Key: " << *key;
}
return kTfLiteError;
}
int total_nodes = 0;
for (int i = 0; i < interpreter_->subgraphs_size(); ++i) {
total_nodes += static_cast<int>(interpreter_->subgraph(i)->nodes_size());
}
if (total_nodes > params_.Get<int32_t>("max_profiling_buffer_entries")) {
constexpr int kProfilingBufferHeadrooms = 512;
params_.Set<int32_t>("max_profiling_buffer_entries",
total_nodes + kProfilingBufferHeadrooms);
}
AddOwnedListener(MayCreateProfilingListener());
AddOwnedListener(std::unique_ptr<BenchmarkListener>(
new InterpreterStatePrinter(interpreter_.get())));
if (params_.Get<bool>("export_model_runtime_info")) {
AddOwnedListener(std::unique_ptr<BenchmarkListener>(
new ModelRuntimeInfoListener(interpreter_.get())));
}
interpreter_->SetAllowFp16PrecisionForFp32(params_.Get<bool>("allow_fp16"));
std::pair<TfLiteStatus, std::unique_ptr<BenchmarkInterpreterRunner>>
status_and_runner = BenchmarkInterpreterRunner::Create(
interpreter_.get(), params_.Get<std::string>("signature_to_run_for"));
TF_LITE_ENSURE_STATUS(status_and_runner.first);
interpreter_runner_ = std::move(status_and_runner.second);
const std::vector<int>& runner_inputs = interpreter_runner_->inputs();
if (!inputs_.empty()) {
    TFLITE_TOOLS_CHECK_EQ(inputs_.size(), runner_inputs.size())
        << "Inputs mismatch: inputs specified via flags #: " << inputs_.size()
        << ", model inputs #: " << runner_inputs.size();
}
for (int j = 0; j < inputs_.size(); ++j) {
const InputLayerInfo& input = inputs_[j];
int i = runner_inputs[j];
TfLiteTensor* t = interpreter_runner_->tensor(i);
if (input.name != t->name) {
TFLITE_LOG(WARN) << "Tensor # " << i << " is named " << t->name
<< " but flags call it " << input.name;
}
if (t->type != kTfLiteString && input.shape.size() != t->dims->size) {
TFLITE_LOG(ERROR) << "Input tensor #" << i << " should have "
<< t->dims->size << " dimensions!";
return kTfLiteError;
}
}
for (int j = 0; j < inputs_.size(); ++j) {
const InputLayerInfo& input = inputs_[j];
int i = runner_inputs[j];
TfLiteTensor* t = interpreter_runner_->tensor(i);
if (t->type != kTfLiteString) {
interpreter_runner_->ResizeInputTensor(i, input.shape);
}
}
owned_delegates_.clear();
std::unordered_set<int> checked_node_ids;
tools::ProvidedDelegateList delegate_providers(¶ms_);
auto created_delegates = delegate_providers.CreateAllRankedDelegates();
TFLITE_MAY_LOG(INFO, (created_delegates.size() >= 2))
<< "Going to apply " << created_delegates.size()
<< " delegates one after another.";
if (created_delegates.empty() &&
params_.Get<bool>("require_full_delegation")) {
TFLITE_LOG(ERROR) << "Disallowed CPU fallback detected.";
return kTfLiteError;
}
for (auto& created_delegate : created_delegates) {
const auto* delegate_provider = created_delegate.provider;
TfLiteDelegate* delegate = created_delegate.delegate.get();
TFLITE_TOOLS_CHECK(delegate != nullptr)
<< "The created delegate by the delegate provider should not be "
"nullptr!";
owned_delegates_.emplace_back(std::move(created_delegate.delegate));
if (interpreter_->ModifyGraphWithDelegate(delegate) != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to apply " << delegate_provider->GetName()
<< " delegate.";
return kTfLiteError;
} else {
int num_delegated_kernels = 0;
for (int i = 0; i < interpreter_runner_->execution_plan().size(); ++i) {
int node_id = interpreter_runner_->execution_plan()[i];
if (checked_node_ids.find(node_id) != checked_node_ids.end()) {
continue;
}
const TfLiteNode& node =
interpreter_runner_->node_and_registration(node_id)->first;
if (node.delegate != nullptr) {
num_delegated_kernels++;
checked_node_ids.insert(node_id);
}
}
bool fully_delegated =
(num_delegated_kernels == 1 &&
interpreter_runner_->execution_plan().size() == 1);
if (params_.Get<bool>("require_full_delegation") && !fully_delegated) {
TFLITE_LOG(ERROR) << "Disallowed CPU fallback detected.";
return kTfLiteError;
}
if (fully_delegated) {
TFLITE_LOG(INFO) << "Explicitly applied "
<< delegate_provider->GetName()
<< " delegate, and the model graph will be completely"
<< " executed by the delegate.";
} else if (num_delegated_kernels > 0) {
TFLITE_LOG(INFO) << "Explicitly applied "
<< delegate_provider->GetName()
<< " delegate, and the model graph will be partially"
<< " executed by the delegate w/ "
<< num_delegated_kernels << " delegate kernels.";
} else {
TFLITE_LOG(INFO) << "Though " << delegate_provider->GetName()
<< " delegate is explicitly applied, the model "
"graph will not be"
<< " executed by the delegate.";
}
}
}
if (interpreter_runner_->AllocateTensors() != kTfLiteOk) {
TFLITE_LOG(ERROR) << "Failed to allocate tensors!";
return kTfLiteError;
}
AddOwnedListener(
std::unique_ptr<BenchmarkListener>(new RuyProfileListener()));
AddOwnedListener(std::unique_ptr<BenchmarkListener>(
new OutputSaver(interpreter_runner_.get())));
return kTfLiteOk;
}
TfLiteStatus BenchmarkTfLiteModel::LoadModel() {
std::string fd_or_graph_path = params_.Get<std::string>("graph");
model_loader_ = tools::CreateModelLoaderFromPath(fd_or_graph_path);
if (!model_loader_) {
TFLITE_LOG(ERROR) << "Failed to initialize model loader with path "
<< fd_or_graph_path;
return kTfLiteError;
}
if (!model_loader_->Init()) {
TFLITE_LOG(ERROR) << "Failed to load model " << fd_or_graph_path;
return kTfLiteError;
}
model_ = tflite::FlatBufferModel::BuildFromBuffer(
reinterpret_cast<const char*>(
model_loader_->GetModel()->allocation()->base()),
model_loader_->GetModel()->allocation()->bytes());
TFLITE_LOG(INFO) << "Loaded model " << fd_or_graph_path;
return kTfLiteOk;
}
std::unique_ptr<tflite::OpResolver> BenchmarkTfLiteModel::GetOpResolver()
const {
tflite::ops::builtin::BuiltinOpResolver* resolver = nullptr;
if (params_.HasParam("use_xnnpack") &&
params_.HasValueSet<bool>("use_xnnpack") &&
!params_.Get<bool>("use_xnnpack")) {
resolver =
new tflite::ops::builtin::BuiltinOpResolverWithoutDefaultDelegates();
} else {
resolver = new tflite::ops::builtin::BuiltinOpResolver();
}
RegisterSelectedOps(resolver);
return std::unique_ptr<tflite::OpResolver>(resolver);
}
std::unique_ptr<BenchmarkListener>
BenchmarkTfLiteModel::MayCreateProfilingListener() const {
if (!params_.Get<bool>("enable_op_profiling")) return nullptr;
return std::unique_ptr<BenchmarkListener>(new ProfilingListener(
interpreter_.get(), params_.Get<int32_t>("max_profiling_buffer_entries"),
params_.Get<bool>("allow_dynamic_profiling_buffer_increase"),
params_.Get<std::string>("op_profiling_output_file"),
CreateProfileSummaryFormatter(
params_.Get<std::string>("op_profiling_output_mode"))));
}
TfLiteStatus BenchmarkTfLiteModel::RunImpl() {
return interpreter_runner_->Invoke();
}
}
} | #include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h"
#include <fcntl.h>
#include <sys/stat.h>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/tools/benchmark/benchmark_model.h"
#include "tensorflow/lite/tools/benchmark/benchmark_params.h"
#include "tensorflow/lite/tools/tool_params.h"
namespace tflite {
namespace benchmark {
namespace {
static constexpr char kModelPath[] =
"../tflite_mobilenet_float/"
"mobilenet_v1_1.0_224.tflite";
class TestBenchmarkListener : public BenchmarkListener {
public:
void OnBenchmarkEnd(const BenchmarkResults& results) override {
results_ = results;
}
BenchmarkResults results_;
};
TEST(BenchmarkTfLiteModelTest, GetModelSizeFromPathSucceeded) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
params.Set<std::string>("graph", kModelPath);
params.Set<int>("num_runs", 1);
params.Set<int>("warmup_runs", 0);
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
TestBenchmarkListener listener;
benchmark.AddListener(&listener);
benchmark.Run();
EXPECT_GE(listener.results_.model_size_mb(), 0);
}
TEST(BenchmarkTfLiteModelTest, GetModelSizeFromFileDescriptorSucceeded) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
int fd = open(kModelPath, O_RDONLY);
ASSERT_GE(fd, 0);
int model_offset = 0;
struct stat stat_buf = {0};
ASSERT_EQ(fstat(fd, &stat_buf), 0);
params.Set<std::string>("graph", absl::StrCat("fd:", fd, ":", model_offset,
":", stat_buf.st_size));
params.Set<int>("num_runs", 1);
params.Set<int>("warmup_runs", 0);
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
TestBenchmarkListener listener;
benchmark.AddListener(&listener);
benchmark.Run();
EXPECT_EQ(listener.results_.model_size_mb(), stat_buf.st_size / 1e6);
}
TEST(BenchmarkTfLiteModelTest, ResizeInputWithDelegate) {
BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams();
params.Set<std::string>("graph", kModelPath);
params.Set<bool>("use_xnnpack", true);
params.Set<std::string>("input_layer", "input_87");
params.Set<std::string>("input_layer_shape", "2,224,224,3");
BenchmarkTfLiteModel benchmark = BenchmarkTfLiteModel(std::move(params));
EXPECT_EQ(benchmark.Run(), kTfLiteOk);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_tflite_model.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/tools/benchmark/benchmark_tflite_model_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ab55e1df-78ae-42c3-b2c8-909a704646da | cpp | google/quiche | goaway_payload_decoder | quiche/http2/decoder/payload_decoders/goaway_payload_decoder.cc | quiche/http2/decoder/payload_decoders/goaway_payload_decoder_test.cc | #include "quiche/http2/decoder/payload_decoders/goaway_payload_decoder.h"
#include <stddef.h>
#include <ostream>
#include "absl/base/macros.h"
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/http2_structures.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace http2 {
std::ostream& operator<<(std::ostream& out,
GoAwayPayloadDecoder::PayloadState v) {
switch (v) {
case GoAwayPayloadDecoder::PayloadState::kStartDecodingFixedFields:
return out << "kStartDecodingFixedFields";
case GoAwayPayloadDecoder::PayloadState::kHandleFixedFieldsStatus:
return out << "kHandleFixedFieldsStatus";
case GoAwayPayloadDecoder::PayloadState::kReadOpaqueData:
return out << "kReadOpaqueData";
case GoAwayPayloadDecoder::PayloadState::kResumeDecodingFixedFields:
return out << "kResumeDecodingFixedFields";
}
int unknown = static_cast<int>(v);
QUICHE_BUG(http2_bug_167_1)
<< "Invalid GoAwayPayloadDecoder::PayloadState: " << unknown;
return out << "GoAwayPayloadDecoder::PayloadState(" << unknown << ")";
}
DecodeStatus GoAwayPayloadDecoder::StartDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2) << "GoAwayPayloadDecoder::StartDecodingPayload: "
<< state->frame_header();
QUICHE_DCHECK_EQ(Http2FrameType::GOAWAY, state->frame_header().type);
QUICHE_DCHECK_LE(db->Remaining(), state->frame_header().payload_length);
QUICHE_DCHECK_EQ(0, state->frame_header().flags);
state->InitializeRemainders();
payload_state_ = PayloadState::kStartDecodingFixedFields;
return ResumeDecodingPayload(state, db);
}
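// Payload decoding is a small state machine: first the fixed
// Http2GoAwayFields (last stream id and error code) are decoded, then any
// remaining bytes are forwarded to the listener as opaque debug data.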
DecodeStatus GoAwayPayloadDecoder::ResumeDecodingPayload(
FrameDecoderState* state, DecodeBuffer* db) {
QUICHE_DVLOG(2)
<< "GoAwayPayloadDecoder::ResumeDecodingPayload: remaining_payload="
<< state->remaining_payload() << ", db->Remaining=" << db->Remaining();
const Http2FrameHeader& frame_header = state->frame_header();
QUICHE_DCHECK_EQ(Http2FrameType::GOAWAY, frame_header.type);
QUICHE_DCHECK_LE(db->Remaining(), frame_header.payload_length);
QUICHE_DCHECK_NE(PayloadState::kHandleFixedFieldsStatus, payload_state_);
DecodeStatus status = DecodeStatus::kDecodeError;
size_t avail;
while (true) {
QUICHE_DVLOG(2)
<< "GoAwayPayloadDecoder::ResumeDecodingPayload payload_state_="
<< payload_state_;
switch (payload_state_) {
case PayloadState::kStartDecodingFixedFields:
status = state->StartDecodingStructureInPayload(&goaway_fields_, db);
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kHandleFixedFieldsStatus:
if (status == DecodeStatus::kDecodeDone) {
state->listener()->OnGoAwayStart(frame_header, goaway_fields_);
} else {
QUICHE_DCHECK((status == DecodeStatus::kDecodeInProgress &&
state->remaining_payload() > 0) ||
(status == DecodeStatus::kDecodeError &&
state->remaining_payload() == 0))
<< "\n status=" << status
<< "; remaining_payload=" << state->remaining_payload();
payload_state_ = PayloadState::kResumeDecodingFixedFields;
return status;
}
ABSL_FALLTHROUGH_INTENDED;
case PayloadState::kReadOpaqueData:
avail = db->Remaining();
if (avail > 0) {
state->listener()->OnGoAwayOpaqueData(db->cursor(), avail);
db->AdvanceCursor(avail);
state->ConsumePayload(avail);
}
if (state->remaining_payload() > 0) {
payload_state_ = PayloadState::kReadOpaqueData;
return DecodeStatus::kDecodeInProgress;
}
state->listener()->OnGoAwayEnd();
return DecodeStatus::kDecodeDone;
case PayloadState::kResumeDecodingFixedFields:
status = state->ResumeDecodingStructureInPayload(&goaway_fields_, db);
payload_state_ = PayloadState::kHandleFixedFieldsStatus;
continue;
}
QUICHE_BUG(http2_bug_167_2) << "PayloadState: " << payload_state_;
}
}
} | #include "quiche/http2/decoder/payload_decoders/goaway_payload_decoder.h"
#include <stddef.h>
#include <string>
#include "quiche/http2/decoder/http2_frame_decoder_listener.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/frame_parts.h"
#include "quiche/http2/test_tools/frame_parts_collector.h"
#include "quiche/http2/test_tools/http2_frame_builder.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/http2/test_tools/http2_structures_test_util.h"
#include "quiche/http2/test_tools/payload_decoder_base_test_util.h"
#include "quiche/http2/test_tools/random_decoder_test_base.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
class GoAwayPayloadDecoderPeer {
public:
static constexpr Http2FrameType FrameType() { return Http2FrameType::GOAWAY; }
static constexpr uint8_t FlagsAffectingPayloadDecoding() { return 0; }
};
namespace {
struct Listener : public FramePartsCollector {
void OnGoAwayStart(const Http2FrameHeader& header,
const Http2GoAwayFields& goaway) override {
QUICHE_VLOG(1) << "OnGoAwayStart header: " << header
<< "; goaway: " << goaway;
StartFrame(header)->OnGoAwayStart(header, goaway);
}
void OnGoAwayOpaqueData(const char* data, size_t len) override {
QUICHE_VLOG(1) << "OnGoAwayOpaqueData: len=" << len;
CurrentFrame()->OnGoAwayOpaqueData(data, len);
}
void OnGoAwayEnd() override {
QUICHE_VLOG(1) << "OnGoAwayEnd";
EndFrame()->OnGoAwayEnd();
}
void OnFrameSizeError(const Http2FrameHeader& header) override {
QUICHE_VLOG(1) << "OnFrameSizeError: " << header;
FrameError(header)->OnFrameSizeError(header);
}
};
class GoAwayPayloadDecoderTest
: public AbstractPayloadDecoderTest<GoAwayPayloadDecoder,
GoAwayPayloadDecoderPeer, Listener> {};
TEST_F(GoAwayPayloadDecoderTest, Truncated) {
auto approve_size = [](size_t size) {
return size != Http2GoAwayFields::EncodedSize();
};
Http2FrameBuilder fb;
fb.Append(Http2GoAwayFields(123, Http2ErrorCode::ENHANCE_YOUR_CALM));
EXPECT_TRUE(VerifyDetectsFrameSizeError(0, fb.buffer(), approve_size));
}
class GoAwayOpaqueDataLengthTests
: public GoAwayPayloadDecoderTest,
public ::testing::WithParamInterface<uint32_t> {
protected:
GoAwayOpaqueDataLengthTests() : length_(GetParam()) {
QUICHE_VLOG(1) << "################ length_=" << length_
<< " ################";
}
const uint32_t length_;
};
INSTANTIATE_TEST_SUITE_P(VariousLengths, GoAwayOpaqueDataLengthTests,
::testing::Values(0, 1, 2, 3, 4, 5, 6));
TEST_P(GoAwayOpaqueDataLengthTests, ValidLength) {
Http2GoAwayFields goaway;
Randomize(&goaway, RandomPtr());
std::string opaque_data = Random().RandString(length_);
Http2FrameBuilder fb;
fb.Append(goaway);
fb.Append(opaque_data);
Http2FrameHeader header(fb.size(), Http2FrameType::GOAWAY, RandFlags(),
RandStreamId());
set_frame_header(header);
FrameParts expected(header, opaque_data);
expected.SetOptGoaway(goaway);
ASSERT_TRUE(DecodePayloadAndValidateSeveralWays(fb.buffer(), expected));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/goaway_payload_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/payload_decoders/goaway_payload_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
25ccb5a1-cdaa-4877-a165-ec2b78f26bea | cpp | tensorflow/tensorflow | hlo_extractor | third_party/xla/xla/tools/hlo_extractor.cc | third_party/xla/xla/tools/hlo_extractor_test.cc | #include "xla/tools/hlo_extractor.h"
#ifndef _WIN32
#include <unistd.h>
#endif
#include <cstdint>
#include <deque>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/compilation_environments.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_verifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/test_utils.h"
#include "tsl/platform/status.h"
namespace xla {
namespace {
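// Visitor that clones every instruction reachable from `root_instruction`
// into a fresh HloModule. Instructions that lie on the boundary or are
// rejected by `extract_selector` are replaced with parameters, constants, or
// zero/random broadcasts, as chosen by `replace_type_selector`.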
class ExtractionVisitor : public ConstDfsHloVisitorWithDefault {
public:
explicit ExtractionVisitor(
const HloInstruction* root_instruction,
absl::flat_hash_set<const HloInstruction*>* boundary,
ExtractSelector extract_selector,
ReplaceTypeSelector replace_type_selector)
: root_instruction_(root_instruction),
old_module_(root_instruction->GetModule()),
module_(std::make_unique<HloModule>(
"extracted", config_,
std::make_unique<CompilationEnvironments>(
old_module_->comp_envs()))),
clone_context_(module_.get()),
boundary_(boundary),
extract_selector_(extract_selector),
replace_type_selector_(replace_type_selector) {
for (auto computation : old_module_->computations()) {
old_computations_to_builders_.insert(
{computation,
std::make_unique<HloComputation::Builder>(computation->name())});
}
for (auto computation : old_module_->computations()) {
parameter_numbers_[computation] = 0;
}
}
absl::Status HandleParameter(const HloInstruction* parameter) override {
return ReplaceWithParameter(parameter);
}
absl::Status DefaultAction(const HloInstruction* hlo) override {
    if ((boundary_ != nullptr && boundary_->contains(hlo)) ||
(extract_selector_ != nullptr && !extract_selector_(hlo))) {
if (replace_type_selector_ != nullptr) {
switch (replace_type_selector_(hlo)) {
case ReplaceType::kReplaceConst:
return ReplaceWithConstant(hlo);
case ReplaceType::kReplaceParam:
CHECK(hlo->parent() == root_instruction_->parent())
<< "Replacing instructions at non-entry computation with "
"parameters is not supported.";
return ReplaceWithParameter(hlo);
case ReplaceType::kReplaceZeroBroadcast:
return ReplaceWithConstantBroadcast(
hlo, ReplaceType::kReplaceZeroBroadcast);
case ReplaceType::kReplaceRandomBroadcast:
return ReplaceWithConstantBroadcast(
hlo, ReplaceType::kReplaceRandomBroadcast);
default:
QCHECK(false) << "Unsupported replacement type";
}
}
return ReplaceWithParameter(hlo);
}
std::vector<HloInstruction*> new_operands;
for (auto operand : hlo->operands()) {
new_operands.push_back(clone_context_.GetInstruction(operand));
}
auto instruction =
hlo->CloneWithNewOperands(hlo->shape(), new_operands, &clone_context_);
auto it = old_computations_to_builders_.find(hlo->parent());
CHECK(it != old_computations_to_builders_.end());
auto builder = it->second.get();
builder->AddInstruction(std::move(instruction));
if (hlo->IsRoot() && hlo != root_instruction_) {
CHECK(clone_context_.FindComputation(hlo->parent()) == nullptr);
auto new_computation = module_->AddEmbeddedComputation(builder->Build());
clone_context_.MapComputation(hlo->parent(), new_computation);
}
return absl::OkStatus();
}
absl::Status FinishVisit(const HloInstruction* ) override {
auto new_entry_computation = module_->AddEntryComputation(
old_computations_to_builders_.at(root_instruction_->parent())->Build());
clone_context_.MapComputation(root_instruction_->parent(),
new_entry_computation);
for (auto computation : old_module_->MakeComputationPostOrder()) {
for (auto old_instruction : computation->MakeInstructionPostOrder()) {
if (auto new_instruction =
clone_context_.FindInstruction(old_instruction)) {
new_instruction->SetAndSanitizeName(old_instruction->name());
}
}
}
for (HloInstruction* instruction : extra_created_instructions_) {
module_->SetAndUniquifyInstrName(instruction, instruction->name());
}
return absl::OkStatus();
}
HloModule* module() { return module_.get(); }
std::unique_ptr<HloModule> ConsumeModule() { return std::move(module_); }
private:
absl::Status ReplaceWithConstant(const HloInstruction* hlo) {
absl::StatusOr<Literal> literal_status = MakeFakeLiteral(hlo->shape());
TF_CHECK_OK(literal_status.status());
auto new_const =
HloInstruction::CreateConstant(std::move(literal_status.value()));
clone_context_.MapInstruction(hlo, new_const.get());
auto it = old_computations_to_builders_.find(hlo->parent());
CHECK(it != old_computations_to_builders_.end());
auto builder = it->second.get();
builder->AddInstruction(std::move(new_const));
return absl::OkStatus();
}
absl::Status ReplaceWithParameter(const HloInstruction* hlo) {
CHECK(parameter_numbers_.contains(hlo->parent()));
auto new_parameter = HloInstruction::CreateParameter(
parameter_numbers_.at(hlo->parent())++, hlo->shape(), hlo->name());
clone_context_.MapInstruction(hlo, new_parameter.get());
CHECK(old_computations_to_builders_.contains(hlo->parent()));
auto builder = old_computations_to_builders_[hlo->parent()].get();
builder->AddInstruction(std::move(new_parameter));
return absl::OkStatus();
}
HloInstruction* ReplaceWithConstantBroadcastHelper(
const Shape& shape, HloComputation::Builder* builder,
ReplaceType replace_type) {
if (shape.IsTuple()) {
std::vector<HloInstruction*> tuple_operands;
for (const auto& subshape : shape.tuple_shapes()) {
tuple_operands.push_back(ReplaceWithConstantBroadcastHelper(
subshape, builder, replace_type));
}
auto zero_tuple =
builder->AddInstruction(HloInstruction::CreateTuple(tuple_operands));
extra_created_instructions_.push_back(zero_tuple);
return zero_tuple;
} else {
Shape constant_shape = ShapeUtil::MakeShape(shape.element_type(), {});
HloInstruction* constant_instruction;
CHECK(replace_type == ReplaceType::kReplaceZeroBroadcast ||
replace_type == ReplaceType::kReplaceRandomBroadcast);
if (replace_type == ReplaceType::kReplaceZeroBroadcast) {
constant_instruction =
builder->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(constant_shape.element_type())));
} else {
absl::StatusOr<Literal> literal_status =
MakeFakeLiteral(constant_shape);
TF_CHECK_OK(literal_status.status());
constant_instruction = builder->AddInstruction(
HloInstruction::CreateConstant(std::move(literal_status.value())));
}
extra_created_instructions_.push_back(constant_instruction);
auto broadcast_constant_instruction = builder->AddInstruction(
HloInstruction::CreateBroadcast(shape, constant_instruction, {}));
extra_created_instructions_.push_back(broadcast_constant_instruction);
return broadcast_constant_instruction;
}
}
absl::Status ReplaceWithConstantBroadcast(const HloInstruction* hlo,
ReplaceType replace_type) {
CHECK(replace_type == ReplaceType::kReplaceZeroBroadcast ||
replace_type == ReplaceType::kReplaceRandomBroadcast);
CHECK(old_computations_to_builders_.contains(hlo->parent()));
auto builder = old_computations_to_builders_[hlo->parent()].get();
HloInstruction* zero_broadcast =
ReplaceWithConstantBroadcastHelper(hlo->shape(), builder, replace_type);
clone_context_.MapInstruction(hlo, zero_broadcast);
return absl::OkStatus();
}
const HloInstruction* root_instruction_;
HloModule* old_module_;
HloModuleConfig config_;
std::unique_ptr<HloModule> module_;
HloCloneContext clone_context_;
absl::flat_hash_map<const HloComputation*,
std::unique_ptr<HloComputation::Builder>>
old_computations_to_builders_;
absl::flat_hash_map<const HloComputation*, int> parameter_numbers_;
absl::flat_hash_set<const HloInstruction*>* boundary_;
ExtractSelector extract_selector_;
ReplaceTypeSelector replace_type_selector_;
std::vector<HloInstruction*> extra_created_instructions_;
};
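// Breadth-first walk over operands starting from `root`; instructions more
// than `limit` hops away are recorded in `boundary`.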
void ComputeBoundary(const HloInstruction* root, int64_t limit,
absl::flat_hash_set<const HloInstruction*>* boundary) {
std::deque<const HloInstruction*> worklist;
absl::flat_hash_map<const HloInstruction*, int64_t> visited;
worklist.push_back(root);
visited.emplace(root, 0);
while (!worklist.empty()) {
auto hlo = worklist.front();
worklist.pop_front();
int64_t hops = visited[hlo];
if (hops > limit) {
boundary->insert(hlo);
continue;
}
for (const HloInstruction* operand : hlo->operands()) {
if (visited.count(operand)) {
continue;
}
worklist.push_back(operand);
visited.emplace(operand, hops + 1);
}
}
}
}
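// Extracts the subgraph rooted at `instruction`, optionally limited to
// `height` hops (a finite height cannot be combined with cross_computation).
// A second extraction pass over the result drops instructions that are no
// longer reachable from the new root, and the final module is run through
// HloVerifier.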
std::unique_ptr<HloModule> ExtractModule(
const HloInstruction* instruction, int64_t height,
ExtractSelector extract_selector, ReplaceTypeSelector replace_type_selector,
bool cross_computation) {
QCHECK(height == -1 || !cross_computation)
<< "Boundary cannnot be calculated across the computations.";
absl::flat_hash_set<const HloInstruction*> boundary;
if (height != -1) {
ComputeBoundary(instruction, height, &boundary);
}
ExtractionVisitor visitor(instruction, &boundary, extract_selector,
replace_type_selector);
  TF_CHECK_OK(instruction->Accept(&visitor, /*call_finish_visit=*/true,
                                  /*ignore_control_predecessors=*/false,
                                  cross_computation));
  ExtractionVisitor cleanup_visitor(
      visitor.module()->entry_computation()->root_instruction(),
      /*boundary=*/nullptr,
      /*extract_selector=*/nullptr,
      /*replace_type_selector=*/nullptr);
  TF_CHECK_OK(visitor.module()->entry_computation()->root_instruction()->Accept(
      &cleanup_visitor, /*call_finish_visit=*/true,
      /*ignore_control_predecessors=*/false,
      /*cross_computation=*/false));
  HloVerifier verifier(/*layout_sensitive=*/false,
                       /*allow_mixed_precision=*/true);
TF_CHECK_OK(verifier.Run(cleanup_visitor.module()).status());
return cleanup_visitor.ConsumeModule();
}
} | #include "xla/tools/hlo_extractor.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace op = testing::opcode_matchers;
using HloExtractorTest = HloTestBase;
TEST_F(HloExtractorTest, ExtractTopLevel) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
param.0 = f32[4]{0} parameter(0)
negate = f32[4]{0} negate(f32[4]{0} param.0)
ROOT exp = f32[4]{0} exponential(f32[4]{0} negate)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "exp"));
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Exp(op::Negate(op::Parameter(0))));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "exp"), 0);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Exp(op::Parameter(0)));
}
{
auto extracted_module = ExtractModule(
FindInstruction(hlo_module.get(), "negate"), 0);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Negate(op::Parameter(0)));
}
}
TEST_F(HloExtractorTest, ExtractDag) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
param.0 = f32[4]{0} parameter(0)
tanh = f32[4]{0} tanh(f32[4]{0} param.0)
negate = f32[4]{0} negate(f32[4]{0} tanh)
exp = f32[4]{0} exponential(f32[4]{0} negate)
ROOT add = f32[4]{0} add(f32[4]{0} negate, f32[4]{0} exp)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "exp"));
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Exp(op::Negate(op::Tanh(op::Parameter(0)))));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 0);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Parameter(0), op::Parameter(1)));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Negate(op::Parameter(0)),
op::Exp(op::Negate(op::Parameter(0)))));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 2);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Negate(op::Tanh(op::Parameter(0))),
op::Exp(op::Negate(op::Tanh(op::Parameter(0))))));
}
}
TEST_F(HloExtractorTest, ExtractWithConstant) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
p = f32[4]{0} parameter(0)
tanh = f32[4]{0} tanh(p)
c = f32[4]{0} constant({1, 2, 3, 4})
ROOT add = f32[4]{0} add(tanh, c)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 0);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Parameter(0), op::Parameter(1)));
}
{
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Tanh(op::Parameter(0)), op::Constant()));
}
}
TEST_F(HloExtractorTest, ExtractFromMultipleComputation) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.1 = f32[] constant(1)
c.2 = f32[] constant(2)
add.0 = f32[] add(c.1, c.2)
c.3 = f32[] constant(4)
ROOT ret = f32[] subtract(add.0, c.3)
}
ENTRY axpy_computation {
alpha = f32[] call(), to_apply=calculate_alpha
broadcast = f32[10] broadcast(alpha), dimensions={}
x = f32[10] parameter(0)
ax = f32[10] multiply(broadcast, x)
y = f32[10] parameter(1)
ROOT add.1 = f32[10] add(ax, y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* inst = FindInstruction(hlo_module.get(), "add.0");
EXPECT_THAT(inst, op::Add());
auto extract_selector = [&inst](const HloInstruction* hlo_inst) {
return hlo_inst != inst;
};
{
auto replace_type_selector = [](const HloInstruction* hlo_inst) {
return ReplaceType::kReplaceConst;
};
auto extracted_module =
ExtractModule(hlo_module->entry_computation()->root_instruction(),
                      /*height=*/-1, extract_selector,
                      replace_type_selector,
                      /*cross_computation=*/true);
EXPECT_EQ(extracted_module->computation_count(), 2);
auto calculate_alpha_root_instruction =
FindComputation(extracted_module.get(), "calculate_alpha")
->root_instruction();
EXPECT_THAT(calculate_alpha_root_instruction,
op::Subtract(op::Constant(), op::Constant()));
}
{
auto replace_type_selector = [](const HloInstruction* hlo_inst) {
return ReplaceType::kReplaceZeroBroadcast;
};
auto extracted_module =
ExtractModule(hlo_module->entry_computation()->root_instruction(),
                      /*height=*/-1, extract_selector,
                      replace_type_selector,
                      /*cross_computation=*/true);
EXPECT_EQ(extracted_module->computation_count(), 2);
auto calculate_alpha_root_instruction =
FindComputation(extracted_module.get(), "calculate_alpha")
->root_instruction();
EXPECT_THAT(calculate_alpha_root_instruction,
op::Subtract(op::Broadcast(op::Constant()), op::Constant()));
}
}
TEST_F(HloExtractorTest, HloSelector) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.1 = f32[] constant(1)
c.2 = f32[] constant(2)
c.3 = f32[] add(c.1, c.2)
c.4 = f32[] constant(4)
ROOT ret = f32[] multiply(c.4, c.3)
}
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] call(), to_apply=calculate_alpha
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add = f32[10] add(x, p.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* inst =
FindInstruction(hlo_module.get(), HloOpcode::kSubtract);
EXPECT_NE(inst, nullptr);
EXPECT_THAT(inst, op::Subtract(op::Multiply(), op::Add()));
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kCall;
};
auto extracted_module = ExtractModule(inst, -1, hlo_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(op::Broadcast(op::Parameter()),
op::Parameter()),
op::Add(op::Parameter(), op::Parameter())));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kBroadcast;
};
auto extracted_module = ExtractModule(inst, 2, hlo_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(op::Parameter(), op::Parameter()),
op::Add(op::Parameter(), op::Parameter())));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kBroadcast;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceConst;
};
auto extracted_module =
ExtractModule(inst, 2, hlo_selector, replace_type_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(op::Constant(), op::Parameter()),
op::Add(op::Parameter(), op::Parameter())));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kAdd;
};
auto extracted_module = ExtractModule(inst, -1, hlo_selector);
EXPECT_EQ(extracted_module->computation_count(), 2);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kSubtract;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceConst;
};
auto extracted_module =
ExtractModule(hlo_module->entry_computation()->root_instruction(),
2, hlo_selector, replace_type_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Constant(), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
if (hlo_inst->opcode() != HloOpcode::kBroadcast &&
hlo_inst->opcode() != HloOpcode::kAdd) {
return true;
}
return false;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
if (hlo_inst->opcode() == HloOpcode::kBroadcast) {
return ReplaceType::kReplaceConst;
}
return ReplaceType::kReplaceParam;
};
auto extracted_module =
ExtractModule(inst, 2, hlo_selector, replace_type_selector);
EXPECT_EQ(extracted_module->computation_count(), 1);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Subtract(op::Multiply(op::Constant(), op::Parameter()),
op::Parameter()));
}
}
TEST_F(HloExtractorTest, ReplaceTupleWithConstant) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
param.0 = f32[4]{0} parameter(0)
tuple.0 = (f32[4]{0}, f32[4]{0}) rng-bit-generator(f32[4]{0} param.0), algorithm=rng_default
negate = f32[4]{0} negate(f32[4]{0} param.0)
tuple.1 = ((f32[4]{0}, f32[4]{0}), f32[4]{0}) tuple(tuple.0, negate)
element = f32[4]{0} get-tuple-element(((f32[4]{0}, f32[4]{0}), f32[4]{0}) tuple.1), index=1
ROOT add = f32[4]{0} add(element, param.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kTuple;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceConst;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::GetTupleElement(op::Constant()), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kGetTupleElement;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceZeroBroadcast;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Broadcast(), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kGetTupleElement;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceRandomBroadcast;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
op::Add(op::Broadcast(), op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kTuple;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceZeroBroadcast;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(
extracted_module->entry_computation()->root_instruction(),
op::Add(op::GetTupleElement(op::Tuple(op::Tuple(), op::Broadcast())),
op::Parameter()));
}
{
auto hlo_selector = [](const HloInstruction* hlo_inst) -> bool {
return hlo_inst->opcode() != HloOpcode::kTuple;
};
auto replace_type_selector =
[](const HloInstruction* hlo_inst) -> ReplaceType {
return ReplaceType::kReplaceRandomBroadcast;
};
auto extracted_module =
ExtractModule(FindInstruction(hlo_module.get(), "add"),
-1, hlo_selector, replace_type_selector);
EXPECT_THAT(
extracted_module->entry_computation()->root_instruction(),
op::Add(op::GetTupleElement(op::Tuple(op::Tuple(), op::Broadcast())),
op::Parameter()));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_extractor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tools/hlo_extractor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
089abd1e-d29d-43a7-89fe-1428446e1f3b | cpp | tensorflow/tensorflow | densify | tensorflow/lite/kernels/densify.cc | tensorflow/lite/kernels/densify_test.cc | #include "tensorflow/lite/kernels/internal/reference/densify.h"
#include <stddef.h>
#include <cstdint>
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace densify {
struct OpContext {
OpContext(TfLiteContext* context, TfLiteNode* node) {
input = GetInput(context, node, 0);
output = GetOutput(context, node, 0);
}
const TfLiteTensor* input;
TfLiteTensor* output;
};
struct OpData {
bool dense_weights_initialized;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* op_data = new OpData();
op_data->dense_weights_initialized = false;
return op_data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
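// Prepare: expects a single constant sparse input, mirrors its type and shape
// onto the output, and marks the output as persistent arena memory so the
// densified weights are kept across invocations.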
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpContext op_context(context, node);
TF_LITE_ENSURE(context, op_context.input->type != kTfLiteString);
TF_LITE_ENSURE(context, IsConstantTensor(op_context.input));
TF_LITE_ENSURE(context, op_context.input->sparsity != nullptr);
op_context.output->type = op_context.input->type;
op_context.output->name = "Densify_output";
op_context.output->allocation_type = kTfLiteArenaRwPersistent;
return context->ResizeTensor(context, op_context.output,
TfLiteIntArrayCopy(op_context.input->dims));
}
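// Materializes the constant sparse input into the dense output once;
// subsequent invocations reuse the cached dense weights.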
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
OpContext op_context(context, node);
if (op_data->dense_weights_initialized) {
return kTfLiteOk;
}
switch (op_context.input->type) {
case kTfLiteFloat32:
reference_ops::Densify(op_context.input->sparsity,
GetTensorShape(op_context.input),
GetTensorData<float>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<float>(op_context.output), context);
break;
case kTfLiteFloat16:
reference_ops::Densify(
op_context.input->sparsity, GetTensorShape(op_context.input),
GetTensorData<Eigen::half>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<Eigen::half>(op_context.output), context);
break;
case kTfLiteInt8:
reference_ops::Densify(op_context.input->sparsity,
GetTensorShape(op_context.input),
GetTensorData<int8_t>(op_context.input),
GetTensorShape(op_context.output),
GetTensorData<int8_t>(op_context.output), context);
break;
default:
TF_LITE_KERNEL_LOG(context, "Type %d not supported.",
op_context.input->type);
return kTfLiteError;
}
op_data->dense_weights_initialized = true;
return kTfLiteOk;
}
}
TfLiteRegistration* Register_DENSIFY() {
static TfLiteRegistration r = {densify::Init, densify::Free, densify::Prepare,
densify::Eval};
return &r;
}
}
}
} | #include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/memory/memory.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_DENSIFY();
}
}
namespace {
using ::testing::ElementsAreArray;
template <typename T>
class DensifyOpModel : public SingleOpModel {
public:
DensifyOpModel(const TensorData& input, const std::vector<T>& input_data,
int version = 1) {
input_ = AddConstSparseInput(input, input_data);
output_ = AddOutput({input.type, input.shape});
SetBuiltinOp(BuiltinOperator_DENSIFY, BuiltinOptions_DensifyOptions,
CreateDensifyOptions(builder_).Union());
resolver_ = std::make_unique<SingleOpResolver>(
BuiltinOperator_DENSIFY, ops::builtin::Register_DENSIFY(), version);
BuildInterpreter({input.shape}, -1,
false,
false, true);
}
std::vector<T> GetInput() { return ExtractVector<T>(input_); }
std::vector<T> GetOutput() { return ExtractVector<T>(output_); }
private:
int input_;
int output_;
};
TEST(DensifyOpTest, Float) {
std::vector<float> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<float> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_FLOAT32;
input.shape = {3, 4};
input.traversal_order = {0, 1};
input.format = {kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<float> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
TEST(DensifyOpTest, Float3D) {
std::vector<float> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<float> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_FLOAT32;
input.shape = {3, 2, 2};
input.traversal_order = {0, 1, 2};
input.format = {kTfLiteDimDense, kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<float> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
TEST(DensifyOpTest, Int8) {
std::vector<int8_t> dense_values = {6, 0, 9, 8, 0, 0, 0, 0, 5, 0, 0, 7};
std::vector<int8_t> sparse_values = {6, 9, 8, 5, 7};
TensorData input = {};
input.type = TensorType_INT8;
input.shape = {3, 4};
input.traversal_order = {0, 1};
input.format = {kTfLiteDimDense, kTfLiteDimSparseCSR};
DensifyOpModel<int8_t> m(input, dense_values);
ASSERT_EQ(m.Invoke(), kTfLiteOk);
EXPECT_THAT(m.GetInput(), ElementsAreArray(sparse_values));
EXPECT_THAT(m.GetOutput(), ElementsAreArray(dense_values));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/densify.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/densify_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7244c624-0159-4253-9a5a-0c37c152e4d2 | cpp | tensorflow/tensorflow | fusion_wrapper | third_party/xla/xla/service/gpu/transforms/fusion_wrapper.cc | third_party/xla/xla/service/gpu/transforms/fusion_wrapper_test.cc | #include "xla/service/gpu/transforms/fusion_wrapper.h"
#include <functional>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> FusionWrapper::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto instructions = module->entry_computation()->MakeInstructionPostOrder();
bool changed = false;
std::function<absl::Status(HloInstruction*)> handle_instruction;
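  // Recursively visits the computations called by kWhile/kConditional and
  // wraps each supported instruction below in its own single-instruction
  // fusion named "wrapped_<opcode>".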
handle_instruction = [&](HloInstruction* instruction) -> absl::Status {
switch (instruction->opcode()) {
case HloOpcode::kConditional:
case HloOpcode::kWhile:
for (auto* computation : instruction->called_computations()) {
for (auto* inner_instruction :
computation->MakeInstructionPostOrder()) {
TF_RETURN_IF_ERROR(handle_instruction(inner_instruction));
}
}
break;
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAnd:
case HloOpcode::kAtan2:
case HloOpcode::kBitcastConvert:
case HloOpcode::kBroadcast:
case HloOpcode::kCeil:
case HloOpcode::kCbrt:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConcatenate:
case HloOpcode::kConvolution:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kDivide:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kGather:
case HloOpcode::kImag:
case HloOpcode::kIota:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kMap:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kPad:
case HloOpcode::kPopulationCount:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kReshape:
case HloOpcode::kReduce:
case HloOpcode::kReducePrecision:
case HloOpcode::kReduceWindow:
case HloOpcode::kRemainder:
case HloOpcode::kReverse:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kScatter:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSlice:
case HloOpcode::kSqrt:
case HloOpcode::kSubtract:
case HloOpcode::kStochasticConvert:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kTranspose:
case HloOpcode::kXor: {
auto* computation = instruction->parent();
auto* fusion_instruction =
computation->AddInstruction(HloInstruction::CreateFusion(
instruction->shape(),
ChooseFusionKind(*instruction, *instruction), instruction));
const absl::string_view wrapped_opcode =
HloOpcodeString(instruction->opcode());
module->SetAndUniquifyInstrName(
fusion_instruction, absl::StrCat("wrapped_", wrapped_opcode));
module->SetAndUniquifyComputationName(
fusion_instruction->fused_instructions_computation(),
absl::StrCat("wrapped_", wrapped_opcode, "_computation"));
if (module->has_schedule()) {
module->schedule().replace_instruction(computation, instruction,
fusion_instruction);
}
TF_RETURN_IF_ERROR(
fusion_instruction->CopyAllControlDepsFrom(instruction));
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(fusion_instruction));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction));
changed = true;
break;
}
default:
break;
}
return absl::OkStatus();
};
for (auto* instruction : instructions) {
TF_RETURN_IF_ERROR(handle_instruction(instruction));
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/fusion_wrapper.h"
#include <optional>
#include <gtest/gtest.h>
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
class FusionWrapperTest : public HloTestBase {};
TEST_F(FusionWrapperTest, ConvolutionWorks) {
RunAndFilecheckHloRewrite(R"(HloModule TestModule
ENTRY TestComputation {
input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0)
kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1)
ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4}
})",
FusionWrapper(), R"(
}
TEST_F(FusionWrapperTest, SimpleOp) {
RunAndFilecheckHloRewrite(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[30,41] parameter(0)
p1 = f16[30,41] parameter(1)
ROOT result = f16[60, 41] concatenate(p0, p1), dimensions={0}
})",
FusionWrapper(), R"(
}
TEST_F(FusionWrapperTest, Scatter) {
RunAndFilecheckHloRewrite(R"(
HloModule ScatterIntoScalar
update_s32 {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
parameter.1 = s32[] parameter(0)
parameter.2 = s32[0]{0} parameter(1)
parameter.3 = s32[] parameter(2)
ROOT scatter_ScatterIntoScalar = s32[] scatter(parameter.1, parameter.2, parameter.3),
update_window_dims={},
inserted_window_dims={},
scatter_dims_to_operand_dims={},
index_vector_dim=0,
to_apply=update_s32
})",
FusionWrapper(), R"(
}
TEST_F(FusionWrapperTest, ControlDependency) {
RunAndFilecheckHloRewrite(R"(
HloModule TestModule
fusion {
ROOT param = f32[] parameter(0)
}
ENTRY main {
param = f32[] parameter(0)
fusion = f32[] fusion(param), kind=kLoop, calls=fusion
constant_one = f32[] constant(1)
ROOT add = f32[] add(param, constant_one), control-predecessors={fusion}
})",
FusionWrapper(), R"(
}
TEST_F(FusionWrapperTest, While) {
RunAndFilecheckHloRewrite(R"(
HloModule While
%body {
%parameter.5 = (f32[5]{0}) parameter(0)
%constant_8 = f32[] constant(0)
%broadcast.9 = f32[5]{0} broadcast(f32[] %constant_8), dimensions={}
ROOT %tuple.2 = (f32[5]{0}) tuple(f32[5]{0} %broadcast.9)
}
%cond {
%parameter.12 = (f32[5]{0}) parameter(0)
ROOT %constant_1 = pred[] constant(false)
}
ENTRY %main (parameter.1: f32[5]) -> (f32[5]) {
%parameter.1 = f32[5]{0} parameter(0)
%copy.3 = f32[5]{0} copy(f32[5]{0} %parameter.1)
%tuple = (f32[5]{0}) tuple(f32[5]{0} %copy.3)
ROOT %while.19 = (f32[5]{0}) while((f32[5]{0}) %tuple), condition=%cond, body=%body
})",
FusionWrapper(), R"(
}
TEST_F(FusionWrapperTest, WhileInFusion) {
RunAndFilecheckHloRewrite(R"(
HloModule While
%body {
%parameter.5 = (f32[5]{0}) parameter(0)
%constant_8 = f32[] constant(0)
%broadcast.9 = f32[5]{0} broadcast(f32[] %constant_8), dimensions={}
ROOT %tuple.2 = (f32[5]{0}) tuple(f32[5]{0} %broadcast.9)
}
%cond {
%parameter.12 = (f32[5]{0}) parameter(0)
ROOT %constant_1 = pred[] constant(false)
}
%fusion {
%parameter.1 = f32[5]{0} parameter(0)
%copy.3 = f32[5]{0} copy(f32[5]{0} %parameter.1)
%tuple = (f32[5]{0}) tuple(f32[5]{0} %copy.3)
ROOT %while.19 = (f32[5]{0}) while((f32[5]{0}) %tuple), condition=%cond, body=%body
}
ENTRY %main (parameter.1: f32[5]) -> (f32[5]) {
%parameter.1 = f32[5]{0} parameter(0)
ROOT %fusion = (f32[5]{0}) fusion(f32[5]{0} %parameter.1), kind=kLoop, calls=%fusion
})",
FusionWrapper(),
std::nullopt);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5f08271a-5063-4bbc-afe0-338a7720baa9 | cpp | tensorflow/tensorflow | accuracy_utils | tensorflow/examples/speech_commands/accuracy_utils.cc | tensorflow/examples/speech_commands/accuracy_utils_test.cc | #include "tensorflow/examples/speech_commands/accuracy_utils.h"
#include <fstream>
#include <iomanip>
#include <unordered_set>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/numbers.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
Status ReadGroundTruthFile(const string& file_name,
std::vector<std::pair<string, int64_t>>* result) {
std::ifstream file(file_name);
if (!file) {
return tensorflow::errors::NotFound("Ground truth file '", file_name,
"' not found.");
}
result->clear();
string line;
while (std::getline(file, line)) {
std::vector<string> pieces = tensorflow::str_util::Split(line, ',');
if (pieces.size() != 2) {
continue;
}
float timestamp;
    if (!tensorflow::strings::safe_strtof(pieces[1], &timestamp)) {
return tensorflow::errors::InvalidArgument(
"Wrong number format at line: ", line);
}
string label = pieces[0];
auto timestamp_int64 = static_cast<int64_t>(timestamp);
result->push_back({label, timestamp_int64});
}
std::sort(result->begin(), result->end(),
[](const std::pair<string, int64>& left,
const std::pair<string, int64>& right) {
return left.second < right.second;
});
return absl::OkStatus();
}
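// Matches each found word against ground truth entries within
// +/- time_tolerance_ms, counting correct, wrong, and false-positive
// detections. Only ground truth up to up_to_time_ms is considered
// (pass -1 for no limit).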
void CalculateAccuracyStats(
const std::vector<std::pair<string, int64_t>>& ground_truth_list,
const std::vector<std::pair<string, int64_t>>& found_words,
int64_t up_to_time_ms, int64_t time_tolerance_ms,
StreamingAccuracyStats* stats) {
int64_t latest_possible_time;
if (up_to_time_ms == -1) {
latest_possible_time = std::numeric_limits<int64_t>::max();
} else {
latest_possible_time = up_to_time_ms + time_tolerance_ms;
}
stats->how_many_ground_truth_words = 0;
for (const std::pair<string, int64_t>& ground_truth : ground_truth_list) {
const int64_t ground_truth_time = ground_truth.second;
if (ground_truth_time > latest_possible_time) {
break;
}
++stats->how_many_ground_truth_words;
}
stats->how_many_false_positives = 0;
stats->how_many_correct_words = 0;
stats->how_many_wrong_words = 0;
std::unordered_set<int64_t> has_ground_truth_been_matched;
for (const std::pair<string, int64_t>& found_word : found_words) {
const string& found_label = found_word.first;
const int64_t found_time = found_word.second;
const int64_t earliest_time = found_time - time_tolerance_ms;
const int64_t latest_time = found_time + time_tolerance_ms;
bool has_match_been_found = false;
for (const std::pair<string, int64_t>& ground_truth : ground_truth_list) {
const int64_t ground_truth_time = ground_truth.second;
if ((ground_truth_time > latest_time) ||
(ground_truth_time > latest_possible_time)) {
break;
}
if (ground_truth_time < earliest_time) {
continue;
}
const string& ground_truth_label = ground_truth.first;
if ((ground_truth_label == found_label) &&
(has_ground_truth_been_matched.count(ground_truth_time) == 0)) {
++stats->how_many_correct_words;
} else {
++stats->how_many_wrong_words;
}
has_ground_truth_been_matched.insert(ground_truth_time);
has_match_been_found = true;
break;
}
if (!has_match_been_found) {
++stats->how_many_false_positives;
}
}
stats->how_many_ground_truth_matched = has_ground_truth_been_matched.size();
}
void PrintAccuracyStats(const StreamingAccuracyStats& stats) {
if (stats.how_many_ground_truth_words == 0) {
LOG(INFO) << "No ground truth yet, " << stats.how_many_false_positives
<< " false positives";
} else {
float any_match_percentage =
(stats.how_many_ground_truth_matched * 100.0f) /
stats.how_many_ground_truth_words;
float correct_match_percentage = (stats.how_many_correct_words * 100.0f) /
stats.how_many_ground_truth_words;
float wrong_match_percentage = (stats.how_many_wrong_words * 100.0f) /
stats.how_many_ground_truth_words;
float false_positive_percentage =
(stats.how_many_false_positives * 100.0f) /
stats.how_many_ground_truth_words;
LOG(INFO) << std::setprecision(1) << std::fixed << any_match_percentage
<< "% matched, " << correct_match_percentage << "% correctly, "
<< wrong_match_percentage << "% wrongly, "
<< false_positive_percentage << "% false positives ";
}
}
} | #include "tensorflow/examples/speech_commands/accuracy_utils.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
TEST(AccuracyUtilsTest, ReadGroundTruthFile) {
string file_name = tensorflow::io::JoinPath(tensorflow::testing::TmpDir(),
"ground_truth.txt");
string file_data = "a,10\nb,12\n";
TF_ASSERT_OK(WriteStringToFile(Env::Default(), file_name, file_data));
std::vector<std::pair<string, int64_t>> ground_truth;
TF_ASSERT_OK(ReadGroundTruthFile(file_name, &ground_truth));
ASSERT_EQ(2, ground_truth.size());
EXPECT_EQ("a", ground_truth[0].first);
EXPECT_EQ(10, ground_truth[0].second);
EXPECT_EQ("b", ground_truth[1].first);
EXPECT_EQ(12, ground_truth[1].second);
}
TEST(AccuracyUtilsTest, CalculateAccuracyStats) {
StreamingAccuracyStats stats;
CalculateAccuracyStats({{"a", 1000}, {"b", 9000}},
{{"a", 1200}, {"b", 5000}, {"a", 8700}}, 10000, 500,
&stats);
EXPECT_EQ(2, stats.how_many_ground_truth_words);
EXPECT_EQ(2, stats.how_many_ground_truth_matched);
EXPECT_EQ(1, stats.how_many_false_positives);
EXPECT_EQ(1, stats.how_many_correct_words);
EXPECT_EQ(1, stats.how_many_wrong_words);
}
TEST(AccuracyUtilsTest, PrintAccuracyStats) {
StreamingAccuracyStats stats;
PrintAccuracyStats(stats);
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/speech_commands/accuracy_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/examples/speech_commands/accuracy_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d5d262aa-e339-407f-8095-0640e0dd0715 | cpp | tensorflow/tensorflow | offset_counter_helper | tensorflow/python/framework/offset_counter_helper.cc | tensorflow/python/framework/offset_counter_helper_test.cc | #include "tensorflow/python/framework/offset_counter_helper.h"
#include <cstdint>
#include <fstream>
#include <string>
#include "absl/strings/string_view.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/regexp.h"
#include "tsl/platform/strcat.h"
namespace tensorflow {
absl::Status FindOpRegistationFromFile(absl::string_view filename,
OpRegOffsets& op_reg_offsets) {
static constexpr LazyRE2 reg_pattern = {
R"regex((REGISTER_OP)\("([\w>]+)"\))regex"};
std::ifstream f(std::string{filename});
if (f.bad()) {
return tsl::errors::IOError(
tsl::strings::StrCat("Cannot open file: ", filename), errno);
}
std::string line;
absl::string_view reg_keyword, op_name;
uint32_t offsets = 0;
while (std::getline(f, line)) {
    if (RE2::PartialMatch(line, *reg_pattern, &reg_keyword, &op_name)) {
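      // Record offsets spanning the quoted op name, including both quotes.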
uint32_t offset_start = offsets + (op_name.data() - line.data() - 1);
uint32_t offset_end = offset_start + op_name.size() + 2;
auto op_reg_offset = op_reg_offsets.add_offsets();
op_reg_offset->set_name(std::string{op_name});
op_reg_offset->set_filepath(std::string{filename});
op_reg_offset->set_start(offset_start);
op_reg_offset->set_end(offset_end);
}
offsets += line.size() + 1;
}
f.close();
return absl::OkStatus();
}
} | #include "tensorflow/python/framework/offset_counter_helper.h"
#include <string>
#include "absl/strings/str_format.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/python/framework/op_reg_offset.pb.h"
namespace tensorflow {
namespace {
TEST(OffsetCounterHelper, FindOpRegistationFromFile) {
std::string content = R"code(
REGISTER_OP("Test>Op1");
REGISTER_OP("Test>Op2")
.Input("input: int32")
.Output("output: int32");
)code";
Env* env = Env::Default();
string fname;
ASSERT_TRUE(env->LocalTempFilename(&fname));
TF_ASSERT_OK(WriteStringToFile(env, fname, content));
OpRegOffsets actual;
TF_CHECK_OK(FindOpRegistationFromFile(fname, actual));
EXPECT_EQ(actual.offsets(0).name(), "Test>Op1");
EXPECT_EQ(actual.offsets(0).filepath(), fname);
EXPECT_EQ(actual.offsets(0).start(), 13);
EXPECT_EQ(actual.offsets(0).end(), 23);
EXPECT_EQ(actual.offsets(1).name(), "Test>Op2");
EXPECT_EQ(actual.offsets(1).filepath(), fname);
EXPECT_EQ(actual.offsets(1).start(), 38);
EXPECT_EQ(actual.offsets(1).end(), 48);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/offset_counter_helper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/python/framework/offset_counter_helper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6a0d1eee-2a26-44cb-8754-0448e35469d2 | cpp | tensorflow/tensorflow | requantization_range_op | tensorflow/core/kernels/requantization_range_op.cc | tensorflow/core/kernels/requantization_range_op_test.cc | #define EIGEN_USE_THREADS
#include <math.h>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/type_traits.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
void CalculateUsedRange(const Tensor& input, qint32* used_min_quantized,
qint32* used_max_quantized) {
auto input_array = input.flat<qint32>();
Eigen::Tensor<qint32, 0, Eigen::RowMajor> min = input_array.minimum();
Eigen::Tensor<qint32, 0, Eigen::RowMajor> max = input_array.maximum();
*used_min_quantized = min();
*used_max_quantized = max();
}
class RequantizationRangeOp : public OpKernel {
public:
explicit RequantizationRangeOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& input = ctx->input(0);
OP_REQUIRES(ctx, ctx->input(1).NumElements() > 0,
errors::InvalidArgument("Input min must not be empty."));
OP_REQUIRES(ctx, ctx->input(2).NumElements() > 0,
errors::InvalidArgument("Input max must not be empty."));
const float input_min_float = ctx->input(1).flat<float>()(0);
const float input_max_float = ctx->input(2).flat<float>()(0);
Tensor* output_min = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &output_min));
Tensor* output_max = nullptr;
OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_max));
qint32 used_min_quantized;
qint32 used_max_quantized;
CalculateUsedRange(input, &used_min_quantized, &used_max_quantized);
const float used_min_float = std::min(
0.0f,
QuantizedToFloat(used_min_quantized, input_min_float, input_max_float));
const float used_max_float =
QuantizedToFloat(used_max_quantized, input_min_float, input_max_float);
output_min->flat<float>().setConstant(used_min_float);
output_max->flat<float>().setConstant(used_max_float);
}
};
REGISTER_KERNEL_BUILDER(Name("RequantizationRange")
.Device(DEVICE_CPU)
.TypeConstraint<qint32>("Tinput"),
RequantizationRangeOp);
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
void CalculateUsedRange(const Tensor& input, qint32* actual_min_quantized,
qint32* actual_max_quantized);
class RequantizationRangeTest : public OpsTestBase {
protected:
};
TEST_F(RequantizationRangeTest, HandCrafted) {
TF_ASSERT_OK(NodeDefBuilder("requantization_range", "RequantizationRange")
.Input(FakeInput(DT_QINT32))
.Input(FakeInput(DT_FLOAT))
.Input(FakeInput(DT_FLOAT))
.Attr("Tinput", DataTypeToEnum<qint32>::v())
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
const int value_count = 3;
AddInputFromArray<qint32>(TensorShape({value_count}),
{-(1 << 23), 0, (1 << 23)});
AddInputFromArray<float>(TensorShape({1}), {-256.0f});
AddInputFromArray<float>(TensorShape({1}), {256.0f});
TF_ASSERT_OK(RunOpKernel());
Tensor expected_min(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_min, {-1.0f});
test::ExpectTensorEqual<float>(expected_min, *GetOutput(0));
Tensor expected_max(allocator(), DT_FLOAT, TensorShape({}));
test::FillValues<float>(&expected_max, {1.0f});
test::ExpectTensorEqual<float>(expected_max, *GetOutput(1));
}
static void BM_RequantizationRange(::testing::benchmark::State& state) {
const int size = state.range(0);
Tensor quantized_tensor(DT_QINT32, TensorShape({1, size}));
test::FillFn<qint32>(&quantized_tensor, [](int n) { return qint32(n); });
qint32 actual_min;
qint32 actual_max;
for (auto s : state) {
CalculateUsedRange(quantized_tensor, &actual_min, &actual_max);
}
state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * size);
state.SetBytesProcessed(static_cast<int64_t>(state.iterations()) * size * 4);
}
BENCHMARK(BM_RequantizationRange)
->UseRealTime()
->Arg(100)
->Arg(1000)
->Arg(10000)
->Arg(100000)
->Arg(1000000)
->Arg(10000000)
->Arg(100000000);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/requantization_range_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/requantization_range_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1d2091bf-92ce-4eb9-be25-2a47e7d5068d | cpp | abseil/abseil-cpp | charconv | absl/strings/charconv.cc | absl/strings/charconv_test.cc | #include "absl/strings/charconv.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <system_error>
#include "absl/base/casts.h"
#include "absl/base/config.h"
#include "absl/base/nullability.h"
#include "absl/numeric/bits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/internal/charconv_bigint.h"
#include "absl/strings/internal/charconv_parse.h"
#ifdef ABSL_BIT_PACK_FLOATS
#error ABSL_BIT_PACK_FLOATS cannot be directly set
#elif defined(__x86_64__) || defined(_M_X64)
#define ABSL_BIT_PACK_FLOATS 1
#endif
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
template <typename FloatType>
struct FloatTraits;
template <>
struct FloatTraits<double> {
using mantissa_t = uint64_t;
static constexpr int kTargetBits = 64;
static constexpr int kTargetExponentBits = 11;
static constexpr int kTargetMantissaBits = 53;
static constexpr int kMaxExponent = 971;
static constexpr int kMinNormalExponent = -1074;
static constexpr int kExponentBias = 1023;
static constexpr int kEiselLemireShift = 9;
static constexpr uint64_t kEiselLemireMask = uint64_t{0x1FF};
static constexpr int kEiselLemireMinInclusiveExp10 = -324 - 18;
static constexpr int kEiselLemireMaxExclusiveExp10 = 309;
static double MakeNan(absl::Nonnull<const char*> tagp) {
#if ABSL_HAVE_BUILTIN(__builtin_nan)
return __builtin_nan(tagp);
#else
using namespace std;
return nan(tagp);
#endif
}
static double Make(mantissa_t mantissa, int exponent, bool sign) {
#ifndef ABSL_BIT_PACK_FLOATS
using namespace std;
return sign ? -ldexp(mantissa, exponent) : ldexp(mantissa, exponent);
#else
constexpr uint64_t kMantissaMask =
(uint64_t{1} << (kTargetMantissaBits - 1)) - 1;
uint64_t dbl = static_cast<uint64_t>(sign) << 63;
if (mantissa > kMantissaMask) {
dbl += static_cast<uint64_t>(exponent + 1023 + kTargetMantissaBits - 1)
<< 52;
mantissa &= kMantissaMask;
} else {
assert(exponent == kMinNormalExponent);
}
dbl += mantissa;
return absl::bit_cast<double>(dbl);
#endif
}
};
template <>
struct FloatTraits<float> {
using mantissa_t = uint32_t;
static constexpr int kTargetBits = 32;
static constexpr int kTargetExponentBits = 8;
static constexpr int kTargetMantissaBits = 24;
static constexpr int kMaxExponent = 104;
static constexpr int kMinNormalExponent = -149;
static constexpr int kExponentBias = 127;
static constexpr int kEiselLemireShift = 38;
static constexpr uint64_t kEiselLemireMask = uint64_t{0x3FFFFFFFFF};
static constexpr int kEiselLemireMinInclusiveExp10 = -46 - 18;
static constexpr int kEiselLemireMaxExclusiveExp10 = 39;
static float MakeNan(absl::Nonnull<const char*> tagp) {
#if ABSL_HAVE_BUILTIN(__builtin_nanf)
return __builtin_nanf(tagp);
#else
using namespace std;
return std::nanf(tagp);
#endif
}
static float Make(mantissa_t mantissa, int exponent, bool sign) {
#ifndef ABSL_BIT_PACK_FLOATS
using namespace std;
return sign ? -ldexpf(mantissa, exponent) : ldexpf(mantissa, exponent);
#else
constexpr uint32_t kMantissaMask =
(uint32_t{1} << (kTargetMantissaBits - 1)) - 1;
uint32_t flt = static_cast<uint32_t>(sign) << 31;
if (mantissa > kMantissaMask) {
flt += static_cast<uint32_t>(exponent + 127 + kTargetMantissaBits - 1)
<< 23;
mantissa &= kMantissaMask;
} else {
assert(exponent == kMinNormalExponent);
}
flt += mantissa;
return absl::bit_cast<float>(flt);
#endif
}
};
extern const uint64_t kPower10MantissaHighTable[];
extern const uint64_t kPower10MantissaLowTable[];
constexpr int kPower10TableMinInclusive = -342;
constexpr int kPower10TableMaxExclusive = 309;
uint64_t Power10Mantissa(int n) {
return kPower10MantissaHighTable[n - kPower10TableMinInclusive];
}
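// Returns the binary exponent paired with Power10Mantissa(n): 217706 / 2^16
// approximates log2(10), and the -63 accounts for the table mantissas being
// 64-bit values normalized so that the high bit is set.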
int Power10Exponent(int n) {
return (217706 * n >> 16) - 63;
}
bool Power10Overflow(int n) { return n >= kPower10TableMaxExclusive; }
bool Power10Underflow(int n) { return n < kPower10TableMinInclusive; }
bool Power10Exact(int n) { return n >= 0 && n <= 27; }
constexpr int kOverflow = 99999;
constexpr int kUnderflow = -99999;
struct CalculatedFloat {
uint64_t mantissa = 0;
int exponent = 0;
};
int BitWidth(uint128 value) {
if (Uint128High64(value) == 0) {
return static_cast<int>(bit_width(Uint128Low64(value)));
}
return 128 - countl_zero(Uint128High64(value));
}
template <typename FloatType>
int NormalizedShiftSize(int mantissa_width, int binary_exponent) {
const int normal_shift =
mantissa_width - FloatTraits<FloatType>::kTargetMantissaBits;
const int minimum_shift =
FloatTraits<FloatType>::kMinNormalExponent - binary_exponent;
return std::max(normal_shift, minimum_shift);
}
int TruncateToBitWidth(int bit_width, absl::Nonnull<uint128*> value) {
const int current_bit_width = BitWidth(*value);
const int shift = current_bit_width - bit_width;
*value >>= shift;
return shift;
}
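// Handles NaN, infinity, and zero inputs directly, returning true when *value
// has been set and no further conversion is required.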
template <typename FloatType>
bool HandleEdgeCase(const strings_internal::ParsedFloat& input, bool negative,
absl::Nonnull<FloatType*> value) {
if (input.type == strings_internal::FloatType::kNan) {
constexpr ptrdiff_t kNanBufferSize = 128;
#if (defined(__GNUC__) && !defined(__clang__)) || \
(defined(__clang__) && __clang_major__ < 7)
volatile char n_char_sequence[kNanBufferSize];
#else
char n_char_sequence[kNanBufferSize];
#endif
if (input.subrange_begin == nullptr) {
n_char_sequence[0] = '\0';
} else {
ptrdiff_t nan_size = input.subrange_end - input.subrange_begin;
nan_size = std::min(nan_size, kNanBufferSize - 1);
std::copy_n(input.subrange_begin, nan_size, n_char_sequence);
n_char_sequence[nan_size] = '\0';
}
char* nan_argument = const_cast<char*>(n_char_sequence);
*value = negative ? -FloatTraits<FloatType>::MakeNan(nan_argument)
: FloatTraits<FloatType>::MakeNan(nan_argument);
return true;
}
if (input.type == strings_internal::FloatType::kInfinity) {
*value = negative ? -std::numeric_limits<FloatType>::infinity()
: std::numeric_limits<FloatType>::infinity();
return true;
}
if (input.mantissa == 0) {
*value = negative ? -0.0 : 0.0;
return true;
}
return false;
}
template <typename FloatType>
void EncodeResult(const CalculatedFloat& calculated, bool negative,
absl::Nonnull<absl::from_chars_result*> result,
absl::Nonnull<FloatType*> value) {
if (calculated.exponent == kOverflow) {
result->ec = std::errc::result_out_of_range;
*value = negative ? -std::numeric_limits<FloatType>::max()
: std::numeric_limits<FloatType>::max();
return;
} else if (calculated.mantissa == 0 || calculated.exponent == kUnderflow) {
result->ec = std::errc::result_out_of_range;
*value = negative ? -0.0 : 0.0;
return;
}
*value = FloatTraits<FloatType>::Make(
static_cast<typename FloatTraits<FloatType>::mantissa_t>(
calculated.mantissa),
calculated.exponent, negative);
}
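// Shifts `value` right by `shift` bits and rounds the dropped bits
// half-to-even (rounding up at the halfway point when the input is already
// known to be a truncation). Sets *output_exact to false when the dropped
// bits leave the correct rounding ambiguous, so the caller can fall back to
// exact arithmetic.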
uint64_t ShiftRightAndRound(uint128 value, int shift, bool input_exact,
absl::Nonnull<bool*> output_exact) {
if (shift <= 0) {
*output_exact = input_exact;
return static_cast<uint64_t>(value << -shift);
}
if (shift >= 128) {
*output_exact = true;
return 0;
}
*output_exact = true;
const uint128 shift_mask = (uint128(1) << shift) - 1;
const uint128 halfway_point = uint128(1) << (shift - 1);
const uint128 shifted_bits = value & shift_mask;
value >>= shift;
if (shifted_bits > halfway_point) {
return static_cast<uint64_t>(value + 1);
}
if (shifted_bits == halfway_point) {
if ((value & 1) == 1 || !input_exact) {
++value;
}
return static_cast<uint64_t>(value);
}
if (!input_exact && shifted_bits == halfway_point - 1) {
*output_exact = false;
}
return static_cast<uint64_t>(value);
}
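// Resolves a rounding that ShiftRightAndRound() could not: compares the exact
// decimal value against the halfway point between the guessed mantissa and
// its successor using arbitrary-precision arithmetic, returning true if the
// guess must be rounded up.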
bool MustRoundUp(uint64_t guess_mantissa, int guess_exponent,
const strings_internal::ParsedFloat& parsed_decimal) {
absl::strings_internal::BigUnsigned<84> exact_mantissa;
int exact_exponent = exact_mantissa.ReadFloatMantissa(parsed_decimal, 768);
guess_mantissa = guess_mantissa * 2 + 1;
guess_exponent -= 1;
absl::strings_internal::BigUnsigned<84>& lhs = exact_mantissa;
int comparison;
if (exact_exponent >= 0) {
lhs.MultiplyByFiveToTheNth(exact_exponent);
absl::strings_internal::BigUnsigned<84> rhs(guess_mantissa);
if (exact_exponent > guess_exponent) {
lhs.ShiftLeft(exact_exponent - guess_exponent);
} else {
rhs.ShiftLeft(guess_exponent - exact_exponent);
}
comparison = Compare(lhs, rhs);
} else {
absl::strings_internal::BigUnsigned<84> rhs =
absl::strings_internal::BigUnsigned<84>::FiveToTheNth(-exact_exponent);
rhs.MultiplyBy(guess_mantissa);
if (exact_exponent > guess_exponent) {
lhs.ShiftLeft(exact_exponent - guess_exponent);
} else {
rhs.ShiftLeft(guess_exponent - exact_exponent);
}
comparison = Compare(lhs, rhs);
}
if (comparison < 0) {
return false;
} else if (comparison > 0) {
return true;
} else {
return (guess_mantissa & 2) == 2;
}
}
template <typename FloatType>
CalculatedFloat CalculatedFloatFromRawValues(uint64_t mantissa, int exponent) {
CalculatedFloat result;
if (mantissa == uint64_t{1} << FloatTraits<FloatType>::kTargetMantissaBits) {
mantissa >>= 1;
exponent += 1;
}
if (exponent > FloatTraits<FloatType>::kMaxExponent) {
result.exponent = kOverflow;
} else if (mantissa == 0) {
result.exponent = kUnderflow;
} else {
result.exponent = exponent;
result.mantissa = mantissa;
}
return result;
}
template <typename FloatType>
CalculatedFloat CalculateFromParsedHexadecimal(
const strings_internal::ParsedFloat& parsed_hex) {
uint64_t mantissa = parsed_hex.mantissa;
int exponent = parsed_hex.exponent;
int mantissa_width = static_cast<int>(bit_width(mantissa));
const int shift = NormalizedShiftSize<FloatType>(mantissa_width, exponent);
bool result_exact;
exponent += shift;
mantissa = ShiftRightAndRound(mantissa, shift,
true, &result_exact);
return CalculatedFloatFromRawValues<FloatType>(mantissa, exponent);
}
template <typename FloatType>
CalculatedFloat CalculateFromParsedDecimal(
const strings_internal::ParsedFloat& parsed_decimal) {
CalculatedFloat result;
if (Power10Underflow(parsed_decimal.exponent)) {
result.exponent = kUnderflow;
return result;
} else if (Power10Overflow(parsed_decimal.exponent)) {
result.exponent = kOverflow;
return result;
}
uint128 wide_binary_mantissa = parsed_decimal.mantissa;
wide_binary_mantissa *= Power10Mantissa(parsed_decimal.exponent);
int binary_exponent = Power10Exponent(parsed_decimal.exponent);
bool mantissa_exact;
int mantissa_width;
if (parsed_decimal.subrange_begin) {
mantissa_width = 58;
mantissa_exact = false;
binary_exponent +=
TruncateToBitWidth(mantissa_width, &wide_binary_mantissa);
} else if (!Power10Exact(parsed_decimal.exponent)) {
mantissa_width = 63;
mantissa_exact = false;
binary_exponent +=
TruncateToBitWidth(mantissa_width, &wide_binary_mantissa);
} else {
mantissa_width = BitWidth(wide_binary_mantissa);
mantissa_exact = true;
}
const int shift =
NormalizedShiftSize<FloatType>(mantissa_width, binary_exponent);
bool result_exact;
binary_exponent += shift;
uint64_t binary_mantissa = ShiftRightAndRound(wide_binary_mantissa, shift,
mantissa_exact, &result_exact);
if (!result_exact) {
if (MustRoundUp(binary_mantissa, binary_exponent, parsed_decimal)) {
binary_mantissa += 1;
}
}
return CalculatedFloatFromRawValues<FloatType>(binary_mantissa,
binary_exponent);
}
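// Fast path: the Eisel-Lemire algorithm produces a correctly rounded result
// for most inputs with a 128-bit multiplication against a precomputed
// power-of-ten table. Returns false when correct rounding cannot be
// guaranteed, in which case the caller falls back to the slower path above.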
template <typename FloatType>
bool EiselLemire(const strings_internal::ParsedFloat& input, bool negative,
absl::Nonnull<FloatType*> value,
absl::Nonnull<std::errc*> ec) {
uint64_t man = input.mantissa;
int exp10 = input.exponent;
if (exp10 < FloatTraits<FloatType>::kEiselLemireMinInclusiveExp10) {
*value = negative ? -0.0 : 0.0;
*ec = std::errc::result_out_of_range;
return true;
} else if (exp10 >= FloatTraits<FloatType>::kEiselLemireMaxExclusiveExp10) {
*value = negative ? -std::numeric_limits<FloatType>::max()
: std::numeric_limits<FloatType>::max();
*ec = std::errc::result_out_of_range;
return true;
}
static_assert(
FloatTraits<FloatType>::kEiselLemireMinInclusiveExp10 >=
kPower10TableMinInclusive,
"(exp10-kPower10TableMinInclusive) in kPower10MantissaHighTable bounds");
static_assert(
FloatTraits<FloatType>::kEiselLemireMaxExclusiveExp10 <=
kPower10TableMaxExclusive,
"(exp10-kPower10TableMinInclusive) in kPower10MantissaHighTable bounds");
int clz = countl_zero(man);
man <<= static_cast<unsigned int>(clz);
uint64_t ret_exp2 =
static_cast<uint64_t>((217706 * exp10 >> 16) + 64 +
FloatTraits<FloatType>::kExponentBias - clz);
uint128 x = static_cast<uint128>(man) *
static_cast<uint128>(
kPower10MantissaHighTable[exp10 - kPower10TableMinInclusive]);
static constexpr uint64_t high64_mask =
FloatTraits<FloatType>::kEiselLemireMask;
if (((Uint128High64(x) & high64_mask) == high64_mask) &&
(man > (std::numeric_limits<uint64_t>::max() - Uint128Low64(x)))) {
uint128 y =
static_cast<uint128>(man) *
static_cast<uint128>(
kPower10MantissaLowTable[exp10 - kPower10TableMinInclusive]);
x += Uint128High64(y);
if (((Uint128High64(x) & high64_mask) == high64_mask) &&
((Uint128Low64(x) + 1) == 0) &&
(man > (std::numeric_limits<uint64_t>::max() - Uint128Low64(y)))) {
return false;
}
}
uint64_t msb = Uint128High64(x) >> 63;
uint64_t ret_man =
Uint128High64(x) >> (msb + FloatTraits<FloatType>::kEiselLemireShift);
ret_exp2 -= 1 ^ msb;
if ((Uint128Low64(x) == 0) && ((Uint128High64(x) & high64_mask) == 0) &&
((ret_man & 3) == 1)) {
return false;
}
ret_man += ret_man & 1;
ret_man >>= 1;
if ((ret_man >> FloatTraits<FloatType>::kTargetMantissaBits) > 0) {
ret_exp2 += 1;
}
static constexpr uint64_t max_exp2 =
(1 << FloatTraits<FloatType>::kTargetExponentBits) - 1;
if ((ret_exp2 - 1) >= (max_exp2 - 1)) {
return false;
}
#ifndef ABSL_BIT_PACK_FLOATS
if (FloatTraits<FloatType>::kTargetBits == 64) {
*value = FloatTraits<FloatType>::Make(
(ret_man & 0x000FFFFFFFFFFFFFu) | 0x0010000000000000u,
static_cast<int>(ret_exp2) - 1023 - 52, negative);
return true;
} else if (FloatTraits<FloatType>::kTargetBits == 32) {
*value = FloatTraits<FloatType>::Make(
(static_cast<uint32_t>(ret_man) & 0x007FFFFFu) | 0x00800000u,
static_cast<int>(ret_exp2) - 127 - 23, negative);
return true;
}
#else
if (FloatTraits<FloatType>::kTargetBits == 64) {
uint64_t ret_bits = (ret_exp2 << 52) | (ret_man & 0x000FFFFFFFFFFFFFu);
if (negative) {
ret_bits |= 0x8000000000000000u;
}
*value = absl::bit_cast<double>(ret_bits);
return true;
} else if (FloatTraits<FloatType>::kTargetBits == 32) {
uint32_t ret_bits = (static_cast<uint32_t>(ret_exp2) << 23) |
(static_cast<uint32_t>(ret_man) & 0x007FFFFFu);
if (negative) {
ret_bits |= 0x80000000u;
}
*value = absl::bit_cast<float>(ret_bits);
return true;
}
#endif
return false;
}
template <typename FloatType>
from_chars_result FromCharsImpl(absl::Nonnull<const char*> first,
absl::Nonnull<const char*> last,
FloatType& value, chars_format fmt_flags) {
from_chars_result result;
result.ptr = first;
result.ec = std::errc();
bool negative = false;
if (first != last && *first == '-') {
++first;
negative = true;
}
if ((fmt_flags & chars_format::hex) == chars_format{} && last - first >= 2 &&
*first == '0' && (first[1] == 'x' || first[1] == 'X')) {
const char* hex_first = first + 2;
strings_internal::ParsedFloat hex_parse =
strings_internal::ParseFloat<16>(hex_first, last, fmt_flags);
if (hex_parse.end == nullptr ||
hex_parse.type != strings_internal::FloatType::kNumber) {
if (fmt_flags == chars_format::scientific) {
result.ec = std::errc::invalid_argument;
} else {
result.ptr = first + 1;
value = negative ? -0.0 : 0.0;
}
return result;
}
result.ptr = hex_parse.end;
if (HandleEdgeCase(hex_parse, negative, &value)) {
return result;
}
CalculatedFloat calculated =
CalculateFromParsedHexadecimal<FloatType>(hex_parse);
EncodeResult(calculated, negative, &result, &value);
return result;
}
if ((fmt_flags & chars_format::hex) == chars_format::hex) {
strings_internal::ParsedFloat hex_parse =
strings_internal::ParseFloat<16>(first, last, fmt_flags);
if (hex_parse.end == nullptr) {
result.ec = std::errc::invalid_argument;
return result;
}
result.ptr = hex_parse.end;
if (HandleEdgeCase(hex_parse, negative, &value)) {
return result;
}
CalculatedFloat calculated =
CalculateFromParsedHexadecimal<FloatType>(hex_parse);
EncodeResult(calculated, negative, &result, &value);
return result;
} else {
strings_internal::ParsedFloat decimal_parse =
strings_internal::ParseFloat<10>(first, last, fmt_flags);
if (decimal_parse.end == nullptr) {
result.ec = std::errc::invalid_argument;
return result;
}
result.ptr = decimal_parse.end;
if (HandleEdgeCase(decimal_parse, negative, &value)) {
return result;
}
if ((decimal_parse.subrange_begin == nullptr) &&
EiselLemire<FloatType>(decimal_parse, negative, &value, &result.ec)) {
return result;
}
CalculatedFloat calculated =
CalculateFromParsedDecimal<FloatType>(decimal_parse);
EncodeResult(calculated, negative, &result, &value);
return result;
}
}
}
from_chars_result from_chars(absl::Nonnull<const char*> first,
absl::Nonnull<const char*> last, double& value,
chars_format fmt) {
return FromCharsImpl(first, last, value, fmt);
}
from_chars_result from_chars(absl::Nonnull<const char*> first,
absl::Nonnull<const char*> last, float& value,
chars_format fmt) {
return FromCharsImpl(first, last, value, fmt);
}
namespace {
const uint64_t kPower10MantissaHighTable[] = {
0xeef453d6923bd65aU, 0x9558b4661b6565f8U, 0xbaaee17fa23ebf76U,
0xe95a99df8ace6f53U, 0x91d8a02bb6c10594U, 0xb64ec836a47146f9U,
0xe3e27a444d8d98b7U, 0x8e6d8c6ab0787f72U, 0xb208ef855c969f4fU,
0xde8b2b66b3bc4723U, 0x8b16fb203055ac76U, 0xaddcb9e83c6b1793U,
0xd953e8624b85dd78U, 0x87d4713d6f33aa6bU, 0xa9c98d8ccb009506U,
0xd43bf0effdc0ba48U, 0x84a57695fe98746dU, 0xa5ced43b7e3e9188U,
0xcf42894a5dce35eaU, 0x818995ce7aa0e1b2U, 0xa1ebfb4219491a1fU,
0xca66fa129f9b60a6U, 0xfd00b897478238d0U, 0x9e20735e8cb16382U,
0xc5a890362fddbc62U, 0xf712b443bbd52b7bU, 0x9a6bb0aa55653b2dU,
0xc1069cd4eabe89f8U, 0xf148440a256e2c76U, 0x96cd2a865764dbcaU,
0xbc807527ed3e12bcU, 0xeba09271e88d976bU, 0x93445b8731587ea3U,
0xb8157268fdae9e4cU, 0xe61acf033d1a45dfU, 0x8fd0c16206306babU,
0xb3c4f1ba87bc8696U, 0xe0b62e2929aba83cU, 0x8c71dcd9ba0b4925U,
0xaf8e5410288e1b6fU, 0xdb71e91432b1a24aU, 0x892731ac9faf056eU,
0xab70fe17c79ac6caU, 0xd64d3d9db981787dU, 0x85f0468293f0eb4eU,
0xa76c582338ed2621U, 0xd1476e2c07286faaU, 0x82cca4db847945caU,
0xa37fce126597973cU, 0xcc5fc196fefd7d0cU, 0xff77b1fcbebcdc4fU,
0x9faacf3df73609b1U, 0xc795830d75038c1dU, 0xf97ae3d0d2446f25U,
0x9becce62836ac577U, 0xc2e801fb244576d5U, 0xf3a20279ed56d48aU,
0x9845418c345644d6U, 0xbe5691ef416bd60cU, 0xedec366b11c6cb8fU,
0x94b3a202eb1c3f39U, 0xb9e08a83a5e34f07U, 0xe858ad248f5c22c9U,
0x91376c36d99995beU, 0xb58547448ffffb2dU, 0xe2e69915b3fff9f9U,
0x8dd01fad907ffc3bU, 0xb1442798f49ffb4aU, 0xdd95317f31c7fa1dU,
0x8a7d3eef7f1cfc52U, 0xad1c8eab5ee43b66U, 0xd863b256369d4a40U,
0x873e4f75e2224e68U, 0xa90de3535aaae202U, 0xd3515c2831559a83U,
0x8412d9991ed58091U, 0xa5178fff668ae0b6U, 0xce5d73ff402d98e3U,
0x80fa687f881c7f8eU, 0xa139029f6a239f72U, 0xc987434744ac874eU,
0xfbe9141915d7a922U, 0x9d71ac8fada6c9b5U, 0xc4ce17b399107c22U,
0xf6019da07f549b2bU, 0x99c102844f94e0fbU, 0xc0314325637a1939U,
0xf03d93eebc589f88U, 0x96267c7535b763b5U, 0xbbb01b9283253ca2U,
0xea9c227723ee8bcbU, 0x92a1958a7675175fU, 0xb749faed14125d36U,
0xe51c79a85916f484U, 0x8f31cc0937ae58d2U, 0xb2fe3f0b8599ef07U,
0xdfbdcece67006ac9U, 0x8bd6a141006042bdU, 0xaecc49914078536dU,
0xda7f5bf590966848U, 0x888f99797a5e012dU, 0xaab37fd7d8f58178U,
0xd5605fcdcf32e1d6U, 0x855c3be0a17fcd26U, 0xa6b34ad8c9dfc06fU,
0xd0601d8efc57b08bU, 0x823c12795db6ce57U, 0xa2cb1717b52481edU,
0xcb7ddcdda26da268U, 0xfe5d54150b090b02U, 0x9efa548d26e5a6e1U,
0xc6b8e9b0709f109aU, 0xf867241c8cc6d4c0U, 0x9b407691d7fc44f8U,
0xc21094364dfb5636U, 0xf294b943e17a2bc4U, 0x979cf3ca6cec5b5aU,
0xbd8430bd08277231U, 0xece53cec4a314ebdU, 0x940f4613ae5ed136U,
0xb913179899f68584U, 0xe757dd7ec07426e5U, 0x9096ea6f3848984fU,
0xb4bca50b065abe63U, 0xe1ebce4dc7f16dfbU, 0x8d3360f09cf6e4bdU,
0xb080392cc4349decU, 0xdca04777f541c567U, 0x89e42caaf9491b60U,
0xac5d37d5b79b6239U, 0xd77485cb25823ac7U, 0x86a8d39ef77164bcU,
0xa8530886b54dbdebU, 0xd267caa862a12d66U, 0x8380dea93da4bc60U,
0xa46116538d0deb78U, 0xcd795be870516656U, 0x806bd9714632dff6U,
0xa086cfcd97bf97f3U, 0xc8a883c0fdaf7df0U, 0xfad2a4b13d1b5d6cU,
0x9cc3a6eec6311a63U, 0xc3f490aa77bd60fcU, 0xf4f1b4d515acb93bU,
0x991711052d8bf3c5U, 0xbf5cd54678eef0b6U, 0xef340a98172aace4U,
0x9580869f0e7aac0eU, 0xbae0a846d2195712U, 0xe998d258869facd7U,
0x91ff83775423cc06U, 0xb67f6455292cbf08U, 0xe41f3d6a7377eecaU,
0x8e938662882af53eU, 0xb23867fb2a35b28dU, 0xdec681f9f4c31f31U,
0x8b3c113c38f9f37eU, 0xae0b158b4738705eU, 0xd98ddaee19068c76U,
0x87f8a8d4cfa417c9U, 0xa9f6d30a038d1dbcU, 0xd47487cc8470652bU,
0x84c8d4dfd2c63f3bU, 0xa5fb0a17c777cf09U, 0xcf79cc9db955c2ccU,
0x81ac1fe293d599bfU, 0xa21727db38cb002fU, 0xca9cf1d206fdc03bU,
0xfd442e4688bd304aU, 0x9e4a9cec15763e2eU, 0xc5dd44271ad3cdbaU,
0xf7549530e188c128U, 0x9a94dd3e8cf578b9U, 0xc13a148e3032d6e7U,
0xf18899b1bc3f8ca1U, 0x96f5600f15a7b7e5U, 0xbcb2b812db11a5deU,
0xebdf661791d60f56U, 0x936b9fcebb25c995U, 0xb84687c269ef3bfbU,
0xe65829b3046b0afaU, 0x8ff71a0fe2c2e6dcU, 0xb3f4e093db73a093U,
0xe0f218b8d25088b8U, 0x8c974f7383725573U, 0xafbd2350644eeacfU,
0xdbac6c247d62a583U, 0x894bc396ce5da772U, 0xab9eb47c81f5114fU,
0xd686619ba27255a2U, 0x8613fd0145877585U, 0xa798fc4196e952e7U,
0xd17f3b51fca3a7a0U, 0x82ef85133de648c4U, 0xa3ab66580d5fdaf5U,
0xcc963fee10b7d1b3U, 0xffbbcfe994e5c61fU, 0x9fd561f1fd0f9bd3U,
0xc7caba6e7c5382c8U, 0xf9bd690a1b68637bU, 0x9c1661a651213e2dU,
0xc31bfa0fe5698db8U, 0xf3e2f893dec3f126U, 0x986ddb5c6b3a76b7U,
0xbe89523386091465U, 0xee2ba6c0678b597fU, 0x94db483840b717efU,
0xba121a4650e4ddebU, 0xe896a0d7e51e1566U, 0x915e2486ef32cd60U,
0xb5b5ada8aaff80b8U, 0xe3231912d5bf60e6U, 0x8df5efabc5979c8fU,
0xb1736b96b6fd83b3U, 0xddd0467c64bce4a0U, 0x8aa22c0dbef60ee4U,
0xad4ab7112eb3929dU, 0xd89d64d57a607744U, 0x87625f056c7c4a8bU,
0xa93af6c6c79b5d2dU, 0xd389b47879823479U, 0x843610cb4bf160cbU,
0xa54394fe1eedb8feU, 0xce947a3da6a9273eU, 0x811ccc668829b887U,
0xa163ff802a3426a8U, 0xc9bcff6034c13052U, 0xfc2c3f3841f17c67U,
0x9d9ba7832936edc0U, 0xc5029163f384a931U, 0xf64335bcf065d37dU,
0x99ea0196163fa42eU, 0xc06481fb9bcf8d39U, 0xf07da27a82c37088U,
0x964e858c91ba2655U, 0xbbe226efb628afeaU, 0xeadab0aba3b2dbe5U,
0x92c8ae6b464fc96fU, 0xb77ada0617e3bbcbU, 0xe55990879ddcaabdU,
0x8f57fa54c2a9eab6U, 0xb32df8e9f3546564U, 0xdff9772470297ebdU,
0x8bfbea76c619ef36U, 0xaefae51477a06b03U, 0xdab99e59958885c4U,
0x88b402f7fd75539bU, 0xaae103b5fcd2a881U, 0xd59944a37c0752a2U,
0x857fcae62d8493a5U, 0xa6dfbd9fb8e5b88eU, 0xd097ad07a71f26b2U,
0x825ecc24c873782fU, 0xa2f67f2dfa90563bU, 0xcbb41ef979346bcaU,
0xfea126b7d78186bcU, 0x9f24b832e6b0f436U, 0xc6ede63fa05d3143U,
0xf8a95fcf88747d94U, 0x9b69dbe1b548ce7cU, 0xc24452da229b021bU,
0xf2d56790ab41c2a2U, 0x97c560ba6b0919a5U, 0xbdb6b8e905cb600fU,
0xed246723473e3813U, 0x9436c0760c86e30bU, 0xb94470938fa89bceU,
0xe7958cb87392c2c2U, 0x90bd77f3483bb9b9U, 0xb4ecd5f01a4aa828U,
0xe2280b6c20dd5232U, 0x8d590723948a535fU, 0xb0af48ec79ace837U,
0xdcdb1b2798182244U, 0x8a08f0f8bf0f156bU, 0xac8b2d36eed2dac5U,
0xd7adf884aa879177U, 0x86ccbb52ea94baeaU, 0xa87fea27a539e9a5U,
0xd29fe4b18e88640eU, 0x83a3eeeef9153e89U, 0xa48ceaaab75a8e2bU,
0xcdb02555653131b6U, 0x808e17555f3ebf11U, 0xa0b19d2ab70e6ed6U,
0xc8de047564d20a8bU, 0xfb158592be068d2eU, 0x9ced737bb6c4183dU,
0xc428d05aa4751e4cU, 0xf53304714d9265dfU, 0x993fe2c6d07b7fabU,
0xbf8fdb78849a5f96U, 0xef73d256a5c0f77cU, 0x95a8637627989aadU,
0xbb127c53b17ec159U, 0xe9d71b689dde71afU, 0x9226712162ab070dU,
0xb6b00d69bb55c8d1U, 0xe45c10c42a2b3b05U, 0x8eb98a7a9a5b04e3U,
0xb267ed1940f1c61cU, 0xdf01e85f912e37a3U, 0x8b61313bbabce2c6U,
0xae397d8aa96c1b77U, 0xd9c7dced53c72255U, 0x881cea14545c7575U,
0xaa242499697392d2U, 0xd4ad2dbfc3d07787U, 0x84ec3c97da624ab4U,
0xa6274bbdd0fadd61U, 0xcfb11ead453994baU, 0x81ceb32c4b43fcf4U,
0xa2425ff75e14fc31U, 0xcad2f7f5359a3b3eU, 0xfd87b5f28300ca0dU,
0x9e74d1b791e07e48U, 0xc612062576589ddaU, 0xf79687aed3eec551U,
0x9abe14cd44753b52U, 0xc16d9a0095928a27U, 0xf1c90080baf72cb1U,
0x971da05074da7beeU, 0xbce5086492111aeaU, 0xec1e4a7db69561a5U,
0x9392ee8e921d5d07U, 0xb877aa3236a4b449U, 0xe69594bec44de15bU,
0x901d7cf73ab0acd9U, 0xb424dc35095cd80fU, 0xe12e13424bb40e13U,
0x8cbccc096f5088cbU, 0xafebff0bcb24aafeU, 0xdbe6fecebdedd5beU,
0x89705f4136b4a597U, 0xabcc77118461cefcU, 0xd6bf94d5e57a42bcU,
0x8637bd05af6c69b5U, 0xa7c5ac471b478423U, 0xd1b71758e219652bU,
0x83126e978d4fdf3bU, 0xa3d70a3d70a3d70aU, 0xccccccccccccccccU,
0x8000000000000000U, 0xa000000000000000U, 0xc800000000000000U,
0xfa00000000000000U, 0x9c40000000000000U, 0xc350000000000000U,
0xf424000000000000U, 0x9896800000000000U, 0xbebc200000000000U,
0xee6b280000000000U, 0x9502f90000000000U, 0xba43b74000000000U,
0xe8d4a51000000000U, 0x9184e72a00000000U, 0xb5e620f480000000U,
0xe35fa931a0000000U, 0x8e1bc9bf04000000U, 0xb1a2bc2ec5000000U,
0xde0b6b3a76400000U, 0x8ac7230489e80000U, 0xad78ebc5ac620000U,
0xd8d726b7177a8000U, 0x878678326eac9000U, 0xa968163f0a57b400U,
0xd3c21bcecceda100U, 0x84595161401484a0U, 0xa56fa5b99019a5c8U,
0xcecb8f27f4200f3aU, 0x813f3978f8940984U, 0xa18f07d736b90be5U,
0xc9f2c9cd04674edeU, 0xfc6f7c4045812296U, 0x9dc5ada82b70b59dU,
0xc5371912364ce305U, 0xf684df56c3e01bc6U, 0x9a130b963a6c115cU,
0xc097ce7bc90715b3U, 0xf0bdc21abb48db20U, 0x96769950b50d88f4U,
0xbc143fa4e250eb31U, 0xeb194f8e1ae525fdU, 0x92efd1b8d0cf37beU,
0xb7abc627050305adU, 0xe596b7b0c643c719U, 0x8f7e32ce7bea5c6fU,
0xb35dbf821ae4f38bU, 0xe0352f62a19e306eU, 0x8c213d9da502de45U,
0xaf298d050e4395d6U, 0xdaf3f04651d47b4cU, 0x88d8762bf324cd0fU,
0xab0e93b6efee0053U, 0xd5d238a4abe98068U, 0x85a36366eb71f041U,
0xa70c3c40a64e6c51U, 0xd0cf4b50cfe20765U, 0x82818f1281ed449fU,
0xa321f2d7226895c7U, 0xcbea6f8ceb02bb39U, 0xfee50b7025c36a08U,
0x9f4f2726179a2245U, 0xc722f0ef9d80aad6U, 0xf8ebad2b84e0d58bU,
0x9b934c3b330c8577U, 0xc2781f49ffcfa6d5U, 0xf316271c7fc3908aU,
0x97edd871cfda3a56U, 0xbde94e8e43d0c8ecU, 0xed63a231d4c4fb27U,
0x945e455f24fb1cf8U, 0xb975d6b6ee39e436U, 0xe7d34c64a9c85d44U,
0x90e40fbeea1d3a4aU, 0xb51d13aea4a488ddU, 0xe264589a4dcdab14U,
0x8d7eb76070a08aecU, 0xb0de65388cc8ada8U, 0xdd15fe86affad912U,
0x8a2dbf142dfcc7abU, 0xacb92ed9397bf996U, 0xd7e77a8f87daf7fbU,
0x86f0ac99b4e8dafdU, 0xa8acd7c0222311bcU, 0xd2d80db02aabd62bU,
0x83c7088e1aab65dbU, 0xa4b8cab1a1563f52U, 0xcde6fd5e09abcf26U,
0x80b05e5ac60b6178U, 0xa0dc75f1778e39d6U, 0xc913936dd571c84cU,
0xfb5878494ace3a5fU, 0x9d174b2dcec0e47bU, 0xc45d1df942711d9aU,
0xf5746577930d6500U, 0x9968bf6abbe85f20U, 0xbfc2ef456ae276e8U,
0xefb3ab16c59b14a2U, 0x95d04aee3b80ece5U, 0xbb445da9ca61281fU,
0xea1575143cf97226U, 0x924d692ca61be758U, 0xb6e0c377cfa2e12eU,
0xe498f455c38b997aU, 0x8edf98b59a373fecU, 0xb2977ee300c50fe7U,
0xdf3d5e9bc0f653e1U, 0x8b865b215899f46cU, 0xae67f1e9aec07187U,
0xda01ee641a708de9U, 0x884134fe908658b2U, 0xaa51823e34a7eedeU,
0xd4e5e2cdc1d1ea96U, 0x850fadc09923329eU, 0xa6539930bf6bff45U,
0xcfe87f7cef46ff16U, 0x81f14fae158c5f6eU, 0xa26da3999aef7749U,
0xcb090c8001ab551cU, 0xfdcb4fa002162a63U, 0x9e9f11c4014dda7eU,
0xc646d63501a1511dU, 0xf7d88bc24209a565U, 0x9ae757596946075fU,
0xc1a12d2fc3978937U, 0xf209787bb47d6b84U, 0x9745eb4d50ce6332U,
0xbd176620a501fbffU, 0xec5d3fa8ce427affU, 0x93ba47c980e98cdfU,
0xb8a8d9bbe123f017U, 0xe6d3102ad96cec1dU, 0x9043ea1ac7e41392U,
0xb454e4a179dd1877U, 0xe16a1dc9d8545e94U, 0x8ce2529e2734bb1dU,
0xb01ae745b101e9e4U, 0xdc21a1171d42645dU, 0x899504ae72497ebaU,
0xabfa45da0edbde69U, 0xd6f8d7509292d603U, 0x865b86925b9bc5c2U,
0xa7f26836f282b732U, 0xd1ef0244af2364ffU, 0x8335616aed761f1fU,
0xa402b9c5a8d3a6e7U, 0xcd036837130890a1U, 0x802221226be55a64U,
0xa02aa96b06deb0fdU, 0xc83553c5c8965d3dU, 0xfa42a8b73abbf48cU,
0x9c69a97284b578d7U, 0xc38413cf25e2d70dU, 0xf46518c2ef5b8cd1U,
0x98bf2f79d5993802U, 0xbeeefb584aff8603U, 0xeeaaba2e5dbf6784U,
0x952ab45cfa97a0b2U, 0xba756174393d88dfU, 0xe912b9d1478ceb17U,
0x91abb422ccb812eeU, 0xb616a12b7fe617aaU, 0xe39c49765fdf9d94U,
0x8e41ade9fbebc27dU, 0xb1d219647ae6b31cU, 0xde469fbd99a05fe3U,
0x8aec23d680043beeU, 0xada72ccc20054ae9U, 0xd910f7ff28069da4U,
0x87aa9aff79042286U, 0xa99541bf57452b28U, 0xd3fa922f2d1675f2U,
0x847c9b5d7c2e09b7U, 0xa59bc234db398c25U, 0xcf02b2c21207ef2eU,
0x8161afb94b44f57dU, 0xa1ba1ba79e1632dcU, 0xca28a291859bbf93U,
0xfcb2cb35e702af78U, 0x9defbf01b061adabU, 0xc56baec21c7a1916U,
0xf6c69a72a3989f5bU, 0x9a3c2087a63f6399U, 0xc0cb28a98fcf3c7fU,
0xf0fdf2d3f3c30b9fU, 0x969eb7c47859e743U, 0xbc4665b596706114U,
0xeb57ff22fc0c7959U, 0x9316ff75dd87cbd8U, 0xb7dcbf5354e9beceU,
0xe5d3ef282a242e81U, 0x8fa475791a569d10U, 0xb38d92d760ec4455U,
0xe070f78d3927556aU, 0x8c469ab843b89562U, 0xaf58416654a6babbU,
0xdb2e51bfe9d0696aU, 0x88fcf317f22241e2U, 0xab3c2fddeeaad25aU,
0xd60b3bd56a5586f1U, 0x85c7056562757456U, 0xa738c6bebb12d16cU,
0xd106f86e69d785c7U, 0x82a45b450226b39cU, 0xa34d721642b06084U,
0xcc20ce9bd35c78a5U, 0xff290242c83396ceU, 0x9f79a169bd203e41U,
0xc75809c42c684dd1U, 0xf92e0c3537826145U, 0x9bbcc7a142b17ccbU,
0xc2abf989935ddbfeU, 0xf356f7ebf83552feU, 0x98165af37b2153deU,
0xbe1bf1b059e9a8d6U, 0xeda2ee1c7064130cU, 0x9485d4d1c63e8be7U,
0xb9a74a0637ce2ee1U, 0xe8111c87c5c1ba99U, 0x910ab1d4db9914a0U,
0xb54d5e4a127f59c8U, 0xe2a0b5dc971f303aU, 0x8da471a9de737e24U,
0xb10d8e1456105dadU, 0xdd50f1996b947518U, 0x8a5296ffe33cc92fU,
0xace73cbfdc0bfb7bU, 0xd8210befd30efa5aU, 0x8714a775e3e95c78U,
0xa8d9d1535ce3b396U, 0xd31045a8341ca07cU, 0x83ea2b892091e44dU,
0xa4e4b66b68b65d60U, 0xce1de40642e3f4b9U, 0x80d2ae83e9ce78f3U,
0xa1075a24e4421730U, 0xc94930ae1d529cfcU, 0xfb9b7cd9a4a7443cU,
0x9d412e0806e88aa5U, 0xc491798a08a2ad4eU, 0xf5b5d7ec8acb58a2U,
0x9991a6f3d6bf1765U, 0xbff610b0cc6edd3fU, 0xeff394dcff8a948eU,
0x95f83d0a1fb69cd9U, 0xbb764c4ca7a4440fU, 0xea53df5fd18d5513U,
0x92746b9be2f8552cU, 0xb7118682dbb66a77U, 0xe4d5e82392a40515U,
0x8f05b1163ba6832dU, 0xb2c71d5bca9023f8U, 0xdf78e4b2bd342cf6U,
0x8bab8eefb6409c1aU, 0xae9672aba3d0c320U, 0xda3c0f568cc4f3e8U,
0x8865899617fb1871U, 0xaa7eebfb9df9de8dU, 0xd51ea6fa85785631U,
0x8533285c936b35deU, 0xa67ff273b8460356U, 0xd01fef10a657842cU,
0x8213f56a67f6b29bU, 0xa298f2c501f45f42U, 0xcb3f2f7642717713U,
0xfe0efb53d30dd4d7U, 0x9ec95d1463e8a506U, 0xc67bb4597ce2ce48U,
0xf81aa16fdc1b81daU, 0x9b10a4e5e9913128U, 0xc1d4ce1f63f57d72U,
0xf24a01a73cf2dccfU, 0x976e41088617ca01U, 0xbd49d14aa79dbc82U,
0xec9c459d51852ba2U, 0x93e1ab8252f33b45U, 0xb8da1662e7b00a17U,
0xe7109bfba19c0c9dU, 0x906a617d450187e2U, 0xb484f9dc9641e9daU,
0xe1a63853bbd26451U, 0x8d07e33455637eb2U, 0xb049dc016abc5e5fU,
0xdc5c5301c56b75f7U, 0x89b9b3e11b6329baU, 0xac2820d9623bf429U,
0xd732290fbacaf133U, 0x867f59a9d4bed6c0U, 0xa81f301449ee8c70U,
0xd226fc195c6a2f8cU, 0x83585d8fd9c25db7U, 0xa42e74f3d032f525U,
0xcd3a1230c43fb26fU, 0x80444b5e7aa7cf85U, 0xa0555e361951c366U,
0xc86ab5c39fa63440U, 0xfa856334878fc150U, 0x9c935e00d4b9d8d2U,
0xc3b8358109e84f07U, 0xf4a642e14c6262c8U, 0x98e7e9cccfbd7dbdU,
0xbf21e44003acdd2cU, 0xeeea5d5004981478U, 0x95527a5202df0ccbU,
0xbaa718e68396cffdU, 0xe950df20247c83fdU, 0x91d28b7416cdd27eU,
0xb6472e511c81471dU, 0xe3d8f9e563a198e5U, 0x8e679c2f5e44ff8fU,
};
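// Presumably the low 64 bits of the 128-bit powers-of-ten mantissas that pair
// with the "high" table above; this description is inferred from the table
// names, as the upstream comments are not included in this extract.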
const uint64_t kPower10MantissaLowTable[] = {
0x113faa2906a13b3fU, 0x4ac7ca59a424c507U, 0x5d79bcf00d2df649U,
0xf4d82c2c107973dcU, 0x79071b9b8a4be869U, 0x9748e2826cdee284U,
0xfd1b1b2308169b25U, 0xfe30f0f5e50e20f7U, 0xbdbd2d335e51a935U,
0xad2c788035e61382U, 0x4c3bcb5021afcc31U, 0xdf4abe242a1bbf3dU,
0xd71d6dad34a2af0dU, 0x8672648c40e5ad68U, 0x680efdaf511f18c2U,
0x0212bd1b2566def2U, 0x014bb630f7604b57U, 0x419ea3bd35385e2dU,
0x52064cac828675b9U, 0x7343efebd1940993U, 0x1014ebe6c5f90bf8U,
0xd41a26e077774ef6U, 0x8920b098955522b4U, 0x55b46e5f5d5535b0U,
0xeb2189f734aa831dU, 0xa5e9ec7501d523e4U, 0x47b233c92125366eU,
0x999ec0bb696e840aU, 0xc00670ea43ca250dU, 0x380406926a5e5728U,
0xc605083704f5ecf2U, 0xf7864a44c633682eU, 0x7ab3ee6afbe0211dU,
0x5960ea05bad82964U, 0x6fb92487298e33bdU, 0xa5d3b6d479f8e056U,
0x8f48a4899877186cU, 0x331acdabfe94de87U, 0x9ff0c08b7f1d0b14U,
0x07ecf0ae5ee44dd9U, 0xc9e82cd9f69d6150U, 0xbe311c083a225cd2U,
0x6dbd630a48aaf406U, 0x092cbbccdad5b108U, 0x25bbf56008c58ea5U,
0xaf2af2b80af6f24eU, 0x1af5af660db4aee1U, 0x50d98d9fc890ed4dU,
0xe50ff107bab528a0U, 0x1e53ed49a96272c8U, 0x25e8e89c13bb0f7aU,
0x77b191618c54e9acU, 0xd59df5b9ef6a2417U, 0x4b0573286b44ad1dU,
0x4ee367f9430aec32U, 0x229c41f793cda73fU, 0x6b43527578c1110fU,
0x830a13896b78aaa9U, 0x23cc986bc656d553U, 0x2cbfbe86b7ec8aa8U,
0x7bf7d71432f3d6a9U, 0xdaf5ccd93fb0cc53U, 0xd1b3400f8f9cff68U,
0x23100809b9c21fa1U, 0xabd40a0c2832a78aU, 0x16c90c8f323f516cU,
0xae3da7d97f6792e3U, 0x99cd11cfdf41779cU, 0x40405643d711d583U,
0x482835ea666b2572U, 0xda3243650005eecfU, 0x90bed43e40076a82U,
0x5a7744a6e804a291U, 0x711515d0a205cb36U, 0x0d5a5b44ca873e03U,
0xe858790afe9486c2U, 0x626e974dbe39a872U, 0xfb0a3d212dc8128fU,
0x7ce66634bc9d0b99U, 0x1c1fffc1ebc44e80U, 0xa327ffb266b56220U,
0x4bf1ff9f0062baa8U, 0x6f773fc3603db4a9U, 0xcb550fb4384d21d3U,
0x7e2a53a146606a48U, 0x2eda7444cbfc426dU, 0xfa911155fefb5308U,
0x793555ab7eba27caU, 0x4bc1558b2f3458deU, 0x9eb1aaedfb016f16U,
0x465e15a979c1cadcU, 0x0bfacd89ec191ec9U, 0xcef980ec671f667bU,
0x82b7e12780e7401aU, 0xd1b2ecb8b0908810U, 0x861fa7e6dcb4aa15U,
0x67a791e093e1d49aU, 0xe0c8bb2c5c6d24e0U, 0x58fae9f773886e18U,
0xaf39a475506a899eU, 0x6d8406c952429603U, 0xc8e5087ba6d33b83U,
0xfb1e4a9a90880a64U, 0x5cf2eea09a55067fU, 0xf42faa48c0ea481eU,
0xf13b94daf124da26U, 0x76c53d08d6b70858U, 0x54768c4b0c64ca6eU,
0xa9942f5dcf7dfd09U, 0xd3f93b35435d7c4cU, 0xc47bc5014a1a6dafU,
0x359ab6419ca1091bU, 0xc30163d203c94b62U, 0x79e0de63425dcf1dU,
0x985915fc12f542e4U, 0x3e6f5b7b17b2939dU, 0xa705992ceecf9c42U,
0x50c6ff782a838353U, 0xa4f8bf5635246428U, 0x871b7795e136be99U,
0x28e2557b59846e3fU, 0x331aeada2fe589cfU, 0x3ff0d2c85def7621U,
0x0fed077a756b53a9U, 0xd3e8495912c62894U, 0x64712dd7abbbd95cU,
0xbd8d794d96aacfb3U, 0xecf0d7a0fc5583a0U, 0xf41686c49db57244U,
0x311c2875c522ced5U, 0x7d633293366b828bU, 0xae5dff9c02033197U,
0xd9f57f830283fdfcU, 0xd072df63c324fd7bU, 0x4247cb9e59f71e6dU,
0x52d9be85f074e608U, 0x67902e276c921f8bU, 0x00ba1cd8a3db53b6U,
0x80e8a40eccd228a4U, 0x6122cd128006b2cdU, 0x796b805720085f81U,
0xcbe3303674053bb0U, 0xbedbfc4411068a9cU, 0xee92fb5515482d44U,
0x751bdd152d4d1c4aU, 0xd262d45a78a0635dU, 0x86fb897116c87c34U,
0xd45d35e6ae3d4da0U, 0x8974836059cca109U, 0x2bd1a438703fc94bU,
0x7b6306a34627ddcfU, 0x1a3bc84c17b1d542U, 0x20caba5f1d9e4a93U,
0x547eb47b7282ee9cU, 0xe99e619a4f23aa43U, 0x6405fa00e2ec94d4U,
0xde83bc408dd3dd04U, 0x9624ab50b148d445U, 0x3badd624dd9b0957U,
0xe54ca5d70a80e5d6U, 0x5e9fcf4ccd211f4cU, 0x7647c3200069671fU,
0x29ecd9f40041e073U, 0xf468107100525890U, 0x7182148d4066eeb4U,
0xc6f14cd848405530U, 0xb8ada00e5a506a7cU, 0xa6d90811f0e4851cU,
0x908f4a166d1da663U, 0x9a598e4e043287feU, 0x40eff1e1853f29fdU,
0xd12bee59e68ef47cU, 0x82bb74f8301958ceU, 0xe36a52363c1faf01U,
0xdc44e6c3cb279ac1U, 0x29ab103a5ef8c0b9U, 0x7415d448f6b6f0e7U,
0x111b495b3464ad21U, 0xcab10dd900beec34U, 0x3d5d514f40eea742U,
0x0cb4a5a3112a5112U, 0x47f0e785eaba72abU, 0x59ed216765690f56U,
0x306869c13ec3532cU, 0x1e414218c73a13fbU, 0xe5d1929ef90898faU,
0xdf45f746b74abf39U, 0x6b8bba8c328eb783U, 0x066ea92f3f326564U,
0xc80a537b0efefebdU, 0xbd06742ce95f5f36U, 0x2c48113823b73704U,
0xf75a15862ca504c5U, 0x9a984d73dbe722fbU, 0xc13e60d0d2e0ebbaU,
0x318df905079926a8U, 0xfdf17746497f7052U, 0xfeb6ea8bedefa633U,
0xfe64a52ee96b8fc0U, 0x3dfdce7aa3c673b0U, 0x06bea10ca65c084eU,
0x486e494fcff30a62U, 0x5a89dba3c3efccfaU, 0xf89629465a75e01cU,
0xf6bbb397f1135823U, 0x746aa07ded582e2cU, 0xa8c2a44eb4571cdcU,
0x92f34d62616ce413U, 0x77b020baf9c81d17U, 0x0ace1474dc1d122eU,
0x0d819992132456baU, 0x10e1fff697ed6c69U, 0xca8d3ffa1ef463c1U,
0xbd308ff8a6b17cb2U, 0xac7cb3f6d05ddbdeU, 0x6bcdf07a423aa96bU,
0x86c16c98d2c953c6U, 0xe871c7bf077ba8b7U, 0x11471cd764ad4972U,
0xd598e40d3dd89bcfU, 0x4aff1d108d4ec2c3U, 0xcedf722a585139baU,
0xc2974eb4ee658828U, 0x733d226229feea32U, 0x0806357d5a3f525fU,
0xca07c2dcb0cf26f7U, 0xfc89b393dd02f0b5U, 0xbbac2078d443ace2U,
0xd54b944b84aa4c0dU, 0x0a9e795e65d4df11U, 0x4d4617b5ff4a16d5U,
0x504bced1bf8e4e45U, 0xe45ec2862f71e1d6U, 0x5d767327bb4e5a4cU,
0x3a6a07f8d510f86fU, 0x890489f70a55368bU, 0x2b45ac74ccea842eU,
0x3b0b8bc90012929dU, 0x09ce6ebb40173744U, 0xcc420a6a101d0515U,
0x9fa946824a12232dU, 0x47939822dc96abf9U, 0x59787e2b93bc56f7U,
0x57eb4edb3c55b65aU, 0xede622920b6b23f1U, 0xe95fab368e45ecedU,
0x11dbcb0218ebb414U, 0xd652bdc29f26a119U, 0x4be76d3346f0495fU,
0x6f70a4400c562ddbU, 0xcb4ccd500f6bb952U, 0x7e2000a41346a7a7U,
0x8ed400668c0c28c8U, 0x728900802f0f32faU, 0x4f2b40a03ad2ffb9U,
0xe2f610c84987bfa8U, 0x0dd9ca7d2df4d7c9U, 0x91503d1c79720dbbU,
0x75a44c6397ce912aU, 0xc986afbe3ee11abaU, 0xfbe85badce996168U,
0xfae27299423fb9c3U, 0xdccd879fc967d41aU, 0x5400e987bbc1c920U,
0x290123e9aab23b68U, 0xf9a0b6720aaf6521U, 0xf808e40e8d5b3e69U,
0xb60b1d1230b20e04U, 0xb1c6f22b5e6f48c2U, 0x1e38aeb6360b1af3U,
0x25c6da63c38de1b0U, 0x579c487e5a38ad0eU, 0x2d835a9df0c6d851U,
0xf8e431456cf88e65U, 0x1b8e9ecb641b58ffU, 0xe272467e3d222f3fU,
0x5b0ed81dcc6abb0fU, 0x98e947129fc2b4e9U, 0x3f2398d747b36224U,
0x8eec7f0d19a03aadU, 0x1953cf68300424acU, 0x5fa8c3423c052dd7U,
0x3792f412cb06794dU, 0xe2bbd88bbee40bd0U, 0x5b6aceaeae9d0ec4U,
0xf245825a5a445275U, 0xeed6e2f0f0d56712U, 0x55464dd69685606bU,
0xaa97e14c3c26b886U, 0xd53dd99f4b3066a8U, 0xe546a8038efe4029U,
0xde98520472bdd033U, 0x963e66858f6d4440U, 0xdde7001379a44aa8U,
0x5560c018580d5d52U, 0xaab8f01e6e10b4a6U, 0xcab3961304ca70e8U,
0x3d607b97c5fd0d22U, 0x8cb89a7db77c506aU, 0x77f3608e92adb242U,
0x55f038b237591ed3U, 0x6b6c46dec52f6688U, 0x2323ac4b3b3da015U,
0xabec975e0a0d081aU, 0x96e7bd358c904a21U, 0x7e50d64177da2e54U,
0xdde50bd1d5d0b9e9U, 0x955e4ec64b44e864U, 0xbd5af13bef0b113eU,
0xecb1ad8aeacdd58eU, 0x67de18eda5814af2U, 0x80eacf948770ced7U,
0xa1258379a94d028dU, 0x096ee45813a04330U, 0x8bca9d6e188853fcU,
0x775ea264cf55347dU, 0x95364afe032a819dU, 0x3a83ddbd83f52204U,
0xc4926a9672793542U, 0x75b7053c0f178293U, 0x5324c68b12dd6338U,
0xd3f6fc16ebca5e03U, 0x88f4bb1ca6bcf584U, 0x2b31e9e3d06c32e5U,
0x3aff322e62439fcfU, 0x09befeb9fad487c2U, 0x4c2ebe687989a9b3U,
0x0f9d37014bf60a10U, 0x538484c19ef38c94U, 0x2865a5f206b06fb9U,
0xf93f87b7442e45d3U, 0xf78f69a51539d748U, 0xb573440e5a884d1bU,
0x31680a88f8953030U, 0xfdc20d2b36ba7c3dU, 0x3d32907604691b4cU,
0xa63f9a49c2c1b10fU, 0x0fcf80dc33721d53U, 0xd3c36113404ea4a8U,
0x645a1cac083126e9U, 0x3d70a3d70a3d70a3U, 0xccccccccccccccccU,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x0000000000000000U, 0x0000000000000000U,
0x0000000000000000U, 0x4000000000000000U, 0x5000000000000000U,
0xa400000000000000U, 0x4d00000000000000U, 0xf020000000000000U,
0x6c28000000000000U, 0xc732000000000000U, 0x3c7f400000000000U,
0x4b9f100000000000U, 0x1e86d40000000000U, 0x1314448000000000U,
0x17d955a000000000U, 0x5dcfab0800000000U, 0x5aa1cae500000000U,
0xf14a3d9e40000000U, 0x6d9ccd05d0000000U, 0xe4820023a2000000U,
0xdda2802c8a800000U, 0xd50b2037ad200000U, 0x4526f422cc340000U,
0x9670b12b7f410000U, 0x3c0cdd765f114000U, 0xa5880a69fb6ac800U,
0x8eea0d047a457a00U, 0x72a4904598d6d880U, 0x47a6da2b7f864750U,
0x999090b65f67d924U, 0xfff4b4e3f741cf6dU, 0xbff8f10e7a8921a4U,
0xaff72d52192b6a0dU, 0x9bf4f8a69f764490U, 0x02f236d04753d5b4U,
0x01d762422c946590U, 0x424d3ad2b7b97ef5U, 0xd2e0898765a7deb2U,
0x63cc55f49f88eb2fU, 0x3cbf6b71c76b25fbU, 0x8bef464e3945ef7aU,
0x97758bf0e3cbb5acU, 0x3d52eeed1cbea317U, 0x4ca7aaa863ee4bddU,
0x8fe8caa93e74ef6aU, 0xb3e2fd538e122b44U, 0x60dbbca87196b616U,
0xbc8955e946fe31cdU, 0x6babab6398bdbe41U, 0xc696963c7eed2dd1U,
0xfc1e1de5cf543ca2U, 0x3b25a55f43294bcbU, 0x49ef0eb713f39ebeU,
0x6e3569326c784337U, 0x49c2c37f07965404U, 0xdc33745ec97be906U,
0x69a028bb3ded71a3U, 0xc40832ea0d68ce0cU, 0xf50a3fa490c30190U,
0x792667c6da79e0faU, 0x577001b891185938U, 0xed4c0226b55e6f86U,
0x544f8158315b05b4U, 0x696361ae3db1c721U, 0x03bc3a19cd1e38e9U,
0x04ab48a04065c723U, 0x62eb0d64283f9c76U, 0x3ba5d0bd324f8394U,
0xca8f44ec7ee36479U, 0x7e998b13cf4e1ecbU, 0x9e3fedd8c321a67eU,
0xc5cfe94ef3ea101eU, 0xbba1f1d158724a12U, 0x2a8a6e45ae8edc97U,
0xf52d09d71a3293bdU, 0x593c2626705f9c56U, 0x6f8b2fb00c77836cU,
0x0b6dfb9c0f956447U, 0x4724bd4189bd5eacU, 0x58edec91ec2cb657U,
0x2f2967b66737e3edU, 0xbd79e0d20082ee74U, 0xecd8590680a3aa11U,
0xe80e6f4820cc9495U, 0x3109058d147fdcddU, 0xbd4b46f0599fd415U,
0x6c9e18ac7007c91aU, 0x03e2cf6bc604ddb0U, 0x84db8346b786151cU,
0xe612641865679a63U, 0x4fcb7e8f3f60c07eU, 0xe3be5e330f38f09dU,
0x5cadf5bfd3072cc5U, 0x73d9732fc7c8f7f6U, 0x2867e7fddcdd9afaU,
0xb281e1fd541501b8U, 0x1f225a7ca91a4226U, 0x3375788de9b06958U,
0x0052d6b1641c83aeU, 0xc0678c5dbd23a49aU, 0xf840b7ba963646e0U,
0xb650e5a93bc3d898U, 0xa3e51f138ab4cebeU, 0xc66f336c36b10137U,
0xb80b0047445d4184U, 0xa60dc059157491e5U, 0x87c89837ad68db2fU,
0x29babe4598c311fbU, 0xf4296dd6fef3d67aU, 0x1899e4a65f58660cU,
0x5ec05dcff72e7f8fU, 0x76707543f4fa1f73U, 0x6a06494a791c53a8U,
0x0487db9d17636892U, 0x45a9d2845d3c42b6U, 0x0b8a2392ba45a9b2U,
0x8e6cac7768d7141eU, 0x3207d795430cd926U, 0x7f44e6bd49e807b8U,
0x5f16206c9c6209a6U, 0x36dba887c37a8c0fU, 0xc2494954da2c9789U,
0xf2db9baa10b7bd6cU, 0x6f92829494e5acc7U, 0xcb772339ba1f17f9U,
0xff2a760414536efbU, 0xfef5138519684abaU, 0x7eb258665fc25d69U,
0xef2f773ffbd97a61U, 0xaafb550ffacfd8faU, 0x95ba2a53f983cf38U,
0xdd945a747bf26183U, 0x94f971119aeef9e4U, 0x7a37cd5601aab85dU,
0xac62e055c10ab33aU, 0x577b986b314d6009U, 0xed5a7e85fda0b80bU,
0x14588f13be847307U, 0x596eb2d8ae258fc8U, 0x6fca5f8ed9aef3bbU,
0x25de7bb9480d5854U, 0xaf561aa79a10ae6aU, 0x1b2ba1518094da04U,
0x90fb44d2f05d0842U, 0x353a1607ac744a53U, 0x42889b8997915ce8U,
0x69956135febada11U, 0x43fab9837e699095U, 0x94f967e45e03f4bbU,
0x1d1be0eebac278f5U, 0x6462d92a69731732U, 0x7d7b8f7503cfdcfeU,
0x5cda735244c3d43eU, 0x3a0888136afa64a7U, 0x088aaa1845b8fdd0U,
0x8aad549e57273d45U, 0x36ac54e2f678864bU, 0x84576a1bb416a7ddU,
0x656d44a2a11c51d5U, 0x9f644ae5a4b1b325U, 0x873d5d9f0dde1feeU,
0xa90cb506d155a7eaU, 0x09a7f12442d588f2U, 0x0c11ed6d538aeb2fU,
0x8f1668c8a86da5faU, 0xf96e017d694487bcU, 0x37c981dcc395a9acU,
0x85bbe253f47b1417U, 0x93956d7478ccec8eU, 0x387ac8d1970027b2U,
0x06997b05fcc0319eU, 0x441fece3bdf81f03U, 0xd527e81cad7626c3U,
0x8a71e223d8d3b074U, 0xf6872d5667844e49U, 0xb428f8ac016561dbU,
0xe13336d701beba52U, 0xecc0024661173473U, 0x27f002d7f95d0190U,
0x31ec038df7b441f4U, 0x7e67047175a15271U, 0x0f0062c6e984d386U,
0x52c07b78a3e60868U, 0xa7709a56ccdf8a82U, 0x88a66076400bb691U,
0x6acff893d00ea435U, 0x0583f6b8c4124d43U, 0xc3727a337a8b704aU,
0x744f18c0592e4c5cU, 0x1162def06f79df73U, 0x8addcb5645ac2ba8U,
0x6d953e2bd7173692U, 0xc8fa8db6ccdd0437U, 0x1d9c9892400a22a2U,
0x2503beb6d00cab4bU, 0x2e44ae64840fd61dU, 0x5ceaecfed289e5d2U,
0x7425a83e872c5f47U, 0xd12f124e28f77719U, 0x82bd6b70d99aaa6fU,
0x636cc64d1001550bU, 0x3c47f7e05401aa4eU, 0x65acfaec34810a71U,
0x7f1839a741a14d0dU, 0x1ede48111209a050U, 0x934aed0aab460432U,
0xf81da84d5617853fU, 0x36251260ab9d668eU, 0xc1d72b7c6b426019U,
0xb24cf65b8612f81fU, 0xdee033f26797b627U, 0x169840ef017da3b1U,
0x8e1f289560ee864eU, 0xf1a6f2bab92a27e2U, 0xae10af696774b1dbU,
0xacca6da1e0a8ef29U, 0x17fd090a58d32af3U, 0xddfc4b4cef07f5b0U,
0x4abdaf101564f98eU, 0x9d6d1ad41abe37f1U, 0x84c86189216dc5edU,
0x32fd3cf5b4e49bb4U, 0x3fbc8c33221dc2a1U, 0x0fabaf3feaa5334aU,
0x29cb4d87f2a7400eU, 0x743e20e9ef511012U, 0x914da9246b255416U,
0x1ad089b6c2f7548eU, 0xa184ac2473b529b1U, 0xc9e5d72d90a2741eU,
0x7e2fa67c7a658892U, 0xddbb901b98feeab7U, 0x552a74227f3ea565U,
0xd53a88958f87275fU, 0x8a892abaf368f137U, 0x2d2b7569b0432d85U,
0x9c3b29620e29fc73U, 0x8349f3ba91b47b8fU, 0x241c70a936219a73U,
0xed238cd383aa0110U, 0xf4363804324a40aaU, 0xb143c6053edcd0d5U,
0xdd94b7868e94050aU, 0xca7cf2b4191c8326U, 0xfd1c2f611f63a3f0U,
0xbc633b39673c8cecU, 0xd5be0503e085d813U, 0x4b2d8644d8a74e18U,
0xddf8e7d60ed1219eU, 0xcabb90e5c942b503U, 0x3d6a751f3b936243U,
0x0cc512670a783ad4U, 0x27fb2b80668b24c5U, 0xb1f9f660802dedf6U,
0x5e7873f8a0396973U, 0xdb0b487b6423e1e8U, 0x91ce1a9a3d2cda62U,
0x7641a140cc7810fbU, 0xa9e904c87fcb0a9dU, 0x546345fa9fbdcd44U,
0xa97c177947ad4095U, 0x49ed8eabcccc485dU, 0x5c68f256bfff5a74U,
0x73832eec6fff3111U, 0xc831fd53c5ff7eabU, 0xba3e7ca8b77f5e55U,
0x28ce1bd2e55f35ebU, 0x7980d163cf5b81b3U, 0xd7e105bcc332621fU,
0x8dd9472bf3fefaa7U, 0xb14f98f6f0feb951U, 0x6ed1bf9a569f33d3U,
0x0a862f80ec4700c8U, 0xcd27bb612758c0faU, 0x8038d51cb897789cU,
0xe0470a63e6bd56c3U, 0x1858ccfce06cac74U, 0x0f37801e0c43ebc8U,
0xd30560258f54e6baU, 0x47c6b82ef32a2069U, 0x4cdc331d57fa5441U,
0xe0133fe4adf8e952U, 0x58180fddd97723a6U, 0x570f09eaa7ea7648U,
};
}
ABSL_NAMESPACE_END
} | #include "absl/strings/charconv.h"
#include <cfloat>
#include <cmath>
#include <cstdlib>
#include <functional>
#include <limits>
#include <string>
#include <system_error>
#include "gtest/gtest.h"
#include "absl/strings/internal/pow10_helper.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#ifdef _MSC_FULL_VER
#define ABSL_COMPILER_DOES_EXACT_ROUNDING 0
#define ABSL_STRTOD_HANDLES_NAN_CORRECTLY 0
#else
#define ABSL_COMPILER_DOES_EXACT_ROUNDING 1
#define ABSL_STRTOD_HANDLES_NAN_CORRECTLY 1
#endif
namespace {
using absl::strings_internal::Pow10;
#if ABSL_COMPILER_DOES_EXACT_ROUNDING
void TestDoubleParse(absl::string_view str, double expected_number) {
SCOPED_TRACE(str);
double actual_number = 0.0;
absl::from_chars_result result =
absl::from_chars(str.data(), str.data() + str.length(), actual_number);
EXPECT_EQ(result.ec, std::errc());
EXPECT_EQ(result.ptr, str.data() + str.length());
EXPECT_EQ(actual_number, expected_number);
}
void TestFloatParse(absl::string_view str, float expected_number) {
SCOPED_TRACE(str);
float actual_number = 0.0;
absl::from_chars_result result =
absl::from_chars(str.data(), str.data() + str.length(), actual_number);
EXPECT_EQ(result.ec, std::errc());
EXPECT_EQ(result.ptr, str.data() + str.length());
EXPECT_EQ(actual_number, expected_number);
}
#define FROM_CHARS_TEST_DOUBLE(number) \
{ \
TestDoubleParse(#number, number); \
TestDoubleParse("-" #number, -number); \
}
#define FROM_CHARS_TEST_FLOAT(number) \
{ \
TestFloatParse(#number, number##f); \
TestFloatParse("-" #number, -number##f); \
}
TEST(FromChars, NearRoundingCases) {
FROM_CHARS_TEST_DOUBLE(5.e125);
FROM_CHARS_TEST_DOUBLE(69.e267);
FROM_CHARS_TEST_DOUBLE(999.e-026);
FROM_CHARS_TEST_DOUBLE(7861.e-034);
FROM_CHARS_TEST_DOUBLE(75569.e-254);
FROM_CHARS_TEST_DOUBLE(928609.e-261);
FROM_CHARS_TEST_DOUBLE(9210917.e080);
FROM_CHARS_TEST_DOUBLE(84863171.e114);
FROM_CHARS_TEST_DOUBLE(653777767.e273);
FROM_CHARS_TEST_DOUBLE(5232604057.e-298);
FROM_CHARS_TEST_DOUBLE(27235667517.e-109);
FROM_CHARS_TEST_DOUBLE(653532977297.e-123);
FROM_CHARS_TEST_DOUBLE(3142213164987.e-294);
FROM_CHARS_TEST_DOUBLE(46202199371337.e-072);
FROM_CHARS_TEST_DOUBLE(231010996856685.e-073);
FROM_CHARS_TEST_DOUBLE(9324754620109615.e212);
FROM_CHARS_TEST_DOUBLE(78459735791271921.e049);
FROM_CHARS_TEST_DOUBLE(272104041512242479.e200);
FROM_CHARS_TEST_DOUBLE(6802601037806061975.e198);
FROM_CHARS_TEST_DOUBLE(20505426358836677347.e-221);
FROM_CHARS_TEST_DOUBLE(836168422905420598437.e-234);
FROM_CHARS_TEST_DOUBLE(4891559871276714924261.e222);
FROM_CHARS_TEST_FLOAT(5.e-20);
FROM_CHARS_TEST_FLOAT(67.e14);
FROM_CHARS_TEST_FLOAT(985.e15);
FROM_CHARS_TEST_FLOAT(7693.e-42);
FROM_CHARS_TEST_FLOAT(55895.e-16);
FROM_CHARS_TEST_FLOAT(996622.e-44);
FROM_CHARS_TEST_FLOAT(7038531.e-32);
FROM_CHARS_TEST_FLOAT(60419369.e-46);
FROM_CHARS_TEST_FLOAT(702990899.e-20);
FROM_CHARS_TEST_FLOAT(6930161142.e-48);
FROM_CHARS_TEST_FLOAT(25933168707.e-13);
FROM_CHARS_TEST_FLOAT(596428896559.e20);
FROM_CHARS_TEST_DOUBLE(9.e-265);
FROM_CHARS_TEST_DOUBLE(85.e-037);
FROM_CHARS_TEST_DOUBLE(623.e100);
FROM_CHARS_TEST_DOUBLE(3571.e263);
FROM_CHARS_TEST_DOUBLE(81661.e153);
FROM_CHARS_TEST_DOUBLE(920657.e-023);
FROM_CHARS_TEST_DOUBLE(4603285.e-024);
FROM_CHARS_TEST_DOUBLE(87575437.e-309);
FROM_CHARS_TEST_DOUBLE(245540327.e122);
FROM_CHARS_TEST_DOUBLE(6138508175.e120);
FROM_CHARS_TEST_DOUBLE(83356057653.e193);
FROM_CHARS_TEST_DOUBLE(619534293513.e124);
FROM_CHARS_TEST_DOUBLE(2335141086879.e218);
FROM_CHARS_TEST_DOUBLE(36167929443327.e-159);
FROM_CHARS_TEST_DOUBLE(609610927149051.e-255);
FROM_CHARS_TEST_DOUBLE(3743626360493413.e-165);
FROM_CHARS_TEST_DOUBLE(94080055902682397.e-242);
FROM_CHARS_TEST_DOUBLE(899810892172646163.e283);
FROM_CHARS_TEST_DOUBLE(7120190517612959703.e120);
FROM_CHARS_TEST_DOUBLE(25188282901709339043.e-252);
FROM_CHARS_TEST_DOUBLE(308984926168550152811.e-052);
FROM_CHARS_TEST_DOUBLE(6372891218502368041059.e064);
FROM_CHARS_TEST_FLOAT(3.e-23);
FROM_CHARS_TEST_FLOAT(57.e18);
FROM_CHARS_TEST_FLOAT(789.e-35);
FROM_CHARS_TEST_FLOAT(2539.e-18);
FROM_CHARS_TEST_FLOAT(76173.e28);
FROM_CHARS_TEST_FLOAT(887745.e-11);
FROM_CHARS_TEST_FLOAT(5382571.e-37);
FROM_CHARS_TEST_FLOAT(82381273.e-35);
FROM_CHARS_TEST_FLOAT(750486563.e-38);
FROM_CHARS_TEST_FLOAT(3752432815.e-39);
FROM_CHARS_TEST_FLOAT(75224575729.e-45);
FROM_CHARS_TEST_FLOAT(459926601011.e15);
}
#undef FROM_CHARS_TEST_DOUBLE
#undef FROM_CHARS_TEST_FLOAT
#endif
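// Convenience wrappers that parse an entire string and return the value,
// ignoring the from_chars_result.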
float ToFloat(absl::string_view s) {
float f;
absl::from_chars(s.data(), s.data() + s.size(), f);
return f;
}
double ToDouble(absl::string_view s) {
double d;
absl::from_chars(s.data(), s.data() + s.size(), d);
return d;
}
TEST(FromChars, NearRoundingCasesExplicit) {
EXPECT_EQ(ToDouble("5.e125"), ldexp(6653062250012735, 365));
EXPECT_EQ(ToDouble("69.e267"), ldexp(4705683757438170, 841));
EXPECT_EQ(ToDouble("999.e-026"), ldexp(6798841691080350, -129));
EXPECT_EQ(ToDouble("7861.e-034"), ldexp(8975675289889240, -153));
EXPECT_EQ(ToDouble("75569.e-254"), ldexp(6091718967192243, -880));
EXPECT_EQ(ToDouble("928609.e-261"), ldexp(7849264900213743, -900));
EXPECT_EQ(ToDouble("9210917.e080"), ldexp(8341110837370930, 236));
EXPECT_EQ(ToDouble("84863171.e114"), ldexp(4625202867375927, 353));
EXPECT_EQ(ToDouble("653777767.e273"), ldexp(5068902999763073, 884));
EXPECT_EQ(ToDouble("5232604057.e-298"), ldexp(5741343011915040, -1010));
EXPECT_EQ(ToDouble("27235667517.e-109"), ldexp(6707124626673586, -380));
EXPECT_EQ(ToDouble("653532977297.e-123"), ldexp(7078246407265384, -422));
EXPECT_EQ(ToDouble("3142213164987.e-294"), ldexp(8219991337640559, -988));
EXPECT_EQ(ToDouble("46202199371337.e-072"), ldexp(5224462102115359, -246));
EXPECT_EQ(ToDouble("231010996856685.e-073"), ldexp(5224462102115359, -247));
EXPECT_EQ(ToDouble("9324754620109615.e212"), ldexp(5539753864394442, 705));
EXPECT_EQ(ToDouble("78459735791271921.e049"), ldexp(8388176519442766, 166));
EXPECT_EQ(ToDouble("272104041512242479.e200"), ldexp(5554409530847367, 670));
EXPECT_EQ(ToDouble("6802601037806061975.e198"), ldexp(5554409530847367, 668));
EXPECT_EQ(ToDouble("20505426358836677347.e-221"),
ldexp(4524032052079546, -722));
EXPECT_EQ(ToDouble("836168422905420598437.e-234"),
ldexp(5070963299887562, -760));
EXPECT_EQ(ToDouble("4891559871276714924261.e222"),
ldexp(6452687840519111, 757));
EXPECT_EQ(ToFloat("5.e-20"), ldexpf(15474250, -88));
EXPECT_EQ(ToFloat("67.e14"), ldexpf(12479722, 29));
EXPECT_EQ(ToFloat("985.e15"), ldexpf(14333636, 36));
EXPECT_EQ(ToFloat("7693.e-42"), ldexpf(10979816, -150));
EXPECT_EQ(ToFloat("55895.e-16"), ldexpf(12888509, -61));
EXPECT_EQ(ToFloat("996622.e-44"), ldexpf(14224264, -150));
EXPECT_EQ(ToFloat("7038531.e-32"), ldexpf(11420669, -107));
EXPECT_EQ(ToFloat("60419369.e-46"), ldexpf(8623340, -150));
EXPECT_EQ(ToFloat("702990899.e-20"), ldexpf(16209866, -61));
EXPECT_EQ(ToFloat("6930161142.e-48"), ldexpf(9891056, -150));
EXPECT_EQ(ToFloat("25933168707.e-13"), ldexpf(11138211, -32));
EXPECT_EQ(ToFloat("596428896559.e20"), ldexpf(12333860, 82));
EXPECT_EQ(ToDouble("9.e-265"), ldexp(8168427841980010, -930));
EXPECT_EQ(ToDouble("85.e-037"), ldexp(6360455125664090, -169));
EXPECT_EQ(ToDouble("623.e100"), ldexp(6263531988747231, 289));
EXPECT_EQ(ToDouble("3571.e263"), ldexp(6234526311072170, 833));
EXPECT_EQ(ToDouble("81661.e153"), ldexp(6696636728760206, 472));
EXPECT_EQ(ToDouble("920657.e-023"), ldexp(5975405561110124, -109));
EXPECT_EQ(ToDouble("4603285.e-024"), ldexp(5975405561110124, -110));
EXPECT_EQ(ToDouble("87575437.e-309"), ldexp(8452160731874668, -1053));
EXPECT_EQ(ToDouble("245540327.e122"), ldexp(4985336549131723, 381));
EXPECT_EQ(ToDouble("6138508175.e120"), ldexp(4985336549131723, 379));
EXPECT_EQ(ToDouble("83356057653.e193"), ldexp(5986732817132056, 625));
EXPECT_EQ(ToDouble("619534293513.e124"), ldexp(4798406992060657, 399));
EXPECT_EQ(ToDouble("2335141086879.e218"), ldexp(5419088166961646, 713));
EXPECT_EQ(ToDouble("36167929443327.e-159"), ldexp(8135819834632444, -536));
EXPECT_EQ(ToDouble("609610927149051.e-255"), ldexp(4576664294594737, -850));
EXPECT_EQ(ToDouble("3743626360493413.e-165"), ldexp(6898586531774201, -549));
EXPECT_EQ(ToDouble("94080055902682397.e-242"), ldexp(6273271706052298, -800));
EXPECT_EQ(ToDouble("899810892172646163.e283"), ldexp(7563892574477827, 947));
EXPECT_EQ(ToDouble("7120190517612959703.e120"), ldexp(5385467232557565, 409));
EXPECT_EQ(ToDouble("25188282901709339043.e-252"),
ldexp(5635662608542340, -825));
EXPECT_EQ(ToDouble("308984926168550152811.e-052"),
ldexp(5644774693823803, -157));
EXPECT_EQ(ToDouble("6372891218502368041059.e064"),
ldexp(4616868614322430, 233));
EXPECT_EQ(ToFloat("3.e-23"), ldexpf(9507380, -98));
EXPECT_EQ(ToFloat("57.e18"), ldexpf(12960300, 42));
EXPECT_EQ(ToFloat("789.e-35"), ldexpf(10739312, -130));
EXPECT_EQ(ToFloat("2539.e-18"), ldexpf(11990089, -72));
EXPECT_EQ(ToFloat("76173.e28"), ldexpf(9845130, 86));
EXPECT_EQ(ToFloat("887745.e-11"), ldexpf(9760860, -40));
EXPECT_EQ(ToFloat("5382571.e-37"), ldexpf(11447463, -124));
EXPECT_EQ(ToFloat("82381273.e-35"), ldexpf(8554961, -113));
EXPECT_EQ(ToFloat("750486563.e-38"), ldexpf(9975678, -120));
EXPECT_EQ(ToFloat("3752432815.e-39"), ldexpf(9975678, -121));
EXPECT_EQ(ToFloat("75224575729.e-45"), ldexpf(13105970, -137));
EXPECT_EQ(ToFloat("459926601011.e15"), ldexpf(12466336, 65));
}
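// Exercises rounding around an exact halfway point between two adjacent
// representable values: a decimal just below the halfway string must parse to
// expected_low, one just above it to expected_high, and the exact halfway
// string to expected_half (the tie-breaking result supplied by the caller).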
template <typename FloatType>
void TestHalfwayValue(const std::string& mantissa, int exponent,
FloatType expected_low, FloatType expected_high,
FloatType expected_half) {
std::string low_rep = mantissa;
low_rep[low_rep.size() - 1] -= 1;
absl::StrAppend(&low_rep, std::string(1000, '9'), "e", exponent);
FloatType actual_low = 0;
absl::from_chars(low_rep.data(), low_rep.data() + low_rep.size(), actual_low);
EXPECT_EQ(expected_low, actual_low);
std::string high_rep =
absl::StrCat(mantissa, std::string(1000, '0'), "1e", exponent);
FloatType actual_high = 0;
absl::from_chars(high_rep.data(), high_rep.data() + high_rep.size(),
actual_high);
EXPECT_EQ(expected_high, actual_high);
std::string halfway_rep = absl::StrCat(mantissa, "e", exponent);
FloatType actual_half = 0;
absl::from_chars(halfway_rep.data(), halfway_rep.data() + halfway_rep.size(),
actual_half);
EXPECT_EQ(expected_half, actual_half);
}
TEST(FromChars, DoubleRounding) {
const double zero = 0.0;
const double first_subnormal = nextafter(zero, 1.0);
const double second_subnormal = nextafter(first_subnormal, 1.0);
const double first_normal = DBL_MIN;
const double last_subnormal = nextafter(first_normal, 0.0);
const double second_normal = nextafter(first_normal, 1.0);
const double last_normal = DBL_MAX;
const double penultimate_normal = nextafter(last_normal, 0.0);
TestHalfwayValue(
"2."
"470328229206232720882843964341106861825299013071623822127928412503377536"
"351043759326499181808179961898982823477228588654633283551779698981993873"
"980053909390631503565951557022639229085839244910518443593180284993653615"
"250031937045767824921936562366986365848075700158576926990370631192827955"
"855133292783433840935197801553124659726357957462276646527282722005637400"
"648549997709659947045402082816622623785739345073633900796776193057750674"
"017632467360096895134053553745851666113422376667860416215968046191446729"
"184030053005753084904876539171138659164623952491262365388187963623937328"
"042389101867234849766823508986338858792562830275599565752445550725518931"
"369083625477918694866799496832404970582102851318545139621383772282614543"
"7693412532098591327667236328125",
-324, zero, first_subnormal, zero);
TestHalfwayValue(
"7."
"410984687618698162648531893023320585475897039214871466383785237510132609"
"053131277979497545424539885696948470431685765963899850655339096945981621"
"940161728171894510697854671067917687257517734731555330779540854980960845"
"750095811137303474765809687100959097544227100475730780971111893578483867"
"565399878350301522805593404659373979179073872386829939581848166016912201"
"945649993128979841136206248449867871357218035220901702390328579173252022"
"052897402080290685402160661237554998340267130003581248647904138574340187"
"552090159017259254714629617513415977493871857473787096164563890871811984"
"127167305601704549300470526959016576377688490826798697257336652176556794"
"107250876433756084600398490497214911746308553955635418864151316847843631"
"3080237596295773983001708984375",
-324, first_subnormal, second_subnormal, second_subnormal);
TestHalfwayValue(
"2."
"225073858507201136057409796709131975934819546351645648023426109724822222"
"021076945516529523908135087914149158913039621106870086438694594645527657"
"207407820621743379988141063267329253552286881372149012981122451451889849"
"057222307285255133155755015914397476397983411801999323962548289017107081"
"850690630666655994938275772572015763062690663332647565300009245888316433"
"037779791869612049497390377829704905051080609940730262937128958950003583"
"799967207254304360284078895771796150945516748243471030702609144621572289"
"880258182545180325707018860872113128079512233426288368622321503775666622"
"503982534335974568884423900265498198385487948292206894721689831099698365"
"846814022854243330660339850886445804001034933970427567186443383770486037"
"86162277173854562306587467901408672332763671875",
-308, last_subnormal, first_normal, first_normal);
TestHalfwayValue(
"2."
"225073858507201630123055637955676152503612414573018013083228724049586647"
"606759446192036794116886953213985520549032000903434781884412325572184367"
"563347617020518175998922941393629966742598285899994830148971433555578567"
"693279306015978183162142425067962460785295885199272493577688320732492479"
"924816869232247165964934329258783950102250973957579510571600738343645738"
"494324192997092179207389919761694314131497173265255020084997973676783743"
"155205818804439163810572367791175177756227497413804253387084478193655533"
"073867420834526162513029462022730109054820067654020201547112002028139700"
"141575259123440177362244273712468151750189745559978653234255886219611516"
"335924167958029604477064946470184777360934300451421683607013647479513962"
"13837722826145437693412532098591327667236328125",
-308, first_normal, second_normal, first_normal);
TestHalfwayValue(
"1."
"797693134862315608353258760581052985162070023416521662616611746258695532"
"672923265745300992879465492467506314903358770175220871059269879629062776"
"047355692132901909191523941804762171253349609463563872612866401980290377"
"995141836029815117562837277714038305214839639239356331336428021390916694"
"57927874464075218944",
308, penultimate_normal, last_normal, penultimate_normal);
}
TEST(FromChars, FloatRounding) {
const float zero = 0.0;
const float first_subnormal = nextafterf(zero, 1.0);
const float second_subnormal = nextafterf(first_subnormal, 1.0);
const float first_normal = FLT_MIN;
const float last_subnormal = nextafterf(first_normal, 0.0);
const float second_normal = nextafterf(first_normal, 1.0);
const float last_normal = FLT_MAX;
const float penultimate_normal = nextafterf(last_normal, 0.0);
TestHalfwayValue(
"7."
"006492321624085354618647916449580656401309709382578858785341419448955413"
"42930300743319094181060791015625",
-46, zero, first_subnormal, zero);
TestHalfwayValue(
"2."
"101947696487225606385594374934874196920392912814773657635602425834686624"
"028790902229957282543182373046875",
-45, first_subnormal, second_subnormal, second_subnormal);
TestHalfwayValue(
"1."
"175494280757364291727882991035766513322858992758990427682963118425003064"
"9651730385585324256680905818939208984375",
-38, last_subnormal, first_normal, first_normal);
TestHalfwayValue(
"1."
"175494420887210724209590083408724842314472120785184615334540294131831453"
"9442813071445925743319094181060791015625",
-38, first_normal, second_normal, first_normal);
TestHalfwayValue("3.40282336497324057985868971510891282432", 38,
penultimate_normal, last_normal, penultimate_normal);
}
TEST(FromChars, Underflow) {
double d;
float f;
absl::from_chars_result result;
std::string negative_underflow = "-1e-1000";
const char* begin = negative_underflow.data();
const char* end = begin + negative_underflow.size();
d = 100.0;
result = absl::from_chars(begin, end, d);
EXPECT_EQ(result.ptr, end);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_TRUE(std::signbit(d));
EXPECT_GE(d, -std::numeric_limits<double>::min());
f = 100.0;
result = absl::from_chars(begin, end, f);
EXPECT_EQ(result.ptr, end);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_TRUE(std::signbit(f));
EXPECT_GE(f, -std::numeric_limits<float>::min());
std::string positive_underflow = "1e-1000";
begin = positive_underflow.data();
end = begin + positive_underflow.size();
d = -100.0;
result = absl::from_chars(begin, end, d);
EXPECT_EQ(result.ptr, end);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_FALSE(std::signbit(d));
EXPECT_LE(d, std::numeric_limits<double>::min());
f = -100.0;
result = absl::from_chars(begin, end, f);
EXPECT_EQ(result.ptr, end);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_FALSE(std::signbit(f));
EXPECT_LE(f, std::numeric_limits<float>::min());
}
TEST(FromChars, Overflow) {
double d;
float f;
absl::from_chars_result result;
std::string negative_overflow = "-1e1000";
const char* begin = negative_overflow.data();
const char* end = begin + negative_overflow.size();
d = 100.0;
result = absl::from_chars(begin, end, d);
EXPECT_EQ(result.ptr, end);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_TRUE(std::signbit(d));
EXPECT_EQ(d, -std::numeric_limits<double>::max());
f = 100.0;
result = absl::from_chars(begin, end, f);
EXPECT_EQ(result.ptr, end);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_TRUE(std::signbit(f));
EXPECT_EQ(f, -std::numeric_limits<float>::max());
std::string positive_overflow = "1e1000";
begin = positive_overflow.data();
end = begin + positive_overflow.size();
d = -100.0;
result = absl::from_chars(begin, end, d);
EXPECT_EQ(result.ptr, end);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_FALSE(std::signbit(d));
EXPECT_EQ(d, std::numeric_limits<double>::max());
f = -100.0;
result = absl::from_chars(begin, end, f);
EXPECT_EQ(result.ptr, end);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_FALSE(std::signbit(f));
EXPECT_EQ(f, std::numeric_limits<float>::max());
}
TEST(FromChars, RegressionTestsFromFuzzer) {
absl::string_view src = "0x21900000p00000000099";
float f;
auto result = absl::from_chars(src.data(), src.data() + src.size(), f);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
}
TEST(FromChars, ReturnValuePtr) {
double d;
absl::from_chars_result result;
std::string normal = "3.14@#$%@#$%";
result = absl::from_chars(normal.data(), normal.data() + normal.size(), d);
EXPECT_EQ(result.ec, std::errc());
EXPECT_EQ(result.ptr - normal.data(), 4);
std::string overflow = "1e1000@#$%@#$%";
result = absl::from_chars(overflow.data(),
overflow.data() + overflow.size(), d);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_EQ(result.ptr - overflow.data(), 6);
std::string garbage = "#$%@#$%";
result = absl::from_chars(garbage.data(),
garbage.data() + garbage.size(), d);
EXPECT_EQ(result.ec, std::errc::invalid_argument);
EXPECT_EQ(result.ptr - garbage.data(), 0);
}
TEST(FromChars, TestVersusStrtod) {
for (int mantissa = 1000000; mantissa <= 9999999; mantissa += 501) {
for (int exponent = -300; exponent < 300; ++exponent) {
std::string candidate = absl::StrCat(mantissa, "e", exponent);
double strtod_value = strtod(candidate.c_str(), nullptr);
double absl_value = 0;
absl::from_chars(candidate.data(), candidate.data() + candidate.size(),
absl_value);
ASSERT_EQ(strtod_value, absl_value) << candidate;
}
}
}
TEST(FromChars, TestVersusStrtof) {
for (int mantissa = 1000000; mantissa <= 9999999; mantissa += 501) {
for (int exponent = -43; exponent < 32; ++exponent) {
std::string candidate = absl::StrCat(mantissa, "e", exponent);
      float strtof_value = strtof(candidate.c_str(), nullptr);
float absl_value = 0;
absl::from_chars(candidate.data(), candidate.data() + candidate.size(),
absl_value);
      ASSERT_EQ(strtof_value, absl_value) << candidate;
}
}
}
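// Bitwise comparison, so NaN payloads and signs are checked exactly.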
template <typename Float>
bool Identical(Float a, Float b) {
return 0 == memcmp(&a, &b, sizeof(Float));
}
TEST(FromChars, NaNDoubles) {
for (std::string n_char_sequence :
{"", "1", "2", "3", "fff", "FFF", "200000", "400000", "4000000000000",
"8000000000000", "abc123", "legal_but_unexpected",
"99999999999999999999999", "_"}) {
std::string input = absl::StrCat("nan(", n_char_sequence, ")");
SCOPED_TRACE(input);
double from_chars_double;
absl::from_chars(input.data(), input.data() + input.size(),
from_chars_double);
double std_nan_double = std::nan(n_char_sequence.c_str());
EXPECT_TRUE(Identical(from_chars_double, std_nan_double));
#if ABSL_STRTOD_HANDLES_NAN_CORRECTLY
double strtod_double = strtod(input.c_str(), nullptr);
EXPECT_TRUE(Identical(from_chars_double, strtod_double));
#endif
std::string negative_input = "-" + input;
double negative_from_chars_double;
absl::from_chars(negative_input.data(),
negative_input.data() + negative_input.size(),
negative_from_chars_double);
EXPECT_TRUE(std::signbit(negative_from_chars_double));
EXPECT_FALSE(Identical(negative_from_chars_double, from_chars_double));
from_chars_double = std::copysign(from_chars_double, -1.0);
EXPECT_TRUE(Identical(negative_from_chars_double, from_chars_double));
}
}
TEST(FromChars, NaNFloats) {
for (std::string n_char_sequence :
{"", "1", "2", "3", "fff", "FFF", "200000", "400000", "4000000000000",
"8000000000000", "abc123", "legal_but_unexpected",
"99999999999999999999999", "_"}) {
std::string input = absl::StrCat("nan(", n_char_sequence, ")");
SCOPED_TRACE(input);
float from_chars_float;
absl::from_chars(input.data(), input.data() + input.size(),
from_chars_float);
float std_nan_float = std::nanf(n_char_sequence.c_str());
EXPECT_TRUE(Identical(from_chars_float, std_nan_float));
#if ABSL_STRTOD_HANDLES_NAN_CORRECTLY
float strtof_float = strtof(input.c_str(), nullptr);
EXPECT_TRUE(Identical(from_chars_float, strtof_float));
#endif
std::string negative_input = "-" + input;
float negative_from_chars_float;
absl::from_chars(negative_input.data(),
negative_input.data() + negative_input.size(),
negative_from_chars_float);
EXPECT_TRUE(std::signbit(negative_from_chars_float));
EXPECT_FALSE(Identical(negative_from_chars_float, from_chars_float));
from_chars_float = std::copysign(from_chars_float, -1.0f);
EXPECT_TRUE(Identical(negative_from_chars_float, from_chars_float));
}
}
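// Grows the sweep step geometrically (step += step / 4 + 1) so the
// overflow/underflow scans below cover a wide index range in few iterations.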
int NextStep(int step) {
return step + (step >> 2) + 1;
}
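// Checks that inputs generated for indices within [lower_bound, upper_bound]
// parse to the expected finite values, while indices outside that range
// report std::errc::result_out_of_range (underflow below, overflow above).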
template <typename Float>
void TestOverflowAndUnderflow(
const std::function<std::string(int)>& input_generator,
const std::function<Float(int)>& expected_generator, int lower_bound,
int upper_bound) {
int index, step;
for (index = lower_bound, step = 1; index < upper_bound;
index += step, step = NextStep(step)) {
std::string input = input_generator(index);
SCOPED_TRACE(input);
Float expected = expected_generator(index);
Float actual;
auto result =
absl::from_chars(input.data(), input.data() + input.size(), actual);
EXPECT_EQ(result.ec, std::errc());
EXPECT_EQ(expected, actual)
<< absl::StrFormat("%a vs %a", expected, actual);
}
for (index = upper_bound, step = 1; index > lower_bound;
index -= step, step = NextStep(step)) {
std::string input = input_generator(index);
SCOPED_TRACE(input);
Float expected = expected_generator(index);
Float actual;
auto result =
absl::from_chars(input.data(), input.data() + input.size(), actual);
EXPECT_EQ(result.ec, std::errc());
EXPECT_EQ(expected, actual)
<< absl::StrFormat("%a vs %a", expected, actual);
}
for (index = lower_bound - 1, step = 1; index > -1000000;
index -= step, step = NextStep(step)) {
std::string input = input_generator(index);
SCOPED_TRACE(input);
Float actual;
auto result =
absl::from_chars(input.data(), input.data() + input.size(), actual);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_LT(actual, 1.0);
}
for (index = upper_bound + 1, step = 1; index < 1000000;
index += step, step = NextStep(step)) {
std::string input = input_generator(index);
SCOPED_TRACE(input);
Float actual;
auto result =
absl::from_chars(input.data(), input.data() + input.size(), actual);
EXPECT_EQ(result.ec, std::errc::result_out_of_range);
EXPECT_GT(actual, 1.0);
}
}
TEST(FromChars, HexadecimalDoubleLimits) {
auto input_gen = [](int index) { return absl::StrCat("0x1.0p", index); };
auto expected_gen = [](int index) { return std::ldexp(1.0, index); };
TestOverflowAndUnderflow<double>(input_gen, expected_gen, -1074, 1023);
}
TEST(FromChars, HexadecimalFloatLimits) {
auto input_gen = [](int index) { return absl::StrCat("0x1.0p", index); };
auto expected_gen = [](int index) { return std::ldexp(1.0f, index); };
TestOverflowAndUnderflow<float>(input_gen, expected_gen, -149, 127);
}
TEST(FromChars, DecimalDoubleLimits) {
auto input_gen = [](int index) { return absl::StrCat("1.0e", index); };
auto expected_gen = [](int index) { return Pow10(index); };
TestOverflowAndUnderflow<double>(input_gen, expected_gen, -323, 308);
}
TEST(FromChars, DecimalFloatLimits) {
auto input_gen = [](int index) { return absl::StrCat("1.0e", index); };
auto expected_gen = [](int index) { return Pow10(index); };
TestOverflowAndUnderflow<float>(input_gen, expected_gen, -45, 38);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/charconv.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/charconv_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
ea7c33c9-5fe6-4f44-bea3-c27959c14017 | cpp | google/arolla | lambda_expr_operator | arolla/expr/lambda_expr_operator.cc | arolla/expr/lambda_expr_operator_test.cc | #include "arolla/expr/lambda_expr_operator.h"
#include <cstddef>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_debug_string.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/expr_visitor.h"
#include "arolla/expr/qtype_utils.h"
#include "arolla/expr/tuple_expr_operator.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr {
namespace {
constexpr absl::string_view kDefaultLambdaOperatorName = "anonymous.lambda";
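// Checks that the lambda body contains no leaf nodes and that every
// placeholder node is dependency-free and uniquely identified by its key.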
absl::Status ValidateLambdaBody(const PostOrder& lambda_body_post_order) {
for (const auto& node : lambda_body_post_order.nodes()) {
if (node->is_leaf()) {
return absl::InvalidArgumentError(
"leaf nodes are not permitted within the lambda body");
}
}
for (const auto& node : lambda_body_post_order.nodes()) {
if (node->is_placeholder() && !node->node_deps().empty()) {
return absl::InvalidArgumentError(
"no placeholder nodes with dependencies permitted within the "
"lambda "
"body");
}
}
absl::flat_hash_set<absl::string_view> placeholder_keys;
for (const auto& node : lambda_body_post_order.nodes()) {
if (node->is_placeholder() &&
!placeholder_keys.emplace(node->placeholder_key()).second) {
return absl::InternalError(
"placeholder's key must unique identify the node");
}
}
return absl::OkStatus();
}
}
absl::StatusOr<std::shared_ptr<LambdaOperator>> LambdaOperator::Make(
ExprNodePtr lambda_body) {
return LambdaOperator::Make(kDefaultLambdaOperatorName,
std::move(lambda_body));
}
absl::StatusOr<std::shared_ptr<LambdaOperator>> LambdaOperator::Make(
absl::string_view operator_name, ExprNodePtr lambda_body) {
auto placeholders = GetPlaceholderKeys(lambda_body);
if (placeholders.empty()) {
return absl::InvalidArgumentError(
"exactly one placeholder expected, but none were found");
} else if (placeholders.size() > 1) {
return absl::InvalidArgumentError(absl::StrFormat(
"exactly one placeholder expected, but %d are found: P.%s",
placeholders.size(), absl::StrJoin(placeholders, ", P.")));
}
return LambdaOperator::Make(operator_name,
ExprOperatorSignature{{placeholders[0]}},
std::move(lambda_body), "");
}
absl::StatusOr<std::shared_ptr<LambdaOperator>> LambdaOperator::Make(
const ExprOperatorSignature& lambda_signature, ExprNodePtr lambda_body) {
return LambdaOperator::Make(kDefaultLambdaOperatorName, lambda_signature,
std::move(lambda_body), "");
}
absl::StatusOr<std::shared_ptr<LambdaOperator>> LambdaOperator::Make(
absl::string_view operator_name,
const ExprOperatorSignature& lambda_signature, ExprNodePtr lambda_body) {
return LambdaOperator::Make(operator_name, lambda_signature,
std::move(lambda_body), "");
}
absl::StatusOr<std::shared_ptr<LambdaOperator>> LambdaOperator::Make(
absl::string_view operator_name,
const ExprOperatorSignature& lambda_signature, ExprNodePtr lambda_body,
absl::string_view doc) {
RETURN_IF_ERROR(ValidateSignature(lambda_signature));
auto lambda_body_post_order = PostOrder(lambda_body);
RETURN_IF_ERROR(ValidateLambdaBody(lambda_body_post_order));
absl::flat_hash_map<absl::string_view, bool> lambda_param_used;
for (const auto& param : lambda_signature.parameters) {
lambda_param_used.emplace(param.name, false);
}
for (const auto& node : lambda_body_post_order.nodes()) {
if (!node->is_placeholder()) {
continue;
}
const auto it = lambda_param_used.find(node->placeholder_key());
if (it == lambda_param_used.end()) {
return absl::InvalidArgumentError(
absl::StrCat("P.", node->placeholder_key(),
" is missing in the list of lambda parameters"));
}
it->second = true;
}
for (const auto& param : lambda_signature.parameters) {
if (!(absl::StartsWith(param.name, "unused") ||
absl::StartsWith(param.name, "_")) &&
!lambda_param_used[param.name]) {
LOG(WARNING) << "Unused lambda parameter: '" << param.name << "' in "
<< operator_name;
}
}
auto fingerprint = FingerprintHasher("arolla::expr::LambdaOperator")
.Combine(operator_name, lambda_signature,
lambda_body->fingerprint(), doc)
.Finish();
return std::make_shared<LambdaOperator>(
PrivateConstrutorTag{}, operator_name, lambda_signature,
std::move(lambda_body_post_order), doc, fingerprint);
}
LambdaOperator::LambdaOperator(PrivateConstrutorTag, absl::string_view name,
const ExprOperatorSignature& signature,
PostOrder lambda_body_post_order,
absl::string_view doc, Fingerprint fingerprint)
: ExprOperatorWithFixedSignature(name, signature, doc, fingerprint),
lambda_body_post_order_(std::move(lambda_body_post_order)) {
absl::flat_hash_map<absl::string_view, size_t> sig_param_indices;
sig_param_indices.reserve(signature.parameters.size());
for (size_t i = 0; i < signature.parameters.size(); ++i) {
sig_param_indices[signature.parameters[i].name] = i;
}
lambda_param_indices_.resize(signature.parameters.size(),
std::numeric_limits<size_t>::max());
for (size_t i = 0; i < lambda_body_post_order_.nodes_size(); ++i) {
const auto& node = lambda_body_post_order_.node(i);
if (node->is_placeholder()) {
lambda_param_indices_[sig_param_indices.at(node->placeholder_key())] = i;
}
}
}
namespace {
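// Packs the variadic tail of arguments (or their attributes) into a single
// tuple, so the lambda body sees one value for the variadic parameter.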
absl::StatusOr<ExprNodePtr> WrapAsTuple(absl::Span<const ExprNodePtr> fields) {
return MakeOpNode(MakeTupleOperator::Make(),
std::vector<ExprNodePtr>(fields.begin(), fields.end()));
}
ExprAttributes WrapAsTuple(absl::Span<const ExprAttributes> field_attrs) {
return MakeTupleOperator::StaticInferAttributes(field_attrs);
}
}
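// Expands a call of this lambda: placeholders in the stored post-order body
// are replaced with the call's argument expressions (the variadic tail packed
// into a tuple), and the remaining nodes are rebuilt bottom-up with the new
// dependencies.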
absl::StatusOr<ExprNodePtr> LambdaOperator::ToLowerLevel(
const ExprNodePtr& node) const {
RETURN_IF_ERROR(ValidateNodeDepsCount(*node));
std::vector<ExprNodePtr> result(lambda_body_post_order_.nodes_size());
if (!lambda_param_indices_.empty()) {
const auto inputs = absl::MakeConstSpan(node->node_deps());
for (size_t i = 0; i + 1 < lambda_param_indices_.size(); ++i) {
if (lambda_param_indices_[i] != std::numeric_limits<size_t>::max()) {
result[lambda_param_indices_[i]] = inputs[i];
}
}
if (lambda_param_indices_.back() != std::numeric_limits<size_t>::max()) {
if (HasVariadicParameter(signature())) {
ASSIGN_OR_RETURN(
result[lambda_param_indices_.back()],
WrapAsTuple(inputs.subspan(lambda_param_indices_.size() - 1)));
} else {
result[lambda_param_indices_.back()] = inputs.back();
}
}
}
for (size_t i = 0; i < lambda_body_post_order_.nodes_size(); ++i) {
const auto& original_node = lambda_body_post_order_.node(i);
if (original_node->is_placeholder()) {
continue;
}
if (original_node->is_literal()) {
result[i] = original_node;
continue;
}
DCHECK(original_node->is_op());
const auto& dep_indices = lambda_body_post_order_.dep_indices(i);
std::vector<ExprNodePtr> deps(dep_indices.size());
for (size_t j = 0; j < dep_indices.size(); ++j) {
deps[j] = result[dep_indices[j]];
}
if (i + 1 < lambda_body_post_order_.nodes_size() ||
node->attr().IsEmpty()) {
ASSIGN_OR_RETURN(result[i],
WithNewDependencies(original_node, std::move(deps)));
} else {
#ifndef NDEBUG
auto attr = original_node->op()->InferAttributes(GetExprAttrs(deps));
DCHECK(attr.ok() && attr->IsIdenticalTo(node->attr()));
#endif
result[i] = ExprNode::UnsafeMakeOperatorNode(
ExprOperatorPtr(original_node->op()), std::move(deps),
ExprAttributes(node->attr()));
}
}
return result.back();
}
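// Propagates the input attributes through the stored lambda body to deduce
// the output attributes without lowering the node.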
absl::StatusOr<ExprAttributes> LambdaOperator::InferAttributes(
absl::Span<const ExprAttributes> inputs) const {
RETURN_IF_ERROR(ValidateOpInputsCount(inputs));
std::vector<ExprAttributes> results(lambda_body_post_order_.nodes_size());
if (!lambda_param_indices_.empty()) {
for (size_t i = 0; i + 1 < lambda_param_indices_.size(); ++i) {
if (lambda_param_indices_[i] != std::numeric_limits<size_t>::max()) {
results[lambda_param_indices_[i]] = inputs[i];
}
}
if (lambda_param_indices_.back() != std::numeric_limits<size_t>::max()) {
if (HasVariadicParameter(signature())) {
results[lambda_param_indices_.back()] =
WrapAsTuple(inputs.subspan(lambda_param_indices_.size() - 1));
} else {
results[lambda_param_indices_.back()] = inputs.back();
}
}
}
std::vector<ExprAttributes> deps;
for (size_t i = 0; i < lambda_body_post_order_.nodes_size(); ++i) {
const auto& original_node = lambda_body_post_order_.node(i);
if (original_node->is_placeholder()) {
continue;
}
if (const auto& attr = original_node->attr(); attr.qvalue().has_value()) {
results[i] = attr;
continue;
}
DCHECK(original_node->is_op());
const auto& dep_indices = lambda_body_post_order_.dep_indices(i);
deps.resize(dep_indices.size());
for (size_t j = 0; j < dep_indices.size(); ++j) {
deps[j] = results[dep_indices[j]];
}
ASSIGN_OR_RETURN(results[i], original_node->op()->InferAttributes(deps),
_ << "while deducing output type for "
<< GetDebugSnippet(original_node));
}
return results.back();
}
absl::string_view LambdaOperator::py_qvalue_specialization_key() const {
return "::arolla::expr::LambdaOperator";
}
namespace {
absl::StatusOr<ExprOperatorPtr> IgnoreUnusedParametersOp() {
static const absl::NoDestructor<absl::StatusOr<ExprOperatorPtr>> result(
MakeLambdaOperator("ignore_unused_parameters",
ExprOperatorSignature::Make("expr, *unused"),
Placeholder("expr")));
return *result;
}
}
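// Wraps `expr` into ignore_unused_parameters(expr, P.<name>...) so that the
// listed parameter names occur in the lambda body and LambdaOperator::Make
// does not log an "Unused lambda parameter" warning for them.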
absl::StatusOr<ExprNodePtr> SuppressUnusedWarning(
absl::string_view unused_parameters, absl::StatusOr<ExprNodePtr> expr) {
std::vector<absl::string_view> unused_parameter_names = absl::StrSplit(
unused_parameters, absl::ByAnyChar(", "), absl::SkipEmpty());
std::vector<absl::StatusOr<ExprNodePtr>> args;
args.reserve(1 + unused_parameter_names.size());
args.push_back(std::move(expr));
for (absl::string_view name : unused_parameter_names) {
args.push_back(Placeholder(name));
}
return CallOp(IgnoreUnusedParametersOp(), std::move(args));
}
} | #include "arolla/expr/lambda_expr_operator.h"
#include <cstdint>
#include <memory>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/expr/annotation_expr_operators.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/testing/testing.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/qtype/typed_ref.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/bytes.h"
namespace arolla::expr {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::arolla::testing::EqualsAttr;
using ::arolla::testing::EqualsExpr;
using ::arolla::testing::InvokeExprOperator;
using ::arolla::testing::WithQTypeAnnotation;
using ::testing::ElementsAre;
using ::testing::HasSubstr;
using Attr = ExprAttributes;
TEST(LambdaOperatorTest, NoParameters) {
auto foobar = Literal<int32_t>(0xf00baa);
ASSERT_OK_AND_ASSIGN(
const auto lambda_op,
LambdaOperator::Make("foo.bar", ExprOperatorSignature{}, foobar));
EXPECT_EQ(lambda_op->display_name(), "foo.bar");
{
EXPECT_THAT(CallOp(lambda_op, {Leaf("x")}),
StatusIs(absl::StatusCode::kInvalidArgument));
}
ASSERT_OK_AND_ASSIGN(const auto folded_expr, CallOp(lambda_op, {}));
ASSERT_OK_AND_ASSIGN(const auto expected_folded_expr,
MakeOpNode(lambda_op, {}));
EXPECT_THAT(folded_expr, EqualsExpr(expected_folded_expr));
ASSERT_OK_AND_ASSIGN(const auto unfolded_expr, ToLowerNode(folded_expr));
EXPECT_THAT(unfolded_expr, EqualsExpr(foobar));
EXPECT_EQ(lambda_op->doc(), "");
EXPECT_THAT(lambda_op->GetDoc(), IsOkAndHolds(""));
}
TEST(LambdaOperatorTest, SingleArgument) {
auto f1 = Literal<float>(1.0);
auto p0 = Placeholder("p0");
auto p1 = Placeholder("p1");
{
EXPECT_THAT(LambdaOperator::Make(f1),
StatusIs(absl::StatusCode::kInvalidArgument));
}
{
ASSERT_OK_AND_ASSIGN(auto lambda_body, CallOp("math.add", {p0, p1}));
EXPECT_THAT(LambdaOperator::Make(lambda_body),
StatusIs(absl::StatusCode::kInvalidArgument));
}
{
ASSERT_OK_AND_ASSIGN(auto lambda_body, CallOp("math.add", {p0, f1}));
ASSERT_OK_AND_ASSIGN(auto lambda_op, LambdaOperator::Make(lambda_body));
ASSERT_OK_AND_ASSIGN(
const auto expected_lambda_op,
LambdaOperator::Make(ExprOperatorSignature{{"p0"}}, lambda_body));
EXPECT_EQ(lambda_op->fingerprint(), expected_lambda_op->fingerprint());
}
{
ASSERT_OK_AND_ASSIGN(auto lambda_body, CallOp("math.add", {p0, f1}));
ASSERT_OK_AND_ASSIGN(auto lambda_op,
LambdaOperator::Make("op.name", lambda_body));
ASSERT_OK_AND_ASSIGN(
const auto expected_lambda_op,
LambdaOperator::Make("op.name", ExprOperatorSignature{{"p0"}},
lambda_body));
EXPECT_EQ(lambda_op->fingerprint(), expected_lambda_op->fingerprint());
EXPECT_EQ(lambda_op->display_name(), "op.name");
}
}
TEST(LambdaOperatorTest, General) {
auto x = Leaf("x");
auto y = Leaf("y");
auto z = Literal(0);
auto p0 = Placeholder("p0");
auto p1 = Placeholder("p1");
ASSERT_OK_AND_ASSIGN(auto lambda_signature,
ExprOperatorSignature::Make("p0, p1=", 0));
ASSERT_OK_AND_ASSIGN(auto lambda_body, CallOp("math.add", {p0, p1}));
ASSERT_OK_AND_ASSIGN(auto lambda_op,
LambdaOperator::Make(lambda_signature, lambda_body));
EXPECT_EQ(lambda_op->display_name(), "anonymous.lambda");
EXPECT_THAT(lambda_op->lambda_body(), EqualsExpr(lambda_body));
{
EXPECT_THAT(CallOp(lambda_op, {}),
StatusIs(absl::StatusCode::kInvalidArgument));
}
{
EXPECT_THAT(CallOp(lambda_op, {x, x, x}),
StatusIs(absl::StatusCode::kInvalidArgument));
}
{
ASSERT_OK_AND_ASSIGN(auto folded_expr, CallOp(lambda_op, {x}));
ASSERT_OK_AND_ASSIGN(auto expected_folded_expr,
MakeOpNode(lambda_op, {x, z}));
EXPECT_THAT(folded_expr, EqualsExpr(expected_folded_expr));
ASSERT_OK_AND_ASSIGN(auto unfolded_expr, ToLowerNode(folded_expr));
ASSERT_OK_AND_ASSIGN(auto expected_unfolded_expr,
CallOp("math.add", {x, z}));
EXPECT_THAT(unfolded_expr, EqualsExpr(expected_unfolded_expr));
}
{
ASSERT_OK_AND_ASSIGN(auto folded_expr, CallOp(lambda_op, {x, y}));
ASSERT_OK_AND_ASSIGN(auto expected_folded_expr,
MakeOpNode(lambda_op, {x, y}));
EXPECT_THAT(folded_expr, EqualsExpr(expected_folded_expr));
ASSERT_OK_AND_ASSIGN(auto unfolded_expr, ToLowerNode(folded_expr));
ASSERT_OK_AND_ASSIGN(auto expected_unfolded_expr,
CallOp("math.add", {x, y}));
EXPECT_THAT(unfolded_expr, EqualsExpr(expected_unfolded_expr));
}
}
TEST(LambdaOperatorTest, MakeLambdaOperator) {
ASSERT_OK_AND_ASSIGN(
auto lambda_op,
MakeLambdaOperator(
ExprOperatorSignature::Make("x, y"),
CallOp("math.add", {Placeholder("x"), Placeholder("y")})));
EXPECT_EQ(lambda_op->display_name(), "anonymous.lambda");
EXPECT_THAT(
MakeLambdaOperator(
absl::StatusOr<ExprOperatorSignature>(
absl::FailedPreconditionError("~~~")),
CallOp("math.add", {Placeholder("x"), Placeholder("y")})),
StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("~~~")));
}
TEST(LambdaOperatorTest, QTypePropagation) {
ASSERT_OK_AND_ASSIGN(auto lambda_signature,
ExprOperatorSignature::Make("x, y"));
ASSERT_OK_AND_ASSIGN(
auto lambda_body,
CallOp("math.add", {Placeholder("x"), Placeholder("y")}));
ASSERT_OK_AND_ASSIGN(lambda_body,
CallOp("math.add", {lambda_body, Placeholder("y")}));
ASSERT_OK_AND_ASSIGN(
auto lambda_op,
LambdaOperator::Make("test.lambda", lambda_signature, lambda_body));
ASSERT_OK_AND_ASSIGN(
const auto called_lambda,
CallOp(lambda_op, {Literal<int64_t>(57), Literal<int64_t>(57)}));
EXPECT_THAT(called_lambda->qtype(), GetQType<int64_t>());
EXPECT_THAT(
CallOp(lambda_op, {Literal(Bytes{""}), Literal<int64_t>(57)}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr(
"while deducing output type for M.math.add(P.x, P.y); while "
"calling test.lambda with args {b'', int64{57}}")));
}
TEST(LambdaOperatorTest, QValuePropagation) {
ASSERT_OK_AND_ASSIGN(auto op,
MakeLambdaOperator("test.lambda", Placeholder("x")));
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {Literal(1)}));
EXPECT_THAT(expr->attr(), EqualsAttr(TypedRef::FromValue(1)));
}
TEST(LambdaOperatorTest, BadLambdaBody) {
const ExprOperatorSignature lambda_signature{{"p"}};
EXPECT_OK(LambdaOperator::Make(lambda_signature, Placeholder("p")));
EXPECT_THAT(
LambdaOperator::Make(lambda_signature, Placeholder("missing_parameter")),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(LambdaOperator::Make(lambda_signature, Leaf("p")),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(LambdaOperatorTest, VariadicArg) {
ASSERT_OK_AND_ASSIGN(
auto head_op,
MakeLambdaOperator(ExprOperatorSignature::Make("head, *_tail"),
Placeholder("head")));
ASSERT_OK_AND_ASSIGN(
auto tail_op,
MakeLambdaOperator(ExprOperatorSignature::Make("_head, *tail"),
Placeholder("tail")));
ASSERT_OK_AND_ASSIGN(auto h,
InvokeExprOperator<TypedValue>(head_op, 0.f, 1.f, 2.f));
ASSERT_OK_AND_ASSIGN(auto t,
InvokeExprOperator<TypedValue>(tail_op, 0.f, 1.f, 2.f));
EXPECT_THAT(h.As<float>(), IsOkAndHolds(0.f));
EXPECT_EQ(t.GetType(),
MakeTupleQType({GetQType<float>(), GetQType<float>()}));
EXPECT_EQ(t.GetFieldCount(), 2);
EXPECT_THAT(t.GetField(0).As<float>(), IsOkAndHolds(1.f));
EXPECT_THAT(t.GetField(1).As<float>(), IsOkAndHolds(2.f));
EXPECT_THAT(
head_op->InferAttributes(
{Attr(GetQType<float>()), Attr(TypedValue::FromValue(1.f)), Attr{}}),
IsOkAndHolds(EqualsAttr(GetQType<float>())));
EXPECT_THAT(
tail_op->InferAttributes(
{Attr(GetQType<float>()), Attr(TypedValue::FromValue(1.f)), Attr{}}),
IsOkAndHolds(EqualsAttr(nullptr)));
}
TEST(LambdaOperatorTest, VariadicArgInferAttributes) {
ASSERT_OK_AND_ASSIGN(auto op,
MakeLambdaOperator(ExprOperatorSignature::Make("*args"),
Placeholder("args")));
{
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {}));
ASSERT_OK_AND_ASSIGN(auto lowered_expr, ToLowest(expr));
ASSERT_THAT(expr->attr(), EqualsAttr(lowered_expr->attr()));
}
{
auto v0 = Placeholder("x");
ASSERT_OK_AND_ASSIGN(
auto v1, WithQTypeAnnotation(Placeholder("x"), GetQType<int>()));
auto v2 = Literal(1.5f);
for (const auto& a0 : {v0, v1, v2}) {
for (const auto& a1 : {v0, v1, v2}) {
ASSERT_OK_AND_ASSIGN(auto expr, CallOp(op, {a0, a1}));
ASSERT_OK_AND_ASSIGN(auto lowered_expr, ToLowest(expr));
ASSERT_THAT(expr->attr(), EqualsAttr(lowered_expr->attr()));
}
}
}
}
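// A qtype annotation in the lambda body fixes the output type when its qtype
// argument is a literal (or when the annotation is embedded in the body).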
TEST(LambdaOperatorTest, OutputQTypeRequiresLiteral) {
{
ASSERT_OK_AND_ASSIGN(auto lambda_signature,
ExprOperatorSignature::Make("x, y"));
ASSERT_OK_AND_ASSIGN(
auto lambda_body,
CallOp(QTypeAnnotation::Make(), {Placeholder("x"), Placeholder("y")}));
ASSERT_OK_AND_ASSIGN(auto lambda_op,
LambdaOperator::Make(lambda_signature, lambda_body));
ASSERT_OK_AND_ASSIGN(
const auto called_lambda,
CallOp(lambda_op, {Leaf("a"), Literal<QTypePtr>(GetQType<int64_t>())}));
EXPECT_EQ(called_lambda->qtype(), GetQType<int64_t>());
}
{
ASSERT_OK_AND_ASSIGN(auto lambda_signature,
ExprOperatorSignature::Make("x"));
ASSERT_OK_AND_ASSIGN(
auto lambda_body,
WithQTypeAnnotation(Placeholder("x"), GetQType<int64_t>()));
ASSERT_OK_AND_ASSIGN(auto lambda_op,
LambdaOperator::Make(lambda_signature, lambda_body));
EXPECT_THAT(lambda_op->InferAttributes({Attr{}}),
IsOkAndHolds(EqualsAttr(GetQType<int64_t>())));
}
}
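// The docstring passed to LambdaOperator::Make is exposed through doc() and
// GetDoc().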
TEST(LambdaOperatorTest, GetDoc) {
auto lambda_body = Placeholder("x");
ASSERT_OK_AND_ASSIGN(
auto op, LambdaOperator::Make("lambda_op_with_docstring",
ExprOperatorSignature{{"x"}}, lambda_body,
"doc-string"));
ASSERT_EQ(op->doc(), "doc-string");
ASSERT_THAT(op->GetDoc(), IsOkAndHolds("doc-string"));
}
TEST(LambdaOperatorTest, SuppressUnusedWarning) {
{
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp("math.add", {Placeholder("x"), Placeholder("y")}));
ASSERT_OK_AND_ASSIGN(auto wrapped_expr, SuppressUnusedWarning("", expr));
EXPECT_THAT(GetPlaceholderKeys(wrapped_expr), ElementsAre("x", "y"));
EXPECT_THAT(ToLowerNode(wrapped_expr), IsOkAndHolds(EqualsExpr(expr)));
}
{
ASSERT_OK_AND_ASSIGN(
auto expr, CallOp("math.add", {Placeholder("x"), Placeholder("y")}));
ASSERT_OK_AND_ASSIGN(auto wrapped_expr,
SuppressUnusedWarning("a, b, c", expr));
EXPECT_THAT(GetPlaceholderKeys(wrapped_expr),
ElementsAre("a", "b", "c", "x", "y"));
EXPECT_THAT(ToLowest(wrapped_expr), IsOkAndHolds(EqualsExpr(expr)));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/lambda_expr_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/lambda_expr_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
02d4b049-ec00-4653-bf8f-7bb9bd5e4cf6 | cpp | google/tensorstore | http_request | tensorstore/internal/http/http_request.cc | tensorstore/internal/http/http_request_test.cc | #include "tensorstore/internal/http/http_request.h"
#include <cassert>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/strings/ascii.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/kvstore/byte_range.h"
namespace tensorstore {
namespace internal_http {
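// Formats a "Range: bytes=..." header for the requested byte range, or
// returns std::nullopt when no Range header is required.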
std::optional<std::string> FormatRangeHeader(
OptionalByteRangeRequest byte_range) {
assert(byte_range.SatisfiesInvariants());
if (byte_range.IsRange() &&
byte_range.exclusive_max > byte_range.inclusive_min) {
return absl::StrFormat("Range: bytes=%d-%d", byte_range.inclusive_min,
byte_range.exclusive_max - 1);
}
if (byte_range.IsSuffix()) {
return absl::StrFormat("Range: bytes=%d-", byte_range.inclusive_min);
}
if (byte_range.IsSuffixLength()) {
return absl::StrFormat("Range: bytes=%d", byte_range.inclusive_min);
}
return std::nullopt;
}
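// Formats a "cache-control: max-age=N" header bounding the acceptable
// response age; an infinite max_age produces no header, and a bound below one
// second degrades to "no-cache".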
std::optional<std::string> FormatCacheControlMaxAgeHeader(
absl::Duration max_age) {
if (max_age >= absl::InfiniteDuration()) {
return std::nullopt;
}
auto max_age_seconds = absl::ToInt64Seconds(max_age);
if (max_age_seconds > 0) {
return absl::StrFormat("cache-control: max-age=%d", max_age_seconds);
} else {
return "cache-control: no-cache";
}
}
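// Converts an absolute staleness bound into a relative cache-control header:
// responses no older than (now - staleness_bound) are acceptable.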
std::optional<std::string> FormatStalenessBoundCacheControlHeader(
absl::Time staleness_bound) {
if (staleness_bound == absl::InfinitePast()) {
return std::nullopt;
}
absl::Time now;
absl::Duration duration = absl::ZeroDuration();
if (staleness_bound != absl::InfiniteFuture() &&
(now = absl::Now()) > staleness_bound) {
duration = now - staleness_bound;
}
return FormatCacheControlMaxAgeHeader(duration);
}
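// Starts building a request. The method must already be upper-case; the
// query-parameter separator starts as "?" unless the base url already carries
// a query string, in which case "&" is used.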
HttpRequestBuilder::HttpRequestBuilder(
std::string_view method, std::string base_url,
absl::FunctionRef<std::string(std::string_view)> uri_encoder)
: uri_encoder_(uri_encoder),
request_{std::string(method), std::move(base_url)},
query_parameter_separator_("?") {
assert(!request_.method.empty());
assert(request_.method ==
absl::AsciiStrToUpper(std::string_view(request_.method)));
if (request_.url.find_last_of('?') != std::string::npos) {
query_parameter_separator_ = "&";
}
}
HttpRequest HttpRequestBuilder::BuildRequest() { return std::move(request_); }
HttpRequestBuilder& HttpRequestBuilder::AddHeader(std::string header) {
if (!header.empty()) {
request_.headers.push_back(std::move(header));
}
return *this;
}
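// Appends a uri-encoded "key" or "key=value" query parameter to the url and
// switches the separator to "&" for subsequent parameters.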
HttpRequestBuilder& HttpRequestBuilder::AddQueryParameter(
std::string_view key, std::string_view value) {
assert(!key.empty());
if (value.empty()) {
absl::StrAppend(&request_.url, query_parameter_separator_,
uri_encoder_(key));
} else {
absl::StrAppend(&request_.url, query_parameter_separator_,
uri_encoder_(key), "=", uri_encoder_(value));
}
query_parameter_separator_ = "&";
return *this;
}
HttpRequestBuilder& HttpRequestBuilder::EnableAcceptEncoding() {
request_.accept_encoding = true;
return *this;
}
HttpRequestBuilder& HttpRequestBuilder::MaybeAddRangeHeader(
OptionalByteRangeRequest byte_range) {
return AddHeader(FormatRangeHeader(std::move(byte_range)));
}
HttpRequestBuilder& HttpRequestBuilder::MaybeAddCacheControlMaxAgeHeader(
absl::Duration max_age) {
return AddHeader(FormatCacheControlMaxAgeHeader(max_age));
}
HttpRequestBuilder&
HttpRequestBuilder::MaybeAddStalenessBoundCacheControlHeader(
absl::Time staleness_bound) {
return AddHeader(FormatStalenessBoundCacheControlHeader(staleness_bound));
}
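// Adds a "host:" header, deriving the host from the request url's authority
// when none is supplied.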
HttpRequestBuilder& HttpRequestBuilder::AddHostHeader(std::string_view host) {
if (host.empty()) {
host = internal::ParseGenericUri(request_.url).authority;
}
return AddHeader(absl::StrFormat("host: %s", host));
}
}
} | #include "tensorstore/internal/http/http_request.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorstore/kvstore/byte_range.h"
namespace {
using ::tensorstore::OptionalByteRangeRequest;
using ::tensorstore::internal_http::HttpRequestBuilder;
using ::testing::AnyOf;
using ::testing::ElementsAre;
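// Builds a GET request with a header, uri-encoded query parameters, and
// accept-encoding enabled.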
TEST(HttpRequestBuilder, BuildRequest) {
auto request = HttpRequestBuilder("GET", "http:
.AddHeader("X-foo: bar")
.AddQueryParameter("name", "dragon")
.AddQueryParameter("age", "1234")
.EnableAcceptEncoding()
.BuildRequest();
EXPECT_EQ("http:
EXPECT_TRUE(request.accept_encoding);
EXPECT_EQ("GET", request.method);
EXPECT_THAT(request.headers, testing::ElementsAre("X-foo: bar"));
}
TEST(HttpRequestBuilder, AddCacheControlMaxAgeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::InfiniteDuration());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::ZeroDuration());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: max-age=10"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddCacheControlMaxAgeHeader(-absl::Seconds(10));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
}
TEST(HttpRequestBuilder, AddStalenessBoundCacheControlHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::InfinitePast());
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::InfiniteFuture());
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
const absl::Time kFutureTime = absl::Now() + absl::Minutes(525600);
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(kFutureTime);
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("cache-control: no-cache"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddStalenessBoundCacheControlHeader(absl::Now() -
absl::Milliseconds(5900));
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre(AnyOf("cache-control: max-age=4",
"cache-control: max-age=5")));
}
}
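// Range headers: a suffix, a suffix length, and the half-open range [1, 2)
// map to "bytes=1-", "bytes=-5", and "bytes=1-1" respectively.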
TEST(HttpRequestBuilder, MaybeAddRangeHeader) {
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader({});
EXPECT_THAT(builder.BuildRequest().headers, ::testing::IsEmpty());
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest::Suffix(1));
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("Range: bytes=1-"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest::SuffixLength(5));
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("Range: bytes=-5"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.MaybeAddRangeHeader(OptionalByteRangeRequest{1, 2});
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("Range: bytes=1-1"));
}
}
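// The host header is taken from the url's authority unless a host is given
// explicitly.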
TEST(HttpRequestBuilder, AddHostHeader) {
{
    HttpRequestBuilder builder("GET", "http://127.0.0.1/");
builder.AddHostHeader({});
EXPECT_THAT(builder.BuildRequest().headers, ElementsAre("host: 127.0.0.1"));
}
{
HttpRequestBuilder builder("GET", "http:
builder.AddHostHeader("host.header");
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("host: host.header"));
}
{
    HttpRequestBuilder builder("GET", "http://localhost:1234/");
builder.AddHostHeader({});
EXPECT_THAT(builder.BuildRequest().headers,
ElementsAre("host: localhost:1234"));
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_request.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/http/http_request_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |