ID
stringlengths 36
36
| Language
stringclasses 1
value | Repository Name
stringclasses 13
values | File Name
stringlengths 2
44
| File Path in Repository
stringlengths 11
111
| File Path for Unit Test
stringlengths 16
116
| Code
stringlengths 0
278k
| Unit Test - (Ground Truth)
stringlengths 127
663k
| Code Url
stringlengths 91
198
| Test Code Url
stringlengths 96
203
| Commit Hash
stringclasses 13
values |
---|---|---|---|---|---|---|---|---|---|---|
2e95af84-26d7-4b74-b1e2-81c690cb5511 | cpp | tensorflow/tensorflow | clustering_bridge_passes | tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc | tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes_test.cc | #include "tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.h"
#include <string>
#include "absl/log/log.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/sparsecore_passes.h"
#include "tensorflow/compiler/mlir/tf2xla/internal/passes/clustering_passes.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::OpPassManager;
using mlir::func::FuncOp;
// Appends the replicated (TPU) clustering pipeline of the TF2XLA bridge to
// `pm`.  The pass order is load-bearing: later passes rely on IR invariants
// established by earlier ones, and the companion unit test pins the total
// pass count.
//
// Args:
//   pm:          pass manager the pipeline is appended to.
//   module_name: unused in this body; presumably kept for debug naming /
//                API symmetry -- confirm against the header declaration.
void AddReplicatedBridgeClusteringPipelinePasses(OpPassManager& pm,
llvm::StringRef module_name) {
// Ops that graph pruning must keep even when they appear dead.
const llvm::SmallVector<std::string, 4> ops_to_preserve = {
"tf.TPUReplicateMetadata", "tf.TPUCompilationResult",
"tf.TPUReplicatedOutput"};
bool strict_clusters =
tensorflow::GetMlirCommonFlags()->tf_mlir_enable_strict_clusters;
// Prune the executor graph, then leave the tf_executor dialect for plain
// functional TF ops.
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass(ops_to_preserve));
pm.addNestedPass<FuncOp>(
mlir::CreateExecutorDialectToFunctionalConversionPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::TFTPU::CreateTPUPartitionedOpConversionPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUReorderReplicateAndPartitionedInputsPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateDecomposeReduceDatasetPass());
// Embedding rewrites run before cluster formation.
pm.addPass(mlir::TFDevice::CreateEmbeddingPipeliningPass());
pm.addPass(mlir::TFDevice::CreateEmbeddingSequencingPass());
pm.addPass(tensorflow::tf2xla::internal::CreateTPUClusterFormationPass(
strict_clusters));
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TFTPU::CreateTPUClusterCleanupAttributesPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateDeviceAttributeToLaunchPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFDevice::CreateDecomposeResourceOpsInClusterPass());
// Two per-function passes share one nested pass manager.
{
OpPassManager& func_pm = pm.nest<FuncOp>();
func_pm.addPass(mlir::TFTPU::CreateTPUHostComputationExpansionPass());
func_pm.addPass(mlir::TFTPU::CreateTPUUpdateEmbeddingEnqueueOpInputsPass());
}
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateLaunchToDeviceAttributePass());
pm.addPass(mlir::TF::CreateTFFunctionalControlFlowToRegions());
pm.addPass(mlir::createInlinerPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateDropWhileShapeInvariantInDeviceClusterPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFTPU::CreateTPUClusterCleanupAttributesPass());
pm.addPass(mlir::TFDevice::CreateResourceOpLiftingPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addNestedPass<FuncOp>(mlir::createCSEPass());
// Optional control-flow merging, gated by a runtime flag.
if (tensorflow::GetMlirCommonFlags()
->tf_mlir_enable_merge_control_flow_pass) {
pm.addPass(mlir::TFDevice::CreateMergeControlFlowPass());
}
// Outside-compilation handling: mark ops first, then extract them.
pm.addPass(
tensorflow::tf2xla::internal::CreateMarkOpsForOutsideCompilationPass());
pm.addPass(tensorflow::tf2xla::internal::
CreateExtractHeadTailOutsideCompilationPass());
pm.addPass(
tensorflow::tf2xla::internal::CreateExtractOutsideCompilationPass());
pm.addNestedPass<FuncOp>(
mlir::TFDevice::CreateVerifyNoOutsideCompilationMarkersPass());
pm.addNestedPass<FuncOp>(mlir::TFDevice::CreateClusterConstantSinkingPass());
pm.addPass(mlir::TF::CreateResourceDeviceInferencePass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateHoistBroadcastReadPass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateXlaBroadcastPass());
// Outline clusters into functions, then annotate sharding/aliasing.
pm.addPass(mlir::TFDevice::CreateClusterOutliningPass());
pm.addPass(mlir::TFTPU::CreateTPUResourceReadForWritePass());
pm.addPass(mlir::TFDevice::CreateMarkInputOutputAliasesPass());
pm.addPass(
tensorflow::tf2xla::internal::CreateTPUShardingIdentificationPass());
pm.addNestedPass<FuncOp>(
mlir::TFTPU::CreateTPUResourceReadsWritesPartitioningPass());
pm.addPass(mlir::TFDevice::CreateAnnotateParameterReplicationPass());
pm.addNestedPass<FuncOp>(mlir::TF::CreateRewriteTPUEmbeddingOpsPass());
pm.addPass(mlir::TFTPU::CreateTPUAnnotateDynamicShapeInputsPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateHoistReplicateInvariantResourceWritesPass());
// Final sanity check over the clustered IR.
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateVerifyClusteringPass());
}
// Intentionally empty: passed to createInlinerPass as the canonicalization
// callback so that the inliner performs no canonicalization while inlining.
void NoCanonicalization(OpPassManager& pm) {}
// Appends the non-replicated (single-device) clustering pipeline of the
// TF2XLA bridge to `pm`.  Parallels the replicated pipeline above but uses
// generic XLA cluster formation and omits the TPU/embedding-specific
// passes.  The companion unit test pins the total pass count.
void AddNonReplicatedBridgeClusteringPipelinePasses(OpPassManager& pm) {
VLOG(2) << "Create TF XLA Bridge pipeline";
pm.addPass(mlir::TFDevice::CreateXlaValidateInputsPass());
pm.addNestedPass<FuncOp>(
mlir::TF::CreateCanonicalizeCompileAndReplicateAttributesPass());
// Nothing needs to be preserved across graph pruning here.
const llvm::SmallVector<std::string, 4> ops_to_preserve = {};
pm.addNestedPass<FuncOp>(
mlir::tf_executor::CreateTFExecutorGraphPruningPass(ops_to_preserve));
pm.addNestedPass<FuncOp>(
mlir::CreateExecutorDialectToFunctionalConversionPass());
pm.addPass(mlir::TF::CreateGuaranteeAllFuncsOneUsePass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addPass(tensorflow::tf2xla::internal::CreateXlaClusterFormationPass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
pm.addPass(mlir::TFDevice::CreateDecomposeResourceOpsInClusterPass());
pm.addPass(mlir::TF::CreateTFShapeInferencePass());
pm.addNestedPass<FuncOp>(mlir::createCanonicalizerPass());
// Inline with canonicalization disabled (see NoCanonicalization above).
pm.addPass(mlir::createInlinerPass({}, NoCanonicalization));
pm.addPass(mlir::TFDevice::CreateResourceOpLiftingPass());
pm.addPass(mlir::TFDevice::CreateClusterOutliningPass());
pm.addNestedPass<FuncOp>(
tensorflow::tf2xla::internal::CreateVerifyClusteringPass());
}
};
};
}; | #include "tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.h"
#include <gtest/gtest.h>
#include "mlir/Pass/PassManager.h"
namespace tensorflow {
namespace tf2xla {
namespace internal {
using mlir::OpPassManager;
// Pins the exact pass count of the replicated (TPU) pipeline so that pass
// additions/removals are deliberate and reviewed.
TEST(ClusteringBridgePassesTest, AddsBridgePasses) {
OpPassManager pass_manager;
// Called without a module name; presumably the header declares a default
// argument for it -- confirm against clustering_bridge_passes.h.
AddReplicatedBridgeClusteringPipelinePasses(pass_manager);
EXPECT_EQ(pass_manager.size(), 45);
}
// Same pin for the non-replicated (single-device) pipeline.
TEST(ClusteringBridgePassesTest, AddsNonTPUBridgePasses) {
OpPassManager pass_manager;
AddNonReplicatedBridgeClusteringPipelinePasses(pass_manager);
EXPECT_EQ(pass_manager.size(), 15);
}
};
};
}; | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/internal/clustering_bridge_passes_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9cc88025-8857-47e4-adbf-2c50ad2316a5 | cpp | google/quiche | hpack_entry | quiche/http2/hpack/hpack_entry.cc | quiche/http2/hpack/hpack_entry_test.cc | #include "quiche/http2/hpack/hpack_entry.h"
#include <cstddef>
#include <string>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace spdy {
// Constructs an entry, taking ownership of the header name and value.
HpackEntry::HpackEntry(std::string name, std::string value)
: name_(std::move(name)), value_(std::move(value)) {}
// Size charged against the HPACK table for a (name, value) pair: the two
// string lengths plus a fixed per-entry overhead constant declared in the
// header (presumably the 32-octet overhead of RFC 7541 Section 4.1; the
// companion test's 55 == 11 + 12 + 32 is consistent with that).
size_t HpackEntry::Size(absl::string_view name, absl::string_view value) {
return name.size() + value.size() + kHpackEntrySizeOverhead;
}
// Size of this entry; delegates to the static overload.
size_t HpackEntry::Size() const { return Size(name(), value()); }
// Human-readable representation for logging/debugging only; the exact
// format is not a stable contract.
std::string HpackEntry::GetDebugString() const {
return absl::StrCat("{ name: \"", name_, "\", value: \"", value_, "\" }");
}
} | #include "quiche/http2/hpack/hpack_entry.h"
#include "absl/hash/hash.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace spdy {
namespace {
// Lookup keys must be case-sensitive in both equality and hashing.
TEST(HpackLookupEntryTest, EntryNamesDiffer) {
HpackLookupEntry entry1{"header", "value"};
HpackLookupEntry entry2{"HEADER", "value"};
EXPECT_FALSE(entry1 == entry2);
EXPECT_NE(absl::Hash<HpackLookupEntry>()(entry1),
absl::Hash<HpackLookupEntry>()(entry2));
}
// Same case-sensitivity requirement for values.
TEST(HpackLookupEntryTest, EntryValuesDiffer) {
HpackLookupEntry entry1{"header", "value"};
HpackLookupEntry entry2{"header", "VALUE"};
EXPECT_FALSE(entry1 == entry2);
EXPECT_NE(absl::Hash<HpackLookupEntry>()(entry1),
absl::Hash<HpackLookupEntry>()(entry2));
}
// Identical name/value pairs compare equal and hash equal.
TEST(HpackLookupEntryTest, EntriesEqual) {
HpackLookupEntry entry1{"name", "value"};
HpackLookupEntry entry2{"name", "value"};
EXPECT_TRUE(entry1 == entry2);
EXPECT_EQ(absl::Hash<HpackLookupEntry>()(entry1),
absl::Hash<HpackLookupEntry>()(entry2));
}
// 55 = 11 (name) + 12 (value) + 32 (per-entry overhead constant).
TEST(HpackEntryTest, BasicEntry) {
HpackEntry entry("header-name", "header value");
EXPECT_EQ("header-name", entry.name());
EXPECT_EQ("header value", entry.value());
EXPECT_EQ(55u, entry.Size());
EXPECT_EQ(55u, HpackEntry::Size("header-name", "header value"));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/hpack_entry.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/hpack/hpack_entry_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d1d92fbe-e80a-43c3-9c83-84c759e94301 | cpp | tensorflow/tensorflow | dot_dimension_sorter | third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter.cc | third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter_test.cc | #include "xla/service/gpu/transforms/dot_dimension_sorter.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/permutation_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
namespace {
// Rewrites `dot` so that its contracting dimension lists are sorted while
// keeping the lhs/rhs pairing intact: the same permutation is applied to
// both lists.  The replacement instruction is otherwise identical (shape,
// operands, precision config, sparsity).
absl::Status SortDotDimensions(HloDotInstruction* dot) {
const DotDimensionNumbers& dims = dot->dot_dimension_numbers();
DotDimensionNumbers new_dims(dims);
new_dims.clear_lhs_contracting_dimensions();
new_dims.clear_rhs_contracting_dimensions();
// Use whichever side is consecutive-if-sorted as the sort key; the caller
// (Run) only enqueues dots for which the chosen side qualifies.
const bool sort_by_lhs =
DistinctNumbersAreConsecutiveIfSorted(dims.lhs_contracting_dimensions());
const absl::Span<const int64_t>& sort_key =
sort_by_lhs ? dims.lhs_contracting_dimensions()
: dims.rhs_contracting_dimensions();
// Since the key's values are distinct and consecutive once sorted,
// subtracting the minimum yields a valid permutation of [0, n).
std::vector<int64_t> permutation;
for (const int64_t a : sort_key) {
permutation.push_back(a - *absl::c_min_element(sort_key));
}
const std::vector<int64_t> sorted_lhs =
Permute(dims.lhs_contracting_dimensions(), permutation);
*new_dims.mutable_lhs_contracting_dimensions() = {sorted_lhs.begin(),
sorted_lhs.end()};
const std::vector<int64_t> sorted_rhs =
Permute(dims.rhs_contracting_dimensions(), permutation);
*new_dims.mutable_rhs_contracting_dimensions() = {sorted_rhs.begin(),
sorted_rhs.end()};
// Rebuild the dot with sorted dimension numbers; sparsity descriptors and
// any operands past the first two (e.g. sparsity metadata) carry over.
std::unique_ptr<HloInstruction> new_dot = HloInstruction::CreateDot(
dot->shape(), dot->mutable_operand(0), dot->mutable_operand(1), new_dims,
dot->precision_config(), {dot->sparsity().begin(), dot->sparsity().end()},
absl::MakeSpan(dot->operands()).subspan(HloDotInstruction::kOperands));
dot->SetupDerivedInstruction(new_dot.get());
VLOG(3) << "Sorted dot() dimensions:\n"
<< "\t before: " << dot->ToString() << "\n"
<< "\t after: " << new_dot->ToString();
return dot->parent()->ReplaceWithNewInstruction(dot, std::move(new_dot));
}
}
// Sorts contracting dimensions of eligible dot() ops across the module.
// Returns true iff any dot was rewritten.  A dot is eligible only when:
//   * both operands either have no layout or a default (dim0-major
//     monotonic) layout,
//   * it has at least one contracting dimension, and
//   * one side is unsorted but consecutive-if-sorted and the other side
//     does not conflict (see the condition below).
absl::StatusOr<bool> DotDimensionSorter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloInstruction*> dots_to_process;
for (const HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : computation->instructions()) {
if (instr->opcode() != HloOpcode::kDot) {
continue;
}
// Skip dots whose operands carry a non-default layout.
if ((instr->operand(0)->shape().has_layout() &&
!LayoutUtil::IsMonotonicWithDim0Major(
instr->operand(0)->shape().layout())) ||
(instr->operand(1)->shape().has_layout() &&
!LayoutUtil::IsMonotonicWithDim0Major(
instr->operand(1)->shape().layout()))) {
continue;
}
const DotDimensionNumbers& dims = instr->dot_dimension_numbers();
if (dims.lhs_contracting_dimensions_size() == 0) {
continue;
}
const bool cons_lhs = DistinctNumbersAreConsecutiveIfSorted(
dims.lhs_contracting_dimensions());
const bool cons_rhs = DistinctNumbersAreConsecutiveIfSorted(
dims.rhs_contracting_dimensions());
const bool sorted_lhs =
absl::c_is_sorted(dims.lhs_contracting_dimensions());
const bool sorted_rhs =
absl::c_is_sorted(dims.rhs_contracting_dimensions());
// Enqueue when some consecutive side is unsorted; each branch
// guarantees SortDotDimensions picks a consecutive sort key.
if ((cons_lhs && !sorted_lhs && !cons_rhs) ||
(cons_rhs && !sorted_rhs && !cons_lhs) ||
(cons_lhs && !sorted_lhs && cons_rhs && !sorted_rhs)) {
dots_to_process.push_back(instr);
}
}
}
if (dots_to_process.empty()) {
return false;
}
// Mutate after the traversal so the instruction iteration above is not
// invalidated.
for (HloInstruction* dot : dots_to_process) {
TF_RETURN_IF_ERROR(SortDotDimensions(Cast<HloDotInstruction>(dot)));
}
return true;
}
}
} | #include "xla/service/gpu/transforms/dot_dimension_sorter.h"
#include <memory>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
// Fixture that runs the optimization pipeline with DotDimensionSorter
// disabled, to demonstrate the transposes that remain when contracting
// dimensions stay unsorted.
class WithoutDotDimensionSorterTest : public GpuCodegenTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
// Pass names are matched by string in the disable list.
debug_options.add_xla_disable_hlo_passes("dot_dimension_sorter");
return debug_options;
}
};
// With the sorter disabled, unsorted contracting dims leave a transpose in
// the optimized HLO.
TEST_F(WithoutDotDimensionSorterTest, UnsortedDimsCreateTransposes) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK: transpose
)");
}
// Already-sorted contracting dims require no transpose.
TEST_F(WithoutDotDimensionSorterTest, SortedDimsDoNotCreateTransposes) {
const char* hlo_text = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
MatchOptimizedHlo(hlo_text, R"(
; CHECK-NOT: transpose
)");
}
// The two dimension orderings are numerically equivalent, which is what
// justifies the sorter's rewrite.
TEST_F(WithoutDotDimensionSorterTest, DimOrderCanBeChanged) {
const char* hlo_text_ref = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
const char* hlo_text_modified = R"(
HloModule m
ENTRY e {
p0 = f16[1,14,9,32] parameter(0)
p1 = f16[12,9,32] parameter(1)
ROOT _ = f16[1,14,12] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_ref, hlo_text_modified,
ErrorSpec{1e-5, 1e-3},
true));
}
using DotDimensionSorterTest = GpuCodegenTest;
// The pass permutes {3,2}/{2,1} into {2,3}/{1,2}, preserving the pairing
// between lhs and rhs contracting dimensions.
TEST_F(DotDimensionSorterTest, SortContractingDims) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,32] parameter(0)
p1 = f16[122,96,32] parameter(1)
ROOT _ = f16[1,144,122] dot(p0, p1),
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
const auto& dims =
module->entry_computation()->root_instruction()->dot_dimension_numbers();
EXPECT_EQ(dims.lhs_contracting_dimensions(0), 3);
EXPECT_EQ(dims.lhs_contracting_dimensions(1), 2);
EXPECT_EQ(dims.rhs_contracting_dimensions(0), 2);
EXPECT_EQ(dims.rhs_contracting_dimensions(1), 1);
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_TRUE(modified);
const auto& dims2 =
module->entry_computation()->root_instruction()->dot_dimension_numbers();
EXPECT_EQ(dims2.lhs_contracting_dimensions(0), 2);
EXPECT_EQ(dims2.lhs_contracting_dimensions(1), 3);
EXPECT_EQ(dims2.rhs_contracting_dimensions(0), 1);
EXPECT_EQ(dims2.rhs_contracting_dimensions(1), 2);
}
// Already-sorted dimensions are left untouched and the pass reports no
// change.
TEST_F(DotDimensionSorterTest, NothingToReorder) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,32] parameter(0)
p1 = f16[122,96,32] parameter(1)
ROOT _ = f16[1,144,122] dot(p0, p1),
lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_FALSE(modified);
}
// Sparse dots keep their sparsity descriptor and metadata operand through
// the rewrite.
TEST_F(DotDimensionSorterTest, SparseDotSortContractingDims) {
const char* module_string = R"(
HloModule m
ENTRY e {
p0 = f16[1,144,96,16] parameter(0)
p1 = f16[122,96,32] parameter(1)
meta = u16[1,144,96,2] parameter(2)
ROOT _ = f16[1,144,122] dot(p0, p1, meta), sparsity=L.3@2:4,
lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotDimensionSorter().Run(module.get()));
EXPECT_TRUE(modified);
HloDotInstruction* dot = DynCast<HloDotInstruction>(
module->entry_computation()->root_instruction());
EXPECT_TRUE(dot != nullptr && dot->sparse_operands() == 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
37814792-efdc-4cb3-a712-b3397f948e72 | cpp | tensorflow/tensorflow | auto_sharding_memory | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_memory.cc | third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_memory_test.cc | #include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace spmd {
namespace {
using PrimIdx = int64_t;
using LiveIdx = int64_t;
using GroupIdx = int64_t;
using PrimPair = std::pair<PrimIdx, PrimIdx>;
using Interval = std::pair<LiveIdx, LiveIdx>;
using ActivePrim = std::pair<Interval, PrimIdx>;
// True iff the interval is non-empty; bounds are inclusive, so an interval
// with first > second denotes "nothing live" (the sentinel state).
bool IsValid(const Interval& interval) {
  const bool is_empty = interval.second < interval.first;
  return !is_empty;
}
// Number of live indices covered by an inclusive interval.
int64_t length(const Interval& interval) {
  const int64_t span = interval.second - interval.first;
  return span + 1;
}
}
// Reduces memory terms given a per-live-index callback listing the live
// primitives at each index.  Converts the `live` representation into one
// interval per primitive, runs the interval/group reduction, then expands
// the reduced intervals back into `reduced_live_`.  Returns
// {original term count, reduced term count}; each group membership counts
// as one extra reduced term.
std::pair<int64_t, int64_t> MemoryTermReducer::Reduce(
int64_t num_lives, int64_t num_primitives,
const std::function<
tsl::protobuf::RepeatedField<int64_t>(int64_t)>&
live,
int64_t max_iterations) {
LOG(INFO) << "Memory Term Reducer beginning to reduce number of terms ...";
reduced_live_.clear();
reduced_intervals_.clear();
reduced_groups_.clear();
int64_t num_terms = 0;
reduced_intervals_.reserve(num_primitives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
// Sentinel "invalid" interval (first > second) for primitives that never
// appear live.
reduced_intervals_.push_back({std::numeric_limits<LiveIdx>::max(), 0});
}
// Tighten each primitive's interval to its first/last live index, counting
// one term per (live index, primitive) occurrence.
for (LiveIdx live_idx = 0; live_idx < num_lives; ++live_idx) {
for (const PrimIdx prim_idx : live(live_idx)) {
Interval& interval = reduced_intervals_[prim_idx];
interval.first = std::min(interval.first, live_idx);
interval.second = std::max(interval.second, live_idx);
++num_terms;
}
}
Reduce(num_lives, num_primitives, max_iterations);
// Materialize the reduced intervals (primitives and groups) back into the
// per-live-index representation.
int64_t num_reduced_terms = 0;
reduced_live_.resize(num_lives);
for (PrimIdx prim_idx = 0; prim_idx < reduced_intervals_.size(); ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
for (LiveIdx live_idx = interval.first; live_idx <= interval.second;
++live_idx) {
reduced_live_[live_idx].push_back(prim_idx);
++num_reduced_terms;
}
}
// Each member of each merged group contributes one extra term.
for (const auto& group : reduced_groups_) num_reduced_terms += group.size();
LOG(INFO) << "Memory Term Reducer finished reducing the number of terms.";
return {num_terms, num_reduced_terms};
}
// Overload that takes the per-primitive (start, end) intervals directly
// instead of per-live-index lists.  Unlike the other overload it does not
// populate `reduced_live_`; results are read via GetReducedIntervals() and
// GetReducedGroups().  Returns {original term count, reduced term count}.
std::pair<int64_t, int64_t> MemoryTermReducer::Reduce(
int64_t num_lives, int64_t num_primitives,
const std::function<std::pair<int64_t, int64_t>(int64_t)>& intervals,
int64_t max_iterations) {
LOG(INFO) << "Memory Term Reducer beginning to reduce number of terms ...";
reduced_live_.clear();
reduced_intervals_.clear();
reduced_groups_.clear();
int64_t num_terms = 0;
reduced_intervals_.reserve(num_primitives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
reduced_intervals_.push_back(intervals(prim_idx));
const Interval& interval = reduced_intervals_.back();
// Invalid (first > second) intervals contribute no terms.
if (IsValid(interval)) num_terms += length(interval);
}
Reduce(num_lives, num_primitives, max_iterations);
int64_t num_reduced_terms = 0;
for (PrimIdx prim_idx = 0; prim_idx < reduced_intervals_.size(); ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
if (IsValid(interval)) num_reduced_terms += length(interval);
}
// Each member of each merged group contributes one extra term.
for (const auto& group : reduced_groups_) num_reduced_terms += group.size();
LOG(INFO) << "Memory Term Reducer finished reducing the number of terms.";
return {num_terms, num_reduced_terms};
}
// Core reduction: repeatedly finds pairs of (possibly already-merged)
// primitives whose live intervals overlap and, when profitable, carves the
// shared portion out into a new "group" primitive covering the overlap,
// shrinking the originals' intervals accordingly.  Runs sweep-and-merge
// passes until a fixed point or `max_iterations` is reached.
void MemoryTermReducer::Reduce(int64_t num_lives, int64_t num_primitives,
int64_t max_iterations) {
// enter[t] / evict[t]: primitives whose interval starts / ends at time t.
std::vector<absl::btree_set<PrimIdx>> enter(num_lives), evict(num_lives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
if (!IsValid(interval)) continue;
enter[interval.first].insert(prim_idx);
evict[interval.second].insert(prim_idx);
}
// True iff `large_idx`'s interval strictly contains `small_idx`'s on both
// sides; merging such a pair would split the larger interval in two, which
// this representation cannot express.
auto Splits = [this](PrimIdx large_idx, PrimIdx small_idx) -> bool {
const Interval& large = reduced_intervals_[large_idx];
const Interval& small = reduced_intervals_[small_idx];
return large.first < small.first && large.second > small.second;
};
// Overlap of two primitives' intervals, or nullopt when the indices are
// equal, either interval is invalid, or one would split the other.
auto CalcOverlap = [this, Splits](
int64_t prim0_idx,
int64_t prim1_idx) -> std::optional<Interval> {
if (prim0_idx == prim1_idx) return std::nullopt;
const Interval& interval0 = reduced_intervals_[prim0_idx];
const Interval& interval1 = reduced_intervals_[prim1_idx];
if (!IsValid(interval0) || !IsValid(interval1)) return std::nullopt;
if (Splits(prim0_idx, prim1_idx)) return std::nullopt;
if (Splits(prim1_idx, prim0_idx)) return std::nullopt;
return Interval(std::max(interval0.first, interval1.first),
std::min(interval0.second, interval1.second));
};
// Adds a primitive into `reduced_group`; an index >= num_primitives is a
// group, whose members are added instead.
auto MergeIntoGroup = [num_primitives, this](
PrimIdx prim_idx,
absl::btree_set<PrimIdx>& reduced_group) {
if (prim_idx < num_primitives) {
reduced_group.insert(prim_idx);
} else {
const auto& group = reduced_groups_[prim_idx - num_primitives];
reduced_group.insert(group.begin(), group.end());
}
};
// Number of terms a primitive contributes, optionally pretending `overlap`
// has been carved out of its interval; surviving group primitives also pay
// one term per member.
auto CalcNumTerms = [num_primitives, this](
PrimIdx prim_idx,
std::optional<Interval> overlap = std::nullopt) {
int64_t num_terms = length(reduced_intervals_[prim_idx]);
if (overlap) num_terms -= length(*overlap);
if (prim_idx >= num_primitives && num_terms > 0) {
num_terms += reduced_groups_[prim_idx - num_primitives].size();
}
return num_terms;
};
// Shrinks a primitive's interval by removing `overlap` from whichever end
// it touches, keeping the enter/evict index structures in sync.
auto UpdatePrimitive = [this, &enter, &evict](
PrimIdx prim_idx,
const Interval& overlap) mutable {
Interval& interval = reduced_intervals_[prim_idx];
enter[interval.first].erase(prim_idx);
evict[interval.second].erase(prim_idx);
if (auto& t = interval.first; t == overlap.first) t = overlap.second + 1;
if (auto& t = interval.second; t == overlap.second) t = overlap.first - 1;
// Interval fully consumed by the overlap: leave it invalid.
if (!IsValid(interval)) return;
enter[interval.first].insert(prim_idx);
evict[interval.second].insert(prim_idx);
};
// One pass: sweep the live indices to collect candidate overlapping pairs
// (ordered largest-overlap-first via the negated key), then greedily merge
// the profitable ones.  Returns true iff anything changed.
auto SweepAndMerge = [&num_lives, &enter, &evict, &CalcOverlap, &CalcNumTerms,
&MergeIntoGroup, &UpdatePrimitive, this]() -> bool {
absl::btree_set<ActivePrim> actives;
absl::btree_multimap<int64_t, PrimPair> overlaps;
for (LiveIdx live_idx = 0; live_idx < num_lives; ++live_idx) {
for (const PrimIdx prim_idx : enter[live_idx]) {
actives.insert({reduced_intervals_[prim_idx], prim_idx});
}
for (const PrimIdx prim_idx : evict[live_idx]) {
// Pair each evicted primitive with its successor in the active set.
auto active = actives.find({reduced_intervals_[prim_idx], prim_idx});
if (++active == actives.end()) continue;
std::optional<Interval> overlap = CalcOverlap(prim_idx, active->second);
if (!overlap) continue;
// Negative length so the multimap iterates larger overlaps first.
overlaps.insert({-length(*overlap), {prim_idx, active->second}});
}
for (const PrimIdx prim_idx : evict[live_idx]) {
actives.erase({reduced_intervals_[prim_idx], prim_idx});
}
}
bool changed = false;
for (const auto& [_, prim_pair] : overlaps) {
const PrimIdx prim0_idx = prim_pair.first, prim1_idx = prim_pair.second;
// Recompute: earlier merges in this pass may have shrunk the intervals.
const std::optional<Interval> overlap = CalcOverlap(prim0_idx, prim1_idx);
if (!overlap) continue;
absl::btree_set<PrimIdx> reduced_group;
MergeIntoGroup(prim0_idx, reduced_group);
MergeIntoGroup(prim1_idx, reduced_group);
// Merge only when it strictly reduces the total number of terms.
if (CalcNumTerms(prim0_idx) + CalcNumTerms(prim1_idx) <=
CalcNumTerms(prim0_idx, overlap) + CalcNumTerms(prim1_idx, overlap) +
length(*overlap) + reduced_group.size()) {
continue;
}
// The new group primitive gets the next index past all existing ones.
enter[overlap->first].insert(reduced_intervals_.size());
evict[overlap->second].insert(reduced_intervals_.size());
reduced_intervals_.push_back({overlap->first, overlap->second});
reduced_groups_.push_back(reduced_group);
UpdatePrimitive(prim0_idx, *overlap);
UpdatePrimitive(prim1_idx, *overlap);
changed = true;
}
return changed;
};
for (int64_t iteration = 0; iteration < max_iterations; ++iteration) {
if (!SweepAndMerge()) break;
}
// Drop groups whose intervals became invalid (fully absorbed by later
// merges), iterating backwards so erasures keep earlier indices stable.
for (GroupIdx group_idx = reduced_groups_.size() - 1; group_idx >= 0;
--group_idx) {
if (IsValid(reduced_intervals_[num_primitives + group_idx])) continue;
reduced_intervals_.erase(reduced_intervals_.begin() + num_primitives +
group_idx);
reduced_groups_.erase(reduced_groups_.begin() + group_idx);
}
}
// Per-live-index primitive lists; populated only by the `live`-callback
// Reduce overload.
const std::vector<std::vector<int64_t>>& MemoryTermReducer::GetReducedLive()
const {
return reduced_live_;
}
// Intervals for the original primitives followed by one per merged group.
const std::vector<std::pair<int64_t, int64_t>>&
MemoryTermReducer::GetReducedIntervals() const {
return reduced_intervals_;
}
// Member sets of the merged groups, in creation order.
const std::vector<absl::btree_set<int64_t>>&
MemoryTermReducer::GetReducedGroups() const {
return reduced_groups_;
}
// Convenience wrapper over the static overload, using this reducer's own
// reduction results.
absl::flat_hash_set<int64_t> MemoryTermReducer::GetReducedTimes(
int64_t num_primitives) {
return GetReducedTimes(num_primitives, reduced_intervals_, reduced_groups_);
}
// Computes a small set of live indices ("times") sufficient for memory
// accounting: group intervals are first folded back into their member
// primitives' intervals, then a sweep over the sorted interval endpoints
// keeps one entering time per maximal region of overlap.
absl::flat_hash_set<int64_t> MemoryTermReducer::GetReducedTimes(
int64_t num_primitives,
const std::vector<std::pair<int64_t, int64_t>>& reduced_intervals,
const std::vector<absl::btree_set<int64_t>>& reduced_groups) {
std::vector<std::pair<int64_t, int64_t>> intervals;
for (int64_t reduced_interval_idx = 0;
reduced_interval_idx < reduced_intervals.size();
++reduced_interval_idx) {
const Interval& reduced_interval = reduced_intervals[reduced_interval_idx];
if (reduced_interval_idx < num_primitives) {
intervals.push_back(reduced_interval);
continue;
}
// Entries past num_primitives are groups: widen each member primitive's
// interval to also cover the group's interval.
const GroupIdx group_idx = reduced_interval_idx - num_primitives;
for (const PrimIdx prim_idx : reduced_groups[group_idx]) {
Interval& interval = intervals[prim_idx];
if (!IsValid(interval)) {
interval.first = reduced_interval.first;
interval.second = reduced_interval.second;
continue;
}
interval.first = std::min(interval.first, reduced_interval.first);
interval.second = std::max(interval.second, reduced_interval.second);
}
}
// Sorted endpoint events as (time, is_eviction); at equal times, entering
// events (false) sort before evictions (true).
absl::btree_set<std::pair<int64_t, bool>> times;
for (const Interval& interval : intervals) {
if (!IsValid(interval)) continue;
times.insert({interval.first, false});
times.insert({interval.second, true});
}
int64_t last_entering_time = -1;
absl::flat_hash_set<int64_t> reduced_times;
for (const auto& time : times) {
if ( time.second) {  // eviction: keep the most recent entering time
reduced_times.insert(last_entering_time);
} else {
last_entering_time = time.first;
}
}
reduced_times.insert(last_entering_time);
return reduced_times;
}
}
} | #include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
namespace xla {
namespace spmd {
namespace {
// Adapts a vector-of-vectors liveness table to the callback signature of
// MemoryTermReducer::Reduce.
std::function<tsl::protobuf::RepeatedField<int64_t>(int64_t)>
Convert(const std::vector<std::vector<int64_t>>& live) {
return [live](int64_t live_idx) {
// NOTE(review): constructs ::proto2::RepeatedField while the declared
// return type is tsl::protobuf::RepeatedField; presumably these alias
// the same type in this build configuration -- confirm.
return ::proto2::RepeatedField<int64_t>(live[live_idx].begin(),
live[live_idx].end());
};
}
// Adapts a vector of (start, end) pairs to the interval-callback overload.
std::function<std::pair<int64_t, int64_t>(int64_t)> Convert(
const std::vector<std::pair<int64_t, int64_t>>& intervals) {
return [intervals](int64_t prim_idx) { return intervals[prim_idx]; };
}
// Disjoint intervals: nothing can be merged, so terms are unchanged (6 -> 6)
// and no groups are created.
TEST(AutoShardingMemoryTest, WithoutOverlap) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0 },
{0 },
{ 1},
{ 1},
{ 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{0 },
{0 },
{ 1},
{ 1},
{ 1}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 2}, {3, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {6, 6};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
// Overlap over [1, 4] is carved into group {0, 1} (new primitive index 2),
// reducing the term count from 10 to 8.
TEST(AutoShardingMemoryTest, PartialOverlap) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{ 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 2},
{ 2},
{ 2},
{ 2},
{ 1 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {5, 5}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
// Mirror image of PartialOverlap (primitive roles swapped); the same group
// is formed and the same 10 -> 8 reduction is achieved.
TEST(AutoShardingMemoryTest, PartialOverlapReversed) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{ 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 1 },
{ 2},
{ 2},
{ 2},
{ 2},
{0 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, 5}, {0, 0}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
// Primitive 0 strictly contains primitive 1; merging would split primitive
// 0's interval in two, so the reducer must leave everything unchanged.
TEST(AutoShardingMemoryTest, DoesNotSplitPrimitive) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 5}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 10};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OnePrimitiveVanishes) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 2},
{ 2},
{ 2},
{ 2},
{ 2}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {6, 0}, {1, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {11, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, BothPrimitivesVanish) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{2},
{2},
{2},
{2},
{2},
{2}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, -1}, {6, -1}, {0, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {12, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OneGroupingPreventsAnother) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2},
{ 1, 2},
{ 1, 2},
{ 1, 2},
{ 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 3},
{ 3},
{ 3},
{ 3},
{ 2, 3},
{1, 2 },
{1, 2 },
{1, 2 },
{ 2 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, -1}, {5, 7}, {4, 8}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {18, 15};
const absl::flat_hash_set<int64_t> expected_reduced_times = {4};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, TwoGroups) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 2},
{0, 2},
{0, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{3},
{3},
{3},
{4},
{4},
{4}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, 2}, {3, -1}, {6, 2}, {0, 2}, {3, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {0, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {12, 10};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, TwoGroupsMutuallyExclusive) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1 },
{0, 1 },
{0, 1 },
{ 2, 3},
{ 2, 3},
{ 2, 3},
{ 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 4},
{ 4},
{ 4},
{ 5},
{ 5},
{ 5},
{ 3 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {4, 0}, {7, 3}, {7, 7}, {1, 3}, {4, 6}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {14, 12};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1, 4};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergingPrimitivesWouldNotReduceTerms) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0, 1},
{0, 1}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 1}, {0, 1}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {4, 4};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, AllPrimitivesVanish) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{3},
{3},
{3},
{3},
{3},
{3}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, -1}, {6, -1}, {6, -1}, {0, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {18, 9};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergingGroupsWouldNotReduceTerms) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{ 2, 3},
{ 2, 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{4 },
{4 },
{4 },
{4, 5},
{4, 5},
{4, 5},
{4, 5},
{ 5},
{ 5}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{7, -1}, {7, -1}, {9, 2}, {9, 2}, {0, 6}, {3, 8}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {26, 17};
const absl::flat_hash_set<int64_t> expected_reduced_times = {3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, ExampleFromDocumentation) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{ 2, 3},
{ 2, 3},
{ 2, 3},
{ 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 4},
{ 4},
{ 4},
{ 4},
{ 6},
{ 6},
{ 6},
{ 6},
{ 6},
{ 5},
{ 5},
{ 5},
{ 3 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 4}, {10, 12}, {5, 9}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}, {0, 1, 2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 22};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergesWithRightmost) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 2},
{0, 2},
{0, 2},
{ 1, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 3},
{ 3},
{ 3},
{1, 2 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{3, -1}, {3, 3}, {3, 3}, {0, 2}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {8, 7};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, ExampleFromDocumentationUsingIntervals) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 9}, {1, 9}, {5, 12}, {5, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals));
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 4}, {10, 12}, {5, 9}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}, {0, 1, 2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 22};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, InvalidIntervals) {
const int num_primitives = 3;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 4}, {9223372036854775807, 0}, {9223372036854775807, 0}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(5, num_primitives,
Convert(intervals));
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 4}, {9223372036854775807, 0}, {9223372036854775807, 0}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {5, 5};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OneIterationOnly) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 9}, {1, 9}, {5, 12}, {5, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 9}, {5, 12}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 23};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsBottomLeft) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 13}, {0, 10}, {0, 7}, {0, 4}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{11, 13}, {11, -1}, {5, 7}, {5, -1}, {0, 10}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsTopLeft) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 4}, {0, 7}, {0, 10}, {0, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, -1}, {5, 7}, {11, -1}, {11, 13}, {0, 10}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{2, 3}, {0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsTopRight) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{9, 13}, {6, 13}, {3, 13}, {0, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{14, 8}, {6, 8}, {14, 2}, {0, 2}, {3, 13}, {9, 13}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{2, 3}, {0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {9};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsBottomRight) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 13}, {3, 13}, {6, 13}, {9, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 2}, {14, 2}, {6, 8}, {14, 8}, {3, 13}, {9, 13}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {9};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_memory.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/experimental/auto_sharding/auto_sharding_memory_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
87bfd4b9-ec7b-4c54-b090-bb8a279883ad | cpp | tensorflow/tensorflow | sort_json | third_party/xla/xla/sort_json.cc | third_party/xla/xla/sort_json_test.cc | #include "xla/sort_json.h"
#include <algorithm>
#include <cctype>
#include <cstddef>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace {
void SkipWhitespace(absl::string_view json, size_t& index) {
while (index < json.size() && std::isspace(json[index])) {
++index;
}
}
absl::Status CheckNotEndOfString(absl::string_view json, int index,
absl::string_view expected) {
return index < json.size()
? absl::OkStatus()
: absl::InvalidArgumentError(absl::StrCat(
"Prematurely reached end of JSON while looking for ",
expected, "."));
}
absl::Status Consume(absl::string_view json, size_t& index, char c,
bool optional = false) {
SkipWhitespace(json, index);
TF_RETURN_IF_ERROR(CheckNotEndOfString(json, index, std::string(1, c)));
if (json[index] == c) {
++index;
SkipWhitespace(json, index);
} else if (!optional) {
return absl::InvalidArgumentError(
absl::StrCat("Expected '", std::string(1, c), "', but found '",
std::string(1, json[index]), "'."));
}
return absl::OkStatus();
}
struct JsonArray;
struct JsonObject;
using JsonValue = std::variant<absl::string_view, std::unique_ptr<JsonObject>,
std::unique_ptr<JsonArray>>;
struct JsonField {
absl::string_view name;
JsonValue value;
};
template <typename T>
struct JsonSequence {
std::vector<T> elements;
};
struct JsonArray : public JsonSequence<JsonValue> {};
struct JsonObject : public JsonSequence<JsonField> {};
template <typename T, char begin, char end, const char* name, typename ElemFn>
absl::StatusOr<std::unique_ptr<T>> ParseSequence(absl::string_view outer_json,
size_t& index,
ElemFn elem_fn) {
TF_RETURN_IF_ERROR(Consume(outer_json, index, begin));
TF_RETURN_IF_ERROR(CheckNotEndOfString(outer_json, index, name));
auto seq = std::make_unique<T>();
while (outer_json[index] != end) {
TF_ASSIGN_OR_RETURN(auto elem, elem_fn(outer_json, index));
seq->elements.emplace_back(std::move(elem));
TF_RETURN_IF_ERROR(Consume(outer_json, index, ',', true));
TF_RETURN_IF_ERROR(CheckNotEndOfString(outer_json, index, name));
}
TF_RETURN_IF_ERROR(Consume(outer_json, index, end));
return seq;
}
absl::Status EnsureValidLiteralStart(char c) {
if (c != '"' && c != '+' && c != '-' && c != 'f' && c != 't' && c != 'n' &&
(c < '0' || c > '9')) {
return absl::InvalidArgumentError(absl::StrCat(
"Invalid first character of literal: '", std::string(1, c), "'."));
}
return absl::OkStatus();
}
bool HandleEscape(absl::string_view outer_json, size_t& index,
bool& is_escaped) {
if (is_escaped) {
is_escaped = false;
++index;
return true;
}
if (outer_json[index] == '\\') {
is_escaped = true;
++index;
return true;
}
return false;
}
bool LiteralIsFinished(absl::string_view outer_json, size_t& index,
bool is_string_literal) {
char c = outer_json[index];
if (is_string_literal) {
index += (c == '"' ? 1 : 0);
return c == '"';
}
return std::isspace(c) || c == ',' || c == '{' || c == '}' || c == '[' ||
c == ']' || c == ':';
}
absl::StatusOr<absl::string_view> ParseLiteral(absl::string_view outer_json,
size_t& index) {
SkipWhitespace(outer_json, index);
TF_RETURN_IF_ERROR(CheckNotEndOfString(outer_json, index, "literal"));
auto c = outer_json[index];
TF_RETURN_IF_ERROR(EnsureValidLiteralStart(c));
bool is_string_literal = c == '"';
size_t start_index = index;
bool is_escaped = false;
++index;
while (index < outer_json.size()) {
if (HandleEscape(outer_json, index, is_escaped)) {
continue;
}
if (LiteralIsFinished(outer_json, index, is_string_literal)) {
break;
}
++index;
}
return outer_json.substr(start_index, index - start_index);
}
absl::StatusOr<JsonField> ParseField(absl::string_view outer_json,
size_t& index);
absl::StatusOr<JsonValue> ParseValue(absl::string_view outer_json,
size_t& index) {
JsonValue value;
SkipWhitespace(outer_json, index);
TF_RETURN_IF_ERROR(CheckNotEndOfString(outer_json, index, "value"));
auto c = outer_json[index];
if (c == '{') {
constexpr static char kObject[] = "object";
auto seq = ParseSequence<JsonObject, '{', '}', kObject>(outer_json, index,
ParseField);
TF_ASSIGN_OR_RETURN(value, std::move(seq));
} else if (c == '[') {
constexpr static char kArray[] = "array";
auto seq = ParseSequence<JsonArray, '[', ']', kArray>(outer_json, index,
ParseValue);
TF_ASSIGN_OR_RETURN(value, std::move(seq));
} else {
TF_ASSIGN_OR_RETURN(value, ParseLiteral(outer_json, index));
}
return value;
}
absl::StatusOr<JsonField> ParseField(absl::string_view outer_json,
size_t& index) {
JsonField field;
TF_ASSIGN_OR_RETURN(field.name, ParseLiteral(outer_json, index));
TF_RETURN_IF_ERROR(Consume(outer_json, index, ':'));
TF_ASSIGN_OR_RETURN(field.value, ParseValue(outer_json, index));
return field;
}
template <typename T>
std::vector<std::string> SerializedElements(const JsonSequence<T>& seq) {
std::vector<std::string> result;
for (const auto& field : seq.elements) {
result.push_back("");
Serialize(field, result.back());
}
return result;
}
template <typename ElemT, char begin_brace, char end_brace>
void Serialize(const JsonSequence<ElemT>& object, std::string& result) {
auto elems = SerializedElements(object);
if constexpr (std::is_same_v<ElemT, JsonField>) {
std::sort(elems.begin(), elems.end());
}
result += begin_brace;
bool has_preceeding = false;
for (const auto& elem : elems) {
if (has_preceeding) {
result += ',';
}
result += elem;
has_preceeding = true;
}
result += end_brace;
}
void Serialize(const JsonValue& value, std::string& result) {
if (auto* lit = std::get_if<absl::string_view>(&value)) {
absl::StrAppend(&result, *lit);
} else if (auto* object = std::get_if<std::unique_ptr<JsonObject>>(&value)) {
Serialize<JsonField, '{', '}'>(**object, result);
} else if (auto* array = std::get_if<std::unique_ptr<JsonArray>>(&value)) {
Serialize<JsonValue, '[', ']'>(**array, result);
}
}
void Serialize(const JsonField& field, std::string& result) {
absl::StrAppend(&result, field.name, ":");
Serialize(field.value, result);
}
}
namespace xla {
absl::StatusOr<std::string> SortJson(absl::string_view json) {
size_t index = 0;
TF_ASSIGN_OR_RETURN(auto value, ParseValue(json, index));
SkipWhitespace(json, index);
if (index < json.size()) {
return absl::InvalidArgumentError("Found trailing characters in JSON.");
}
std::string result;
Serialize(value, result);
return result;
}
} | #include "xla/sort_json.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::tsl::testing::IsOkAndHolds;
TEST(SortJsonTest, SortsJson) {
EXPECT_THAT(SortJson(R"({"a": 1, "c": 3,"b": 2, "b": 1,})"),
IsOkAndHolds(R"({"a":1,"b":1,"b":2,"c":3})"));
EXPECT_THAT(SortJson(R"({"a": 1 , "c": 1,"b": 1 })"),
IsOkAndHolds(R"({"a":1,"b":1,"c":1})"));
EXPECT_THAT(SortJson(R"({"a": 1,"c": 3,"b": 2,"b": [3,2,1],})"),
IsOkAndHolds(R"({"a":1,"b":2,"b":[3,2,1],"c":3})"));
EXPECT_THAT(SortJson(R"({"aa": 1, "a": {"c": "c", "b": "b"}})"),
IsOkAndHolds(R"({"a":{"b":"b","c":"c"},"aa":1})"));
EXPECT_THAT(
SortJson(
R"({"x": true, "x": false, "x": null, "x": 0, "x": -0.5,"x": "a"})"),
IsOkAndHolds(R"({"x":"a","x":-0.5,"x":0,"x":false,"x":null,"x":true})"));
EXPECT_THAT(SortJson(R"({"a": "a}", "a": "a"})"),
IsOkAndHolds(R"({"a":"a","a":"a}"})"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/sort_json.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/sort_json_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8367ff2e-15a0-4c35-9cf2-3c250ad2a825 | cpp | google/tensorstore | coordinator_server | tensorstore/kvstore/ocdbt/distributed/coordinator_server.cc | tensorstore/kvstore/ocdbt/distributed/coordinator_server_test.cc | #include "tensorstore/kvstore/ocdbt/distributed/coordinator_server.h"
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/absl_log.h"
#include "absl/meta/type_traits.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/compare.h"
#include "grpcpp/security/server_credentials.h"
#include "grpcpp/server.h"
#include "grpcpp/server_builder.h"
#include "grpcpp/server_context.h"
#include "grpcpp/support/server_callback.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/internal/container/intrusive_red_black_tree.h"
#include "tensorstore/internal/grpc/peer_address.h"
#include "tensorstore/internal/grpc/utils.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/log/verbose_flag.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.grpc.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security_registry.h"
#include "tensorstore/proto/encode_time.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace ocdbt {
namespace {
ABSL_CONST_INIT internal_log::VerboseFlag ocdbt_logging("ocdbt");
struct LeaseNode;
using LeaseTree = internal::intrusive_red_black_tree::Tree<LeaseNode>;
struct LeaseNode : public LeaseTree::NodeBase {
std::string key;
std::string owner;
absl::Time expiration_time;
uint64_t lease_id;
};
}
namespace jb = ::tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
CoordinatorServer::Spec,
jb::Object(
jb::Member("security",
jb::Projection<&CoordinatorServer::Spec::security>(
internal_ocdbt::RpcSecurityMethodJsonBinder)),
jb::Member("bind_addresses",
jb::Projection<&CoordinatorServer::Spec::bind_addresses>(
jb::DefaultInitializedValue()))));
CoordinatorServer::CoordinatorServer() = default;
CoordinatorServer::~CoordinatorServer() = default;
CoordinatorServer::CoordinatorServer(CoordinatorServer&&) = default;
CoordinatorServer& CoordinatorServer::operator=(CoordinatorServer&&) = default;
class CoordinatorServer::Impl
: public internal_ocdbt::grpc_gen::Coordinator::CallbackService {
public:
std::vector<int> listening_ports_;
std::unique_ptr<grpc::Server> server_;
internal_ocdbt::RpcSecurityMethod::Ptr security_;
Clock clock_;
grpc::ServerUnaryReactor* RequestLease(
grpc::CallbackServerContext* context,
const internal_ocdbt::grpc_gen::LeaseRequest* request,
internal_ocdbt::grpc_gen::LeaseResponse* response) override;
void PurgeExpiredLeases() ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
absl::Mutex mutex_;
LeaseTree leases_by_expiration_time_ ABSL_GUARDED_BY(mutex_);
using LeaseSet =
internal::HeterogeneousHashSet<std::unique_ptr<LeaseNode>,
std::string_view, &LeaseNode::key>;
LeaseSet leases_by_key_ ABSL_GUARDED_BY(mutex_);
};
span<const int> CoordinatorServer::ports() const {
return impl_->listening_ports_;
}
int CoordinatorServer::port() const { return impl_->listening_ports_.front(); }
void CoordinatorServer::Impl::PurgeExpiredLeases() {
auto now = clock_();
for (LeaseTree::iterator it = leases_by_expiration_time_.begin(), next;
it != leases_by_expiration_time_.end() && it->expiration_time < now;
it = next) {
next = std::next(it);
LeaseNode& node = *it;
leases_by_expiration_time_.Remove(node);
leases_by_key_.erase(node.key);
}
}
grpc::ServerUnaryReactor* CoordinatorServer::Impl::RequestLease(
grpc::CallbackServerContext* context,
const internal_ocdbt::grpc_gen::LeaseRequest* request,
internal_ocdbt::grpc_gen::LeaseResponse* response) {
auto* reactor = context->DefaultReactor();
if (auto status = security_->ValidateServerRequest(context); !status.ok()) {
reactor->Finish(internal::AbslStatusToGrpcStatus(status));
return reactor;
}
auto peer_address = internal::GetGrpcPeerAddressAndPort(context);
if (!peer_address.ok()) {
reactor->Finish(grpc::Status(grpc::StatusCode::INTERNAL,
std::string(peer_address.status().message())));
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Coordinator: internal error: request=" << *request;
return reactor;
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto lease_duration,
internal::ProtoToAbslDuration(request->lease_duration()),
(reactor->Finish(grpc::Status(
grpc::StatusCode::INVALID_ARGUMENT,
tensorstore::StrCat("Invalid lease duration: ", _.message()))),
reactor));
{
absl::MutexLock lock(&mutex_);
PurgeExpiredLeases();
LeaseNode* node;
bool assign_new_lease = false;
bool renew_lease = false;
if (auto it = leases_by_key_.find(request->key());
it != leases_by_key_.end()) {
node = it->get();
if (request->has_renew_lease_id() &&
request->renew_lease_id() == node->lease_id) {
leases_by_expiration_time_.Remove(*node);
renew_lease = true;
} else if (request->has_uncooperative_lease_id() &&
request->uncooperative_lease_id() == node->lease_id) {
leases_by_expiration_time_.Remove(*node);
assign_new_lease = true;
}
} else {
auto new_node = std::make_unique<LeaseNode>();
new_node->key = request->key();
node = new_node.get();
leases_by_key_.insert(std::move(new_node));
assign_new_lease = true;
}
if (assign_new_lease || renew_lease) {
auto cur_time = clock_();
node->expiration_time = cur_time + lease_duration;
if (assign_new_lease) {
node->lease_id = static_cast<uint64_t>(
absl::ToInt64Nanoseconds(cur_time - absl::UnixEpoch()));
node->owner = tensorstore::StrCat(peer_address->first, ":",
request->cooperator_port());
}
response->set_is_owner(true);
leases_by_expiration_time_.FindOrInsert(
[&](LeaseNode& other) {
return node->expiration_time > other.expiration_time
? absl::weak_ordering::greater
: absl::weak_ordering::less;
},
[&] { return node; });
}
response->set_owner(node->owner);
internal::AbslTimeToProto(node->expiration_time,
response->mutable_expiration_time());
response->set_lease_id(node->lease_id);
}
ABSL_LOG_IF(INFO, ocdbt_logging)
<< "Coordinator: request=" << *request << ", response=" << *response;
reactor->Finish(grpc::Status());
return reactor;
}
Result<CoordinatorServer> CoordinatorServer::Start(Options options) {
auto impl = std::make_unique<Impl>();
if (options.clock) {
impl->clock_ = std::move(options.clock);
} else {
impl->clock_ = [] { return absl::Now(); };
}
impl->security_ = options.spec.security;
if (!impl->security_) {
impl->security_ = internal_ocdbt::GetInsecureRpcSecurityMethod();
}
grpc::ServerBuilder builder;
builder.RegisterService(impl.get());
auto creds = impl->security_->GetServerCredentials();
if (options.spec.bind_addresses.empty()) {
options.spec.bind_addresses.push_back("[::]:0");
}
impl->listening_ports_.resize(options.spec.bind_addresses.size());
for (size_t i = 0; i < options.spec.bind_addresses.size(); ++i) {
builder.AddListeningPort(options.spec.bind_addresses[i], creds,
&impl->listening_ports_[i]);
}
impl->server_ = builder.BuildAndStart();
CoordinatorServer server;
server.impl_ = std::move(impl);
return server;
}
}
} | #include "tensorstore/kvstore/ocdbt/distributed/coordinator_server.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/absl_log.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "grpcpp/create_channel.h"
#include "tensorstore/kvstore/key_range.h"
#include "tensorstore/kvstore/ocdbt/distributed/btree_node_identifier.h"
#include "tensorstore/kvstore/ocdbt/distributed/coordinator.grpc.pb.h"
#include "tensorstore/kvstore/ocdbt/distributed/lease_cache_for_cooperator.h"
#include "tensorstore/kvstore/ocdbt/distributed/rpc_security.h"
#include "tensorstore/util/future.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::KeyRange;
using ::tensorstore::internal_ocdbt::BtreeNodeIdentifier;
using ::tensorstore::internal_ocdbt_cooperator::LeaseCacheForCooperator;
using ::tensorstore::ocdbt::CoordinatorServer;
class CoordinatorServerTest : public ::testing::Test {
protected:
absl::Time cur_time;
CoordinatorServer server_;
LeaseCacheForCooperator lease_cache;
void SetUp() override {
auto security =
::tensorstore::internal_ocdbt::GetInsecureRpcSecurityMethod();
CoordinatorServer::Options options;
options.spec.security = security;
options.spec.bind_addresses.push_back("localhost:0");
options.clock = [this] { return cur_time; };
TENSORSTORE_CHECK_OK_AND_ASSIGN(
server_, CoordinatorServer::Start(std::move(options)));
std::string address = tensorstore::StrCat("localhost:", server_.port());
auto channel =
::grpc::CreateChannel(address, security->GetClientCredentials());
if (!channel->WaitForConnected(
absl::ToChronoTime(absl::Now() + absl::Milliseconds(100)))) {
ABSL_LOG(WARNING) << "Failed to connect to coordinator after 100ms: "
<< address;
}
LeaseCacheForCooperator::Options lease_cache_options;
lease_cache_options.clock = {};
lease_cache_options.cooperator_port = 42;
lease_cache_options.coordinator_stub =
tensorstore::internal_ocdbt::grpc_gen::Coordinator::NewStub(
std::move(channel));
lease_cache_options.security = security;
lease_cache = LeaseCacheForCooperator(std::move(lease_cache_options));
}
};
TEST_F(CoordinatorServerTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto lease_info,
lease_cache
.GetLease("key", BtreeNodeIdentifier{1, KeyRange{"abc", "def"}})
.result());
EXPECT_FALSE(lease_info->peer_stub);
EXPECT_THAT(lease_info->peer_address, ::testing::MatchesRegex(".*:42"));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/distributed/coordinator_server.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/distributed/coordinator_server_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
7eeb4c59-1ba3-41fa-9a55-c2db5b2366a6 | cpp | tensorflow/tensorflow | gpu_hlo_cost_analysis | third_party/xla/xla/service/gpu/model/gpu_hlo_cost_analysis.cc | third_party/xla/xla/service/gpu/model/gpu_hlo_cost_analysis_test.cc | #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
static constexpr absl::string_view kIRSizeKey = HloCostAnalysis::kReserved0Key;
static constexpr absl::string_view kBasicBlockSplitCountKey =
HloCostAnalysis::kReserved1Key;
static constexpr absl::string_view kCollAlgoScaleRatioKey =
"Collective algorithm's scaling ratio";
static constexpr absl::string_view kCollNumDevicesKey =
"Number of devices of a collective group";
absl::Status GpuHloCostAnalysis::Preprocess(const HloInstruction* hlo) {
TF_RETURN_IF_ERROR(HloCostAnalysis::Preprocess(hlo));
current_properties_[kIRSizeKey] = 1;
current_properties_[kBasicBlockSplitCountKey] =
ElementalIrEmitter::OpInvalidatesCache(hlo);
return absl::OkStatus();
}
float GpuHloCostAnalysis::ScalingRatio(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kCollAlgoScaleRatioKey, hlo_properties_);
}
int64_t GpuHloCostAnalysis::NumOfDevices(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kCollNumDevicesKey, hlo_properties_);
}
int64_t GpuHloCostAnalysis::FusionParameterReadBytes(
const HloInstruction* hlo) const {
CHECK(hlo->IsFused() && (hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kGetTupleElement));
float utilization = hlo_properties_.at(hlo)[kUtilizationKey];
if (!options_.count_multiple_input_accesses) {
utilization = fmin(utilization, 1.0);
}
return std::llround(GetShapeSize(hlo->shape()) * utilization);
}
absl::Status GpuHloCostAnalysis::FusionCalculateUtilizations(
const HloInstruction* fusion) {
const HloInstruction* root = fusion->fused_expression_root();
std::vector<HloInstruction*> instructions =
fusion->fused_instructions_computation()->MakeInstructionPostOrder();
absl::c_reverse(instructions);
absl::flat_hash_map<const HloInstruction*, int64_t> root_ir_sizes;
for (const HloInstruction* instr : instructions) {
hlo_properties_[instr][kUtilizationKey] = 0;
hlo_properties_[instr][kIRSizeKey] = 0;
elementwise_use_roots_[instr].clear();
root_utilizations_[instr] = 0;
}
root_utilizations_[root] = 1.0;
root_ir_sizes[root] = 1;
elementwise_use_roots_[root].insert(root);
current_properties_[kFlopsKey] = 0;
current_properties_[kBasicBlockSplitCountKey] = 0;
current_properties_[kIRSizeKey] = 0;
for (const HloInstruction* instr : instructions) {
VLOG(8) << instr->name() << ":";
VLOG(9) << "Elementwise use roots:";
Properties& instr_props = hlo_properties_[instr];
for (const HloInstruction* r : elementwise_use_roots_[instr]) {
VLOG(9) << "\t" << r->name() << ": " << root_utilizations_[r];
instr_props[kUtilizationKey] += root_utilizations_[r];
instr_props[kIRSizeKey] += root_ir_sizes[r];
}
float cur_instr_utilization = instr_props[kUtilizationKey];
VLOG(8) << "Total utilization: " << cur_instr_utilization;
float cur_instr_times_emitted = instr_props[kIRSizeKey];
VLOG(8) << "Times emitted: " << cur_instr_times_emitted;
current_properties_[kFlopsKey] +=
cur_instr_utilization * instr_props[kFlopsKey];
current_properties_[kIRSizeKey] += cur_instr_times_emitted;
current_properties_[kBasicBlockSplitCountKey] +=
cur_instr_times_emitted * ElementalIrEmitter::OpInvalidatesCache(instr);
for (int operand_idx = 0; operand_idx < instr->operand_count();
++operand_idx) {
const HloInstruction* operand = instr->operand(operand_idx);
if ((instr->IsElementwise()) || instr->opcode() == HloOpcode::kTuple ||
instr->opcode() == HloOpcode::kGetTupleElement) {
for (const HloInstruction* r : elementwise_use_roots_[instr]) {
elementwise_use_roots_[operand].insert(r);
}
} else {
elementwise_use_roots_[operand].insert(operand);
float cur_operand_utilization =
cur_instr_utilization * operand_utilization(*instr, operand_idx);
int64_t operand_elements =
ShapeUtil::ElementsInRecursive(operand->shape());
if (operand_elements == 0) {
cur_operand_utilization = 0;
} else {
cur_operand_utilization =
ceil(cur_operand_utilization * operand_elements) /
operand_elements;
}
root_utilizations_[operand] += cur_operand_utilization;
root_ir_sizes[operand] += cur_instr_times_emitted;
}
}
}
return absl::OkStatus();
}
float GpuHloCostAnalysis::CommonElementwiseUtilization(
const HloInstruction* a, const HloInstruction* b) const {
float ret = 0;
for (auto r : elementwise_use_roots_.at(a)) {
if (elementwise_use_roots_.at(b).count(r)) {
ret += root_utilizations_.at(r);
}
}
return ret;
}
bool GpuHloCostAnalysis::ProducerConsumerMergedTooLarge(
const HloInstruction& producer, const HloInstruction& consumer) {
int64_t producer_replication = 1;
if (consumer.opcode() == HloOpcode::kFusion) {
producer_replication =
IrSize(*consumer.fused_parameter(consumer.operand_index(&producer)));
}
VLOG(5) << producer.name() << " would be emitted by " << consumer.name()
<< " x" << producer_replication;
int64_t n_splits = producer_replication * IrBasicBlockSplitCount(producer) +
IrBasicBlockSplitCount(consumer);
VLOG(5) << "Basic block split counts: " << IrBasicBlockSplitCount(producer)
<< ", " << IrBasicBlockSplitCount(consumer) << " -> " << n_splits;
int64_t merged_ir_size =
(IrSize(producer) * producer_replication + IrSize(consumer));
if (producer.GetModule()
->config()
.debug_options()
.xla_gpu_mlir_emitter_level() < 4) {
if (n_splits > kMaxBasicBlockSplitsPerFusion) {
return true;
}
merged_ir_size *= (1 << n_splits);
}
VLOG(5) << "IR sizes: " << IrSize(producer) << ", " << IrSize(consumer)
<< " -> " << merged_ir_size;
return merged_ir_size > kMaxIRSize;
}
absl::Status GpuHloCostAnalysis::HandleCustomCall(
const HloInstruction* custom_call) {
if (IsCublasGemm(*custom_call)) {
TF_ASSIGN_OR_RETURN(auto gpu_config,
custom_call->backend_config<gpu::GpuBackendConfig>());
const gpu::GemmBackendConfig& gemm_config =
gpu_config.gemm_backend_config();
const Shape& output_shape = custom_call->shape().IsTuple()
? custom_call->shape().tuple_shapes(0)
: custom_call->shape();
current_properties_[kFlopsKey] =
GetDotFlops(custom_call->operand(0)->shape(), output_shape,
gemm_config.dot_dimension_numbers());
return absl::OkStatus();
}
if (IsCustomCallToDnnConvolution(*custom_call)) {
current_properties_[kFlopsKey] = GetConvolutionFlops(custom_call);
if (custom_call->shape().IsTuple()) {
float output_size =
options_.shape_size(custom_call->shape().tuple_shapes(0));
current_properties_[kBytesAccessedKey] -=
current_properties_.output_bytes_accessed();
current_properties_[kBytesAccessedKey] += output_size;
current_properties_.set_output_bytes_accessed(output_size);
}
return absl::OkStatus();
}
return HloCostAnalysis::HandleCustomCall(custom_call);
}
int64_t GpuHloCostAnalysis::GetConvolutionFlops(
const HloInstruction* convolution) {
auto lhs = convolution->operand(0);
auto rhs = convolution->operand(1);
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
const Shape& result_shape = [&]() -> const Shape& {
const Shape& shape = convolution->shape();
if (IsCustomCallToDnnConvolution(*convolution) &&
convolution->shape().IsTuple()) {
return shape.tuple_shapes(0);
}
return shape;
}();
return HloCostAnalysis::GetConvolutionFlops(convolution, lhs_shape, rhs_shape,
result_shape);
}
int64_t GpuHloCostAnalysis::GetFlopsPerElementwiseOpElement(
const PrimitiveType type, const HloOpcode opcode) {
constexpr int64_t kDefaultFlopsPerElement = 3;
return FindOrDefault(hlo_elementwise_op_profile_,
std::make_pair(opcode, type), kDefaultFlopsPerElement);
}
int64_t GpuHloCostAnalysis::GetFlopsForElementwiseOp(const HloOpcode op_code,
const Shape& shape) {
int64_t flop_per_element =
GetFlopsPerElementwiseOpElement(shape.element_type(), op_code);
return flop_per_element * ShapeUtil::ElementsInRecursive(shape);
}
int64_t GpuHloCostAnalysis::GetFlopsForElementwiseOp(
const HloInstruction* instr) {
return GetFlopsForElementwiseOp(instr->opcode(), instr->shape());
}
absl::Status GpuHloCostAnalysis::HandleAllReduce(
const HloInstruction* allreduce) {
const HloModuleConfig& config = allreduce->GetModule()->config();
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(
allreduce->channel_id().has_value(),
Cast<HloAllReduceInstruction>(allreduce)->use_global_device_ids()));
int64_t num_devices = config.num_partitions();
int64_t num_replicas = config.replica_count();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> participant_counts,
GetPariticipantCountsForReplicaGroups(
num_replicas, num_devices, allreduce->replica_groups(), group_mode));
int64_t num_ranks = 1;
for (auto count : participant_counts) {
num_ranks = std::max(num_ranks, count);
}
VLOG(5) << "Computing cost for " << num_ranks << " ranks in "
<< allreduce->ToString();
int64_t output_bytes_accessed = 0;
ShapeUtil::ForEachSubshape(
allreduce->shape(), [&](const Shape& subshape, const ShapeIndex&) {
if (subshape.IsArray()) {
output_bytes_accessed += GetShapeSize(subshape);
}
});
int64_t bytes_accessed = output_bytes_accessed;
for (const HloInstruction* operand : allreduce->operands()) {
bytes_accessed += GetShapeSize(operand->shape());
}
current_properties_.set_output_bytes_accessed(output_bytes_accessed);
current_properties_[kBytesAccessedKey] = bytes_accessed;
current_properties_[kCollNumDevicesKey] = num_ranks;
current_properties_[kFlopsKey] = GetFlopsForElementwiseOp(
allreduce->to_apply()->root_instruction()->opcode(), allreduce->shape());
int num_intra_steps = 2 * (num_ranks - 1);
float scaling_ratio = (1.0 * num_ranks) / num_intra_steps;
current_properties_[kCollAlgoScaleRatioKey] = scaling_ratio;
return absl::OkStatus();
}
absl::Status GpuHloCostAnalysis::HandleConcatenate(const HloInstruction* hlo) {
int64_t flop_per_element = 6;
int64_t dim = Cast<HloConcatenateInstruction>(hlo)->concatenate_dimension();
if (dim > 0 && hlo->operand(0)->shape().dimensions()[dim] & 31) {
flop_per_element = 400;
}
current_properties_[kFlopsKey] =
flop_per_element * ShapeUtil::ElementsInRecursive(hlo->shape());
return absl::OkStatus();
}
absl::Status GpuHloCostAnalysis::HandleReduce(const HloInstruction* hlo) {
TF_RETURN_IF_ERROR(HloCostAnalysis::HandleReduce(hlo));
const HloReduceInstruction* reduce = DynCast<HloReduceInstruction>(hlo);
auto output_shape = reduce->shape().IsArray()
? reduce->shape()
: reduce->shape().tuple_shapes(0);
int64_t output_bytes_accessed = 0;
ShapeUtil::ForEachLeafShape(
reduce->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
output_bytes_accessed += GetShapeSize(sub_shape);
});
current_properties_.set_output_bytes_accessed(output_bytes_accessed);
int64_t bytes_accessed = output_bytes_accessed;
for (int64_t input_operand_id = 0; input_operand_id < reduce->input_count();
++input_operand_id) {
bytes_accessed +=
current_properties_.operand_bytes_accessed(input_operand_id);
}
int64_t output_shape_size = ShapeUtil::ElementsIn(output_shape);
for (int64_t init_operand_id = reduce->input_count();
init_operand_id < reduce->operand_count(); ++init_operand_id) {
auto init_operand = reduce->operand(init_operand_id);
int64_t operand_bytes_accessed =
output_shape_size * GetShapeSize(init_operand->shape());
current_properties_.set_operand_bytes_accessed(init_operand_id,
operand_bytes_accessed);
current_properties_.set_operand_utilization(init_operand_id,
output_shape_size);
bytes_accessed += operand_bytes_accessed;
}
current_properties_[kBytesAccessedKey] = bytes_accessed;
return absl::OkStatus();
}
absl::Status GpuHloCostAnalysis::HandleElementwiseOp(
const HloInstruction* hlo) {
current_properties_[kFlopsKey] = GetFlopsForElementwiseOp(hlo);
return absl::OkStatus();
}
std::unique_ptr<HloCostAnalysis>
GpuHloCostAnalysis::CreateNestedCostAnalysis() {
return std::make_unique<GpuHloCostAnalysis>(options_,
hlo_elementwise_op_profile_);
}
bool GpuHloCostAnalysis::KeyToCopyFromSubcomputation(
absl::string_view key) const {
return !absl::StartsWith(key, kBytesAccessedKey) &&
!absl::StartsWith(key, kUtilizationKey) &&
!absl::StartsWith(key, kIRSizeKey) &&
!absl::StartsWith(key, kBasicBlockSplitCountKey);
}
float GpuHloCostAnalysis::IrBasicBlockSplitCount(
const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kBasicBlockSplitCountKey, hlo_properties_);
}
float GpuHloCostAnalysis::IrSize(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kIRSizeKey, hlo_properties_);
}
}
} | #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/hlo_op_profiles.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class GpuHloCostAnalysisTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
HloCostAnalysis::Options options_{ShapeSizeBytesFunction(),
{},
{},
true};
GpuHloCostAnalysis analysis_{options_};
GpuHloCostAnalysisTest() : HloTestBase() {}
};
TEST_F(GpuHloCostAnalysisTest, ConvCustomCall) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = s8[128,12,24,24,4]{4,3,2,1,0} parameter(0)
p1 = s8[16,12,5,5,4]{4,3,2,1,0} parameter(1)
p2 = f32[16]{0} parameter(2)
conv1 = (s8[128,4,24,24,4]{4,3,2,1,0}, u8[0]{0}) custom-call(p0, p1, p2),
window={size=5x5 pad=2_2x2_2},
dim_labels=bf01_oi01->bf01,
custom_call_target="__cudnn$convBiasActivationForward"
ROOT tuple = tuple(conv1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloComputation* comp = module->entry_computation();
const HloInstruction* conv1 = comp->GetInstructionWithName("conv1");
int op0_size = sizeof(int8_t) * 128 * 12 * 24 * 24 * 4;
int op1_size = sizeof(int8_t) * 16 * 12 * 5 * 5 * 4;
int op2_size = sizeof(float) * 16;
int out_size = sizeof(int8_t) * 128 * 4 * 24 * 24 * 4;
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 0), op0_size);
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 1), op1_size);
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 2), op2_size);
EXPECT_EQ(analysis_.output_bytes_accessed(*conv1), out_size);
EXPECT_EQ(analysis_.bytes_accessed(*conv1),
op0_size + op1_size + op2_size + out_size);
EXPECT_EQ(analysis_.flop_count(*conv1), 159694848);
}
TEST_F(GpuHloCostAnalysisTest, ReduceWindowWithOverlapsRepeatedReads) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
add {
a0 = f32[] parameter(0)
a1 = f32[] parameter(1)
ROOT _ = f32[] add(a0, a1)
}
ENTRY entry {
p0 = f32[8,8] parameter(0)
c0 = f32[] constant(0)
ROOT _ = f32[3,4] reduce-window(p0, c0), window={size=4x5 stride=2x1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
int n_output_elements = 3 * 4;
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.flop_count(), 3 * n_output_elements * (4 * 5 - 1));
EXPECT_EQ(analysis_.bytes_accessed(),
sizeof(float) * (8 * 8 + 1 + n_output_elements));
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0),
sizeof(float) * n_output_elements * 4 * 5);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
EXPECT_EQ(analysis_.output_bytes_accessed(*root),
sizeof(float) * n_output_elements);
}
TEST_F(GpuHloCostAnalysisTest, BroadcastWithRepeats) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[] parameter(0)
c1 = s8[] constant(0)
a1 = s8[] add(p1, c1)
b1 = s8[10000] broadcast(a1), dimensions={}
b2 = s8[10000] broadcast(c1), dimensions={}
ROOT r1 = s8[10000] add(b1, b2)
}
ENTRY e {
p0 = s8[] parameter(0)
ROOT r0 = s8[10000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 10000);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 10000);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2 * 10000);
EXPECT_EQ(analysis_.bytes_accessed(), 2 * 10000);
}
TEST_F(GpuHloCostAnalysisTest, WithoutRepeats) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[] parameter(0)
a1 = s8[] add(p1, p1)
b1 = s8[10000] broadcast(a1), dimensions={}
a2 = s8[10000] add(b1, b1)
slice1 = s8[8000] slice(a2), slice={[0:8000]}
slice2 = s8[8000] slice(a2), slice={[2000:10000]}
c = s8[10000] constant({...})
slicec1 = s8[8000] slice(c), slice={[0:8000]}
slicec2 = s8[8000] slice(c), slice={[2000:10000]}
a3 = s8[8000] add(slice1, slice2)
a4 = s8[8000] add(slicec1, slicec2)
ROOT a5 = s8[8000] add(a3, a4)
}
ENTRY e {
p0 = s8[] parameter(0)
ROOT r0 = s8[8000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
options_.count_multiple_input_accesses = false;
GpuHloCostAnalysis analysis{options_};
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis));
EXPECT_EQ(analysis.output_bytes_accessed(*root), 8000);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis.bytes_accessed(*root), 1 + 8000 + 10000);
EXPECT_EQ(analysis.bytes_accessed(), 1 + 8000 + 10000);
}
TEST_F(GpuHloCostAnalysisTest, BroadcastFlops) {
absl::string_view hlo_string = R"(
HloModule m
f {
i0 = f32[1024] iota(), iota_dimension=0
m0 = f32[1024] add(i0, i0)
s0 = f32[1024] multiply(m0, m0)
b0 = f32[1024,1024] broadcast(s0), dimensions={0}
ROOT r0 = f32[1024,1024] negate(b0)
}
ENTRY e {
ROOT r = f32[1024,1024] fusion(), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto n_elements = 1024 * 1024;
EXPECT_EQ(analysis_.output_bytes_accessed(*root), n_elements * 4);
EXPECT_EQ(analysis_.bytes_accessed(*root), n_elements * 4);
EXPECT_EQ(analysis_.bytes_accessed(), n_elements * 4);
EXPECT_EQ(analysis_.flop_count(), n_elements * 3 * 3);
EXPECT_EQ(analysis_.IrSize(*root), 5);
}
TEST_F(GpuHloCostAnalysisTest, Slice) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[100000000] parameter(0)
i1 = s8[100000000] iota(), iota_dimension=0
a1 = s8[100000000] add(p1, i1)
ROOT r1 = s8[1] slice(a1), slice={[0:1]}
}
ENTRY e {
p0 = s8[100000000] parameter(0)
ROOT r0 = s8[1] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2);
EXPECT_EQ(analysis_.bytes_accessed(), 2);
EXPECT_EQ(analysis_.IrSize(*root), 4);
}
TEST_F(GpuHloCostAnalysisTest, TwoSlices) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[100] parameter(0)
i1 = s8[100] iota(), iota_dimension=0
a1 = s8[100] add(p1, i1)
slice1 = s8[1] slice(a1), slice={[0:1]}
slice2 = s8[1] slice(a1), slice={[3:4]}
ROOT r = s8[1] add(slice1, slice2)
}
ENTRY e {
p0 = s8[100] parameter(0)
ROOT r0 = s8[1] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 2);
EXPECT_EQ(analysis_.bytes_accessed(*root), 3);
EXPECT_EQ(analysis_.bytes_accessed(), 3);
EXPECT_EQ(analysis_.IrSize(*root), 9);
}
TEST_F(GpuHloCostAnalysisTest, MultipleTrivialUsers) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[] parameter(0)
m0 = s8[] multiply(p0, p0)
n0 = s8[] negate(p0)
ROOT a0 = s8[] add(m0, n0)
}
ENTRY e {
param0 = s8[] parameter(0)
ROOT r0 = s8[] fusion(param0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis_.bytes_accessed(*root), 1 + 1);
EXPECT_EQ(analysis_.bytes_accessed(), 1 + 1);
EXPECT_EQ(analysis_.IrSize(*root), 4);
}
TEST_F(GpuHloCostAnalysisTest, MixedUsers) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[10] parameter(0)
n0 = s8[10] negate(p0)
m0 = s8[10] multiply(n0, n0)
a0 = s8[10] add(n0, n0)
s0 = s8[5] slice(a0), slice={[0:5]}
s1 = s8[2] slice(n0), slice={[4:6]}
n1 = s8[2] negate(s1)
ROOT c0 = s8[17] concatenate(s0, m0, n1), dimensions={0}
}
ENTRY e {
param0 = s8[10] parameter(0)
ROOT r0 = s8[17] fusion(param0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 17);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 17);
EXPECT_EQ(analysis_.bytes_accessed(*root), 17 + 17);
EXPECT_EQ(analysis_.bytes_accessed(), 17 + 17);
EXPECT_EQ(analysis_.IrSize(*root->fused_parameter(0)), 3);
EXPECT_EQ(analysis_.IrSize(*root->fused_parameter(0)),
analysis_.IrSize(*root->fused_parameter(0)->users()[0]));
EXPECT_EQ(analysis_.IrSize(*root), 12);
}
TEST_F(GpuHloCostAnalysisTest, FractionalUseRoundingUp) {
absl::string_view hlo_string = R"(
HloModule m
add_s8 {
lhs = s8[] parameter(0)
rhs = s8[] parameter(1)
ROOT add = s8[] add(lhs, rhs)
}
f {
p0 = s8[] parameter(0)
b0 = s8[10] broadcast(p0), dimensions={}
c0 = s8[] constant(0)
r0 = s8[] reduce(b0, c0), dimensions={0}, to_apply=add_s8
bitcast0 = s8[1] bitcast(r0)
i0 = s8[5] iota(), iota_dimension=0
cat0 = s8[6] concatenate(bitcast0, i0), dimensions={0}
p1 = s32[] parameter(1)
ROOT s0 = s8[2] dynamic-slice(cat0, p1), dynamic_slice_sizes={2}
}
ENTRY e {
p0 = s8[] parameter(0)
p1 = s32[] parameter(1)
ROOT r = s8[2] fusion(p0, p1), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 2);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 10);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 1), 4);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2 + 10 + 4);
EXPECT_EQ(analysis_.bytes_accessed(), 2 + 10 + 4);
}
TEST_F(GpuHloCostAnalysisTest, LargeConstant) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[1000] parameter(0)
c0 = s8[1000] constant({...})
ROOT a0 = s8[1000] add(p0, c0)
}
ENTRY e {
p0 = s8[1000] parameter(0)
ROOT r = s8[1000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1000);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1000);
EXPECT_EQ(analysis_.bytes_accessed(*root), 3000);
EXPECT_EQ(analysis_.bytes_accessed(), 3000);
EXPECT_EQ(analysis_.IrSize(*root), 3);
}
TEST_F(GpuHloCostAnalysisTest, DynUpdateSliceUsingOperandData) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
to_update = s8[3,1,1,1] parameter(0)
update = s8[1,1,1,1] constant(0)
a = s32[] constant(0)
dus = s8[3,1,1,1] dynamic-update-slice(to_update, update, a, a, a, a)
ROOT _ = s8[3,1,1,1] negate(dus)
}
ENTRY _ {
to_update = s8[3,1,1,1] parameter(0)
ROOT _ = s8[3,1,1,1] fusion(to_update), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
ASSERT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_EQ(analysis_.operand_bytes_accessed(*fusion, 0), 3 - 1);
EXPECT_EQ(analysis_.output_bytes_accessed(*fusion), 3);
}
TEST_F(GpuHloCostAnalysisTest, DynUpdateSliceNotUsingOperandData) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
to_update = s8[3,1,1,1] parameter(0)
update = s8[1,1,1,1] constant(0)
a = s32[] constant(0)
ROOT dus = s8[3,1,1,1] dynamic-update-slice(to_update, update, a, a, a, a)
}
ENTRY _ {
to_update = s8[3,1,1,1] parameter(0)
ROOT _ = s8[3,1,1,1] fusion(to_update), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
ASSERT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_EQ(analysis_.operand_bytes_accessed(*fusion, 0), 0);
EXPECT_EQ(analysis_.output_bytes_accessed(*fusion), 1);
}
// Both parameters feed the same elementwise add, and that add is consumed by
// two reduces, so the common elementwise utilization of the two parameters is
// expected to be 2 (the shared subgraph is effectively evaluated twice).
TEST_F(GpuHloCostAnalysisTest, CommonElementwiseUseTwoParameters) {
  const char* hlo_fusion_module_str = R"(
HloModule m
add {
  p0 = s8[] parameter(0)
  p1 = s8[] parameter(1)
  ROOT _ = s8[] add(p0, p1)
}
f {
  p0 = s8[10] parameter(0)
  p1 = s8[10] parameter(1)
  a = s8[10] add(p0, p1)
  c0 = s8[] constant(0)
  r0 = s8[] reduce(a, c0), dimensions={0}, to_apply=add
  c1 = s8[] constant(100)
  r1 = s8[] reduce(a, c1), dimensions={0}, to_apply=add
  ROOT _ = s8[] add(r0, r1)
}
ENTRY _ {
  p0 = s8[10] parameter(0)
  p1 = s8[10] parameter(1)
  ROOT _ = s8[] fusion(p0, p1), kind=kLoop, calls=f
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_fusion_module_str));
  ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
  HloInstruction* fusion = module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis_.CommonElementwiseUtilization(fusion->fused_parameter(0),
                                                   fusion->fused_parameter(1)),
            2.f);
}
// p0 shares a full elementwise path with the root (utilization 1); p1 only
// reaches the root through a broadcast, so it shares no elementwise use
// (utilization 0).
TEST_F(GpuHloCostAnalysisTest, CommonElementwiseUseParameterAndRoot) {
  const char* hlo_fusion_module_str = R"(
HloModule m
f {
  p0 = s8[10] parameter(0)
  p1 = s8[] parameter(1)
  p1b = s8[10] broadcast(p1)
  a = s8[10] add(p0, p1b)
  ROOT _ = s8[10] negate(a)
}
ENTRY _ {
  p0 = s8[10] parameter(0)
  p1 = s8[] parameter(1)
  ROOT _ = s8[10] fusion(p0, p1), kind=kLoop, calls=f
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_fusion_module_str));
  ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
  HloInstruction* fusion = module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis_.CommonElementwiseUtilization(
                fusion->fused_parameter(0), fusion->fused_expression_root()),
            1.f);
  EXPECT_EQ(analysis_.CommonElementwiseUtilization(
                fusion->fused_parameter(1), fusion->fused_expression_root()),
            0.f);
}
// Same expectations as above, but with a multi-output fusion whose root is a
// tuple of the shared add and its negation.
TEST_F(GpuHloCostAnalysisTest,
       CommonElementwiseUseParameterAndRootMultiOutputFusion) {
  const char* hlo_fusion_module_str = R"(
HloModule m
f {
  p0 = s8[10] parameter(0)
  p1 = s8[] parameter(1)
  p1b = s8[10] broadcast(p1)
  a = s8[10] add(p0, p1b)
  neg = s8[10] negate(a)
  ROOT _ = (s8[10], s8[10]) tuple(a, neg)
}
ENTRY _ {
  p0 = s8[10] parameter(0)
  p1 = s8[] parameter(1)
  ROOT _ = (s8[10], s8[10]) fusion(p0, p1), kind=kLoop, calls=f
})";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_fusion_module_str));
  ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
  HloInstruction* fusion = module->entry_computation()->root_instruction();
  EXPECT_EQ(analysis_.CommonElementwiseUtilization(
                fusion->fused_parameter(0), fusion->fused_expression_root()),
            1.f);
  EXPECT_EQ(analysis_.CommonElementwiseUtilization(
                fusion->fused_parameter(1), fusion->fused_expression_root()),
            0.f);
}
// Cost of a row reduction f32[32,40] -> f32[32]: the full input and the
// broadcast init value are read, one f32 per output row is written.
TEST_F(GpuHloCostAnalysisTest, Reduce) {
  absl::string_view hlo_string = R"(
HloModule m
add {
  param_0 = f32[] parameter(0)
  param_1 = f32[] parameter(1)
  ROOT add.0 = f32[] add(param_0, param_1)
}
ENTRY entry_computation {
  param_0.3 = f32[32,40]{1,0} parameter(0)
  constant = f32[] constant(0)
  ROOT reduce = f32[32]{0} reduce(param_0.3, constant), dimensions={1}, to_apply=add
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
  const HloInstruction* reduce =
      module->entry_computation()->root_instruction();
  // f32 is 4 bytes; the init scalar is counted once per output element.
  int64_t input_bytes_accessed = 4 * 32 * 40;
  int64_t init_bytes_accessed = 4 * 32;
  int64_t output_bytes_accessed = 4 * 32;
  EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 0), input_bytes_accessed);
  EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 1), init_bytes_accessed);
  EXPECT_EQ(analysis_.output_bytes_accessed(*reduce), output_bytes_accessed);
  EXPECT_EQ(analysis_.bytes_accessed(*reduce),
            input_bytes_accessed + init_bytes_accessed + output_bytes_accessed);
  // 39 pairwise adds per output row across 32 rows; the factor 3 appears to
  // be the model's flop weight for the f32 add reducer -- confirm against the
  // cost model if this changes.
  EXPECT_EQ(analysis_.flop_count(*reduce), 32 * 39 * 3);
}
// Variadic reduce with two value inputs and two init values: input and init
// bytes are counted once per operand, and the tuple output doubles the
// output bytes.
TEST_F(GpuHloCostAnalysisTest, VariadicReduce) {
  absl::string_view hlo_string = R"(
HloModule m
add {
  param_0 = f32[] parameter(0)
  param_1 = f32[] parameter(1)
  param_2 = f32[] parameter(2)
  param_3 = f32[] parameter(3)
  add.0 = f32[] add(param_0, param_2)
  add.1 = f32[] add(param_1, param_3)
  ROOT t = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry_computation {
  param_0.3 = f32[32,40]{1,0} parameter(0)
  param_1.3 = f32[32,40]{1,0} parameter(1)
  param_2.2 = f32[] parameter(2)
  constant = f32[] constant(0)
  ROOT reduce = (f32[32]{0}, f32[32]{0}) reduce(param_0.3, param_1.3, param_2.2, constant), dimensions={1}, to_apply=add
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
  const HloInstruction* reduce =
      module->entry_computation()->root_instruction();
  int64_t input_bytes_accessed = 4 * 32 * 40;
  int64_t init_bytes_accessed = 4 * 32;
  int64_t output_bytes_accessed = 2 * 4 * 32;
  EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 0), input_bytes_accessed);
  EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 1), input_bytes_accessed);
  EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 2), init_bytes_accessed);
  EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 3), init_bytes_accessed);
  EXPECT_EQ(analysis_.output_bytes_accessed(*reduce), output_bytes_accessed);
  EXPECT_EQ(analysis_.bytes_accessed(*reduce), 2 * input_bytes_accessed +
                                                   2 * init_bytes_accessed +
                                                   output_bytes_accessed);
  // Two adds per reduction step: twice the flops of the single-input case.
  EXPECT_EQ(analysis_.flop_count(*reduce), 32 * 39 * 6);
}
// Verifies that per-(opcode, element type) flop counts supplied via a custom
// HloOpProfile override the analysis defaults: each instruction's flop count
// must equal its profiled per-element cost times the element count.
TEST_F(GpuHloCostAnalysisTest, CustomOpProfileIsUsed) {
  absl::string_view hlo_string = R"(
HloModule m
ENTRY entry_computation {
  param_0 = f32[10] parameter(0)
  param_1 = f32[10] parameter(1)
  param_2 = f32[10] parameter(2)
  param_3 = f32[10] parameter(3)
  tanh = f32[10] tanh(param_0)
  mul = f32[10] multiply(tanh, param_1)
  ROOT clamp = f32[10] clamp(mul, param_2, param_3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  // Distinct prime per-element costs make any mix-up between ops detectable.
  HloOpProfiles::HloOpProfile hlo_op_profile;
  const int kF32ClampFlopsPerElement = 7;
  const int kF32MultiplyFlopsPerElement = 11;
  const int kF32TanhFlopsPerElement = 13;
  const int kNumElements = 10;
  hlo_op_profile[{HloOpcode::kClamp, PrimitiveType::F32}] =
      kF32ClampFlopsPerElement;
  hlo_op_profile[{HloOpcode::kMultiply, PrimitiveType::F32}] =
      kF32MultiplyFlopsPerElement;
  hlo_op_profile[{HloOpcode::kTanh, PrimitiveType::F32}] =
      kF32TanhFlopsPerElement;
  GpuHloCostAnalysis analysis(options_, hlo_op_profile);
  ASSERT_IS_OK(module->entry_computation()->Accept(&analysis));
  const HloInstruction* clamp = module->entry_computation()->root_instruction();
  const HloInstruction* mul = clamp->operand(0);
  const HloInstruction* tanh = mul->operand(0);
  EXPECT_EQ(analysis.flop_count(*clamp),
            kF32ClampFlopsPerElement * kNumElements);
  EXPECT_EQ(analysis.flop_count(*mul),
            kF32MultiplyFlopsPerElement * kNumElements);
  EXPECT_EQ(analysis.flop_count(*tanh), kF32TanhFlopsPerElement * kNumElements);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_hlo_cost_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_hlo_cost_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
91deb376-7ef7-42c4-bc88-4a85799d766c | cpp | google/cel-cpp | reference | common/reference.cc | common/reference_test.cc | #include "common/reference.h"
#include "absl/base/no_destructor.h"
namespace cel {
// Returns a process-lifetime singleton holding an empty VariableReference.
// absl::NoDestructor skips the destructor at program exit.
const VariableReference& VariableReference::default_instance() {
  static const absl::NoDestructor<VariableReference> kInstance;
  return *kInstance;
}
// Returns a process-lifetime singleton holding an empty FunctionReference.
// absl::NoDestructor skips the destructor at program exit.
const FunctionReference& FunctionReference::default_instance() {
  static const absl::NoDestructor<FunctionReference> kInstance;
  return *kInstance;
}
} | #include "common/reference.h"
#include <cstdint>
#include <string>
#include <vector>
#include "common/constant.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::IsEmpty;
using ::testing::VariantWith;
// Exercises value accessors: has_value/value/set_value/release_value, and
// that release_value() resets the stored constant to the default.
TEST(VariableReference, Value) {
  VariableReference variable_reference;
  EXPECT_FALSE(variable_reference.has_value());
  EXPECT_EQ(variable_reference.value(), Constant{});
  Constant value;
  value.set_bool_value(true);
  variable_reference.set_value(value);
  EXPECT_TRUE(variable_reference.has_value());
  EXPECT_EQ(variable_reference.value(), value);
  EXPECT_EQ(variable_reference.release_value(), value);
  EXPECT_EQ(variable_reference.value(), Constant{});
}
// Equality must distinguish a reference with a constant value from the
// default-constructed one.
TEST(VariableReference, Equality) {
  VariableReference variable_reference;
  EXPECT_EQ(variable_reference, VariableReference{});
  variable_reference.mutable_value().set_bool_value(true);
  EXPECT_NE(variable_reference, VariableReference{});
}
// Exercises overload list mutation and that release_overloads() empties the
// list while returning its previous contents in order.
TEST(FunctionReference, Overloads) {
  FunctionReference function_reference;
  EXPECT_THAT(function_reference.overloads(), IsEmpty());
  function_reference.mutable_overloads().reserve(2);
  function_reference.mutable_overloads().push_back("foo");
  function_reference.mutable_overloads().push_back("bar");
  EXPECT_THAT(function_reference.release_overloads(),
              ElementsAre("foo", "bar"));
  EXPECT_THAT(function_reference.overloads(), IsEmpty());
}
// Equality must distinguish a reference with overloads from the default one.
TEST(FunctionReference, Equality) {
  FunctionReference function_reference;
  EXPECT_EQ(function_reference, FunctionReference{});
  function_reference.mutable_overloads().push_back("foo");
  EXPECT_NE(function_reference, FunctionReference{});
}
// Exercises name accessors; release_name() returns the name and clears it.
TEST(Reference, Name) {
  Reference reference;
  EXPECT_THAT(reference.name(), IsEmpty());
  reference.set_name("foo");
  EXPECT_EQ(reference.name(), "foo");
  EXPECT_EQ(reference.release_name(), "foo");
  EXPECT_THAT(reference.name(), IsEmpty());
}
// A default Reference holds the variable alternative, and releasing it keeps
// the kind as variable.
TEST(Reference, Variable) {
  Reference reference;
  EXPECT_THAT(reference.kind(), VariantWith<VariableReference>(_));
  EXPECT_TRUE(reference.has_variable());
  EXPECT_THAT(reference.release_variable(), Eq(VariableReference{}));
  EXPECT_TRUE(reference.has_variable());
}
// Switching to the function alternative: function() on a variable-kind
// reference returns a default, mutable_function() switches the kind, and
// release_function() switches it back away from function.
TEST(Reference, Function) {
  Reference reference;
  EXPECT_FALSE(reference.has_function());
  EXPECT_THAT(reference.function(), Eq(FunctionReference{}));
  reference.mutable_function();
  EXPECT_TRUE(reference.has_function());
  EXPECT_THAT(reference.variable(), Eq(VariableReference{}));
  EXPECT_THAT(reference.kind(), VariantWith<FunctionReference>(_));
  EXPECT_THAT(reference.release_function(), Eq(FunctionReference{}));
  EXPECT_FALSE(reference.has_function());
}
// Equality over the factory helpers distinguishes value and overload lists.
TEST(Reference, Equality) {
  EXPECT_EQ(MakeVariableReference("foo"), MakeVariableReference("foo"));
  EXPECT_NE(MakeVariableReference("foo"),
            MakeConstantVariableReference("foo", Constant(int64_t{1})));
  EXPECT_EQ(
      MakeFunctionReference("foo", std::vector<std::string>{"bar", "baz"}),
      MakeFunctionReference("foo", std::vector<std::string>{"bar", "baz"}));
  EXPECT_NE(
      MakeFunctionReference("foo", std::vector<std::string>{"bar", "baz"}),
      MakeFunctionReference("foo", std::vector<std::string>{"bar"}));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/reference.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/reference_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
32138b08-241d-42a6-8f88-50a041a5c0d0 | cpp | tensorflow/tensorflow | xplane_to_tf_data_stats | tensorflow/core/profiler/convert/xplane_to_tf_data_stats.cc | tensorflow/core/profiler/convert/xplane_to_tf_data_stats_test.cc | #include "tensorflow/core/profiler/convert/xplane_to_tf_data_stats.h"
#include <algorithm>
#include <optional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "xla/tsl/profiler/utils/group_events.h"
#include "xla/tsl/profiler/utils/tf_op_utils.h"
#include "xla/tsl/profiler/utils/tf_xplane_visitor.h"
#include "xla/tsl/profiler/utils/timespan.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/profiler/protobuf/tf_data_stats.pb.h"
#include "tensorflow/core/profiler/utils/html_utils.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_visitor.h"
namespace tensorflow {
namespace profiler {
// Threshold above which a root-iterator call is considered "slow":
// 50 * 1,000,000 ps == 50 microseconds (matches the "slower than 50 us"
// wording used in the summary text).
const int64_t kSlowCallThresholdPs = 50 * 1000000;
namespace {
bool IsRootIteratorEvent(const XEventVisitor& iterator_event) {
std::vector<absl::string_view> split_result =
absl::StrSplit(iterator_event.Name(), "::");
return split_result.size() == 2;
}
// Returns true if the iterator name denotes one of the known asynchronous
// tf.data transformations. The set is heap-allocated once and intentionally
// never destroyed.
bool IsAsyncIterator(absl::string_view iterator_event_name) {
  static const auto* const kAsyncIteratorNames =
      new absl::flat_hash_set<absl::string_view>(
          {"Prefetch", "ParallelInterleave", "ParallelMap", "ParseExample",
           "MapAndBatch", "DataService", "LegacyParallelInterleave",
           "ParallelBatch"});
  return kAsyncIteratorNames->contains(iterator_event_name);
}
// Populates `metadata` for the iterator `id` from its XEvent: the parent id
// (when recorded on the event), the short and full names, and whether the
// iterator is an asynchronous transformation.
void SetIteratorMetadata(int64_t id, const XEventVisitor& event,
                         IteratorMetadata* metadata) {
  metadata->set_id(id);
  if (auto parent_id = event.GetStat(StatType::kParentId);
      parent_id.has_value()) {
    metadata->set_parent_id(parent_id->IntValue());
  }
  metadata->set_name(tsl::profiler::IteratorName(event.Name()));
  metadata->set_long_name(event.Name().data(), event.Name().size());
  metadata->set_is_async(IsAsyncIterator(metadata->name()));
}
// If `event` is the second iterator of a device input pipeline, returns the
// id of that pipeline (stored as the event's parent id); otherwise returns
// std::nullopt.
std::optional<int64_t> FindDeviceInputPipeline(const XEventVisitor& event) {
  if (event.Type() != HostEventType::kDeviceInputPipelineSecondIterator) {
    return std::nullopt;
  }
  auto parent_id = event.GetStat(StatType::kParentId);
  if (!parent_id.has_value()) return std::nullopt;
  return parent_id->IntValue();
}
// Walks the event forest and records iterator metadata for every tf.data
// iterator event into `tf_data_stats`. Root iterator events are grouped per
// iterator id into `root_iterator_event_map` (the iterator id doubles as the
// input-pipeline id). Iterators that appear as a device input pipeline's
// second iterator contribute their parent pipeline id to
// `device_input_pipeline_ids`.
void ProcessEventForest(
    const tsl::profiler::EventForest& event_forest,
    absl::flat_hash_set<int64_t>* device_input_pipeline_ids,
    absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>*
        root_iterator_event_map,
    TfDataStats* tf_data_stats) {
  const tsl::profiler::EventNodeMap& event_node_map =
      event_forest.GetEventNodeMap();
  auto* iterator_event_list =
      gtl::FindOrNull(event_node_map, HostEventType::kIterator);
  if (!iterator_event_list) return;
  for (const tsl::profiler::EventNode& iterator_event : *iterator_event_list) {
    const XEventVisitor& iterator_event_visitor =
        iterator_event.GetEventVisitor();
    // The step id stat carries the iterator id; skip events without it.
    auto iterator_id_stat = iterator_event_visitor.GetStat(StatType::kStepId);
    if (!iterator_id_stat.has_value()) continue;
    int64_t iterator_id = iterator_id_stat->IntValue();
    auto result = tf_data_stats->mutable_iterator_metadata()->insert(
        {iterator_id, IteratorMetadata()});
    IteratorMetadata& metadata = result.first->second;
    if (result.second) {
      // Metadata is only filled on first sight of this iterator id.
      SetIteratorMetadata(iterator_id, iterator_event_visitor, &metadata);
    }
    if (IsRootIteratorEvent(iterator_event_visitor)) {
      (*root_iterator_event_map)[iterator_id].push_back(&iterator_event);
    }
  }
  // Second pass over device-input-pipeline second-iterator events: they get
  // metadata too, and reveal which pipeline ids belong to devices.
  auto* device_input_pipeline_second_iterator_events = gtl::FindOrNull(
      event_node_map, HostEventType::kDeviceInputPipelineSecondIterator);
  if (!device_input_pipeline_second_iterator_events) return;
  for (const tsl::profiler::EventNode& iterator_event :
       *device_input_pipeline_second_iterator_events) {
    const XEventVisitor& iterator_event_visitor =
        iterator_event.GetEventVisitor();
    auto iterator_id_stat = iterator_event_visitor.GetStat(StatType::kStepId);
    if (!iterator_id_stat.has_value()) continue;
    int64_t iterator_id = iterator_id_stat->IntValue();
    auto result = tf_data_stats->mutable_iterator_metadata()->insert(
        {iterator_id, IteratorMetadata()});
    IteratorMetadata& metadata = result.first->second;
    if (result.second) {
      SetIteratorMetadata(iterator_id, iterator_event_visitor, &metadata);
      std::optional<int64_t> device_input_pipeline_id =
          FindDeviceInputPipeline(iterator_event_visitor);
      if (device_input_pipeline_id.has_value()) {
        device_input_pipeline_ids->insert(*device_input_pipeline_id);
      }
    }
  }
}
// Fills `metadata` for an input pipeline: its id, whether it runs on host or
// device, and a display name of the form "Host:<n>" or "Device:<n>" where
// <n> is a running index per pipeline kind.
void SetInputPipelineMetadata(int64_t id, int64_t name_id,
                              bool is_device_input_pipeline,
                              InputPipelineMetadata* metadata) {
  constexpr absl::string_view kHostInputPipelinePrefix = "Host:";
  constexpr absl::string_view kDeviceInputPipelinePrefix = "Device:";
  metadata->set_id(id);
  metadata->set_type(is_device_input_pipeline ? InputPipelineMetadata::DEVICE
                                              : InputPipelineMetadata::HOST);
  metadata->set_name(
      absl::StrCat(is_device_input_pipeline ? kDeviceInputPipelinePrefix
                                            : kHostInputPipelinePrefix,
                   name_id));
}
// Recursively accumulates per-iterator statistics for one call tree rooted at
// `iterator_event`. An iterator's self time is its duration minus the time
// overlapped by its tf.data children; `is_blocking` propagates down only to
// children that actually overlap the parent's span.
void ProcessIteratorEvent(const tsl::profiler::EventNode& iterator_event,
                          InputPipelineStat* input_pipeline_stat,
                          bool is_blocking, int level = 0) {
  // Guard against pathological / cyclic event trees.
  if (level > 100) return;
  const XEventVisitor& visitor = iterator_event.GetEventVisitor();
  auto iterator_id_stat = visitor.GetStat(StatType::kStepId);
  if (!iterator_id_stat.has_value()) return;
  int64_t iterator_id = iterator_id_stat->IntValue();
  auto result = input_pipeline_stat->mutable_iterator_stats()->insert(
      {iterator_id, IteratorStat()});
  IteratorStat& iterator_stat = result.first->second;
  if (result.second) {
    // First event seen for this iterator id within this call tree.
    iterator_stat.set_id(iterator_id);
    iterator_stat.set_start_time_ps(visitor.TimestampPs());
  }
  iterator_stat.set_duration_ps(iterator_stat.duration_ps() +
                                visitor.DurationPs());
  int64_t self_time_ps = visitor.DurationPs();
  tsl::profiler::Timespan self_time_span = visitor.GetTimespan();
  for (const tsl::profiler::EventNode* child : iterator_event.GetChildren()) {
    const XEventVisitor& child_visitor = child->GetEventVisitor();
    // Only tf.data child events count against this iterator's self time.
    if (tsl::profiler::ParseTfOpFullname(child_visitor.Name()).category ==
        tsl::profiler::Category::kTfData) {
      int64_t overlap_duration_ps =
          self_time_span.OverlappedDurationPs(child_visitor.GetTimespan());
      // A child is blocking only if the parent is blocking AND the child
      // overlaps the parent's timespan (nonzero overlap).
      ProcessIteratorEvent(*child, input_pipeline_stat,
                           is_blocking && overlap_duration_ps, level + 1);
      self_time_ps -= overlap_duration_ps;
    }
  }
  iterator_stat.set_self_time_ps(iterator_stat.self_time_ps() + self_time_ps);
  iterator_stat.set_is_blocking(iterator_stat.is_blocking() || is_blocking);
  iterator_stat.set_num_calls(iterator_stat.num_calls() + 1);
}
// Marks the bottleneck iterator of one input-pipeline call: the blocking
// iterator with the largest self time. Defaults to id 0 / latency 0 when no
// iterator is blocking.
void SetBottleneckIteratorId(InputPipelineStat* input_pipeline_stat) {
  int64_t bottleneck_id = 0;
  int64_t bottleneck_self_time_ps = 0;
  for (const auto& [iterator_id, stat] :
       input_pipeline_stat->iterator_stats()) {
    if (stat.is_blocking() && stat.self_time_ps() > bottleneck_self_time_ps) {
      bottleneck_id = iterator_id;
      bottleneck_self_time_ps = stat.self_time_ps();
    }
  }
  input_pipeline_stat->set_bottleneck_iterator_id(bottleneck_id);
  input_pipeline_stat->set_bottleneck_iterator_latency_ps(
      bottleneck_self_time_ps);
}
// Builds per-input-pipeline statistics from the grouped root iterator events:
// per-call iterator stats (slowest call first), the bottleneck iterator of
// each call, and aggregate latency metrics (avg/min/max and the number of
// calls slower than kSlowCallThresholdPs).
void ProcessInputPipelines(
    const absl::flat_hash_set<int64_t>& device_input_pipeline_ids,
    absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>*
        root_iterator_event_map,
    TfDataStats* tf_data_stats) {
  auto* input_pipelines = tf_data_stats->mutable_input_pipelines();
  int64_t num_host_input_pipelines = 0;
  int64_t num_device_input_pipelines = 0;
  for (auto& id_and_events : *root_iterator_event_map) {
    auto& root_iterator_id = id_and_events.first;
    auto& root_iterator_events = id_and_events.second;
    // Sort calls by descending duration so stats(0) is the slowest call;
    // SetBottleneckAnalysis relies on this ordering.
    absl::c_sort(root_iterator_events, [](const tsl::profiler::EventNode* lhs,
                                          const tsl::profiler::EventNode* rhs) {
      return lhs->GetEventVisitor().DurationPs() >
             rhs->GetEventVisitor().DurationPs();
    });
    auto result =
        input_pipelines->insert({root_iterator_id, InputPipelineStats()});
    InputPipelineStats& input_pipeline_stats = result.first->second;
    InputPipelineMetadata* metadata = input_pipeline_stats.mutable_metadata();
    if (result.second) {
      bool is_device_input_pipeline =
          device_input_pipeline_ids.contains(root_iterator_id);
      int64_t name_id = is_device_input_pipeline ? num_device_input_pipelines++
                                                 : num_host_input_pipelines++;
      SetInputPipelineMetadata(root_iterator_id, name_id,
                               is_device_input_pipeline, metadata);
    }
    int64_t sum_latency_ps = 0;
    int64_t min_latency_ps = INT64_MAX;
    int64_t max_latency_ps = 0;
    int64_t num_slow_calls = 0;
    for (const tsl::profiler::EventNode* root_iterator_event :
         root_iterator_events) {
      InputPipelineStat* stat = input_pipeline_stats.add_stats();
      // The root iterator itself is always considered blocking.
      ProcessIteratorEvent(*root_iterator_event, stat,
                           /*is_blocking=*/true);
      SetBottleneckIteratorId(stat);
      int64_t latency_ps = root_iterator_event->GetEventVisitor().DurationPs();
      sum_latency_ps += latency_ps;
      min_latency_ps = std::min(min_latency_ps, latency_ps);
      max_latency_ps = std::max(max_latency_ps, latency_ps);
      if (latency_ps > kSlowCallThresholdPs) num_slow_calls++;
    }
    // Safe division: map entries are only created by pushing at least one
    // event in ProcessEventForest.
    input_pipeline_stats.set_avg_latency_ps(sum_latency_ps /
                                            root_iterator_events.size());
    input_pipeline_stats.set_min_latency_ps(min_latency_ps);
    input_pipeline_stats.set_max_latency_ps(max_latency_ps);
    input_pipeline_stats.set_num_slow_calls(num_slow_calls);
  }
}
// Collects every host input pipeline across all hosts, ranks them by their
// worst-case (max) latency, and emits one bottleneck-analysis entry per
// pipeline describing the bottleneck iterator of its slowest call.
void SetBottleneckAnalysis(CombinedTfDataStats* combined_tf_data_stats) {
  struct InputPipeline {
    InputPipeline(absl::string_view host_name,
                  absl::string_view input_pipeline_name, int64_t max_latency_ps,
                  absl::string_view iterator_name,
                  absl::string_view iterator_long_name,
                  int64_t iterator_latency_ps)
        : host_name(host_name),
          input_pipeline_name(input_pipeline_name),
          max_latency_ps(max_latency_ps),
          iterator_name(iterator_name),
          iterator_long_name(iterator_long_name),
          iterator_latency_ps(iterator_latency_ps) {}
    absl::string_view host_name;
    absl::string_view input_pipeline_name;
    int64_t max_latency_ps;
    absl::string_view iterator_name;
    absl::string_view iterator_long_name;
    int64_t iterator_latency_ps;
    // Inverted comparison so std::sort orders by descending max latency.
    bool operator<(const InputPipeline& rhs) const {
      return max_latency_ps > rhs.max_latency_ps;
    }
  };
  std::vector<InputPipeline> slow_input_pipelines;
  for (const auto& host_name_and_tf_data_stats :
       combined_tf_data_stats->tf_data_stats()) {
    absl::string_view host_name = host_name_and_tf_data_stats.first;
    const TfDataStats& tf_data_stats = host_name_and_tf_data_stats.second;
    for (const auto& id_and_stats : tf_data_stats.input_pipelines()) {
      const InputPipelineStats& input_pipeline_stats = id_and_stats.second;
      // Device input pipelines are excluded from bottleneck analysis.
      if (input_pipeline_stats.metadata().type() ==
          InputPipelineMetadata::DEVICE) {
        continue;
      }
      // stats(0) is the slowest call (ProcessInputPipelines sorts calls by
      // descending duration before recording them).
      const InputPipelineStat& input_pipeline_stat =
          input_pipeline_stats.stats(0);
      const IteratorMetadata& metadata = tf_data_stats.iterator_metadata().at(
          input_pipeline_stat.bottleneck_iterator_id());
      slow_input_pipelines.emplace_back(
          host_name, input_pipeline_stats.metadata().name(),
          input_pipeline_stats.max_latency_ps(), metadata.name(),
          metadata.long_name(),
          input_pipeline_stat.bottleneck_iterator_latency_ps());
    }
  }
  std::sort(slow_input_pipelines.begin(), slow_input_pipelines.end());
  for (const auto& input_pipeline : slow_input_pipelines) {
    TfDataBottleneckAnalysis* bottleneck_analysis =
        combined_tf_data_stats->add_bottleneck_analysis();
    bottleneck_analysis->set_host(input_pipeline.host_name.data(),
                                  input_pipeline.host_name.size());
    bottleneck_analysis->set_input_pipeline(
        input_pipeline.input_pipeline_name.data(),
        input_pipeline.input_pipeline_name.size());
    bottleneck_analysis->set_max_latency_ps(input_pipeline.max_latency_ps);
    bottleneck_analysis->set_iterator_name(input_pipeline.iterator_name.data(),
                                           input_pipeline.iterator_name.size());
    bottleneck_analysis->set_iterator_long_name(
        input_pipeline.iterator_long_name.data(),
        input_pipeline.iterator_long_name.size());
    bottleneck_analysis->set_iterator_latency_ps(
        input_pipeline.iterator_latency_ps);
  }
}
// Returns an HTML suggestion string for fixing an input pipeline whose
// bottleneck iterator falls into the given category.
// NOTE(review): the URL constants below had been truncated at "https:" in
// this copy (everything after "//" was stripped, mangling the string
// literals and leaving them unterminated). Restored from the TensorFlow
// data-performance guides; the surviving literal tails (e.g.
// "data_performance_analysis#source_datasets") match these prefixes —
// verify against the upstream file.
std::string GetSuggestion(BottleneckType type) {
  constexpr absl::string_view kPlaybookLink =
      "https://www.tensorflow.org/guide/data_performance_analysis";
  constexpr absl::string_view kPlaybookSourceDatasetLink =
      "https://www.tensorflow.org/guide/"
      "data_performance_analysis#source_datasets";
  constexpr absl::string_view kPlaybookCpuUtilizationLink =
      "https://www.tensorflow.org/guide/"
      "data_performance_analysis#3_are_you_reaching_high_cpu_utilization";
  constexpr absl::string_view kPlaybookTransformationLink =
      "https://www.tensorflow.org/guide/"
      "data_performance_analysis#transformation_datasets";
  constexpr absl::string_view kTfGuideParallelDataExtractionLink =
      "https://www.tensorflow.org/guide/"
      "data_performance#parallelizing_data_extraction";
  constexpr absl::string_view kTfGuideParallelTransformationLink =
      "https://www.tensorflow.org/guide/"
      "data_performance#parallelizing_data_transformation";
  constexpr absl::string_view kTfGuideCacheLink =
      "https://www.tensorflow.org/guide/data_performance#caching";
  constexpr absl::string_view kTfDataServiceLink =
      "https://www.tensorflow.org/api_docs/python/tf/data/experimental/"
      "service?version=nightly";
  switch (type) {
    case BottleneckType::kSlowSource:
      return absl::StrFormat(
          "1. Check the locality of a host and input data. Ideally, they "
          "should be in the same cell (or very close, like the same "
          "region).<br/>"
          "2. Parallelize reading from this dataset source. See %s and %s for "
          "more details.<br/>",
          AnchorElement(kPlaybookSourceDatasetLink, "here"),
          AnchorElement(kTfGuideParallelDataExtractionLink, "here"));
    case BottleneckType::kSlowDataService:
      return absl::StrFormat(
          "1. Fetching data from tf.data service took a while. Profile the "
          "tf.data service worker to analyze the issue further.<br/>"
          "2. See %s for more details on tf.data service.<br/>"
          "3. See %s for other suggestions.",
          AnchorElement(kTfDataServiceLink, "this"),
          AnchorElement(kPlaybookLink, "this"));
    case BottleneckType::kSlowRemoteSource:
      return absl::StrFormat(
          "1. The remote data source is slow. Profile its host to analyze the "
          "issue further.<br/>"
          "2. See %s for other suggestions.",
          AnchorElement(kPlaybookLink, "this"));
    case BottleneckType::kSlowTransformationWithParallelVersion:
      return absl::StrFormat(
          "1. Parallelize this transformation by setting "
          "<code>num_parallel_calls=tf.data.experimental.AUTOTUNE</code>. See "
          "%s for more details.<br/>"
          "2. Consider adding <code>cache</code> after this transformation if "
          "your data fits into memory and it is appropriate (e.g., there is no "
          "randomness in upstream transformations like <code>shuffle</code>). "
          "See %s for more details.<br/>"
          "3. Find more resources %s.",
          AnchorElement(kTfGuideParallelTransformationLink, "this"),
          AnchorElement(kTfGuideCacheLink, "this"),
          AnchorElement(kPlaybookTransformationLink, "here"));
    case BottleneckType::kSlowTransformationWithoutParallelVersion:
      return absl::StrFormat(
          "1. This transformation is inherently sequential. Add outer "
          "parallelism by running multiple copies of the input pipeline over "
          "sharded inputs and combining the results. See %s for more "
          "details.<br/>"
          "2. Consider adding <code>cache</code> after this transformation if "
          "your data fits into memory and it is appropriate (e.g., there is no "
          "randomness in upstream transformations like <code>shuffle</code>). "
          "See %s for more details.<br/>"
          "3. Find more resources %s.",
          AnchorElement(kPlaybookTransformationLink, "this"),
          AnchorElement(kTfGuideCacheLink, "this"),
          AnchorElement(kPlaybookCpuUtilizationLink, "here"));
    default:
      return absl::StrFormat("See %s for suggestions.",
                             AnchorElement(kPlaybookLink, "this"));
  }
}
// Attaches a fix suggestion to every bottleneck-analysis entry, based on the
// category of its bottleneck iterator's name.
void SetSuggestion(CombinedTfDataStats* combined_tf_data_stats) {
  for (TfDataBottleneckAnalysis& analysis :
       *combined_tf_data_stats->mutable_bottleneck_analysis()) {
    analysis.set_suggestion(
        GetSuggestion(GetBottleneckType(analysis.iterator_name())));
  }
}
// Sets the top-level summary and is_input_bound flag. Bottleneck-analysis
// entries are sorted by descending max latency, so entry 0 (when present)
// holds the overall worst pipeline latency.
void SetSummary(CombinedTfDataStats* combined_tf_data_stats) {
  int64_t max_latency_ps = 0;
  if (combined_tf_data_stats->bottleneck_analysis_size()) {
    max_latency_ps =
        combined_tf_data_stats->bottleneck_analysis().at(0).max_latency_ps();
  }
  if (max_latency_ps > kSlowCallThresholdPs) {
    // At least one pipeline is slower than the 50 us threshold.
    combined_tf_data_stats->set_is_input_bound(true);
    combined_tf_data_stats->set_summary(
        "Your profile has a tf.data input pipeline slower than 50 us. For each "
        "slow input pipeline, below shows a bottleneck in the input pipeline "
        "and a suggestion on how to fix it.");
  } else if (max_latency_ps > 0) {
    // tf.data activity was seen, but nothing crossed the threshold.
    combined_tf_data_stats->set_is_input_bound(false);
    combined_tf_data_stats->set_summary(
        "Your profile does not have any tf.data input pipeline slower than 50 "
        "us. Your job could be still input bound if this profile didn't "
        "capture all workers.");
  } else {
    // No tf.data activity at all in the profile.
    combined_tf_data_stats->set_is_input_bound(false);
    combined_tf_data_stats->set_summary(
        "No tf.data activity captured in your profile. If your job uses "
        "tf.data, try to capture a longer profile.");
  }
}
}
// Maps a bottleneck iterator's (short) name to a suggestion category.
// Unrecognized names fall back to kOther. The map is heap-allocated once and
// intentionally never destroyed.
BottleneckType GetBottleneckType(absl::string_view bottleneck_iterator_name) {
  static auto* kBottleneckTypeMap = new absl::flat_hash_map<absl::string_view,
                                                            BottleneckType>(
      {
          // Source datasets.
          {"TFRecord", BottleneckType::kSlowSource},
          {"SSTable", BottleneckType::kSlowSource},
          {"RecordIO", BottleneckType::kSlowSource},
          {"Spanner", BottleneckType::kSlowSource},
          {"TFColumn", BottleneckType::kSlowSource},
          {"SleepwalkRemoteDataset", BottleneckType::kSlowSource},
          {"TextLine", BottleneckType::kSlowSource},
          {"StitchedTimelineDataset", BottleneckType::kSlowSource},
          {"DateKeyDataset", BottleneckType::kSlowSource},
          {"CapacitorProto", BottleneckType::kSlowSource},
          {"LMDB", BottleneckType::kSlowSource},
          {"ExternalDataset", BottleneckType::kSlowSource},
          {"PearModel", BottleneckType::kSlowSource},
          {"FixedLengthRecordV2", BottleneckType::kSlowSource},
          {"FromTensor", BottleneckType::kSlowSource},
          {"TensorSlice", BottleneckType::kSlowSource},
          {"Generator", BottleneckType::kSlowSource},
          {"SyntheticDatasetOp", BottleneckType::kSlowSource},
          // tf.data service.
          {"DataService", BottleneckType::kSlowDataService},
          // Remote sources.
          {"GuzzlerDataGuzzlerRemoteDataset", BottleneckType::kSlowRemoteSource},
          {"ReverbDataset", BottleneckType::kSlowRemoteSource},
          {"DatasetSampleGame", BottleneckType::kSlowRemoteSource},
          {"Courier", BottleneckType::kSlowRemoteSource},
          {"ReverbEpisodeDataset", BottleneckType::kSlowRemoteSource},
          // Transformations with a parallel version.
          {"Map", BottleneckType::kSlowTransformationWithParallelVersion},
          {"Interleave", BottleneckType::kSlowTransformationWithParallelVersion},
          // Transformations without a parallel version.
          {"Filter", BottleneckType::kSlowTransformationWithoutParallelVersion},
          {"Batch", BottleneckType::kSlowTransformationWithoutParallelVersion},
          {"Unbatch", BottleneckType::kSlowTransformationWithoutParallelVersion}});
  if (auto type =
          gtl::FindOrNull(*kBottleneckTypeMap, bottleneck_iterator_name)) {
    return *type;
  }
  return BottleneckType::kOther;
}
// Computes TfDataStats for one host's XPlane and stores them under
// `host_name` in the combined result. Builds an event forest from the plane,
// connects (grouped and tf.data producer/consumer) events, then extracts
// iterator metadata and per-input-pipeline statistics.
void CombinedTfDataStatsBuilder::Add(absl::string_view host_name,
                                     XPlane* host_plane) {
  TfDataStats& tf_data_stats =
      (*combined_tf_data_stats_
            ->mutable_tf_data_stats())[std::string(host_name)];
  tsl::profiler::EventForest event_forest;
  event_forest.AddPlanes(tsl::profiler::CreateTfXPlaneVisitor, {host_plane});
  event_forest.ConnectEvents();
  // Links tf.data produce events to their matching consume events.
  event_forest.ConnectTfDataEvents();
  absl::flat_hash_set<int64_t> device_input_pipeline_ids;
  absl::flat_hash_map<int64_t, std::vector<const tsl::profiler::EventNode*>>
      root_iterator_event_map;
  ProcessEventForest(event_forest, &device_input_pipeline_ids,
                     &root_iterator_event_map, &tf_data_stats);
  ProcessInputPipelines(device_input_pipeline_ids, &root_iterator_event_map,
                        &tf_data_stats);
}
// Finalizes the combined stats after all hosts have been added: ranks slow
// input pipelines, optionally attaches fix suggestions, and writes the
// top-level summary.
void CombinedTfDataStatsBuilder::Finalize() {
  SetBottleneckAnalysis(combined_tf_data_stats_);
  if (generate_suggestion_) SetSuggestion(combined_tf_data_stats_);
  SetSummary(combined_tf_data_stats_);
}
}
} | #include "tensorflow/core/profiler/convert/xplane_to_tf_data_stats.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/profiler/protobuf/tf_data_stats.pb.h"
#include "tensorflow/core/profiler/protobuf/xplane.pb.h"
#include "tensorflow/core/profiler/utils/xplane_builder.h"
#include "tensorflow/core/profiler/utils/xplane_schema.h"
#include "tensorflow/core/profiler/utils/xplane_test_utils.h"
namespace tensorflow {
namespace profiler {
namespace {
using ::testing::EqualsProto;
// Builds an XPlane containing a host-side Prefetch input pipeline (two
// Prefetch invocations on the consumer thread, two Range executions on the
// producer thread) and golden-checks the converted CombinedTfDataStats:
// iterator metadata, per-call stats, bottleneck analysis, and summary.
TEST(XPlaneToTfDataStatsTest, HostInputPipeline) {
  constexpr int64_t kPrefetchIteratorId = 123;
  constexpr int64_t kRangeIteratorId = 456;
  constexpr int64_t kFirstElementId = 100;
  constexpr int64_t kSecondElementId = 200;
  XPlane host_plane;
  XPlaneBuilder host_plane_builder(&host_plane);
  host_plane_builder.ReserveLines(2);
  // Consumer thread: two Prefetch calls, each consuming one produced element.
  auto consumer_thread = host_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch", 0,
               100000000, {{StatType::kStepId, kPrefetchIteratorId}});
  CreateXEvent(&host_plane_builder, &consumer_thread,
               HostEventType::kPrefetchConsume, 80000000, 20000000,
               {{StatType::kElementId, kFirstElementId}});
  CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch",
               200000000, 20000000, {{StatType::kStepId, kPrefetchIteratorId}});
  CreateXEvent(&host_plane_builder, &consumer_thread,
               HostEventType::kPrefetchConsume, 210000000, 10000000,
               {{StatType::kElementId, kSecondElementId}});
  // Producer thread: two Range executions producing the two elements.
  auto producer_thread = host_plane_builder.GetOrCreateLine(1);
  CreateXEvent(&host_plane_builder, &producer_thread,
               HostEventType::kPrefetchProduce, 0, 80000000,
               {{StatType::kElementId, kFirstElementId}});
  CreateXEvent(&host_plane_builder, &producer_thread,
               "Iterator::Prefetch::Range", 0, 80000000,
               {{StatType::kStepId, kRangeIteratorId},
                {StatType::kParentId, kPrefetchIteratorId}});
  CreateXEvent(&host_plane_builder, &producer_thread,
               HostEventType::kPrefetchProduce, 100000000, 80000000,
               {{StatType::kElementId, kSecondElementId}});
  CreateXEvent(&host_plane_builder, &producer_thread,
               "Iterator::Prefetch::Range", 100000000, 80000000,
               {{StatType::kStepId, kRangeIteratorId},
                {StatType::kParentId, kPrefetchIteratorId}});
  CombinedTfDataStats combined_tf_data_stats;
  CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
  builder.Add("host1", &host_plane);
  builder.Finalize();
  EXPECT_THAT(
      combined_tf_data_stats, EqualsProto(R"pb(
        bottleneck_analysis: {
          host: "host1"
          input_pipeline: "Host:0"
          max_latency_ps: 100000000
          iterator_name: "Range"
          iterator_long_name: "Iterator::Prefetch::Range"
          iterator_latency_ps: 80000000
          suggestion: "See <a href=\"https:
        }
        tf_data_stats: {
          key: "host1"
          value: {
            iterator_metadata: {
              key: 123,
              value: {
                id: 123
                name: "Prefetch"
                long_name: "Iterator::Prefetch"
                is_async: true
              }
            }
            iterator_metadata: {
              key: 456,
              value: {
                id: 456
                parent_id: 123
                name: "Range"
                long_name: "Iterator::Prefetch::Range"
                is_async: false
              }
            }
            input_pipelines {
              key: 123,
              value: {
                metadata { id: 123 type: HOST name: "Host:0" }
                avg_latency_ps: 60000000
                min_latency_ps: 20000000
                max_latency_ps: 100000000
                num_slow_calls: 1
                stats {
                  bottleneck_iterator_id: 456
                  bottleneck_iterator_latency_ps: 80000000
                  iterator_stats {
                    key: 123,
                    value: {
                      id: 123
                      start_time_ps: 0
                      duration_ps: 100000000
                      self_time_ps: 20000000
                      is_blocking: true
                      num_calls: 1
                    }
                  }
                  iterator_stats {
                    key: 456,
                    value: {
                      id: 456
                      start_time_ps: 0
                      duration_ps: 80000000
                      self_time_ps: 80000000
                      is_blocking: true
                      num_calls: 1
                    }
                  }
                }
                stats {
                  bottleneck_iterator_id: 123
                  bottleneck_iterator_latency_ps: 20000000
                  iterator_stats {
                    key: 123,
                    value: {
                      id: 123
                      start_time_ps: 200000000
                      duration_ps: 20000000
                      self_time_ps: 20000000
                      is_blocking: true
                      num_calls: 1
                    }
                  }
                  iterator_stats {
                    key: 456,
                    value: {
                      id: 456
                      start_time_ps: 100000000
                      duration_ps: 80000000
                      self_time_ps: 80000000
                      is_blocking: false
                      num_calls: 1
                    }
                  }
                }
              }
            }
          }
        }
        is_input_bound: true
        summary: "Your profile has a tf.data input pipeline slower than 50 us. For each slow input pipeline, below shows a bottleneck in the input pipeline and a suggestion on how to fix it."
      )pb"));
}
// A "Generator" child iterator marks the pipeline as device-side; device
// pipelines are excluded from bottleneck analysis, so the expected proto has
// no bottleneck_analysis and the "no activity" summary.
TEST(XPlaneToTfDataStatsTest, DeviceInputPipeline) {
  constexpr int64_t kPrefetchIteratorId = 123;
  constexpr int64_t kRangeIteratorId = 456;
  constexpr int64_t kElementId = 100;
  XPlane host_plane;
  XPlaneBuilder host_plane_builder(&host_plane);
  host_plane_builder.ReserveLines(2);
  auto consumer_thread = host_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch", 0,
               30000000, {{StatType::kStepId, kPrefetchIteratorId}});
  CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::Prefetch",
               100000000, 100000000,
               {{StatType::kStepId, kPrefetchIteratorId}});
  CreateXEvent(&host_plane_builder, &consumer_thread,
               HostEventType::kPrefetchConsume, 180000000, 20000000,
               {{StatType::kElementId, kElementId}});
  auto producer_thread = host_plane_builder.GetOrCreateLine(1);
  CreateXEvent(&host_plane_builder, &producer_thread,
               HostEventType::kPrefetchProduce, 100000000, 80000000,
               {{StatType::kElementId, kElementId}});
  CreateXEvent(&host_plane_builder, &producer_thread,
               "Iterator::Prefetch::Generator", 100000000, 80000000,
               {{StatType::kStepId, kRangeIteratorId},
                {StatType::kParentId, kPrefetchIteratorId}});
  CombinedTfDataStats combined_tf_data_stats;
  CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
  builder.Add("host1", &host_plane);
  builder.Finalize();
  EXPECT_THAT(
      combined_tf_data_stats, EqualsProto(R"pb(
        tf_data_stats: {
          key: "host1"
          value: {
            iterator_metadata: {
              key: 123,
              value: {
                id: 123
                name: "Prefetch"
                long_name: "Iterator::Prefetch"
                is_async: true
              }
            }
            iterator_metadata: {
              key: 456,
              value: {
                id: 456
                parent_id: 123
                name: "Generator"
                long_name: "Iterator::Prefetch::Generator"
                is_async: false
              }
            }
            input_pipelines {
              key: 123,
              value: {
                metadata { id: 123 type: DEVICE name: "Device:0" }
                avg_latency_ps: 65000000
                min_latency_ps: 30000000
                max_latency_ps: 100000000
                num_slow_calls: 1
                stats {
                  bottleneck_iterator_id: 456
                  bottleneck_iterator_latency_ps: 80000000
                  iterator_stats {
                    key: 123,
                    value: {
                      id: 123
                      start_time_ps: 100000000
                      duration_ps: 100000000
                      self_time_ps: 20000000
                      is_blocking: true
                      num_calls: 1
                    }
                  }
                  iterator_stats {
                    key: 456,
                    value: {
                      id: 456
                      start_time_ps: 100000000
                      duration_ps: 80000000
                      self_time_ps: 80000000
                      is_blocking: true
                      num_calls: 1
                    }
                  }
                }
                stats {
                  bottleneck_iterator_id: 123
                  bottleneck_iterator_latency_ps: 30000000
                  iterator_stats {
                    key: 123,
                    value: {
                      id: 123
                      start_time_ps: 0
                      duration_ps: 30000000
                      self_time_ps: 30000000
                      is_blocking: true
                      num_calls: 1
                    }
                  }
                }
              }
            }
          }
        }
        summary: "No tf.data activity captured in your profile. If your job uses tf.data, try to capture a longer profile."
      )pb"));
}
// MapAndBatch produces one element from two Range executions; verifies the
// two producer calls are aggregated (num_calls: 2, summed self time) in the
// single consumer call's stats.
TEST(XPlaneToTfDataStatsTest, MapAndBatch) {
  constexpr int64_t kMapAndBatchIteratorId = 123;
  constexpr int64_t kRangeIteratorId = 456;
  constexpr int64_t kElementId = 100;
  XPlane host_plane;
  XPlaneBuilder host_plane_builder(&host_plane);
  host_plane_builder.ReserveLines(2);
  XLineBuilder consumer_thread = host_plane_builder.GetOrCreateLine(0);
  CreateXEvent(&host_plane_builder, &consumer_thread, "Iterator::MapAndBatch",
               0, 100000000, {{StatType::kStepId, kMapAndBatchIteratorId}});
  CreateXEvent(&host_plane_builder, &consumer_thread,
               HostEventType::kMapAndBatchConsume, 80000000, 20000000,
               {{StatType::kElementId, kElementId}});
  XLineBuilder producer_thread = host_plane_builder.GetOrCreateLine(1);
  CreateXEvent(&host_plane_builder, &producer_thread,
               HostEventType::kMapAndBatchProduce, 0, 30000000,
               {{StatType::kElementId, kElementId}});
  CreateXEvent(&host_plane_builder, &producer_thread,
               "Iterator::MapAndBatch::Range", 0, 30000000,
               {{StatType::kStepId, kRangeIteratorId},
                {StatType::kParentId, kMapAndBatchIteratorId}});
  CreateXEvent(&host_plane_builder, &producer_thread,
               HostEventType::kMapAndBatchProduce, 40000000, 30000000,
               {{StatType::kElementId, kElementId}});
  CreateXEvent(&host_plane_builder, &producer_thread,
               "Iterator::MapAndBatch::Range", 40000000, 30000000,
               {{StatType::kStepId, kRangeIteratorId},
                {StatType::kParentId, kMapAndBatchIteratorId}});
  CombinedTfDataStats combined_tf_data_stats;
  CombinedTfDataStatsBuilder builder(&combined_tf_data_stats);
  builder.Add("host1", &host_plane);
  builder.Finalize();
  EXPECT_THAT(
      combined_tf_data_stats, EqualsProto(R"pb(
        bottleneck_analysis: {
          host: "host1"
          input_pipeline: "Host:0"
          max_latency_ps: 100000000
          iterator_name: "Range"
          iterator_long_name: "Iterator::MapAndBatch::Range"
          iterator_latency_ps: 60000000
          suggestion: "See <a href=\"https:
        }
        tf_data_stats: {
          key: "host1"
          value: {
            iterator_metadata: {
              key: 123,
              value: {
                id: 123
                name: "MapAndBatch"
                long_name: "Iterator::MapAndBatch"
                is_async: true
              }
            }
            iterator_metadata: {
              key: 456,
              value: {
                id: 456
                parent_id: 123
                name: "Range"
                long_name: "Iterator::MapAndBatch::Range"
                is_async: false
              }
            }
            input_pipelines {
              key: 123,
              value: {
                metadata { id: 123 type: HOST name: "Host:0" }
                avg_latency_ps: 100000000
                min_latency_ps: 100000000
                max_latency_ps: 100000000
                num_slow_calls: 1
                stats {
                  bottleneck_iterator_id: 456
                  bottleneck_iterator_latency_ps: 60000000
                  iterator_stats {
                    key: 123,
                    value: {
                      id: 123
                      start_time_ps: 0
                      duration_ps: 100000000
                      self_time_ps: 40000000
                      is_blocking: true
                      num_calls: 1
                    }
                  }
                  iterator_stats {
                    key: 456,
                    value: {
                      id: 456
                      start_time_ps: 0
                      duration_ps: 60000000
                      self_time_ps: 60000000
                      is_blocking: true
                      num_calls: 2
                    }
                  }
                }
              }
            }
          }
        }
        is_input_bound: true
        summary: "Your profile has a tf.data input pipeline slower than 50 us. For each slow input pipeline, below shows a bottleneck in the input pipeline and a suggestion on how to fix it."
      )pb"));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tf_data_stats.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/convert/xplane_to_tf_data_stats_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5a78cd31-7e4a-41ac-97e8-3fcb5cc4806a | cpp | google/cel-cpp | comprehension_step | eval/eval/comprehension_step.cc | eval/eval/comprehension_step_test.cc | #include "eval/eval/comprehension_step.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "base/attribute.h"
#include "base/kind.h"
#include "common/casting.h"
#include "common/value.h"
#include "common/value_kind.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/comprehension_slots.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "eval/internal/errors.h"
#include "eval/public/cel_attribute.h"
#include "internal/status_macros.h"
#include "runtime/internal/mutable_list_impl.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::BoolValue;
using ::cel::Cast;
using ::cel::InstanceOf;
using ::cel::IntValue;
using ::cel::ListValue;
using ::cel::MapValue;
using ::cel::UnknownValue;
using ::cel::Value;
using ::cel::runtime_internal::CreateNoMatchingOverloadError;
using ::cel::runtime_internal::MutableListValue;
// Stack-machine step that finishes a comprehension: pops the comprehension's
// bookkeeping values off the value stack, pushes the final result, and
// releases the accumulator variable slot.
class ComprehensionFinish : public ExpressionStepBase {
 public:
  ComprehensionFinish(size_t accu_slot, int64_t expr_id);
  absl::Status Evaluate(ExecutionFrame* frame) const override;
 private:
  size_t accu_slot_;  // Index of the accumulator variable's slot.
};
ComprehensionFinish::ComprehensionFinish(size_t accu_slot, int64_t expr_id)
    : ExpressionStepBase(expr_id), accu_slot_(accu_slot) {}
// Finishes the comprehension: the result expression's value is on top of the
// stack above [iter_range, current_index]; all three are popped and replaced
// by just the result, and the accumulator slot is cleared.
//
// Returns an internal error on value-stack underflow. When the list-append
// optimization is enabled and the result is the internal mutable list, it is
// frozen into an immutable list before being pushed.
absl::Status ComprehensionFinish::Evaluate(ExecutionFrame* frame) const {
  if (!frame->value_stack().HasEnough(3)) {
    // absl::InternalError is the idiomatic spelling of
    // absl::Status(absl::StatusCode::kInternal, ...).
    return absl::InternalError("Value stack underflow");
  }
  Value result = frame->value_stack().Peek();
  frame->value_stack().Pop(3);
  if (frame->enable_comprehension_list_append() &&
      MutableListValue::Is(result)) {
    // Freeze the mutable list now that the comprehension is done building it.
    MutableListValue& list_value = MutableListValue::Cast(result);
    CEL_ASSIGN_OR_RETURN(result, std::move(list_value).Build());
  }
  frame->value_stack().Push(std::move(result));
  frame->comprehension_slots().ClearSlot(accu_slot_);
  return absl::OkStatus();
}
// Stack-machine step that initializes a comprehension: normalizes the
// iteration range on top of the stack (projecting a map to the list of its
// keys) and pushes the initial loop index (-1), which ComprehensionNextStep
// increments before each element access.
class ComprehensionInitStep : public ExpressionStepBase {
 public:
  explicit ComprehensionInitStep(int64_t expr_id)
      : ExpressionStepBase(expr_id, false) {}
  absl::Status Evaluate(ExecutionFrame* frame) const override;
 private:
  // Replaces the map on top of the stack with the list of its keys.
  absl::Status ProjectKeys(ExecutionFrame* frame) const;
};
// Produces the list of keys of `range` for iteration. When unknown tracking
// is enabled and the map's attribute is partially unknown, an UnknownSet for
// that attribute is returned instead of the key list.
absl::StatusOr<Value> ProjectKeysImpl(ExecutionFrameBase& frame,
                                      const MapValue& range,
                                      const AttributeTrail& trail) {
  if (frame.unknown_processing_enabled() &&
      frame.attribute_utility().CheckForUnknownPartial(trail)) {
    return frame.attribute_utility().CreateUnknownSet(trail.attribute());
  }
  return range.ListKeys(frame.value_manager());
}
// Swaps the map on top of the value stack for the list of its keys (or an
// UnknownSet when partial-unknown tracking applies).
absl::Status ComprehensionInitStep::ProjectKeys(ExecutionFrame* frame) const {
  auto& stack = frame->value_stack();
  const auto& map_value = Cast<MapValue>(stack.Peek());
  CEL_ASSIGN_OR_RETURN(
      Value key_list,
      ProjectKeysImpl(*frame, map_value, stack.PeekAttribute()));
  stack.PopAndPush(std::move(key_list));
  return absl::OkStatus();
}
// Prepares the value stack for comprehension iteration.
//
// Expects the iteration range on top of the stack. A map is replaced by the
// list of its keys; a range that is neither a list nor an error/unknown is
// replaced by a no_matching_overload error for "<iter_range>". Finally the
// initial iteration index (-1) is pushed.
absl::Status ComprehensionInitStep::Evaluate(ExecutionFrame* frame) const {
  if (!frame->value_stack().HasEnough(1)) {
    // Idiomatic form of absl::Status(absl::StatusCode::kInternal, ...).
    return absl::InternalError("Value stack underflow");
  }
  if (frame->value_stack().Peek()->Is<cel::MapValue>()) {
    CEL_RETURN_IF_ERROR(ProjectKeys(frame));
  }
  const auto& range = frame->value_stack().Peek();
  if (!range->Is<cel::ListValue>() && !range->Is<cel::ErrorValue>() &&
      !range->Is<cel::UnknownValue>()) {
    // Errors/unknowns are left in place so later steps can propagate them.
    frame->value_stack().PopAndPush(frame->value_factory().CreateErrorValue(
        CreateNoMatchingOverloadError("<iter_range>")));
  }
  frame->value_stack().Push(frame->value_factory().CreateIntValue(-1));
  return absl::OkStatus();
}
// Direct (recursively evaluated) implementation of a CEL comprehension.
// Evaluates the range, accumulator-init, loop condition, loop step, and
// result sub-expressions directly instead of via the flat stack machine.
class ComprehensionDirectStep : public DirectExpressionStep {
 public:
  explicit ComprehensionDirectStep(
      size_t iter_slot, size_t accu_slot,
      std::unique_ptr<DirectExpressionStep> range,
      std::unique_ptr<DirectExpressionStep> accu_init,
      std::unique_ptr<DirectExpressionStep> loop_step,
      std::unique_ptr<DirectExpressionStep> condition_step,
      std::unique_ptr<DirectExpressionStep> result_step, bool shortcircuiting,
      int64_t expr_id)
      : DirectExpressionStep(expr_id),
        iter_slot_(iter_slot),
        accu_slot_(accu_slot),
        range_(std::move(range)),
        accu_init_(std::move(accu_init)),
        loop_step_(std::move(loop_step)),
        condition_(std::move(condition_step)),
        result_step_(std::move(result_step)),
        shortcircuiting_(shortcircuiting) {}
  absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
                        AttributeTrail& trail) const override;
 private:
  size_t iter_slot_;  // Slot for the per-element iteration variable.
  size_t accu_slot_;  // Slot for the accumulator variable.
  std::unique_ptr<DirectExpressionStep> range_;       // Iteration range expr.
  std::unique_ptr<DirectExpressionStep> accu_init_;   // Accumulator init expr.
  std::unique_ptr<DirectExpressionStep> loop_step_;   // Fold expr per element.
  std::unique_ptr<DirectExpressionStep> condition_;   // Loop condition expr.
  std::unique_ptr<DirectExpressionStep> result_step_;  // Final result expr.
  // Whether a false loop condition stops iteration early.
  bool shortcircuiting_;
};
// Evaluates the whole comprehension:
//   1. Evaluate the range; a map is projected to its key list. Errors and
//      unknowns from the range short-circuit the whole comprehension, and a
//      non-list range yields a no_matching_overload error.
//   2. Evaluate accu_init into the accumulator slot.
//   3. For each element: evaluate the loop condition (error/unknown/non-bool
//      results abort and become the comprehension's value), bind the element
//      to the iteration slot (possibly as an UnknownSet when attribute
//      tracking matches exactly), then fold it into the accumulator via
//      loop_step.
//   4. Evaluate the result expression, freezing the internal mutable list
//      when the list-append optimization is enabled.
absl::Status ComprehensionDirectStep::Evaluate(ExecutionFrameBase& frame,
                                               Value& result,
                                               AttributeTrail& trail) const {
  cel::Value range;
  AttributeTrail range_attr;
  CEL_RETURN_IF_ERROR(range_->Evaluate(frame, range, range_attr));
  if (InstanceOf<MapValue>(range)) {
    const auto& map_value = Cast<MapValue>(range);
    CEL_ASSIGN_OR_RETURN(range, ProjectKeysImpl(frame, map_value, range_attr));
  }
  switch (range.kind()) {
    case cel::ValueKind::kError:
    case cel::ValueKind::kUnknown:
      // Propagate errors/unknowns from the range directly.
      result = range;
      return absl::OkStatus();
      break;
    default:
      if (!InstanceOf<ListValue>(range)) {
        result = frame.value_manager().CreateErrorValue(
            CreateNoMatchingOverloadError("<iter_range>"));
        return absl::OkStatus();
      }
  }
  const auto& range_list = Cast<ListValue>(range);
  Value accu_init;
  AttributeTrail accu_init_attr;
  CEL_RETURN_IF_ERROR(accu_init_->Evaluate(frame, accu_init, accu_init_attr));
  frame.comprehension_slots().Set(accu_slot_, std::move(accu_init),
                                  accu_init_attr);
  // NOTE(review): the slot pointers obtained here are assumed to stay valid
  // while loop_step_/condition_ evaluate (i.e. nested evaluation does not
  // reallocate the slots) — confirm against ComprehensionSlots.
  ComprehensionSlots::Slot* accu_slot =
      frame.comprehension_slots().Get(accu_slot_);
  ABSL_DCHECK(accu_slot != nullptr);
  frame.comprehension_slots().Set(iter_slot_);
  ComprehensionSlots::Slot* iter_slot =
      frame.comprehension_slots().Get(iter_slot_);
  ABSL_DCHECK(iter_slot != nullptr);
  Value condition;
  AttributeTrail condition_attr;
  bool should_skip_result = false;
  CEL_RETURN_IF_ERROR(range_list.ForEach(
      frame.value_manager(),
      [&](size_t index, const Value& v) -> absl::StatusOr<bool> {
        CEL_RETURN_IF_ERROR(frame.IncrementIterations());
        // The condition is checked before binding the current element, so it
        // observes the accumulator from the previous iteration.
        CEL_RETURN_IF_ERROR(
            condition_->Evaluate(frame, condition, condition_attr));
        if (condition.kind() == cel::ValueKind::kError ||
            condition.kind() == cel::ValueKind::kUnknown) {
          result = std::move(condition);
          should_skip_result = true;
          return false;
        }
        if (condition.kind() != cel::ValueKind::kBool) {
          result = frame.value_manager().CreateErrorValue(
              CreateNoMatchingOverloadError("<loop_condition>"));
          should_skip_result = true;
          return false;
        }
        if (shortcircuiting_ && !Cast<BoolValue>(condition).NativeValue()) {
          return false;
        }
        iter_slot->value = v;
        if (frame.unknown_processing_enabled()) {
          iter_slot->attribute =
              range_attr.Step(CelAttributeQualifier::OfInt(index));
          // Replace the element with an UnknownSet if its attribute is
          // marked unknown exactly.
          if (frame.attribute_utility().CheckForUnknownExact(
                  iter_slot->attribute)) {
            iter_slot->value = frame.attribute_utility().CreateUnknownSet(
                iter_slot->attribute.attribute());
          }
        }
        // Fold the element into the accumulator slot in place.
        CEL_RETURN_IF_ERROR(loop_step_->Evaluate(frame, accu_slot->value,
                                                 accu_slot->attribute));
        return true;
      }));
  frame.comprehension_slots().ClearSlot(iter_slot_);
  if (should_skip_result) {
    // `result` already holds the propagated error/unknown.
    frame.comprehension_slots().ClearSlot(accu_slot_);
    return absl::OkStatus();
  }
  CEL_RETURN_IF_ERROR(result_step_->Evaluate(frame, result, trail));
  if (frame.options().enable_comprehension_list_append &&
      MutableListValue::Is(result)) {
    // Freeze the internal mutable list into an immutable result.
    MutableListValue& list_value = MutableListValue::Cast(result);
    CEL_ASSIGN_OR_RETURN(result, std::move(list_value).Build());
  }
  frame.comprehension_slots().ClearSlot(accu_slot_);
  return absl::OkStatus();
}
}
// Step that advances the comprehension's iteration; see Evaluate below for
// the expected stack layout.
ComprehensionNextStep::ComprehensionNextStep(size_t iter_slot, size_t accu_slot,
                                             int64_t expr_id)
    : ExpressionStepBase(expr_id, false),
      iter_slot_(iter_slot),
      accu_slot_(accu_slot) {}
// Jump (relative offset) taken when the range is exhausted.
void ComprehensionNextStep::set_jump_offset(int offset) {
  jump_offset_ = offset;
}
// Jump taken when the range is an error/unknown/non-list value.
void ComprehensionNextStep::set_error_jump_offset(int offset) {
  error_jump_offset_ = offset;
}
// Advances the comprehension by one element.
//
// Stack layout on entry (bottom to top):
//   iter_range      - the list being iterated
//   current_index   - index of the previous iteration (-1 before the first)
//   loop_step_accu  - accumulator produced by the previous loop step
//
// Stores the accumulator into its slot, then either binds the next element
// (or an UnknownSet for its attribute) to the iteration slot, or — when the
// range is exhausted — clears the iteration slot and jumps to the finish
// step. A non-list range propagates an error/unknown and takes the error
// jump.
absl::Status ComprehensionNextStep::Evaluate(ExecutionFrame* frame) const {
  enum {
    POS_ITER_RANGE,
    POS_CURRENT_INDEX,
    POS_LOOP_STEP_ACCU,
  };
  constexpr int kStackSize = 3;
  if (!frame->value_stack().HasEnough(kStackSize)) {
    // Idiomatic form of absl::Status(absl::StatusCode::kInternal, ...),
    // matching the InternalError usage below.
    return absl::InternalError("Value stack underflow");
  }
  absl::Span<const Value> state = frame->value_stack().GetSpan(kStackSize);
  const cel::Value& iter_range = state[POS_ITER_RANGE];
  if (!iter_range->Is<cel::ListValue>()) {
    if (iter_range->Is<cel::ErrorValue>() ||
        iter_range->Is<cel::UnknownValue>()) {
      // std::move of a const reference copies; the copy is required anyway
      // since PopAndPush invalidates `state`.
      frame->value_stack().PopAndPush(kStackSize, std::move(iter_range));
    } else {
      frame->value_stack().PopAndPush(
          kStackSize, frame->value_factory().CreateErrorValue(
                          CreateNoMatchingOverloadError("<iter_range>")));
    }
    return frame->JumpTo(error_jump_offset_);
  }
  const ListValue& iter_range_list = Cast<ListValue>(iter_range);
  const auto& current_index_value = state[POS_CURRENT_INDEX];
  if (!InstanceOf<IntValue>(current_index_value)) {
    return absl::InternalError(absl::StrCat(
        "ComprehensionNextStep: want int, got ",
        cel::KindToString(ValueKindToKind(current_index_value->kind()))));
  }
  CEL_RETURN_IF_ERROR(frame->IncrementIterations());
  int64_t next_index = Cast<IntValue>(current_index_value).NativeValue() + 1;
  // Persist the accumulator before either continuing or finishing.
  frame->comprehension_slots().Set(accu_slot_, state[POS_LOOP_STEP_ACCU]);
  CEL_ASSIGN_OR_RETURN(auto iter_range_list_size, iter_range_list.Size());
  if (next_index >= static_cast<int64_t>(iter_range_list_size)) {
    // Range exhausted: drop the accumulator from the stack and jump to the
    // finish step (the result expression's value will be pushed there).
    frame->comprehension_slots().ClearSlot(iter_slot_);
    frame->value_stack().Pop(1);
    return frame->JumpTo(jump_offset_);
  }
  AttributeTrail iter_trail;
  if (frame->enable_unknowns()) {
    iter_trail =
        frame->value_stack().GetAttributeSpan(kStackSize)[POS_ITER_RANGE].Step(
            cel::AttributeQualifier::OfInt(next_index));
  }
  Value current_value;
  if (frame->enable_unknowns() && frame->attribute_utility().CheckForUnknown(
                                      iter_trail, false)) {
    current_value =
        frame->attribute_utility().CreateUnknownSet(iter_trail.attribute());
  } else {
    CEL_ASSIGN_OR_RETURN(current_value,
                         iter_range_list.Get(frame->value_factory(),
                                             static_cast<size_t>(next_index)));
  }
  // Replace [current_index, loop_step_accu] with the new index; the element
  // itself is delivered through the iteration slot.
  frame->value_stack().PopAndPush(
      2, frame->value_factory().CreateIntValue(next_index));
  frame->comprehension_slots().Set(iter_slot_, std::move(current_value),
                                   std::move(iter_trail));
  return absl::OkStatus();
}
// Step that checks the comprehension's loop condition; see Evaluate below.
ComprehensionCondStep::ComprehensionCondStep(size_t iter_slot, size_t accu_slot,
                                             bool shortcircuiting,
                                             int64_t expr_id)
    : ExpressionStepBase(expr_id, false),
      iter_slot_(iter_slot),
      accu_slot_(accu_slot),
      shortcircuiting_(shortcircuiting) {}
// Jump taken when the condition is false and short-circuiting is enabled.
void ComprehensionCondStep::set_jump_offset(int offset) {
  jump_offset_ = offset;
}
// Jump taken when the condition is an error/unknown/non-bool value.
void ComprehensionCondStep::set_error_jump_offset(int offset) {
  error_jump_offset_ = offset;
}
// Checks the loop condition on top of the stack.
//
// Stack layout on entry: [iter_range, current_index, loop_condition].
// An error/unknown condition replaces the whole comprehension state and takes
// the error jump; any other non-bool becomes a no_matching_overload error for
// "<loop_condition>". Otherwise the condition is popped, and a false value
// with short-circuiting enabled jumps straight to the finish step.
absl::Status ComprehensionCondStep::Evaluate(ExecutionFrame* frame) const {
  if (!frame->value_stack().HasEnough(3)) {
    // Idiomatic form of absl::Status(absl::StatusCode::kInternal, ...).
    return absl::InternalError("Value stack underflow");
  }
  auto& loop_condition_value = frame->value_stack().Peek();
  if (!loop_condition_value->Is<cel::BoolValue>()) {
    if (loop_condition_value->Is<cel::ErrorValue>() ||
        loop_condition_value->Is<cel::UnknownValue>()) {
      frame->value_stack().PopAndPush(3, std::move(loop_condition_value));
    } else {
      frame->value_stack().PopAndPush(
          3, frame->value_factory().CreateErrorValue(
                 CreateNoMatchingOverloadError("<loop_condition>")));
    }
    // The comprehension aborts: release both variable slots.
    frame->comprehension_slots().ClearSlot(iter_slot_);
    frame->comprehension_slots().ClearSlot(accu_slot_);
    return frame->JumpTo(error_jump_offset_);
  }
  bool loop_condition = loop_condition_value.GetBool().NativeValue();
  frame->value_stack().Pop(1);
  if (!loop_condition && shortcircuiting_) {
    return frame->JumpTo(jump_offset_);
  }
  return absl::OkStatus();
}
// Builds the recursive (direct-dispatch) implementation of a comprehension
// from its five sub-expression steps and the two variable slot indices.
std::unique_ptr<DirectExpressionStep> CreateDirectComprehensionStep(
    size_t iter_slot, size_t accu_slot,
    std::unique_ptr<DirectExpressionStep> range,
    std::unique_ptr<DirectExpressionStep> accu_init,
    std::unique_ptr<DirectExpressionStep> loop_step,
    std::unique_ptr<DirectExpressionStep> condition_step,
    std::unique_ptr<DirectExpressionStep> result_step, bool shortcircuiting,
    int64_t expr_id) {
  auto step = std::make_unique<ComprehensionDirectStep>(
      iter_slot, accu_slot, std::move(range), std::move(accu_init),
      std::move(loop_step), std::move(condition_step), std::move(result_step),
      shortcircuiting, expr_id);
  return step;
}
// Builds the stack-machine step that finalizes a comprehension result.
std::unique_ptr<ExpressionStep> CreateComprehensionFinishStep(size_t accu_slot,
                                                              int64_t expr_id) {
  auto finish_step = std::make_unique<ComprehensionFinish>(accu_slot, expr_id);
  return finish_step;
}
// Builds the stack-machine step that initializes a comprehension range.
std::unique_ptr<ExpressionStep> CreateComprehensionInitStep(int64_t expr_id) {
  auto init_step = std::make_unique<ComprehensionInitStep>(expr_id);
  return init_step;
}
} | #include "eval/eval/comprehension_step.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/struct.pb.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "base/ast_internal/expr.h"
#include "base/type_provider.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/cel_expression_flat_impl.h"
#include "eval/eval/comprehension_slots.h"
#include "eval/eval/const_value_step.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "eval/eval/ident_step.h"
#include "eval/public/activation.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_value.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "runtime/activation.h"
#include "runtime/managed_value_factory.h"
#include "runtime/runtime_options.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using ::cel::BoolValue;
using ::cel::IntValue;
using ::cel::TypeProvider;
using ::cel::Value;
using ::cel::ast_internal::Expr;
using ::cel::ast_internal::Ident;
using ::cel::extensions::ProtoMemoryManagerRef;
using ::cel::test::BoolValueIs;
using ::google::protobuf::ListValue;
using ::google::protobuf::Struct;
using ::google::protobuf::Arena;
using ::testing::_;
using ::testing::Eq;
using ::testing::Return;
using ::testing::SizeIs;
// Builds an Ident AST node referring to the variable named `var`.
Ident CreateIdent(const std::string& var) {
  Ident ident_expr;
  ident_expr.set_name(var);
  return ident_expr;
}
// Fixture for tests exercising the comprehension-init step's range
// normalization (map -> key list projection and error/unknown pass-through).
class ListKeysStepTest : public testing::Test {
 public:
  ListKeysStepTest() = default;
  // Wraps `path` in a flat expression; when `unknown_attributes` is true,
  // attribute-and-function unknown processing is enabled.
  std::unique_ptr<CelExpressionFlatImpl> MakeExpression(
      ExecutionPath&& path, bool unknown_attributes = false) {
    cel::RuntimeOptions options;
    if (unknown_attributes) {
      options.unknown_processing =
          cel::UnknownProcessingOptions::kAttributeAndFunction;
    }
    return std::make_unique<CelExpressionFlatImpl>(
        FlatExpression(std::move(path), 0,
                       TypeProvider::Builtin(), options));
  }
 private:
  Expr dummy_expr_;
};
// Helper step that discards the loop index pushed by the comprehension-init
// step, leaving the (possibly projected) range as the expression result.
class GetListKeysResultStep : public ExpressionStepBase {
 public:
  GetListKeysResultStep() : ExpressionStepBase(-1, false) {}
  absl::Status Evaluate(ExecutionFrame* frame) const override {
    frame->value_stack().Pop(1);  // Drop the iteration index.
    return absl::OkStatus();
  }
};
// gMock matcher: the CelValue under test holds a string equal to `val`.
MATCHER_P(CelStringValue, val, "") {
  const CelValue& to_match = arg;
  absl::string_view value = val;
  return to_match.IsString() && to_match.StringOrDie().value() == value;
}
// A list iteration range should pass through the init step unchanged.
TEST_F(ListKeysStepTest, ListPassedThrough) {
  ExecutionPath path;
  Ident ident = CreateIdent("var");
  auto result = CreateIdentStep(ident, 0);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  result = CreateComprehensionInitStep(1);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  path.push_back(std::make_unique<GetListKeysResultStep>());
  auto expression = MakeExpression(std::move(path));
  Activation activation;
  Arena arena;
  ListValue value;
  value.add_values()->set_number_value(1.0);
  value.add_values()->set_number_value(2.0);
  value.add_values()->set_number_value(3.0);
  activation.InsertValue("var", CelProtoWrapper::CreateMessage(&value, &arena));
  auto eval_result = expression->Evaluate(activation, &arena);
  ASSERT_OK(eval_result);
  ASSERT_TRUE(eval_result->IsList());
  EXPECT_THAT(*eval_result->ListOrDie(), SizeIs(3));
}
// A map iteration range should be projected to the list of its keys
// (in unspecified order).
TEST_F(ListKeysStepTest, MapToKeyList) {
  ExecutionPath path;
  Ident ident = CreateIdent("var");
  auto result = CreateIdentStep(ident, 0);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  result = CreateComprehensionInitStep(1);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  path.push_back(std::make_unique<GetListKeysResultStep>());
  auto expression = MakeExpression(std::move(path));
  Activation activation;
  Arena arena;
  Struct value;
  (*value.mutable_fields())["key1"].set_number_value(1.0);
  (*value.mutable_fields())["key2"].set_number_value(2.0);
  (*value.mutable_fields())["key3"].set_number_value(3.0);
  activation.InsertValue("var", CelProtoWrapper::CreateMessage(&value, &arena));
  auto eval_result = expression->Evaluate(activation, &arena);
  ASSERT_OK(eval_result);
  ASSERT_TRUE(eval_result->IsList());
  EXPECT_THAT(*eval_result->ListOrDie(), SizeIs(3));
  std::vector<CelValue> keys;
  keys.reserve(eval_result->ListOrDie()->size());
  for (int i = 0; i < eval_result->ListOrDie()->size(); i++) {
    keys.push_back(eval_result->ListOrDie()->operator[](i));
  }
  EXPECT_THAT(keys, testing::UnorderedElementsAre(CelStringValue("key1"),
                                                  CelStringValue("key2"),
                                                  CelStringValue("key3")));
}
// A map whose attribute is partially unknown should collapse to an
// UnknownSet for the whole map variable instead of a key list.
TEST_F(ListKeysStepTest, MapPartiallyUnknown) {
  ExecutionPath path;
  Ident ident = CreateIdent("var");
  auto result = CreateIdentStep(ident, 0);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  result = CreateComprehensionInitStep(1);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  path.push_back(std::make_unique<GetListKeysResultStep>());
  auto expression =
      MakeExpression(std::move(path), true);
  Activation activation;
  Arena arena;
  Struct value;
  (*value.mutable_fields())["key1"].set_number_value(1.0);
  (*value.mutable_fields())["key2"].set_number_value(2.0);
  (*value.mutable_fields())["key3"].set_number_value(3.0);
  activation.InsertValue("var", CelProtoWrapper::CreateMessage(&value, &arena));
  // Mark var["key2"]["foo"].* unknown, making `var` partially unknown.
  activation.set_unknown_attribute_patterns({CelAttributePattern(
      "var",
      {CreateCelAttributeQualifierPattern(CelValue::CreateStringView("key2")),
       CreateCelAttributeQualifierPattern(CelValue::CreateStringView("foo")),
       CelAttributeQualifierPattern::CreateWildcard()})});
  auto eval_result = expression->Evaluate(activation, &arena);
  ASSERT_OK(eval_result);
  ASSERT_TRUE(eval_result->IsUnknownSet());
  const auto& attrs = eval_result->UnknownSetOrDie()->unknown_attributes();
  EXPECT_THAT(attrs, SizeIs(1));
  EXPECT_THAT(attrs.begin()->variable_name(), Eq("var"));
  EXPECT_THAT(attrs.begin()->qualifier_path(), SizeIs(0));
}
// An error range (here: unbound variable "var") should pass through the init
// step unchanged.
TEST_F(ListKeysStepTest, ErrorPassedThrough) {
  ExecutionPath path;
  Ident ident = CreateIdent("var");
  auto result = CreateIdentStep(ident, 0);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  result = CreateComprehensionInitStep(1);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  path.push_back(std::make_unique<GetListKeysResultStep>());
  auto expression = MakeExpression(std::move(path));
  Activation activation;
  Arena arena;
  auto eval_result = expression->Evaluate(activation, &arena);
  ASSERT_OK(eval_result);
  ASSERT_TRUE(eval_result->IsError());
  EXPECT_THAT(eval_result->ErrorOrDie()->message(),
              testing::HasSubstr("\"var\""));
  EXPECT_EQ(eval_result->ErrorOrDie()->code(), absl::StatusCode::kUnknown);
}
// An UnknownSet range (the whole variable is unknown) should pass through
// the init step unchanged.
TEST_F(ListKeysStepTest, UnknownSetPassedThrough) {
  ExecutionPath path;
  Ident ident = CreateIdent("var");
  auto result = CreateIdentStep(ident, 0);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  result = CreateComprehensionInitStep(1);
  ASSERT_OK(result);
  path.push_back(*std::move(result));
  path.push_back(std::make_unique<GetListKeysResultStep>());
  auto expression =
      MakeExpression(std::move(path), true);
  Activation activation;
  Arena arena;
  activation.set_unknown_attribute_patterns({CelAttributePattern("var", {})});
  auto eval_result = expression->Evaluate(activation, &arena);
  ASSERT_OK(eval_result);
  ASSERT_TRUE(eval_result->IsUnknownSet());
  EXPECT_THAT(eval_result->UnknownSetOrDie()->unknown_attributes(), SizeIs(1));
}
// gMock stub for a DirectExpressionStep whose Evaluate can be scripted to
// return arbitrary statuses.
class MockDirectStep : public DirectExpressionStep {
 public:
  MockDirectStep() : DirectExpressionStep(-1) {}
  MOCK_METHOD(absl::Status, Evaluate,
              (ExecutionFrameBase&, Value&, AttributeTrail&),
              (const, override));
};
// Fixture for the direct (recursive) comprehension step: provides a value
// manager, two comprehension slots (iteration + accumulator), and a helper
// building the list [1, 2] used as an iteration range.
class DirectComprehensionTest : public testing::Test {
 public:
  DirectComprehensionTest()
      : value_manager_(TypeProvider::Builtin(), ProtoMemoryManagerRef(&arena_)),
        slots_(2) {}
  // Builds the CEL list value [1, 2].
  absl::StatusOr<cel::ListValue> MakeList() {
    CEL_ASSIGN_OR_RETURN(auto builder,
                         value_manager_.get().NewListValueBuilder(
                             value_manager_.get().GetDynListType()));
    CEL_RETURN_IF_ERROR(builder->Add(IntValue(1)));
    CEL_RETURN_IF_ERROR(builder->Add(IntValue(2)));
    return std::move(*builder).Build();
  }
 protected:
  google::protobuf::Arena arena_;              // Backing storage for values.
  cel::ManagedValueFactory value_manager_;     // Owns created values.
  ComprehensionSlots slots_;                   // Slots 0 (iter) and 1 (accu).
  cel::Activation empty_activation_;           // No bindings needed.
};
// A non-OK status from the range sub-expression must propagate out of
// ComprehensionDirectStep::Evaluate unchanged.
TEST_F(DirectComprehensionTest, PropagateRangeNonOkStatus) {
  cel::RuntimeOptions options;
  ExecutionFrameBase frame(empty_activation_, nullptr, options,
                           value_manager_.get(), slots_);
  auto range_step = std::make_unique<MockDirectStep>();
  MockDirectStep* mock = range_step.get();
  ON_CALL(*mock, Evaluate(_, _, _))
      .WillByDefault(Return(absl::InternalError("test range error")));
  auto compre_step = CreateDirectComprehensionStep(
      0, 1,
      std::move(range_step),
      CreateConstValueDirectStep(BoolValue(false)),
      CreateConstValueDirectStep(BoolValue(false)),
      CreateConstValueDirectStep(BoolValue(true)),
      CreateDirectSlotIdentStep("__result__", 1, -1),
      true, -1);
  Value result;
  AttributeTrail trail;
  EXPECT_THAT(compre_step->Evaluate(frame, result, trail),
              StatusIs(absl::StatusCode::kInternal, "test range error"));
}
// A non-OK status from the accumulator-init sub-expression must propagate
// out of ComprehensionDirectStep::Evaluate unchanged.
TEST_F(DirectComprehensionTest, PropagateAccuInitNonOkStatus) {
  cel::RuntimeOptions options;
  ExecutionFrameBase frame(empty_activation_, nullptr, options,
                           value_manager_.get(), slots_);
  auto accu_init = std::make_unique<MockDirectStep>();
  MockDirectStep* mock = accu_init.get();
  ON_CALL(*mock, Evaluate(_, _, _))
      .WillByDefault(Return(absl::InternalError("test accu init error")));
  ASSERT_OK_AND_ASSIGN(auto list, MakeList());
  auto compre_step = CreateDirectComprehensionStep(
      0, 1,
      CreateConstValueDirectStep(std::move(list)),
      std::move(accu_init),
      CreateConstValueDirectStep(BoolValue(false)),
      CreateConstValueDirectStep(BoolValue(true)),
      CreateDirectSlotIdentStep("__result__", 1, -1),
      true, -1);
  Value result;
  AttributeTrail trail;
  EXPECT_THAT(compre_step->Evaluate(frame, result, trail),
              StatusIs(absl::StatusCode::kInternal, "test accu init error"));
}
// A non-OK status from the loop-step sub-expression must propagate out of
// ComprehensionDirectStep::Evaluate unchanged.
TEST_F(DirectComprehensionTest, PropagateLoopNonOkStatus) {
  cel::RuntimeOptions options;
  ExecutionFrameBase frame(empty_activation_, nullptr, options,
                           value_manager_.get(), slots_);
  auto loop_step = std::make_unique<MockDirectStep>();
  MockDirectStep* mock = loop_step.get();
  ON_CALL(*mock, Evaluate(_, _, _))
      .WillByDefault(Return(absl::InternalError("test loop error")));
  ASSERT_OK_AND_ASSIGN(auto list, MakeList());
  auto compre_step = CreateDirectComprehensionStep(
      0, 1,
      CreateConstValueDirectStep(std::move(list)),
      CreateConstValueDirectStep(BoolValue(false)),
      std::move(loop_step),
      CreateConstValueDirectStep(BoolValue(true)),
      CreateDirectSlotIdentStep("__result__", 1, -1),
      true, -1);
  Value result;
  AttributeTrail trail;
  EXPECT_THAT(compre_step->Evaluate(frame, result, trail),
              StatusIs(absl::StatusCode::kInternal, "test loop error"));
}
// Verifies that an error returned by the loop-condition subexpression is
// propagated verbatim out of the comprehension step.
TEST_F(DirectComprehensionTest, PropagateConditionNonOkStatus) {
  cel::RuntimeOptions options;
  ExecutionFrameBase frame(empty_activation_, nullptr, options,
                           value_manager_.get(), slots_);
  auto condition = std::make_unique<MockDirectStep>();
  MockDirectStep* mock = condition.get();
  ON_CALL(*mock, Evaluate(_, _, _))
      .WillByDefault(Return(absl::InternalError("test condition error")));
  ASSERT_OK_AND_ASSIGN(auto list, MakeList());
  auto compre_step = CreateDirectComprehensionStep(
      0, 1,
      CreateConstValueDirectStep(std::move(list)),
      CreateConstValueDirectStep(BoolValue(false)),
      CreateConstValueDirectStep(BoolValue(false)),
      std::move(condition),
      CreateDirectSlotIdentStep("__result__", 1, -1),
      true, -1);
  Value result;
  AttributeTrail trail;
  EXPECT_THAT(compre_step->Evaluate(frame, result, trail),
              StatusIs(absl::StatusCode::kInternal, "test condition error"));
}
// Verifies that an error returned by the result subexpression is
// propagated verbatim out of the comprehension step.
TEST_F(DirectComprehensionTest, PropagateResultNonOkStatus) {
  cel::RuntimeOptions options;
  ExecutionFrameBase frame(empty_activation_, nullptr, options,
                           value_manager_.get(), slots_);
  auto result_step = std::make_unique<MockDirectStep>();
  MockDirectStep* mock = result_step.get();
  ON_CALL(*mock, Evaluate(_, _, _))
      .WillByDefault(Return(absl::InternalError("test result error")));
  ASSERT_OK_AND_ASSIGN(auto list, MakeList());
  auto compre_step = CreateDirectComprehensionStep(
      0, 1,
      CreateConstValueDirectStep(std::move(list)),
      CreateConstValueDirectStep(BoolValue(false)),
      CreateConstValueDirectStep(BoolValue(false)),
      CreateConstValueDirectStep(BoolValue(true)),
      std::move(result_step),
      true, -1);
  Value result;
  AttributeTrail trail;
  EXPECT_THAT(compre_step->Evaluate(frame, result, trail),
              StatusIs(absl::StatusCode::kInternal, "test result error"));
}
// With shortcircuiting enabled (penultimate arg true) and a constant-false
// loop condition, the loop body must never be evaluated (Times(0)).
TEST_F(DirectComprehensionTest, Shortcircuit) {
  cel::RuntimeOptions options;
  ExecutionFrameBase frame(empty_activation_, nullptr, options,
                           value_manager_.get(), slots_);
  auto loop_step = std::make_unique<MockDirectStep>();
  MockDirectStep* mock = loop_step.get();
  EXPECT_CALL(*mock, Evaluate(_, _, _))
      .Times(0)
      .WillRepeatedly([](ExecutionFrameBase&, Value& result, AttributeTrail&) {
        result = BoolValue(false);
        return absl::OkStatus();
      });
  ASSERT_OK_AND_ASSIGN(auto list, MakeList());
  auto compre_step = CreateDirectComprehensionStep(
      0, 1,
      CreateConstValueDirectStep(std::move(list)),
      CreateConstValueDirectStep(BoolValue(false)),
      std::move(loop_step),
      CreateConstValueDirectStep(BoolValue(false)),
      CreateDirectSlotIdentStep("__result__", 1, -1),
      true, -1);
  Value result;
  AttributeTrail trail;
  ASSERT_OK(compre_step->Evaluate(frame, result, trail));
  EXPECT_THAT(result, BoolValueIs(false));
}
// With comprehension_max_iterations = 2 and a range that requires more
// iterations, evaluation stops with an internal error after exactly one
// loop-body call.
TEST_F(DirectComprehensionTest, IterationLimit) {
  cel::RuntimeOptions options;
  options.comprehension_max_iterations = 2;
  ExecutionFrameBase frame(empty_activation_, nullptr, options,
                           value_manager_.get(), slots_);
  auto loop_step = std::make_unique<MockDirectStep>();
  MockDirectStep* mock = loop_step.get();
  EXPECT_CALL(*mock, Evaluate(_, _, _))
      .Times(1)
      .WillRepeatedly([](ExecutionFrameBase&, Value& result, AttributeTrail&) {
        result = BoolValue(false);
        return absl::OkStatus();
      });
  ASSERT_OK_AND_ASSIGN(auto list, MakeList());
  auto compre_step = CreateDirectComprehensionStep(
      0, 1,
      CreateConstValueDirectStep(std::move(list)),
      CreateConstValueDirectStep(BoolValue(false)),
      std::move(loop_step),
      CreateConstValueDirectStep(BoolValue(true)),
      CreateDirectSlotIdentStep("__result__", 1, -1),
      true, -1);
  Value result;
  AttributeTrail trail;
  EXPECT_THAT(compre_step->Evaluate(frame, result, trail),
              StatusIs(absl::StatusCode::kInternal));
}
// With shortcircuiting disabled (penultimate arg false), the loop body runs
// for every element of the two-element range even though the loop condition
// is constant false.
TEST_F(DirectComprehensionTest, Exhaustive) {
  cel::RuntimeOptions options;
  ExecutionFrameBase frame(empty_activation_, nullptr, options,
                           value_manager_.get(), slots_);
  auto loop_step = std::make_unique<MockDirectStep>();
  MockDirectStep* mock = loop_step.get();
  EXPECT_CALL(*mock, Evaluate(_, _, _))
      .Times(2)
      .WillRepeatedly([](ExecutionFrameBase&, Value& result, AttributeTrail&) {
        result = BoolValue(false);
        return absl::OkStatus();
      });
  ASSERT_OK_AND_ASSIGN(auto list, MakeList());
  auto compre_step = CreateDirectComprehensionStep(
      0, 1,
      CreateConstValueDirectStep(std::move(list)),
      CreateConstValueDirectStep(BoolValue(false)),
      std::move(loop_step),
      CreateConstValueDirectStep(BoolValue(false)),
      CreateDirectSlotIdentStep("__result__", 1, -1),
      false, -1);
  Value result;
  AttributeTrail trail;
  ASSERT_OK(compre_step->Evaluate(frame, result, trail));
  EXPECT_THAT(result, BoolValueIs(false));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/comprehension_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/comprehension_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
ee620caa-1725-45c5-9079-95411bac2806 | cpp | tensorflow/tensorflow | debug_io_utils | tensorflow/core/debug/debug_io_utils.cc | tensorflow/core/debug/debug_io_utils_test.cc | #include "tensorflow/core/debug/debug_io_utils.h"
#include <stddef.h>
#include <string.h>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <utility>
#include <vector>
#ifndef PLATFORM_WINDOWS
#include "grpcpp/create_channel.h"
#else
#endif
#include "absl/strings/ascii.h"
#include "absl/strings/match.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/debug/debug_callback_registry.h"
#include "tensorflow/core/debug/debugger_event_metadata.pb.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/bits.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/strcat.h"
#include "tensorflow/core/util/event.pb.h"
// gRPC-based debug URLs are not supported in Windows open-source builds;
// the PLATFORM_WINDOWS branches below use this macro to fail fast.
#define GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR \
  return errors::Unimplemented( \
      kGrpcURLScheme, " debug URL scheme is not implemented on Windows yet.")
namespace tensorflow {
namespace {
// Subdirectory (under the dump root) used for per-node input/output dumps.
constexpr absl::string_view kDumpSubDirName = "node-io-dump";
// Builds the skeleton Event proto for one chunk of a (possibly multi-chunk)
// debug tensor: fills in wall time, node/tag names, dtype and shape, and
// attaches chunk bookkeeping (num_chunks/chunk_index) as JSON plugin data.
// The tensor payload itself is filled in later by the caller.
Event PrepareChunkEventProto(const DebugNodeKey& debug_node_key,
                             const uint64 wall_time_us, const size_t num_chunks,
                             const size_t chunk_index,
                             const DataType& tensor_dtype,
                             const TensorShapeProto& tensor_shape) {
  Event event;
  event.set_wall_time(static_cast<double>(wall_time_us));
  Summary::Value* value = event.mutable_summary()->add_value();
  value->set_node_name(debug_node_key.debug_node_name);
  value->set_tag(debug_node_key.node_name);
  // Device name, output slot and chunk indices travel as a JSON-encoded
  // DebuggerEventMetadata proto inside the summary's plugin data.
  third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
  metadata.set_device(debug_node_key.device_name);
  metadata.set_output_slot(debug_node_key.output_slot);
  metadata.set_num_chunks(num_chunks);
  metadata.set_chunk_index(chunk_index);
  string json_output;
  tensorflow::protobuf::util::JsonPrintOptions json_options;
  json_options.always_print_primitive_fields = true;
  auto status = tensorflow::protobuf::util::MessageToJsonString(
      metadata, &json_output, json_options);
  if (status.ok()) {
    SummaryMetadata::PluginData* plugin_data =
        value->mutable_metadata()->mutable_plugin_data();
    plugin_data->set_plugin_name(DebugIO::kDebuggerPluginName);
    plugin_data->set_content(json_output);
  } else {
    // Metadata is best-effort: log and continue with a metadata-less event.
    LOG(WARNING) << "Failed to convert DebuggerEventMetadata proto to JSON. "
                 << "The debug_node_name is " << debug_node_key.debug_node_name
                 << ".";
  }
  value->mutable_tensor()->set_dtype(tensor_dtype);
  *value->mutable_tensor()->mutable_tensor_shape() = tensor_shape;
  return event;
}
// Upper bound on the serialized size of one string_val entry. On the
// google-internal platform a varint length prefix is budgeted in as well.
const size_t StringValMaxBytesInProto(const string& str) {
#if defined(PLATFORM_GOOGLE)
  return str.size() + DebugGrpcIO::kGrpcMaxVarintLengthSize;
#else
  return str.size();
#endif
}
// Splits the string_val entries of a DT_STRING tensor proto into Event
// chunks, each staying under `chunk_size_limit` bytes (0 = unlimited).
// Fails if any single string alone exceeds the limit. On success appends
// one Event per chunk to `events`.
Status WrapStringTensorAsEvents(const DebugNodeKey& debug_node_key,
                                const uint64 wall_time_us,
                                const size_t chunk_size_limit,
                                TensorProto* tensor_proto,
                                std::vector<Event>* events) {
  const protobuf::RepeatedPtrField<string>& strs = tensor_proto->string_val();
  const size_t num_strs = strs.size();
  const size_t chunk_size_ub = chunk_size_limit > 0
                                   ? chunk_size_limit
                                   : std::numeric_limits<size_t>::max();
  // First pass: find the cutoff indices where a new chunk must start.
  // `cutoffs` holds the exclusive end index of each chunk.
  std::vector<size_t> cutoffs;
  size_t chunk_size = 0;
  for (size_t i = 0; i < num_strs; ++i) {
    if (StringValMaxBytesInProto(strs[i]) > chunk_size_ub) {
      return errors::FailedPrecondition(
          "string value at index ", i, " from debug node ",
          debug_node_key.debug_node_name,
          " does not fit gRPC message size limit (", chunk_size_ub, ")");
    }
    if (chunk_size + StringValMaxBytesInProto(strs[i]) > chunk_size_ub) {
      cutoffs.push_back(i);
      chunk_size = 0;
    }
    chunk_size += StringValMaxBytesInProto(strs[i]);
  }
  cutoffs.push_back(num_strs);
  const size_t num_chunks = cutoffs.size();
  // Second pass: build one Event per chunk.
  for (size_t i = 0; i < num_chunks; ++i) {
    Event event = PrepareChunkEventProto(debug_node_key, wall_time_us,
                                         num_chunks, i, tensor_proto->dtype(),
                                         tensor_proto->tensor_shape());
    Summary::Value* value = event.mutable_summary()->mutable_value(0);
    if (cutoffs.size() == 1) {
      // Single chunk: move the whole string_val field without copying.
      value->mutable_tensor()->mutable_string_val()->Swap(
          tensor_proto->mutable_string_val());
    } else {
      const size_t begin = (i == 0) ? 0 : cutoffs[i - 1];
      const size_t end = cutoffs[i];
      for (size_t j = begin; j < end; ++j) {
        value->mutable_tensor()->add_string_val(strs[j]);
      }
    }
    events->push_back(std::move(event));
  }
  return absl::OkStatus();
}
// Serializes `tensor` into one or more Event protos, splitting the payload
// into chunks no larger than `chunk_size_limit` bytes (0 = single chunk).
// DT_STRING tensors are split per string entry; all other dtypes are split
// by byte ranges of tensor_content.
Status WrapTensorAsEvents(const DebugNodeKey& debug_node_key,
                          const Tensor& tensor, const uint64 wall_time_us,
                          const size_t chunk_size_limit,
                          std::vector<Event>* events) {
  TensorProto tensor_proto;
  if (tensor.dtype() == DT_STRING) {
    tensor.AsProtoField(&tensor_proto);
    TF_RETURN_IF_ERROR(WrapStringTensorAsEvents(
        debug_node_key, wall_time_us, chunk_size_limit, &tensor_proto, events));
  } else {
    tensor.AsProtoTensorContent(&tensor_proto);
    const size_t total_length = tensor_proto.tensor_content().size();
    const size_t chunk_size_ub =
        chunk_size_limit > 0 ? chunk_size_limit : total_length;
    // Ceiling division; an empty tensor still yields one (empty) chunk.
    const size_t num_chunks =
        (total_length == 0)
            ? 1
            : (total_length + chunk_size_ub - 1) / chunk_size_ub;
    for (size_t i = 0; i < num_chunks; ++i) {
      const size_t pos = i * chunk_size_ub;
      const size_t len =
          (i == num_chunks - 1) ? (total_length - pos) : chunk_size_ub;
      Event event = PrepareChunkEventProto(debug_node_key, wall_time_us,
                                           num_chunks, i, tensor_proto.dtype(),
                                           tensor_proto.tensor_shape());
      event.mutable_summary()
          ->mutable_value(0)
          ->mutable_tensor()
          ->set_tensor_content(tensor_proto.tensor_content().substr(pos, len));
      events->push_back(std::move(event));
    }
  }
  return absl::OkStatus();
}
// Returns `in` with "_<timestamp>" appended. If that path already exists on
// the filesystem, probes "_<timestamp>-1", "-2", ... until an unused path is
// found, so concurrent dumps with the same timestamp do not collide.
string AppendTimestampToFilePath(const string& in, const uint64 timestamp) {
  const string base = strings::StrCat(in, "_", timestamp);
  string candidate = base;
  for (uint64 suffix = 1; Env::Default()->FileExists(candidate).ok();
       ++suffix) {
    candidate = strings::StrCat(base, "-", suffix);
  }
  return candidate;
}
#ifndef PLATFORM_WINDOWS
// Sends a serialized GraphDef over a gRPC debug stream, split into chunks of
// at most kGrpcMessageSizeLimitBytes. Each chunk's graph_def field carries a
// header "hash,device,wall_time|chunk_index|num_chunks|" followed by the raw
// bytes; the hash lets the receiver reassemble and verify the pieces.
Status PublishEncodedGraphDefInChunks(const string& encoded_graph_def,
                                      const string& device_name,
                                      const int64_t wall_time,
                                      const string& debug_url) {
  const uint64 hash = ::tensorflow::Hash64(encoded_graph_def);
  const size_t total_length = encoded_graph_def.size();
  const size_t num_chunks =
      static_cast<size_t>(std::ceil(static_cast<float>(total_length) /
                                    DebugGrpcIO::kGrpcMessageSizeLimitBytes));
  for (size_t i = 0; i < num_chunks; ++i) {
    const size_t pos = i * DebugGrpcIO::kGrpcMessageSizeLimitBytes;
    const size_t len = (i == num_chunks - 1)
                           ? (total_length - pos)
                           : DebugGrpcIO::kGrpcMessageSizeLimitBytes;
    Event event;
    event.set_wall_time(static_cast<double>(wall_time));
    event.set_graph_def(strings::StrCat(hash, ",", device_name, ",", wall_time,
                                        "|", i, "|", num_chunks, "|",
                                        encoded_graph_def.substr(pos, len)));
    // The last chunk asks the server for a reply (stream bookkeeping).
    const Status s = DebugGrpcIO::SendEventProtoThroughGrpcStream(
        event, debug_url, num_chunks - 1 == i);
    if (!s.ok()) {
      return errors::FailedPrecondition(
          "Failed to send chunk ", i, " of ", num_chunks,
          " of encoded GraphDef of size ", encoded_graph_def.size(), " bytes, ",
          "due to: ", s.message());
    }
  }
  return absl::OkStatus();
}
#endif
}
// Summary plugin name under which debugger events are recorded.
const char* const DebugIO::kDebuggerPluginName = "debugger";
// File-name tags used when dumping metadata and graphs to disk.
const char* const DebugIO::kCoreMetadataTag = "core_metadata_";
const char* const DebugIO::kGraphTag = "graph_";
const char* const DebugIO::kHashTag = "hash";
// Reads the file at `dump_file_path` in full and parses it as a serialized
// Event proto into `event`.
//
// Returns a non-OK status if the file cannot be sized, opened, or read, or
// if its contents do not parse as an Event proto.
Status ReadEventFromFile(const string& dump_file_path, Event* event) {
  Env* env(Env::Default());
  string content;
  uint64 file_size = 0;
  Status s = env->GetFileSize(dump_file_path, &file_size);
  if (!s.ok()) {
    return s;
  }
  content.resize(file_size);
  std::unique_ptr<RandomAccessFile> file;
  s = env->NewRandomAccessFile(dump_file_path, &file);
  if (!s.ok()) {
    return s;
  }
  StringPiece result;
  s = file->Read(0, file_size, &result, &(content)[0]);
  if (!s.ok()) {
    return s;
  }
  // BUGFIX: the parse result was previously ignored, so a corrupt or
  // truncated dump file would return OK with a garbage Event.
  if (!event->ParseFromString(content)) {
    return errors::DataLoss("Failed to parse Event proto from file: ",
                            dump_file_path);
  }
  return absl::OkStatus();
}
// URL scheme prefixes recognized by PublishDebugTensor() et al. The string
// literals were truncated at the "//" (lost in extraction); restore the full
// scheme prefixes so strlen()-based stripping elsewhere in this file works.
const char* const DebugIO::kFileURLScheme = "file://";
const char* const DebugIO::kGrpcURLScheme = "grpc://";
const char* const DebugIO::kMemoryURLScheme = "memcbk://";
// Publishes core session-run metadata (step indices and the fetched/fed/
// targeted node names) to every debug URL, as a LogMessage Event whose
// message is a hand-assembled JSON object.
//
// file:// URLs receive the event as a timestamped file under the dump root;
// grpc:// URLs receive it over the debug stream with an extra "grpc_path"
// JSON field. Returns the first failing status, if any.
Status DebugIO::PublishDebugMetadata(
    const int64_t global_step, const int64_t session_run_index,
    const int64_t executor_step_index, const std::vector<string>& input_names,
    const std::vector<string>& output_names,
    const std::vector<string>& target_nodes,
    const std::unordered_set<string>& debug_urls) {
  std::ostringstream oss;
  // Manually assemble the JSON payload; all fields are integers or arrays
  // of (unescaped) node-name strings.
  oss << "{";
  oss << "\"global_step\":" << global_step << ",";
  oss << "\"session_run_index\":" << session_run_index << ",";
  oss << "\"executor_step_index\":" << executor_step_index << ",";
  oss << "\"input_names\":[";
  for (size_t i = 0; i < input_names.size(); ++i) {
    oss << "\"" << input_names[i] << "\"";
    if (i < input_names.size() - 1) {
      oss << ",";
    }
  }
  oss << "],";
  oss << "\"output_names\":[";
  for (size_t i = 0; i < output_names.size(); ++i) {
    oss << "\"" << output_names[i] << "\"";
    if (i < output_names.size() - 1) {
      oss << ",";
    }
  }
  oss << "],";
  oss << "\"target_nodes\":[";
  for (size_t i = 0; i < target_nodes.size(); ++i) {
    oss << "\"" << target_nodes[i] << "\"";
    if (i < target_nodes.size() - 1) {
      oss << ",";
    }
  }
  oss << "]";
  oss << "}";
  const string json_metadata = oss.str();
  Event event;
  event.set_wall_time(static_cast<double>(Env::Default()->NowMicros()));
  LogMessage* log_message = event.mutable_log_message();
  log_message->set_message(json_metadata);
  Status status;
  for (const string& url : debug_urls) {
    if (absl::StartsWith(absl::AsciiStrToLower(url), kGrpcURLScheme)) {
#ifndef PLATFORM_WINDOWS
      Event grpc_event;
      // Strip the scheme prefix to recover "host:port[/path]".
      // BUGFIX: this previously stripped strlen(kFileURLScheme), which was
      // only correct by the coincidence that "file://" and "grpc://" have
      // the same length.
      const string address = url.substr(strlen(DebugIO::kGrpcURLScheme));
      const string path = address.find('/') == string::npos
                              ? ""
                              : address.substr(address.find('/'));
      grpc_event.set_wall_time(event.wall_time());
      LogMessage* log_message_grpc = grpc_event.mutable_log_message();
      // Splice ",\"grpc_path\":\"...\"" in before the closing brace.
      log_message_grpc->set_message(
          strings::StrCat(json_metadata.substr(0, json_metadata.size() - 1),
                          ",\"grpc_path\":\"", path, "\"}"));
      status.Update(
          DebugGrpcIO::SendEventProtoThroughGrpcStream(grpc_event, url, true));
#else
      GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
    } else if (absl::StartsWith(absl::AsciiStrToLower(url), kFileURLScheme)) {
      const string dump_root_dir = url.substr(strlen(kFileURLScheme));
      const string core_metadata_path = AppendTimestampToFilePath(
          io::JoinPath(dump_root_dir,
                       strings::StrCat(
                           DebugNodeKey::kMetadataFilePrefix,
                           DebugIO::kCoreMetadataTag, "sessionrun",
                           strings::Printf("%.14lld", static_cast<long long>(
                                                          session_run_index)))),
          Env::Default()->NowMicros());
      status.Update(DebugFileIO::DumpEventProtoToFile(
          event, string(io::Dirname(core_metadata_path)),
          string(io::Basename(core_metadata_path))));
    }
  }
  return status;
}
// Publishes one debug tensor to every URL in `debug_urls`, dispatching on
// the URL scheme: file:// dumps to disk (subject to the global disk-byte
// allowance), grpc:// streams over gRPC (optionally gated), and memcbk://
// invokes a registered in-process callback. Returns OK if every URL
// succeeded; otherwise an Internal status aggregating the failures.
Status DebugIO::PublishDebugTensor(const DebugNodeKey& debug_node_key,
                                   const Tensor& tensor,
                                   const uint64 wall_time_us,
                                   const absl::Span<const string> debug_urls,
                                   const bool gated_grpc,
                                   const int64_t step_id) {
  int32_t num_failed_urls = 0;
  std::vector<Status> fail_statuses;
  for (const string& url : debug_urls) {
    if (absl::StartsWith(absl::AsciiStrToLower(url), kFileURLScheme)) {
      const string dump_root_dir = url.substr(strlen(kFileURLScheme));
      const int64_t tensorBytes =
          tensor.IsInitialized() ? tensor.TotalBytes() : 0;
      // Enforce the process-wide disk budget before writing anything.
      if (!DebugFileIO::requestDiskByteUsage(tensorBytes)) {
        return errors::ResourceExhausted(
            "TensorFlow Debugger has exhausted file-system byte-size "
            "allowance (",
            DebugFileIO::global_disk_bytes_limit_, "), therefore it cannot ",
            "dump an additional ", tensorBytes, " byte(s) of tensor data ",
            "for the debug tensor ", debug_node_key.node_name, ":",
            debug_node_key.output_slot, ". You may use the environment ",
            "variable TFDBG_DISK_BYTES_LIMIT to set a higher limit.");
      }
      // Node-IO dumps (io_of_node set) use a step-scoped directory layout.
      Status s = debug_node_key.io_of_node.empty()
                     ? DebugFileIO::DumpTensorToDir(debug_node_key, tensor,
                                                    wall_time_us, dump_root_dir,
                                                    nullptr)
                     : DebugFileIO::DumpTensorToDirForNodeDumping(
                           debug_node_key, tensor, wall_time_us, dump_root_dir,
                           nullptr, step_id);
      if (!s.ok()) {
        num_failed_urls++;
        fail_statuses.push_back(s);
      }
    } else if (absl::StartsWith(absl::AsciiStrToLower(url), kGrpcURLScheme)) {
#ifndef PLATFORM_WINDOWS
      Status s = DebugGrpcIO::SendTensorThroughGrpcStream(
          debug_node_key, tensor, wall_time_us, url, gated_grpc);
      if (!s.ok()) {
        num_failed_urls++;
        fail_statuses.push_back(s);
      }
#else
      GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
    } else if (absl::StartsWith(absl::AsciiStrToLower(url), kMemoryURLScheme)) {
      const string dump_root_dir = url.substr(strlen(kMemoryURLScheme));
      auto* callback_registry = DebugCallbackRegistry::singleton();
      auto* callback = callback_registry->GetCallback(dump_root_dir);
      // A memcbk:// URL without a registered callback is a programming error.
      CHECK(callback) << "No callback registered for: " << dump_root_dir;
      (*callback)(debug_node_key, tensor);
    } else {
      return Status(absl::StatusCode::kUnavailable,
                    strings::StrCat("Invalid debug target URL: ", url));
    }
  }
  if (num_failed_urls == 0) {
    return absl::OkStatus();
  } else {
    string error_message = strings::StrCat(
        "Publishing to ", num_failed_urls, " of ", debug_urls.size(),
        " debug target URLs failed, due to the following errors:");
    for (Status& status : fail_statuses) {
      error_message =
          strings::StrCat(error_message, " ", status.message(), ";");
    }
    return Status(absl::StatusCode::kInternal, error_message);
  }
}
// Convenience overload: publishes with gRPC gating disabled and the default
// step id.
Status DebugIO::PublishDebugTensor(const DebugNodeKey& debug_node_key,
                                   const Tensor& tensor,
                                   const uint64 wall_time_us,
                                   const absl::Span<const string> debug_urls) {
  constexpr bool kGatedGrpc = false;
  return PublishDebugTensor(debug_node_key, tensor, wall_time_us, debug_urls,
                            kGatedGrpc);
}
// Publishes the serialized GraphDef of `graph` to every debug URL.
// file:// URLs get a single event file named with the graph's hash and
// timestamp under the device-specific dump directory; grpc:// URLs receive
// the GraphDef in size-limited chunks. Accumulates and returns the combined
// status.
Status DebugIO::PublishGraph(const Graph& graph, const string& device_name,
                             const std::unordered_set<string>& debug_urls) {
  GraphDef graph_def;
  graph.ToGraphDef(&graph_def);
  string buf;
  graph_def.SerializeToString(&buf);
  const int64_t now_micros = Env::Default()->NowMicros();
  Event event;
  event.set_wall_time(static_cast<double>(now_micros));
  event.set_graph_def(buf);
  Status status = absl::OkStatus();
  for (const string& debug_url : debug_urls) {
    if (absl::StartsWith(debug_url, kFileURLScheme)) {
      const string dump_root_dir =
          io::JoinPath(debug_url.substr(strlen(kFileURLScheme)),
                       DebugNodeKey::DeviceNameToDevicePath(device_name));
      // The hash in the file name lets readers de-duplicate identical graphs.
      const uint64 graph_hash = ::tensorflow::Hash64(buf);
      const string file_name =
          strings::StrCat(DebugNodeKey::kMetadataFilePrefix, DebugIO::kGraphTag,
                          DebugIO::kHashTag, graph_hash, "_", now_micros);
      status.Update(
          DebugFileIO::DumpEventProtoToFile(event, dump_root_dir, file_name));
    } else if (absl::StartsWith(debug_url, kGrpcURLScheme)) {
#ifndef PLATFORM_WINDOWS
      status.Update(PublishEncodedGraphDefInChunks(buf, device_name, now_micros,
                                                   debug_url));
#else
      GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
    }
  }
  return status;
}
// Returns true if the Copy node feeding a set of debug ops should fire:
// i.e., if any spec is ungated, targets a non-grpc URL (compare() != 0 for
// a non-matching prefix), or has its read gate currently open.
// On Windows, gating is unsupported and the gate is always open.
bool DebugIO::IsCopyNodeGateOpen(
    const std::vector<DebugWatchAndURLSpec>& specs) {
#ifndef PLATFORM_WINDOWS
  for (const DebugWatchAndURLSpec& spec : specs) {
    if (!spec.gated_grpc || spec.url.compare(0, strlen(DebugIO::kGrpcURLScheme),
                                             DebugIO::kGrpcURLScheme)) {
      return true;
    } else {
      if (DebugGrpcIO::IsReadGateOpen(spec.url, spec.watch_key)) {
        return true;
      }
    }
  }
  return false;
#else
  return true;
#endif
}
// Returns true if the debug node identified by `watch_key` should send to
// at least one of `debug_urls`: non-grpc URLs are always open; grpc URLs are
// open only when their read gate is enabled. Always open on Windows.
bool DebugIO::IsDebugNodeGateOpen(const string& watch_key,
                                  const std::vector<string>& debug_urls) {
#ifndef PLATFORM_WINDOWS
  for (const string& debug_url : debug_urls) {
    // compare() returns non-zero when the URL does NOT start with grpc://.
    if (debug_url.compare(0, strlen(DebugIO::kGrpcURLScheme),
                          DebugIO::kGrpcURLScheme)) {
      return true;
    } else {
      if (DebugGrpcIO::IsReadGateOpen(debug_url, watch_key)) {
        return true;
      }
    }
  }
  return false;
#else
  return true;
#endif
}
bool DebugIO::IsDebugURLGateOpen(const string& watch_key,
const string& debug_url) {
#ifndef PLATFORM_WINDOWS
if (debug_url != kGrpcURLScheme) {
return true;
} else {
return DebugGrpcIO::IsReadGateOpen(debug_url, watch_key);
}
#else
return true;
#endif
}
// Closes any persistent resource associated with `debug_url`. Only grpc://
// URLs hold a stream that needs closing; all other schemes are a no-op.
Status DebugIO::CloseDebugURL(const string& debug_url) {
  if (!absl::StartsWith(debug_url, DebugIO::kGrpcURLScheme)) {
    return absl::OkStatus();
  }
#ifndef PLATFORM_WINDOWS
  return DebugGrpcIO::CloseGrpcStream(debug_url);
#else
  GRPC_OSS_WINDOWS_UNIMPLEMENTED_ERROR;
#endif
}
// Dumps `tensor` as an Event file under `dump_root_dir`, using the standard
// per-device path layout. If `dump_file_path` is non-null, the chosen file
// path is written to it.
Status DebugFileIO::DumpTensorToDir(const DebugNodeKey& debug_node_key,
                                    const Tensor& tensor,
                                    const uint64 wall_time_us,
                                    const string& dump_root_dir,
                                    string* dump_file_path) {
  const string file_path =
      GetDumpFilePath(dump_root_dir, debug_node_key, wall_time_us);
  if (dump_file_path != nullptr) {
    *dump_file_path = file_path;
  }
  return DumpTensorToEventFile(debug_node_key, tensor, wall_time_us, file_path);
}
// Like DumpTensorToDir(), but uses the step-scoped node-io-dump path layout
// (see GetDumpFilePathForNodeDumping) keyed by `step_id`.
Status DebugFileIO::DumpTensorToDirForNodeDumping(
    const DebugNodeKey& debug_node_key, const Tensor& tensor,
    const uint64 wall_time_us, const string& dump_root_dir,
    string* dump_file_path, const int64_t step_id) {
  const string file_path = GetDumpFilePathForNodeDumping(
      dump_root_dir, debug_node_key, wall_time_us, step_id);
  if (dump_file_path != nullptr) {
    *dump_file_path = file_path;
  }
  return DumpTensorToEventFile(debug_node_key, tensor, wall_time_us, file_path);
}
// Builds the dump path <dump_root>/<device_path>/<node>_<slot>_<debug_op>,
// de-duplicated with a "_<wall_time_us>" (and, on collision, "-N") suffix.
string DebugFileIO::GetDumpFilePath(const string& dump_root_dir,
                                    const DebugNodeKey& debug_node_key,
                                    const uint64 wall_time_us) {
  const string base_name =
      strings::StrCat(debug_node_key.node_name, "_",
                      debug_node_key.output_slot, "_", debug_node_key.debug_op);
  const string path_sans_timestamp =
      io::JoinPath(dump_root_dir, debug_node_key.device_path, base_name);
  return AppendTimestampToFilePath(path_sans_timestamp, wall_time_us);
}
// Builds the node-io-dump path
// <dump_root>/node-io-dump/step-<id>/<io_of_node>:<in|out>:<io_index>,
// with "/" in the node name replaced by "-" and a timestamp suffix appended
// for uniqueness.
string DebugFileIO::GetDumpFilePathForNodeDumping(
    const string& dump_root_dir, const DebugNodeKey& debug_node_key,
    const uint64 wall_time_us, const int64_t step_id) {
  return AppendTimestampToFilePath(
      io::JoinPath(
          dump_root_dir, kDumpSubDirName, strings::StrCat("step-", step_id),
          strings::StrCat(
              absl::StrReplaceAll(debug_node_key.io_of_node, {{"/", "-"}}), ":",
              debug_node_key.is_input ? "in" : "out", ":",
              debug_node_key.io_index)),
      wall_time_us);
}
// Serializes `event_proto` and writes it to <dir_name>/<file_name>,
// creating `dir_name` (recursively) first. File-creation and close failures
// are fatal (TF_CHECK_OK); the Append status is deliberately ignored.
Status DebugFileIO::DumpEventProtoToFile(const Event& event_proto,
                                         const string& dir_name,
                                         const string& file_name) {
  Env* env(Env::Default());
  Status s = RecursiveCreateDir(env, dir_name);
  if (!s.ok()) {
    return Status(absl::StatusCode::kFailedPrecondition,
                  strings::StrCat("Failed to create directory ", dir_name,
                                  ", due to: ", s.message()));
  }
  const string file_path = io::JoinPath(dir_name, file_name);
  string event_str;
  event_proto.SerializeToString(&event_str);
  std::unique_ptr<WritableFile> f = nullptr;
  TF_CHECK_OK(env->NewWritableFile(file_path, &f));
  f->Append(event_str).IgnoreError();
  TF_CHECK_OK(f->Close());
  return absl::OkStatus();
}
// Wraps `tensor` as Event protos (no chunking: limit 0 yields one event)
// and writes the first event to `file_path`.
Status DebugFileIO::DumpTensorToEventFile(const DebugNodeKey& debug_node_key,
                                          const Tensor& tensor,
                                          const uint64 wall_time_us,
                                          const string& file_path) {
  std::vector<Event> events;
  TF_RETURN_IF_ERROR(
      WrapTensorAsEvents(debug_node_key, tensor, wall_time_us, 0, &events));
  return DumpEventProtoToFile(events[0], string(io::Dirname(file_path)),
                              string(io::Basename(file_path)));
}
// Creates `dir` and any missing ancestor directories ("mkdir -p"
// semantics). Returns FailedPrecondition if an ancestor exists as a regular
// file or cannot be created, and Aborted if `dir` itself cannot be created.
Status DebugFileIO::RecursiveCreateDir(Env* env, const string& dir) {
  if (env->FileExists(dir).ok() && env->IsDirectory(dir).ok()) {
    // Already exists as a directory: nothing to do.
    return absl::OkStatus();
  }
  string parent_dir(io::Dirname(dir));
  if (!env->FileExists(parent_dir).ok()) {
    // The parent does not exist yet: create it first, recursively.
    Status s = RecursiveCreateDir(env, parent_dir);
    if (!s.ok()) {
      return Status(
          absl::StatusCode::kFailedPrecondition,
          strings::StrCat("Failed to create directory ", parent_dir));
    }
  } else if (env->FileExists(parent_dir).ok() &&
             !env->IsDirectory(parent_dir).ok()) {
    // The parent exists but is a regular file: cannot create a child dir.
    return Status(absl::StatusCode::kFailedPrecondition,
                  strings::StrCat("Failed to create directory ", parent_dir,
                                  " because the path exists as a file "));
  }
  env->CreateDir(dir).IgnoreError();
  if (env->FileExists(dir).ok() && env->IsDirectory(dir).ok()) {
    return absl::OkStatus();
  } else {
    // BUGFIX: report the directory that actually failed (`dir`), not its
    // parent, which was handled successfully above.
    return Status(absl::StatusCode::kAborted,
                  strings::StrCat("Failed to create directory ", dir));
  }
}
// Default process-wide dump budget: 100 GiB (overridable via the
// TFDBG_DISK_BYTES_LIMIT environment variable; see requestDiskByteUsage).
const uint64 DebugFileIO::kDefaultGlobalDiskBytesLimit = 107374182400L;
// 0 means "not yet initialized"; lazily resolved on first use.
uint64 DebugFileIO::global_disk_bytes_limit_ = 0;
uint64 DebugFileIO::disk_bytes_used_ = 0;
// Guards the two counters above.
mutex DebugFileIO::bytes_mu_(LINKER_INITIALIZED);
// Atomically requests `bytes` of the process-wide dump budget. Returns true
// (and charges the budget) if the request fits; false otherwise. A request
// of 0 bytes always succeeds without charging.
bool DebugFileIO::requestDiskByteUsage(uint64 bytes) {
  mutex_lock l(bytes_mu_);
  if (global_disk_bytes_limit_ == 0) {
    // Lazy one-time init: take the limit from TFDBG_DISK_BYTES_LIMIT if set,
    // otherwise fall back to the 100 GiB default.
    const char* env_tfdbg_disk_bytes_limit = getenv("TFDBG_DISK_BYTES_LIMIT");
    if (env_tfdbg_disk_bytes_limit == nullptr ||
        strlen(env_tfdbg_disk_bytes_limit) == 0) {
      global_disk_bytes_limit_ = kDefaultGlobalDiskBytesLimit;
    } else {
      strings::safe_strtou64(string(env_tfdbg_disk_bytes_limit),
                             &global_disk_bytes_limit_);
    }
  }
  if (bytes == 0) {
    return true;
  }
  if (disk_bytes_used_ + bytes < global_disk_bytes_limit_) {
    disk_bytes_used_ += bytes;
    return true;
  } else {
    return false;
  }
}
// Resets the used-bytes counter to zero (the limit itself is unchanged).
void DebugFileIO::resetDiskByteUsage() {
  mutex_lock l(bytes_mu_);
  disk_bytes_used_ = 0;
}
#ifndef PLATFORM_WINDOWS
// `server_stream_addr` is "host:port" (no scheme); url_ caches the full
// grpc:// form used as the key for gating state.
DebugGrpcChannel::DebugGrpcChannel(const string& server_stream_addr)
    : server_stream_addr_(server_stream_addr),
      url_(strings::StrCat(DebugIO::kGrpcURLScheme, server_stream_addr)) {}
// Opens an insecure gRPC channel to the debug server and starts the
// bidirectional SendEvents stream. Blocks up to `timeout_micros` for the
// channel to become connected; returns FailedPrecondition on timeout.
Status DebugGrpcChannel::Connect(const int64_t timeout_micros) {
  ::grpc::ChannelArguments args;
  // Allow arbitrarily large messages (chunking is handled at a higher level).
  args.SetInt(GRPC_ARG_MAX_MESSAGE_LENGTH, std::numeric_limits<int32>::max());
  args.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, 1000);
  channel_ = ::grpc::CreateCustomChannel(
      server_stream_addr_, ::grpc::InsecureChannelCredentials(), args);
  if (!channel_->WaitForConnected(
          gpr_time_add(gpr_now(GPR_CLOCK_REALTIME),
                       gpr_time_from_micros(timeout_micros, GPR_TIMESPAN)))) {
    return errors::FailedPrecondition(
        "Failed to connect to gRPC channel at ", server_stream_addr_,
        " within a timeout of ", timeout_micros / 1e6, " s.");
  }
  stub_ = grpc::EventListener::NewStub(channel_);
  reader_writer_ = stub_->SendEvents(&ctx_);
  return absl::OkStatus();
}
// Writes one Event onto the stream; serialized under mu_ because the gRPC
// reader/writer is not safe for concurrent writes. Returns false on failure.
bool DebugGrpcChannel::WriteEvent(const Event& event) {
  mutex_lock l(mu_);
  return reader_writer_->Write(event);
}
// Reads one EventReply from the stream; serialized under mu_. Returns false
// when the stream is exhausted or broken.
bool DebugGrpcChannel::ReadEventReply(EventReply* event_reply) {
  mutex_lock l(mu_);
  return reader_writer_->Read(event_reply);
}
// Drains up to `max_replies` EventReply messages from the server (0 = drain
// until the stream ends) and applies each embedded debug-op state change to
// the per-URL gating table.
void DebugGrpcChannel::ReceiveAndProcessEventReplies(const size_t max_replies) {
  EventReply event_reply;
  size_t num_replies = 0;
  while ((max_replies == 0 || ++num_replies <= max_replies) &&
         ReadEventReply(&event_reply)) {
    for (const EventReply::DebugOpStateChange& debug_op_state_change :
         event_reply.debug_op_state_changes()) {
      // Watch keys have the form "<node_name>:<output_slot>:<debug_op>".
      string watch_key = strings::StrCat(debug_op_state_change.node_name(), ":",
                                         debug_op_state_change.output_slot(),
                                         ":", debug_op_state_change.debug_op());
      DebugGrpcIO::SetDebugNodeKeyGrpcState(url_, watch_key,
                                            debug_op_state_change.state());
    }
  }
}
// Half-closes the write side, drains all remaining server replies, then
// finishes the stream. Returns FailedPrecondition if Finish() reports an
// error.
Status DebugGrpcChannel::ReceiveServerRepliesAndClose() {
  reader_writer_->WritesDone();
  ReceiveAndProcessEventReplies(0);
  if (reader_writer_->Finish().ok()) {
    return absl::OkStatus();
  } else {
    return Status(absl::StatusCode::kFailedPrecondition,
                  "Failed to close debug GRPC stream.");
  }
}
// Guards the stream-channel map returned by GetStreamChannels().
mutex DebugGrpcIO::streams_mu_(LINKER_INITIALIZED);
// Channel connection timeout: 900 s, in microseconds.
int64_t DebugGrpcIO::channel_connection_timeout_micros_ = 900 * 1000 * 1000;
// Per-message chunking limit (just under gRPC's 4 MB default) and the
// maximum length of a protobuf varint length prefix.
const size_t DebugGrpcIO::kGrpcMessageSizeLimitBytes = 4000 * 1024;
const size_t DebugGrpcIO::kGrpcMaxVarintLengthSize = 6;
// Returns the process-wide map of stream URL -> channel (lazily created,
// intentionally leaked). Callers must hold streams_mu_ when mutating it.
std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
DebugGrpcIO::GetStreamChannels() {
  static std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
      stream_channels =
          new std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>();
  return stream_channels;
}
// Sends `tensor` over the gRPC debug stream at `grpc_stream_url`, chunked
// to kGrpcMessageSizeLimitBytes. When `gated` is true and the node's read
// gate is closed, the send is silently skipped. If the node's write gate is
// open, one server reply is consumed afterwards so state changes take
// effect.
Status DebugGrpcIO::SendTensorThroughGrpcStream(
    const DebugNodeKey& debug_node_key, const Tensor& tensor,
    const uint64 wall_time_us, const string& grpc_stream_url,
    const bool gated) {
  if (gated &&
      !IsReadGateOpen(grpc_stream_url, debug_node_key.debug_node_name)) {
    return absl::OkStatus();
  } else {
    std::vector<Event> events;
    TF_RETURN_IF_ERROR(WrapTensorAsEvents(debug_node_key, tensor, wall_time_us,
                                          kGrpcMessageSizeLimitBytes, &events));
    for (const Event& event : events) {
      TF_RETURN_IF_ERROR(
          SendEventProtoThroughGrpcStream(event, grpc_stream_url));
    }
    if (IsWriteGateOpen(grpc_stream_url, debug_node_key.debug_node_name)) {
      DebugGrpcChannel* debug_grpc_channel = nullptr;
      TF_RETURN_IF_ERROR(
          GetOrCreateDebugGrpcChannel(grpc_stream_url, &debug_grpc_channel));
      debug_grpc_channel->ReceiveAndProcessEventReplies(1);
    }
    return absl::OkStatus();
  }
}
// Reads a single EventReply from the stream at `grpc_stream_url`, creating
// the channel on first use. Returns Cancelled when the read fails (e.g. the
// stream has been closed).
Status DebugGrpcIO::ReceiveEventReplyProtoThroughGrpcStream(
    EventReply* event_reply, const string& grpc_stream_url) {
  DebugGrpcChannel* channel = nullptr;
  TF_RETURN_IF_ERROR(GetOrCreateDebugGrpcChannel(grpc_stream_url, &channel));
  if (!channel->ReadEventReply(event_reply)) {
    return errors::Cancelled(strings::StrCat(
        "Reading EventReply from stream URL ", grpc_stream_url, " failed."));
  }
  return absl::OkStatus();
}
// Looks up (or lazily creates and connects) the channel for
// `grpc_stream_url`. The scheme prefix and any "/path" suffix are stripped
// to obtain the "host:port" dial target, but the full URL remains the map
// key. The whole check-create-insert sequence runs under streams_mu_.
Status DebugGrpcIO::GetOrCreateDebugGrpcChannel(
    const string& grpc_stream_url, DebugGrpcChannel** debug_grpc_channel) {
  const string addr_with_path =
      absl::StartsWith(grpc_stream_url, DebugIO::kGrpcURLScheme)
          ? grpc_stream_url.substr(strlen(DebugIO::kGrpcURLScheme))
          : grpc_stream_url;
  const string server_stream_addr =
      addr_with_path.substr(0, addr_with_path.find('/'));
  {
    mutex_lock l(streams_mu_);
    std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
        stream_channels = GetStreamChannels();
    if (stream_channels->find(grpc_stream_url) == stream_channels->end()) {
      std::unique_ptr<DebugGrpcChannel> channel(
          new DebugGrpcChannel(server_stream_addr));
      TF_RETURN_IF_ERROR(channel->Connect(channel_connection_timeout_micros_));
      stream_channels->insert(
          std::make_pair(grpc_stream_url, std::move(channel)));
    }
    *debug_grpc_channel = (*stream_channels)[grpc_stream_url].get();
  }
  return absl::OkStatus();
}
// Writes one Event proto to the stream at `grpc_stream_url`, creating the
// channel on first use. If `receive_reply` is true, one server reply is
// consumed afterwards. Returns Cancelled on write failure.
Status DebugGrpcIO::SendEventProtoThroughGrpcStream(
    const Event& event_proto, const string& grpc_stream_url,
    const bool receive_reply) {
  DebugGrpcChannel* debug_grpc_channel;
  TF_RETURN_IF_ERROR(
      GetOrCreateDebugGrpcChannel(grpc_stream_url, &debug_grpc_channel));
  bool write_ok = debug_grpc_channel->WriteEvent(event_proto);
  if (!write_ok) {
    return errors::Cancelled(strings::StrCat("Write event to stream URL ",
                                             grpc_stream_url, " failed."));
  }
  if (receive_reply) {
    debug_grpc_channel->ReceiveAndProcessEventReplies(1);
  }
  return absl::OkStatus();
}
bool DebugGrpcIO::IsReadGateOpen(const string& grpc_debug_url,
const string& watch_key) {
const DebugNodeName2State* enabled_node_to_state =
GetEnabledDebugOpStatesAtUrl(grpc_debug_url);
return enabled_node_to_state->find(watch_key) != enabled_node_to_state->end();
}
bool DebugGrpcIO::IsWriteGateOpen(const string& grpc_debug_url,
const string& watch_key) {
const DebugNodeName2State* enabled_node_to_state =
GetEnabledDebugOpStatesAtUrl(grpc_debug_url);
auto it = enabled_node_to_state->find(watch_key);
if (it == enabled_node_to_state->end()) {
return false;
} else {
return it->second == EventReply::DebugOpStateChange::READ_WRITE;
}
}
// Closes and removes the channel for `grpc_stream_url` if one exists,
// draining server replies first; a URL with no open channel is a no-op.
Status DebugGrpcIO::CloseGrpcStream(const string& grpc_stream_url) {
  mutex_lock l(streams_mu_);
  std::unordered_map<string, std::unique_ptr<DebugGrpcChannel>>*
      stream_channels = GetStreamChannels();
  if (stream_channels->find(grpc_stream_url) != stream_channels->end()) {
    Status s =
        (*stream_channels)[grpc_stream_url]->ReceiveServerRepliesAndClose();
    (*stream_channels).erase(grpc_stream_url);
    return s;
  } else {
    return absl::OkStatus();
  }
}
// Returns the process-wide map of grpc URL -> (watch key -> gate state),
// lazily created and intentionally leaked.
std::unordered_map<string, DebugGrpcIO::DebugNodeName2State>*
DebugGrpcIO::GetEnabledDebugOpStates() {
  static std::unordered_map<string, DebugNodeName2State>*
      enabled_debug_op_states =
          new std::unordered_map<string, DebugNodeName2State>();
  return enabled_debug_op_states;
}
// Returns the per-URL debug-op state map for `grpc_debug_url`, creating an
// empty one on first access. Creation is guarded by a (leaked) static mutex.
DebugGrpcIO::DebugNodeName2State* DebugGrpcIO::GetEnabledDebugOpStatesAtUrl(
    const string& grpc_debug_url) {
  static mutex* debug_ops_state_mu = new mutex();
  std::unordered_map<string, DebugNodeName2State>* states =
      GetEnabledDebugOpStates();
  mutex_lock l(*debug_ops_state_mu);
  // operator[] default-constructs the per-URL state map on first access,
  // which is exactly what the previous explicit find/insert sequence did.
  return &(*states)[grpc_debug_url];
}
// Sets (or clears) the gRPC gating state for `watch_key` at `grpc_debug_url`.
// DISABLED removes the key (logging an error if it was not enabled);
// STATE_UNSPECIFIED is ignored; any other state is stored as-is.
// NOTE(review): the returned map is mutated here without holding the mutex
// used inside GetEnabledDebugOpStatesAtUrl — presumably callers serialize
// access externally; confirm thread-safety expectations.
void DebugGrpcIO::SetDebugNodeKeyGrpcState(
    const string& grpc_debug_url, const string& watch_key,
    const EventReply::DebugOpStateChange::State new_state) {
  DebugNodeName2State* states = GetEnabledDebugOpStatesAtUrl(grpc_debug_url);
  if (new_state == EventReply::DebugOpStateChange::DISABLED) {
    if (states->find(watch_key) == states->end()) {
      // Disabling a key that was never enabled is a caller bug worth logging.
      LOG(ERROR) << "Attempt to disable a watch key that is not currently "
                 << "enabled at " << grpc_debug_url << ": " << watch_key;
    } else {
      states->erase(watch_key);
    }
  } else if (new_state != EventReply::DebugOpStateChange::STATE_UNSPECIFIED) {
    (*states)[watch_key] = new_state;
  }
}
// Removes all enabled watch keys for all gRPC debug URLs.
// NOTE(review): clears the global map without acquiring the mutex used
// during per-URL creation — confirm callers serialize this.
void DebugGrpcIO::ClearEnabledWatchKeys() {
  GetEnabledDebugOpStates()->clear();
}
#endif
} | #include "tensorflow/core/debug/debug_io_utils.h"
#include <cstdlib>
#include <memory>
#include <unordered_set>
#include "tensorflow/core/debug/debug_callback_registry.h"
#include "tensorflow/core/debug/debug_node_key.h"
#include "tensorflow/core/debug/debugger_event_metadata.pb.h"
#include "tensorflow/core/framework/summary.pb.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/notification.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/str_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/util/event.pb.h"
namespace tensorflow {
namespace {
// Test fixture that lazily builds two small tensors used across the
// DebugIO/DebugFileIO tests: a 2x2 float tensor with mixed-sign values and
// a length-2 string tensor.
class DebugIOUtilsTest : public ::testing::Test {
 public:
  void Initialize() {
    env_ = Env::Default();
    tensor_a_ = std::make_unique<Tensor>(DT_FLOAT, TensorShape({2, 2}));
    tensor_a_->flat<float>()(0) = 5.0;
    tensor_a_->flat<float>()(1) = 3.0;
    tensor_a_->flat<float>()(2) = -1.0;
    tensor_a_->flat<float>()(3) = 0.0;
    // Use make_unique for consistency with tensor_a_ (was reset(new ...)).
    tensor_b_ = std::make_unique<Tensor>(DT_STRING, TensorShape({2}));
    tensor_b_->flat<tstring>()(0) = "corge";
    tensor_b_->flat<tstring>()(1) = "garply";
  }
  Env* env_;                         // Not owned; Env::Default() singleton.
  std::unique_ptr<Tensor> tensor_a_; // 2x2 float tensor.
  std::unique_ptr<Tensor> tensor_b_; // Length-2 string tensor.
};
// Verifies the derived fields (debug_node_name, device_path) computed by the
// DebugNodeKey constructor from its four inputs.
TEST_F(DebugIOUtilsTest, ConstructDebugNodeKey) {
  DebugNodeKey debug_node_key("/job:worker/replica:1/task:0/device:GPU:2",
                              "hidden_1/MatMul", 0, "DebugIdentity");
  EXPECT_EQ("/job:worker/replica:1/task:0/device:GPU:2",
            debug_node_key.device_name);
  EXPECT_EQ("hidden_1/MatMul", debug_node_key.node_name);
  EXPECT_EQ(0, debug_node_key.output_slot);
  EXPECT_EQ("DebugIdentity", debug_node_key.debug_op);
  EXPECT_EQ("hidden_1/MatMul:0:DebugIdentity", debug_node_key.debug_node_name);
  EXPECT_EQ("_tfdbg_device_,job_worker,replica_1,task_0,device_GPU_2",
            debug_node_key.device_path);
}
// Equality is field-wise: keys differing in node_name or debug_op compare
// unequal, identical keys compare equal.
TEST_F(DebugIOUtilsTest, EqualityOfDebugNodeKeys) {
  const DebugNodeKey debug_node_key_1("/job:worker/replica:1/task:0/gpu:2",
                                      "hidden_1/MatMul", 0, "DebugIdentity");
  const DebugNodeKey debug_node_key_2("/job:worker/replica:1/task:0/gpu:2",
                                      "hidden_1/MatMul", 0, "DebugIdentity");
  const DebugNodeKey debug_node_key_3("/job:worker/replica:1/task:0/gpu:2",
                                      "hidden_1/BiasAdd", 0, "DebugIdentity");
  const DebugNodeKey debug_node_key_4("/job:worker/replica:1/task:0/gpu:2",
                                      "hidden_1/MatMul", 0,
                                      "DebugNumericSummary");
  EXPECT_EQ(debug_node_key_1, debug_node_key_2);
  EXPECT_NE(debug_node_key_1, debug_node_key_3);
  EXPECT_NE(debug_node_key_1, debug_node_key_4);
  EXPECT_NE(debug_node_key_3, debug_node_key_4);
}
// DebugNodeKey works as an unordered_set element: equal keys dedupe on
// insert and an equal (but distinct) key can be used for erase.
TEST_F(DebugIOUtilsTest, DebugNodeKeysIsHashable) {
  const DebugNodeKey debug_node_key_1("/job:worker/replica:1/task:0/gpu:2",
                                      "hidden_1/MatMul", 0, "DebugIdentity");
  const DebugNodeKey debug_node_key_2("/job:worker/replica:1/task:0/gpu:2",
                                      "hidden_1/MatMul", 0, "DebugIdentity");
  const DebugNodeKey debug_node_key_3("/job:worker/replica:1/task:0/gpu:2",
                                      "hidden_1/BiasAdd", 0, "DebugIdentity");
  std::unordered_set<DebugNodeKey> keys;
  keys.insert(debug_node_key_1);
  ASSERT_EQ(1, keys.size());
  keys.insert(debug_node_key_3);
  ASSERT_EQ(2, keys.size());
  keys.erase(debug_node_key_2);  // Equal to key_1, so removes it.
  ASSERT_EQ(1, keys.size());
}
// Dumps the 2x2 float tensor to a temp dir, reads the written Event back
// and verifies node name and all element values, then cleans up.
TEST_F(DebugIOUtilsTest, DumpFloatTensorToFileSunnyDay) {
  Initialize();
  const string test_dir =
      strings::StrCat(testing::TmpDir(), "/DumpFloatTensorToFileSunnyDay");
  if (!env_->FileExists(test_dir).ok()) {
    ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
  }
  const uint64 wall_time = env_->NowMicros();
  const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
                                   "foo/bar/qux/tensor_a", 0, "DebugIdentity");
  string dump_file_path;
  TF_ASSERT_OK(DebugFileIO::DumpTensorToDir(
      kDebugNodeKey, *tensor_a_, wall_time, test_dir, &dump_file_path));
  Event event;
  TF_ASSERT_OK(ReadEventFromFile(dump_file_path, &event));
  ASSERT_GE(wall_time, event.wall_time());
  ASSERT_EQ(1, event.summary().value().size());
  ASSERT_EQ(kDebugNodeKey.debug_node_name,
            event.summary().value(0).node_name());
  Tensor a_prime(DT_FLOAT);
  ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
  ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
  for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
    ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
  }
  // Cleanup: the dump dir must be fully removable.
  int64_t undeleted_files = 0;
  int64_t undeleted_dirs = 0;
  ASSERT_TRUE(
      env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
          .ok());
  ASSERT_EQ(0, undeleted_files);
  ASSERT_EQ(0, undeleted_dirs);
}
// Same round-trip as the float test, but for the string tensor; also checks
// the DebuggerEventMetadata (device, output_slot) embedded as plugin JSON.
TEST_F(DebugIOUtilsTest, DumpStringTensorToFileSunnyDay) {
  Initialize();
  const string test_dir =
      strings::StrCat(testing::TmpDir(), "/DumpStringTensorToFileSunnyDay");
  if (!env_->FileExists(test_dir).ok()) {
    ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
  }
  const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
                                   "quux/grault/tensor_b", 1, "DebugIdentity");
  const uint64 wall_time = env_->NowMicros();
  string dump_file_name;
  Status s = DebugFileIO::DumpTensorToDir(kDebugNodeKey, *tensor_b_, wall_time,
                                          test_dir, &dump_file_name);
  ASSERT_TRUE(s.ok());
  Event event;
  TF_ASSERT_OK(ReadEventFromFile(dump_file_name, &event));
  ASSERT_GE(wall_time, event.wall_time());
  ASSERT_EQ(1, event.summary().value().size());
  ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
  ASSERT_EQ(kDebugNodeKey.debug_node_name,
            event.summary().value(0).node_name());
  // Metadata travels as JSON in the summary's plugin_data content.
  third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
  auto status = tensorflow::protobuf::util::JsonStringToMessage(
      event.summary().value(0).metadata().plugin_data().content(), &metadata);
  ASSERT_TRUE(status.ok());
  ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
  ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
  Tensor b_prime(DT_STRING);
  ASSERT_TRUE(b_prime.FromProto(event.summary().value(0).tensor()));
  ASSERT_EQ(tensor_b_->shape(), b_prime.shape());
  for (int i = 0; i < b_prime.flat<tstring>().size(); ++i) {
    ASSERT_EQ(tensor_b_->flat<tstring>()(i), b_prime.flat<tstring>()(i));
  }
  int64_t undeleted_files = 0;
  int64_t undeleted_dirs = 0;
  ASSERT_TRUE(
      env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
          .ok());
  ASSERT_EQ(0, undeleted_files);
  ASSERT_EQ(0, undeleted_dirs);
}
// Failure path: a regular file occupies the path where the dump would need
// to create a directory, so DumpTensorToDir must return a non-OK status.
TEST_F(DebugIOUtilsTest, DumpTensorToFileCannotCreateDirectory) {
  Initialize();
  const string test_dir = strings::StrCat(
      testing::TmpDir(), "/DumpTensorToFileCannotCreateDirectory");
  if (!env_->FileExists(test_dir).ok()) {
    ASSERT_TRUE(env_->RecursivelyCreateDir(test_dir).ok());
  }
  const string kDeviceName = "/job:localhost/replica:0/task:0/cpu:0";
  const DebugNodeKey kDebugNodeKey(kDeviceName, "baz/tensor_a", 0,
                                   "DebugIdentity");
  const string txt_file_dir =
      io::JoinPath(test_dir, DebugNodeKey::DeviceNameToDevicePath(kDeviceName));
  const string txt_file_name = io::JoinPath(txt_file_dir, "baz");
  if (!env_->FileExists(txt_file_dir).ok()) {
    ASSERT_TRUE(env_->RecursivelyCreateDir(txt_file_dir).ok());
  }
  ASSERT_EQ(error::Code::NOT_FOUND, env_->FileExists(txt_file_name).code());
  // Plant a regular file named "baz" where the dump path needs a directory.
  std::unique_ptr<WritableFile> file;
  ASSERT_TRUE(env_->NewWritableFile(txt_file_name, &file).ok());
  TF_EXPECT_OK(file->Append("text in baz"));
  TF_EXPECT_OK(file->Flush());
  TF_ASSERT_OK(file->Close());
  ASSERT_TRUE(env_->FileExists(txt_file_name).ok());
  ASSERT_FALSE(env_->IsDirectory(txt_file_name).ok());
  const uint64 wall_time = env_->NowMicros();
  string dump_file_name;
  Status s = DebugFileIO::DumpTensorToDir(kDebugNodeKey, *tensor_a_, wall_time,
                                          test_dir, &dump_file_name);
  ASSERT_FALSE(s.ok());
  int64_t undeleted_files = 0;
  int64_t undeleted_dirs = 0;
  ASSERT_TRUE(
      env_->DeleteRecursively(test_dir, &undeleted_files, &undeleted_dirs)
          .ok());
  ASSERT_EQ(0, undeleted_files);
  ASSERT_EQ(0, undeleted_dirs);
}
// Publishes one tensor to several file:// URLs at once and verifies each
// dump root receives an identical, complete Event.
TEST_F(DebugIOUtilsTest, PublishTensorToMultipleFileURLs) {
  Initialize();
  const int kNumDumpRoots = 3;
  const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
                                   "foo/bar/qux/tensor_a", 0, "DebugIdentity");
  const uint64 wall_time = env_->NowMicros();
  std::vector<string> dump_roots;
  std::vector<string> dump_file_paths;
  std::vector<string> urls;
  for (int i = 0; i < kNumDumpRoots; ++i) {
    string dump_root = strings::StrCat(testing::TmpDir(),
                                       "/PublicTensorToMultipleFileUrls_", i);
    dump_roots.push_back(dump_root);
    dump_file_paths.push_back(
        DebugFileIO::GetDumpFilePath(dump_root, kDebugNodeKey, wall_time));
    // Fixed: the "file://" scheme prefix had been truncated at the "//".
    urls.push_back(strings::StrCat("file://", dump_root));
  }
  for (int i = 1; i < kNumDumpRoots; ++i) {
    ASSERT_NE(dump_roots[0], dump_roots[i]);
  }
  Status s =
      DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
  ASSERT_TRUE(s.ok());
  for (int i = 0; i < kNumDumpRoots; ++i) {
    Event event;
    TF_ASSERT_OK(ReadEventFromFile(dump_file_paths[i], &event));
    ASSERT_GE(wall_time, event.wall_time());
    ASSERT_EQ(1, event.summary().value().size());
    ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
    ASSERT_EQ(kDebugNodeKey.debug_node_name,
              event.summary().value(0).node_name());
    third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
    auto status = tensorflow::protobuf::util::JsonStringToMessage(
        event.summary().value(0).metadata().plugin_data().content(), &metadata);
    ASSERT_TRUE(status.ok());
    ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
    ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
    Tensor a_prime(DT_FLOAT);
    ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
    ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
    for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
      ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
    }
  }
  for (int i = 0; i < kNumDumpRoots; ++i) {
    int64_t undeleted_files = 0;
    int64_t undeleted_dirs = 0;
    ASSERT_TRUE(env_->DeleteRecursively(dump_roots[i], &undeleted_files,
                                        &undeleted_dirs)
                    .ok());
    ASSERT_EQ(0, undeleted_files);
    ASSERT_EQ(0, undeleted_dirs);
  }
}
// Publishes a tensor to a memcbk:// URL and verifies the registered
// in-memory callback fires with matching key and tensor contents.
TEST_F(DebugIOUtilsTest, PublishTensorToMemoryCallback) {
  Initialize();
  const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
                                   "foo/bar/qux/tensor_a", 0, "DebugIdentity");
  const uint64 wall_time = env_->NowMicros();
  bool called = false;
  // Fixed: the URL had been truncated at the "//". The callback id in the
  // URL must match the name registered below.
  std::vector<string> urls = {"memcbk://test_callback"};
  auto* callback_registry = DebugCallbackRegistry::singleton();
  callback_registry->RegisterCallback(
      "test_callback", [this, &kDebugNodeKey, &called](const DebugNodeKey& key,
                                                       const Tensor& tensor) {
        called = true;
        ASSERT_EQ(kDebugNodeKey.device_name, key.device_name);
        ASSERT_EQ(kDebugNodeKey.node_name, key.node_name);
        ASSERT_EQ(tensor_a_->shape(), tensor.shape());
        for (int i = 0; i < tensor.flat<float>().size(); ++i) {
          ASSERT_EQ(tensor_a_->flat<float>()(i), tensor.flat<float>()(i));
        }
      });
  Status s =
      DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
  ASSERT_TRUE(s.ok());
  ASSERT_TRUE(called);
  callback_registry->UnregisterCallback("test_callback");
}
// Publishes the same tensor from several threads to distinct dump roots that
// share a common base directory, then verifies every dump independently.
TEST_F(DebugIOUtilsTest, PublishTensorConcurrentlyToPartiallyOverlappingPaths) {
  Initialize();
  const int kConcurrentPubs = 3;
  const DebugNodeKey kDebugNodeKey("/job:localhost/replica:0/task:0/cpu:0",
                                   "tensor_a", 0, "DebugIdentity");
  thread::ThreadPool* tp =
      new thread::ThreadPool(Env::Default(), "test", kConcurrentPubs);
  const uint64 wall_time = env_->NowMicros();
  const string dump_root_base =
      strings::StrCat(testing::TmpDir(),
                      "/PublishTensorConcurrentlyToPartiallyOverlappingPaths");
  if (!env_->FileExists(dump_root_base).ok()) {
    ASSERT_TRUE(env_->RecursivelyCreateDir(dump_root_base).ok());
  }
  mutex mu;
  std::vector<string> dump_roots TF_GUARDED_BY(mu);
  std::vector<string> dump_file_paths TF_GUARDED_BY(mu);
  int dump_count TF_GUARDED_BY(mu) = 0;
  int done_count TF_GUARDED_BY(mu) = 0;
  Notification all_done;
  auto fn = [this, &dump_count, &done_count, &mu, &dump_root_base, &dump_roots,
             &dump_file_paths, &wall_time, &kDebugNodeKey, &kConcurrentPubs,
             &all_done]() {
    string dump_root;
    string debug_url;
    {
      // Reserve a unique dump root under the shared base.
      mutex_lock l(mu);
      dump_root =
          strings::StrCat(dump_root_base, "grumpy/", "dump_", dump_count++);
      dump_roots.push_back(dump_root);
      dump_file_paths.push_back(
          DebugFileIO::GetDumpFilePath(dump_root, kDebugNodeKey, wall_time));
      // Fixed: the "file://" scheme prefix had been truncated at the "//".
      debug_url = strings::StrCat("file://", dump_root);
    }
    std::vector<string> urls;
    urls.push_back(debug_url);
    Status s =
        DebugIO::PublishDebugTensor(kDebugNodeKey, *tensor_a_, wall_time, urls);
    ASSERT_TRUE(s.ok());
    {
      mutex_lock l(mu);
      done_count++;
      if (done_count == kConcurrentPubs) {
        all_done.Notify();
      }
    }
  };
  for (int i = 0; i < kConcurrentPubs; ++i) {
    tp->Schedule(fn);
  }
  all_done.WaitForNotification();
  delete tp;
  {
    mutex_lock l(mu);
    for (int i = 1; i < kConcurrentPubs; ++i) {
      ASSERT_NE(dump_roots[0], dump_roots[i]);
    }
    for (int i = 0; i < kConcurrentPubs; ++i) {
      Event event;
      TF_ASSERT_OK(ReadEventFromFile(dump_file_paths[i], &event));
      ASSERT_GE(wall_time, event.wall_time());
      ASSERT_EQ(1, event.summary().value().size());
      ASSERT_EQ(kDebugNodeKey.node_name, event.summary().value(0).tag());
      ASSERT_EQ(kDebugNodeKey.debug_node_name,
                event.summary().value(0).node_name());
      third_party::tensorflow::core::debug::DebuggerEventMetadata metadata;
      auto status = tensorflow::protobuf::util::JsonStringToMessage(
          event.summary().value(0).metadata().plugin_data().content(),
          &metadata);
      ASSERT_TRUE(status.ok());
      ASSERT_EQ(kDebugNodeKey.device_name, metadata.device());
      ASSERT_EQ(kDebugNodeKey.output_slot, metadata.output_slot());
      Tensor a_prime(DT_FLOAT);
      ASSERT_TRUE(a_prime.FromProto(event.summary().value(0).tensor()));
      ASSERT_EQ(tensor_a_->shape(), a_prime.shape());
      for (int i = 0; i < a_prime.flat<float>().size(); ++i) {
        ASSERT_EQ(tensor_a_->flat<float>()(i), a_prime.flat<float>()(i));
      }
    }
    int64_t undeleted_files = 0;
    int64_t undeleted_dirs = 0;
    auto delete_files = env_->DeleteRecursively(
        dump_root_base, &undeleted_files, &undeleted_dirs);
    ASSERT_TRUE(delete_files.ok()) << delete_files;
    ASSERT_EQ(0, undeleted_files);
    ASSERT_EQ(0, undeleted_dirs);
  }
}
// Fixture that resets DebugFileIO's global disk-usage accounting so each
// test starts from a clean byte budget and an unset env-var override.
class DiskUsageLimitTest : public ::testing::Test {
 public:
  void Initialize() {
    // Clear any TFDBG_DISK_BYTES_LIMIT override from earlier tests.
    setenv("TFDBG_DISK_BYTES_LIMIT", "", 1);
    DebugFileIO::resetDiskByteUsage();
    DebugFileIO::global_disk_bytes_limit_ = 0;
  }
};
// A zero-byte request never exceeds the budget.
TEST_F(DiskUsageLimitTest, RequestWithZeroByteIsOkay) {
  Initialize();
  ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(0L));
}
// A single 100 GB request exceeds the default budget outright.
TEST_F(DiskUsageLimitTest, ExceedingLimitAfterOneCall) {
  Initialize();
  ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(100L * 1024L * 1024L * 1024L));
}
// Usage accumulates: a second 50 GB request fails once the first has been
// granted, but a small request still fits.
TEST_F(DiskUsageLimitTest, ExceedingLimitAfterTwoCalls) {
  Initialize();
  ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
  ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
  ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1024L));
}
// resetDiskByteUsage() zeroes the accumulated usage, allowing a previously
// rejected request size to succeed again.
TEST_F(DiskUsageLimitTest, ResetDiskByteUsageWorks) {
  Initialize();
  ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
  ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
  DebugFileIO::resetDiskByteUsage();
  ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(50L * 1024L * 1024L * 1024L));
}
// The TFDBG_DISK_BYTES_LIMIT env var overrides the byte budget: with a
// 1024-byte limit, exactly 1023 bytes (1000 + 23) can be granted.
TEST_F(DiskUsageLimitTest, CustomEnvVarIsObeyed) {
  Initialize();
  setenv("TFDBG_DISK_BYTES_LIMIT", "1024", 1);
  ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(1024L));
  ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1000L));
  ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(23L));
  ASSERT_FALSE(DebugFileIO::requestDiskByteUsage(1L));
  DebugFileIO::resetDiskByteUsage();
  ASSERT_TRUE(DebugFileIO::requestDiskByteUsage(1023L));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/debug/debug_io_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/debug/debug_io_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bef8cc3a-91b3-489e-bded-4866f414d7d9 | cpp | google/arolla | status_macros_backport | arolla/util/status_macros_backport.h | arolla/util/status_macros_backport_test.cc | #ifndef AROLLA_UTIL_STATUS_MACROS_BACKPORT_H_
#define AROLLA_UTIL_STATUS_MACROS_BACKPORT_H_
#include <sstream>
#include <string>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace arolla {
namespace status_macros_backport_internal {
// Returns the message of a Status; used by the *_OK test macros below to
// print a useful failure message for either Status or StatusOr values.
inline absl::string_view GetStatusMessage(const absl::Status& status) {
  return status.message();
}
// Overload for StatusOr<T>: forwards to the wrapped Status's message.
template <typename T>
inline absl::string_view GetStatusMessage(const absl::StatusOr<T>& status_or) {
  return status_or.status().message();
}
// Minimal stand-in for util::StatusBuilder: wraps a Status, lets callers
// stream extra context onto it via operator<<, and converts back to Status
// with the streamed text appended ("; "-joined) and all payloads preserved.
class StatusBuilder {
 public:
  explicit StatusBuilder(const ::absl::Status& status) : status_(status) {}
  explicit StatusBuilder(::absl::Status&& status)
      : status_(std::move(status)) {}

  // Builds the final Status: original message, then "; ", then the streamed
  // suffix (or just the suffix when the original message is empty).
  operator ::absl::Status() const {
    const auto& stream_msg = stream_.str();
    if (stream_msg.empty()) {
      // Nothing streamed — return the wrapped status unchanged.
      return status_;
    }
    ::absl::Status result;
    if (status_.message().empty()) {
      result = absl::Status(status_.code(), stream_msg);
    } else {
      result = absl::Status(status_.code(),
                            absl::StrCat(status_.message(), "; ", stream_msg));
    }
    // Rebuilding a Status drops payloads, so copy them over explicitly.
    status_.ForEachPayload([&](auto type_url, auto payload) {
      result.SetPayload(std::move(type_url), std::move(payload));
    });
    return result;
  }

  // Applies `adaptor` to this builder (moved), returning its result.
  template <typename Adaptor>
  auto With(Adaptor&& adaptor) {
    return std::forward<Adaptor>(adaptor)(std::move(*this));
  }

  // Streams extra context; lvalue overload returns *this for chaining.
  template <typename T>
  StatusBuilder& operator<<(const T& extra_msg) & {
    stream_ << extra_msg;
    return *this;
  }
  // Rvalue overload so a temporary builder can be streamed-to and returned.
  template <typename T>
  StatusBuilder&& operator<<(const T& extra_msg) && {
    stream_ << extra_msg;
    return std::move(*this);
  }

 private:
  ::absl::Status status_;       // The wrapped, original status.
  std::ostringstream stream_;   // Accumulates streamed context.
};
}
// Evaluates `expr`; on non-OK status, returns it from the enclosing function
// wrapped in a StatusBuilder so callers may append context via operator<<.
#define RETURN_IF_ERROR(expr) \
  RETURN_IF_ERROR_IMPL(AROLLA_STATUS_IMPL_CONCAT(status, __COUNTER__), expr)
#define RETURN_IF_ERROR_IMPL(_status, expr) \
  if (auto _status = (expr); _status.ok()) { \
  } else \
    return ::arolla::status_macros_backport_internal::StatusBuilder(_status)
// Assigns a StatusOr's value to `lhs` or returns on error. Dispatches on
// arity: 2-arg form returns the status; the 3-arg form evaluates
// `error_expression` with `_` bound to a StatusBuilder.
#define ASSIGN_OR_RETURN(...) \
  AROLLA_STATUS_IMPL_GET_VARIADIC( \
      (__VA_ARGS__, ASSIGN_OR_RETURN_IMPL_3, ASSIGN_OR_RETURN_IMPL_2)) \
  (__VA_ARGS__)
#define ASSIGN_OR_RETURN_IMPL_2(lhs, rexpr) \
  ASSIGN_OR_RETURN_IMPL_3(lhs, rexpr, _)
#define ASSIGN_OR_RETURN_IMPL_3(lhs, rexpr, error_expression) \
  ASSIGN_OR_RETURN_IMPL(AROLLA_STATUS_IMPL_CONCAT(statusor, __COUNTER__), lhs, \
                        rexpr, error_expression)
#define ASSIGN_OR_RETURN_IMPL(_statusor, lhs, rexpr, error_expression) \
  auto _statusor = (rexpr); \
  if (ABSL_PREDICT_FALSE(!_statusor.ok())) { \
    ::arolla::status_macros_backport_internal::StatusBuilder _( \
        std::move(_statusor).status()); \
    (void)_; \
    return (error_expression); \
  } \
  AROLLA_STATUS_IMPL_UNPARENTHESIZE_IF_PARENTHESIZED(lhs) = \
      (*std::move(_statusor))
// gtest helpers: assert/expect a Status(Or) is OK, printing its message.
#define EXPECT_OK(value) \
  EXPECT_TRUE((value).ok()) \
      << ::arolla::status_macros_backport_internal::GetStatusMessage(value)
#define ASSERT_OK(value) \
  ASSERT_TRUE((value).ok()) \
      << ::arolla::status_macros_backport_internal::GetStatusMessage(value)
#define ASSERT_OK_AND_ASSIGN(lhs, rexpr) \
  ASSERT_OK_AND_ASSIGN_IMPL(AROLLA_STATUS_IMPL_CONCAT(statusor, __COUNTER__), \
                            lhs, rexpr)
#define ASSERT_OK_AND_ASSIGN_IMPL(_statusor, lhs, rexpr) \
  auto _statusor = (rexpr); \
  ASSERT_OK(_statusor); \
  AROLLA_STATUS_IMPL_UNPARENTHESIZE_IF_PARENTHESIZED(lhs) = \
      (*std::move(_statusor));
// --- Preprocessor plumbing below ---
// Token pasting with one level of argument expansion.
#define AROLLA_STATUS_IMPL_CONCAT_INNER(a, b) a##b
#define AROLLA_STATUS_IMPL_CONCAT(a, b) AROLLA_STATUS_IMPL_CONCAT_INNER(a, b)
// Selects the 4th token, used to pick an overload by argument count.
#define AROLLA_STATUS_IMPL_GET_VARIADIC_INNER(_1, _2, _3, NAME, ...) NAME
#define AROLLA_STATUS_IMPL_GET_VARIADIC(args) \
  AROLLA_STATUS_IMPL_GET_VARIADIC_INNER args
// EAT discards its arguments; REM expands to them; EMPTY expands to nothing.
#define AROLLA_STATUS_IMPL_EAT(...)
#define AROLLA_STATUS_IMPL_REM(...) __VA_ARGS__
#define AROLLA_STATUS_IMPL_EMPTY()
// Detects whether the argument list is empty (expands to 1) or not (0).
#define AROLLA_STATUS_IMPL_IS_EMPTY_INNER(...) \
  AROLLA_STATUS_IMPL_IS_EMPTY_INNER_HELPER((__VA_ARGS__, 0, 1))
#define AROLLA_STATUS_IMPL_IS_EMPTY_INNER_HELPER(args) \
  AROLLA_STATUS_IMPL_IS_EMPTY_INNER_I args
#define AROLLA_STATUS_IMPL_IS_EMPTY_INNER_I(e0, e1, is_empty, ...) is_empty
#define AROLLA_STATUS_IMPL_IS_EMPTY(...) \
  AROLLA_STATUS_IMPL_IS_EMPTY_I(__VA_ARGS__)
#define AROLLA_STATUS_IMPL_IS_EMPTY_I(...) \
  AROLLA_STATUS_IMPL_IS_EMPTY_INNER(_, ##__VA_ARGS__)
// Compile-time if on a 0/1 condition token.
#define AROLLA_STATUS_IMPL_IF_1(_Then, _Else) _Then
#define AROLLA_STATUS_IMPL_IF_0(_Then, _Else) _Else
#define AROLLA_STATUS_IMPL_IF(_Cond, _Then, _Else) \
  AROLLA_STATUS_IMPL_CONCAT(AROLLA_STATUS_IMPL_IF_, _Cond)(_Then, _Else)
// True when the argument is wrapped in parentheses (EAT swallows it).
#define AROLLA_STATUS_IMPL_IS_PARENTHESIZED(...) \
  AROLLA_STATUS_IMPL_IS_EMPTY(AROLLA_STATUS_IMPL_EAT __VA_ARGS__)
// Strips one layer of parentheses if present, enabling structured-binding
// lhs forms like ASSIGN_OR_RETURN((auto [a, b]), expr).
#define AROLLA_STATUS_IMPL_UNPARENTHESIZE_IF_PARENTHESIZED(...) \
  AROLLA_STATUS_IMPL_IF(AROLLA_STATUS_IMPL_IS_PARENTHESIZED(__VA_ARGS__), \
                        AROLLA_STATUS_IMPL_REM, AROLLA_STATUS_IMPL_EMPTY()) \
  __VA_ARGS__
}
#endif | #include "arolla/util/status_macros_backport.h"
#include <tuple>
#include "gtest/gtest-spi.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "arolla/util/status_macros_backport.h"
namespace {
// Equality assertion usable inside helper lambdas where gtest's ASSERT_EQ
// cannot be used (it requires a void-returning test body); aborts via
// LOG(FATAL) on mismatch.
#define INTERNAL_ASSERT_EQ(lhs, rhs) \
  [](auto lhs_, auto rhs_) { \
    if (lhs_ != rhs_) { \
      LOG(FATAL) << "assertion " #lhs " == " #rhs " failed: " << lhs_ \
                 << " != " << rhs_; \
    } \
  }(lhs, rhs)
// Helper: wraps a value in an OK StatusOr.
template <typename T>
absl::StatusOr<T> ReturnStatusOrValue(T v) {
  return v;
}
// Helper: returns a StatusOr<int> carrying an UNKNOWN error with `msg`.
absl::StatusOr<int> ReturnStatusOrError(absl::string_view msg) {
  return absl::Status(absl::StatusCode::kUnknown, msg);
}
// Helper: returns an UNKNOWN Status with `msg`.
absl::Status ReturnError(absl::string_view msg) {
  return absl::Status(absl::StatusCode::kUnknown, msg);
}
absl::Status ReturnOk() { return absl::Status(); }
// RETURN_IF_ERROR passes OK through and, on error, appends the streamed
// context after the original message ("; "-joined).
TEST(ExternalStatusTest, ReturnIfError) {
  auto func = []() -> absl::StatusOr<int> {
    RETURN_IF_ERROR(ReturnOk()) << "UNEXPECTED";
    RETURN_IF_ERROR(ReturnError("EXPECTED")) << "ALSO " << "EXPECTED";
    return 5;
  };
  ASSERT_EQ(func().status().message(), "EXPECTED; ALSO EXPECTED");
}
// With an empty original message, the streamed suffix becomes the whole
// message (no leading "; ").
TEST(ExternalStatusTest, ReturnIfErrorAnnotateEmpty) {
  auto err = [] { return absl::InvalidArgumentError(""); };
  auto func = [&]() -> absl::Status {
    RETURN_IF_ERROR(err()) << "suffix";
    return absl::OkStatus();
  };
  ASSERT_EQ(func().message(), "suffix");
}
// Payloads attached to the original status survive the message rewrite.
TEST(ExternalStatusTest, ReturnIfErrorPayload) {
  auto err = [] {
    auto status = absl::InvalidArgumentError("message");
    status.SetPayload("url", absl::Cord("payload"));
    return status;
  };
  auto func = [&]() -> absl::Status {
    RETURN_IF_ERROR(err()) << "suffix";
    return absl::OkStatus();
  };
  ASSERT_EQ(func().message(), "message; suffix");
  ASSERT_EQ(func().GetPayload("url"), "payload");
}
// Two-arg ASSIGN_OR_RETURN: assigns to value, const, reference, and
// parenthesized structured-binding lhs forms; returns the error otherwise.
TEST(ExternalStatusTest, AssignOrReturn) {
  auto func = []() -> absl::StatusOr<int> {
    ASSIGN_OR_RETURN(int value1, ReturnStatusOrValue(1));
    INTERNAL_ASSERT_EQ(1, value1);
    ASSIGN_OR_RETURN(const int value2, ReturnStatusOrValue(2));
    INTERNAL_ASSERT_EQ(2, value2);
    ASSIGN_OR_RETURN(const int& value3, ReturnStatusOrValue(3));
    INTERNAL_ASSERT_EQ(3, value3);
    // Structured bindings require the extra parentheses around the lhs.
    ASSIGN_OR_RETURN((const auto& [tuple1, tuple2]),
                     ReturnStatusOrValue(std::make_tuple(1, 2)));
    INTERNAL_ASSERT_EQ(1, tuple1);
    INTERNAL_ASSERT_EQ(2, tuple2);
    ASSIGN_OR_RETURN(int value4, ReturnStatusOrError("EXPECTED"));
    return value4;
  };
  ASSERT_EQ(func().status().message(), "EXPECTED");
}
// Three-arg ASSIGN_OR_RETURN: `_` is a StatusBuilder usable in the error
// expression; `(void)_` works in void-returning functions.
TEST(ExternalStatusTest, AssignOrReturn3) {
  auto func1 = []() -> absl::StatusOr<int> {
    ASSIGN_OR_RETURN(int value1, ReturnStatusOrValue(1), _ << "NOT EXPECTED");
    INTERNAL_ASSERT_EQ(1, value1);
    ASSIGN_OR_RETURN((const auto& [tuple1, tuple2]),
                     ReturnStatusOrValue(std::make_tuple(1, 2)),
                     _ << "NOT EXPECTED");
    INTERNAL_ASSERT_EQ(1, tuple1);
    INTERNAL_ASSERT_EQ(2, tuple2);
    ASSIGN_OR_RETURN(int value2, ReturnStatusOrError("EXPECTED"),
                     _ << "ALSO " << "EXPECTED");
    return value2;
  };
  ASSERT_EQ(func1().status().message(), "EXPECTED; ALSO EXPECTED");
  auto func2 = []() -> void {
    ASSIGN_OR_RETURN(int value, absl::StatusOr<int>(5), (void)_);
    INTERNAL_ASSERT_EQ(value, 5);
  };
  func2();
}
// Empty original message + streamed suffix: suffix becomes the message.
TEST(ExternalStatusTest, AssignOrReturnAnnotateEmpty) {
  auto err = [] { return absl::StatusOr<int>(absl::InvalidArgumentError("")); };
  auto func = [&]() -> absl::StatusOr<int> {
    ASSIGN_OR_RETURN(auto result, err(), _ << "suffix");
    return result;
  };
  ASSERT_EQ(func().status().message(), "suffix");
}
// Payloads survive the 3-arg ASSIGN_OR_RETURN annotation path too.
TEST(ExternalStatusTest, AssignOrReturn3Payload) {
  auto err = [] {
    auto status = absl::InvalidArgumentError("message");
    status.SetPayload("url", absl::Cord("payload"));
    return absl::StatusOr<int>(status);
  };
  auto func = [&]() -> absl::StatusOr<int> {
    ASSIGN_OR_RETURN(auto result, err(), _ << "suffix");
    return result;
  };
  ASSERT_EQ(func().status().message(), "message; suffix");
  ASSERT_EQ(func().status().GetPayload("url"), "payload");
}
// ASSERT_OK_AND_ASSIGN assigns on OK (plain and structured-binding lhs) and
// produces a fatal gtest failure carrying the error message otherwise.
TEST(ExternalStatusTest, AssertOkAndAssign) {
  ASSERT_OK_AND_ASSIGN(auto value, ReturnStatusOrValue(1));
  ASSERT_EQ(1, value);
  ASSERT_OK_AND_ASSIGN((const auto& [tuple1, tuple2]),
                       ReturnStatusOrValue(std::make_tuple(1, 2)));
  ASSERT_EQ(1, tuple1);
  ASSERT_EQ(2, tuple2);
  EXPECT_FATAL_FAILURE(
      []() {
        ASSERT_OK_AND_ASSIGN(auto x, ReturnStatusOrError("Expected error"));
        (void)x;
      }(),
      "Expected error");
}
// ASSERT_OK accepts both Status and StatusOr; failures are fatal and
// include the status message.
TEST(ExternalStatusTest, AssertOk) {
  ASSERT_OK(ReturnOk());
  ASSERT_OK(ReturnStatusOrValue(1));
  EXPECT_FATAL_FAILURE(ASSERT_OK(ReturnStatusOrError("Expected error")),
                       "Expected error");
}
// EXPECT_OK mirrors ASSERT_OK but produces a non-fatal failure.
TEST(ExternalStatusTest, ExpectOk) {
  EXPECT_OK(ReturnOk());
  EXPECT_OK(ReturnStatusOrValue(1));
  EXPECT_NONFATAL_FAILURE(EXPECT_OK(ReturnStatusOrError("Expected error")),
                          "Expected error");
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/status_macros_backport.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/status_macros_backport_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0b41c07f-45b3-4650-abe4-781be34d4dd6 | cpp | tensorflow/tensorflow | tf_mlir_translate_registration | tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc | tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration_test.cc | #include <memory>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Tools/mlir-translate/Translation.h"
#include "tensorflow/compiler/mlir/tensorflow/dialect_registration.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_cl.h"
#include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "xla/client/client_library.h"
#include "xla/client/compile_only_client.h"
#include "xla/stream_executor/host/host_platform_id.h"
#include "xla/stream_executor/platform_manager.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tsl/platform/protobuf.h"
namespace mlir {
using tsl::Status;
using tsl::StatusOr;
static constexpr char kMlirToGraphCompilationCheckName[] =
"mlir-to-graph-compilation-check";
static constexpr char kArbitraryDeviceName[] = "XLA_CPU_JIT";
namespace {
// Converts an LLVM StringRef to an absl::string_view over the same bytes
// (no copy; the view aliases the StringRef's storage).
inline absl::string_view StringRefToView(llvm::StringRef ref) {
  return absl::string_view(ref.data(), ref.size());
}
}
// mlir-translate hook: imports a serialized GraphDef into a TF MLIR module.
// Returns a null module on failure (the registration API's error signal).
static OwningOpRef<mlir::ModuleOp> GraphdefToMlirTranslateFunction(
    llvm::StringRef input, MLIRContext* context) {
  // Bundle the command-line flag values into the options struct; field order
  // must match GraphdefToMlirOptions.
  const tensorflow::GraphdefToMlirOptions options{
      debug_info_file, xla_compile_device_type,
      prune_unused_nodes, convert_legacy_fed_inputs,
      graph_as_function, upgrade_legacy,
      enable_shape_inference, unconditionally_use_set_output_shapes,
      enable_soft_placement, set_original_tf_func_name};
  auto result = tensorflow::GraphdefToMlirTranslateFunction(
      input, input_arrays, input_dtypes, input_shapes, output_arrays,
      control_output_arrays, options, context);
  if (!result.ok()) {
    return nullptr;
  }
  return std::move(result).value();
}
// Registers the translation above under the `graphdef-to-mlir` flag.
static TranslateToMLIRRegistration GraphdefToMlirTranslate(
    "graphdef-to-mlir", "graphdef-to-mlir", GraphdefToMlirTranslateFunction);
// mlir-translate hook: like GraphdefToMlirTranslateFunction, but imports via
// the "splatted" variant. Note only the first eight option fields are set
// here (unlike the sibling above); the remaining aggregate members are
// value-initialized.
static OwningOpRef<mlir::ModuleOp> GraphdefToSplattedMlirTranslateFunction(
    llvm::StringRef input, MLIRContext* context) {
  tensorflow::GraphdefToMlirOptions options{
      debug_info_file, xla_compile_device_type,
      prune_unused_nodes, convert_legacy_fed_inputs,
      graph_as_function, upgrade_legacy,
      enable_shape_inference, unconditionally_use_set_output_shapes};
  auto module_or = tensorflow::GraphdefToSplattedMlirTranslateFunction(
      input, input_arrays, input_dtypes, input_shapes, output_arrays,
      control_output_arrays, options, context);
  // Null module signals failure to the registration machinery.
  if (!module_or.status().ok()) return nullptr;
  return std::move(module_or).value();
}
// Registers the splatted translation under `graphdef-to-splatted-mlir`.
static TranslateToMLIRRegistration GraphdefToSplattedMlirTranslate(
    "graphdef-to-splatted-mlir", "graphdef-to-splatted-mlir",
    GraphdefToSplattedMlirTranslateFunction);
// Sanity-compiles `graph` with XlaCompiler against `client` as a
// post-export validity check (the compilation result itself is discarded).
// Returns InvalidArgument if either pointer is null.
static Status CompileGraph(tensorflow::Graph* graph,
                           xla::CompileOnlyClient* client) {
  if (!graph || !client) {
    return Status(absl::StatusCode::kInvalidArgument,
                  "Invalid graph or client");
  }
  // Empty function library: the exported graph is expected to be
  // self-contained for this check.
  tensorflow::FunctionDefLibrary flib;
  auto flib_def = std::make_unique<tensorflow::FunctionLibraryDefinition>(
      tensorflow::OpRegistry::Global(), flib);
  tensorflow::XlaCompiler::Options options;
  options.device_type = tensorflow::DeviceType(kArbitraryDeviceName);
  options.client = client;
  options.flib_def = flib_def.get();
  tensorflow::XlaCompiler compiler(options);
  // CompileGraph consumes the graph, so compile a copy.
  std::unique_ptr<tensorflow::Graph> graph_copy(
      new tensorflow::Graph(tensorflow::OpRegistry::Global()));
  tensorflow::CopyGraph(*graph, graph_copy.get());
  tensorflow::XlaCompiler::CompileOptions compile_options;
  tensorflow::XlaCompiler::CompilationResult result;
  return compiler.CompileGraph(compile_options,
                               kMlirToGraphCompilationCheckName,
                               std::move(graph_copy), {}, &result);
}
// mlir-translate hook for `-mlir-to-graph`: exports the module to a Graph,
// runs a compilation sanity check on the XLA_CPU_JIT host platform, and
// prints the resulting GraphDef to `output`.
static LogicalResult MlirToGraphTranslateFunction(ModuleOp module,
                                                  llvm::raw_ostream& output) {
  if (!module) return failure();
  tensorflow::GraphExportConfig confs;
  confs.export_entry_func_to_flib = export_entry_func_to_flib;
  confs.export_original_tf_func_name = export_original_tf_func_name;
  // NOTE(review): flib_def is never initialized, so flib_def.get() below
  // passes nullptr to ConvertTfExecutorToGraph (the sibling
  // MlirToGraphdefTranslateFunction constructs a real library). Presumably
  // the converter accepts null here — confirm.
  std::unique_ptr<tensorflow::FunctionLibraryDefinition> flib_def;
  auto graph =
      std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
  absl::flat_hash_set<tensorflow::Node*> control_ret_nodes;
  auto status = tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
      module, confs, &graph, flib_def.get(), &control_ret_nodes);
  if (!status.ok()) {
    LOG(ERROR) << "Export to Graph failed: " << status;
    return mlir::failure();
  }
  // Use the in-process host platform for the compile-only sanity check.
  auto platform = stream_executor::PlatformManager::PlatformWithId(
      stream_executor::host::kHostPlatformId);
  auto client =
      xla::ClientLibrary::GetOrCreateCompileOnlyClient(platform.value());
  tensorflow::XlaOpRegistry::RegisterCompilationKernels();
  if (!CompileGraph(graph.get(), client.value()).ok()) {
    return mlir::failure();
  }
  auto graphdef = std::make_unique<tensorflow::GraphDef>();
  graph->ToGraphDef(graphdef.get());
  output << tsl::LegacyUnredactedDebugString(*graphdef);
  return success();
}
static TranslateFromMLIRRegistration mlir_to_graph_translate(
"mlir-to-graph", "convert mlir to graph",
MlirToGraphTranslateFunction, [](DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
});
static LogicalResult MlirToGraphdefTranslateFunction(
ModuleOp module, llvm::raw_ostream& output) {
if (!module) return failure();
tensorflow::GraphExportConfig confs;
confs.export_entry_func_to_flib = export_entry_func_to_flib;
confs.export_original_tf_func_name = export_original_tf_func_name;
tensorflow::FunctionLibraryDefinition flib_def(
tensorflow::OpRegistry::Global(), tensorflow::FunctionDefLibrary());
auto graph =
std::make_unique<tensorflow::Graph>(tensorflow::OpRegistry::Global());
absl::flat_hash_set<tensorflow::Node*> control_ret_nodes;
auto status = tensorflow::tf2xla::v2::ConvertTfExecutorToGraph(
module, confs, &graph, &flib_def, &control_ret_nodes);
if (!status.ok()) {
LOG(ERROR) << "Export to Graph failed: " << status;
return mlir::failure();
}
tensorflow::GraphDef graphdef;
graph->ToGraphDef(&graphdef);
output << tsl::LegacyUnredactedDebugString(graphdef);
return success();
}
static TranslateFromMLIRRegistration mlir_to_graphdef_translate(
"mlir-to-graphdef", "mlir-to-graphdef", MlirToGraphdefTranslateFunction,
[](DialectRegistry& registry) {
mlir::RegisterAllTensorFlowDialects(registry);
});
} | #include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/match.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Tools/mlir-translate/Translation.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace {
class MlirTranslationTest : public ::testing::Test {
private:
static constexpr char kMlirToGraphFlag[] = "-mlir-to-graph";
public:
MlirTranslationTest() : translation_(RegisterTranslation()) {
std::vector<const char*> argv = {""};
argv.push_back(kMlirToGraphFlag);
llvm::cl::ParseCommandLineOptions(argv.size(), &argv[0],
"TF MLIR translation test\n");
}
LogicalResult Translate(StringRef source, std::string& sink) {
auto source_manager = std::make_shared<llvm::SourceMgr>();
auto source_buffer = llvm::MemoryBuffer::getMemBuffer(source);
source_manager->AddNewSourceBuffer(std::move(source_buffer), llvm::SMLoc());
mlir::MLIRContext context;
llvm::raw_string_ostream os(sink);
return (**translation_)(source_manager, os, &context);
}
private:
llvm::cl::opt<const mlir::Translation*, false, mlir::TranslationParser>*
RegisterTranslation() {
static const auto requested_translation =
new llvm::cl::opt<const mlir::Translation*, false,
mlir::TranslationParser>(
llvm::cl::desc("Translation to perform"));
return requested_translation;
}
llvm::cl::opt<const mlir::Translation*, false, mlir::TranslationParser>*
translation_;
};
TEST_F(MlirTranslationTest, TranslatesMlirToGraph) {
static constexpr char kMlirSource[] = R"(
func.func @main() -> (tensor<1x2xf16>, tensor<2xf16>) {
%graph:2 = tf_executor.graph {
%0:2 = tf_executor.island wraps "tf.Const"() {device = "", dtype = "tfdtype$DT_HALF", value = dense<1.0> : tensor<1x2xf16>} : () -> tensor<1x2xf16> loc("const1")
%1:2 = tf_executor.island wraps "tf.Const"() {device = "", dtype = "tfdtype$DT_HALF", value = dense<[1.0, 2.0]> : tensor<2xf16>} : () -> tensor<2xf16> loc("const2")
tf_executor.fetch %0#0, %1#0 : tensor<1x2xf16>, tensor<2xf16>
}
func.return %graph#0, %graph#1 : tensor<1x2xf16>, tensor<2xf16>
})";
std::string result;
auto status = Translate(kMlirSource, result);
ASSERT_TRUE(status.succeeded());
EXPECT_TRUE(absl::StrContains(result, "node {"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate_registration_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3bd1293f-ecb9-4fcb-98fd-11ab2bc9252d | cpp | tensorflow/tensorflow | schedule_postprocessing | third_party/xla/xla/service/gpu/transforms/schedule_postprocessing.cc | third_party/xla/xla/service/gpu/transforms/schedule_postprocessing_test.cc | #include "xla/service/gpu/transforms/schedule_postprocessing.h"
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using CustomCallInComputation =
absl::flat_hash_map<const HloComputation*, bool>;
bool MayInvokeCustomCall(
const HloInstruction* hlo,
const CustomCallInComputation& custom_call_in_computation) {
if (hlo->opcode() == HloOpcode::kCustomCall) {
return true;
}
return absl::c_any_of(
hlo->called_computations(), [&](const HloComputation* callee) {
return custom_call_in_computation.find(callee)->second;
});
}
absl::StatusOr<bool> IsRelevantAsynchronousStart(const HloInstruction* hlo) {
if (!hlo_query::IsAsyncCollectiveStartOp(hlo,
false)) {
return false;
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
hlo->backend_config<GpuBackendConfig>());
const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
return !collective_backend_config.is_sync();
}
absl::StatusOr<bool> IsRelevantAsynchronousDone(const HloInstruction* hlo) {
return hlo_query::IsAsyncCollectiveDoneOp(hlo,
false);
}
absl::StatusOr<bool> ProcessComputation(
const HloSchedule& schedule, HloComputation* computation,
CustomCallInComputation& custom_call_in_computation) {
bool changed = false;
bool has_custom_call = false;
absl::flat_hash_set<HloInstruction*> async_starts;
const HloInstructionSequence& sequence = schedule.sequence(computation);
const std::vector<HloInstruction*>& all_instructions =
sequence.instructions();
for (HloInstruction* hlo : all_instructions) {
if (MayInvokeCustomCall(hlo, custom_call_in_computation)) {
async_starts.clear();
has_custom_call = true;
continue;
}
TF_ASSIGN_OR_RETURN(bool is_async_start, IsRelevantAsynchronousStart(hlo));
if (is_async_start) {
async_starts.insert(hlo);
continue;
}
TF_ASSIGN_OR_RETURN(bool is_async_done, IsRelevantAsynchronousDone(hlo));
if (is_async_done) {
HloInstruction* async_start = hlo->mutable_operand(0);
if (async_starts.contains(async_start)) {
changed = true;
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
async_start->backend_config<GpuBackendConfig>());
CollectiveBackendConfig& collective_backend_config =
*gpu_config.mutable_collective_backend_config();
collective_backend_config.set_no_parallel_custom_call(true);
TF_RETURN_IF_ERROR(async_start->set_backend_config(gpu_config));
async_starts.erase(async_start);
}
}
}
custom_call_in_computation[computation] = has_custom_call;
return changed;
}
}
absl::StatusOr<bool> SchedulePostprocessing::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (!module->has_schedule()) return false;
HloSchedule& schedule = module->schedule();
bool changed = false;
CustomCallInComputation custom_call_in_computation;
std::vector<HloComputation*> all_computations =
module->MakeComputationPostOrder(execution_threads);
for (auto iter = all_computations.begin(); iter != all_computations.end();
++iter) {
HloComputation* computation = *iter;
if (computation->IsFusionComputation()) {
custom_call_in_computation[computation] = false;
continue;
}
TF_ASSIGN_OR_RETURN(
bool result,
ProcessComputation(schedule, computation, custom_call_in_computation));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/schedule_postprocessing.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using SchedulePostprocessingTest = HloTestBase;
TEST_F(SchedulePostprocessingTest, SynchronousOpsNotChanged) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
pf32 = f32[1] parameter(0)
all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":true,"no_parallel_custom_call":false}}
ROOT all-gather-done = f32[2] all-gather-done(all-gather-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
SchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(SchedulePostprocessingTest, P2POpsNotChanged) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY main {
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}}"
}
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=2
ROOT recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
SchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(SchedulePostprocessingTest, AsynchronousOpsChanged) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
pf32 = f32[1] parameter(0)
pf32.2 = f32[1] custom-call(pf32), custom_call_target="my_custom_call"
all-gather-start = (f32[1], f32[2]) all-gather-start(pf32.2), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}}
ROOT all-gather-done = f32[2] all-gather-done(all-gather-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
SchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* start = FindInstruction(module.get(), "all-gather-start");
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
start->backend_config<GpuBackendConfig>());
const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
EXPECT_TRUE(collective_backend_config.no_parallel_custom_call());
}
TEST_F(SchedulePostprocessingTest, AsynchronousOpsWithParallelCustomcall) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
pf32 = f32[1] parameter(0)
all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}}
pf32.2 = f32[1] custom-call(pf32), custom_call_target="my_custom_call"
all-gather-done = f32[2] all-gather-done(all-gather-start)
ROOT out = (f32[1], f32[2]) tuple(f32[1] pf32.2, f32[2] all-gather-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
SchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
HloInstruction* start = FindInstruction(module.get(), "all-gather-start");
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
start->backend_config<GpuBackendConfig>());
const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
EXPECT_FALSE(collective_backend_config.no_parallel_custom_call());
}
TEST_F(SchedulePostprocessingTest,
AsynchronousOpsWithParallelNestedCustomcall) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
foo {
v = f32[1] parameter(0)
ROOT ret = f32[1] custom-call(v), custom_call_target="my_custom_call"
}
ENTRY entry {
pf32 = f32[1] parameter(0)
all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}}
pf32.2 = f32[1] call(f32[1] pf32), to_apply=foo
all-gather-done = f32[2] all-gather-done(all-gather-start)
ROOT out = (f32[1], f32[2]) tuple(f32[1] pf32.2, f32[2] all-gather-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
SchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
HloInstruction* start = FindInstruction(module.get(), "all-gather-start");
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
start->backend_config<GpuBackendConfig>());
const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
EXPECT_FALSE(collective_backend_config.no_parallel_custom_call());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/schedule_postprocessing.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/schedule_postprocessing_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2e2c616a-8376-4f45-a80a-b75bf4ec95f6 | cpp | tensorflow/tensorflow | conv_weights_converter | tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.cc | tensorflow/lite/delegates/gpu/cl/kernels/conv_weights_converter_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.h"
#include <cstring>
#include <memory>
#include <string>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
ConverterToConvWeights::ConverterToConvWeights(
const OperationDef& definition, const WeightsDescription& weights_desc,
Layout input_layout)
: GPUOperation(definition),
weights_desc_(weights_desc),
input_layout_(input_layout) {
code_ = GetConverterToConvWeightsCode();
}
std::string ConverterToConvWeights::GetConverterToConvWeightsCode() {
AddSrcTensor("src_tensor", definition_.src_tensors[0]);
args_.AddFloat("mask_x");
args_.AddFloat("mask_y");
args_.AddFloat("mask_z");
args_.AddFloat("mask_w");
args_.AddInt("out_ch");
args_.AddInt("out_ch_x4_groups");
args_.AddInt("in_ch");
args_.AddInt("in_ch_x4_groups");
args_.AddInt("kernel_width");
args_.AddInt("kernel_height");
args_.AddInt("kernel_spatial_size");
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
std::vector<int32_t> remap(weights_desc_.spatial_remap.size());
for (int i = 0; i < remap.size(); ++i) {
remap[i] = weights_desc_.spatial_remap[i];
}
BufferDescriptor desc;
desc.element_type = DataType::INT32;
desc.element_size = 1;
desc.memory_type = MemoryType::GLOBAL;
desc.size = remap.size() * sizeof(int32_t);
desc.data.resize(desc.size);
std::memcpy(desc.data.data(), remap.data(), desc.size);
args_.AddObject("spatial_remap",
std::make_unique<BufferDescriptor>(std::move(desc)));
}
std::string c;
c += "MAIN_FUNCTION($0) {\n";
c += " int O = GLOBAL_ID_0;\n";
c += " int I = GLOBAL_ID_1;\n";
c += " int spatial_linear = GLOBAL_ID_2;\n";
c += " if (O >= args.out_ch_x4_groups) return;\n";
c += " if (I >= args.in_ch_x4_groups) return;\n";
c += " if (spatial_linear >= args.kernel_spatial_size) return;\n";
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
c += " int linear_remap = args.spatial_remap.Read(spatial_linear);\n";
c += " int W = linear_remap % args.kernel_width;\n";
c += " int H = linear_remap / args.kernel_width;\n";
} else {
c += " int W = spatial_linear % args.kernel_width;\n";
c += " int H = spatial_linear / args.kernel_width;\n";
}
c += " FLT4 v0 = INIT_FLT4(0.0f);\n";
c += " FLT4 v1 = INIT_FLT4(0.0f);\n";
c += " FLT4 v2 = INIT_FLT4(0.0f);\n";
c += " FLT4 v3 = INIT_FLT4(0.0f);\n";
if (input_layout_ == Layout::OHWI) {
c += " if (O * 4 < args.out_ch) {\n";
c += " v0 = args.src_tensor.Read(W, H, I, O * 4);\n";
c += " }\n";
c += " if (O * 4 + 1 < args.out_ch) {\n";
c += " v1 = args.src_tensor.Read(W, H, I, O * 4 + 1);\n";
c += " }\n";
c += " if (O * 4 + 2 < args.out_ch) {\n";
c += " v2 = args.src_tensor.Read(W, H, I, O * 4 + 2);\n";
c += " }\n";
c += " if (O * 4 + 3 < args.out_ch) {\n";
c += " v3 = args.src_tensor.Read(W, H, I, O * 4 + 3);\n";
c += " }\n";
c += " if (I == args.src_tensor.Slices() - 1) {\n";
c += " FLT4 mask = INIT_FLT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c += " v0 *= mask;\n";
c += " v1 *= mask;\n";
c += " v2 *= mask;\n";
c += " v3 *= mask;\n";
c += " }\n";
} else if (input_layout_ == Layout::HWIO) {
c += " if (I * 4 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v0 = args.src_tensor.Read(I * 4, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 1 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v1 = args.src_tensor.Read(I * 4 + 1, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 2 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v2 = args.src_tensor.Read(I * 4 + 2, W, O, H);\n";
c += " }\n";
c += " if (I * 4 + 3 < args.in_ch && O < args.src_tensor.Slices()) {\n";
c += " v3 = args.src_tensor.Read(I * 4 + 3, W, O, H);\n";
c += " }\n";
c += " if (O == args.src_tensor.Slices() - 1) {\n";
c += " FLT4 mask = INIT_FLT4v4(args.mask_x, args.mask_y, args.mask_z, "
"args.mask_w);\n";
c += " v0 *= mask;\n";
c += " v1 *= mask;\n";
c += " v2 *= mask;\n";
c += " v3 *= mask;\n";
c += " }\n";
}
const bool need_transpose =
(input_layout_ == Layout::HWIO && weights_desc_.IsO4I4()) ||
(input_layout_ == Layout::OHWI && weights_desc_.IsI4O4());
if (need_transpose) {
c += " FLT4 r0 = INIT_FLT4v4(v0.x, v1.x, v2.x, v3.x);\n";
c += " FLT4 r1 = INIT_FLT4v4(v0.y, v1.y, v2.y, v3.y);\n";
c += " FLT4 r2 = INIT_FLT4v4(v0.z, v1.z, v2.z, v3.z);\n";
c += " FLT4 r3 = INIT_FLT4v4(v0.w, v1.w, v2.w, v3.w);\n";
} else {
c += " FLT4 r0 = v0;\n";
c += " FLT4 r1 = v1;\n";
c += " FLT4 r2 = v2;\n";
c += " FLT4 r3 = v3;\n";
}
if (weights_desc_.layout ==
WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4 ||
weights_desc_.layout ==
WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4) {
AddDstTensor("dst_tensor0", definition_.dst_tensors[0]);
AddDstTensor("dst_tensor1", definition_.dst_tensors[1]);
AddDstTensor("dst_tensor2", definition_.dst_tensors[2]);
AddDstTensor("dst_tensor3", definition_.dst_tensors[3]);
c += " int yc = spatial_linear * args.in_ch_x4_groups + I;\n";
c += " args.dst_tensor0.Write2D(r0, O, yc);\n";
c += " args.dst_tensor1.Write2D(r1, O, yc);\n";
c += " args.dst_tensor2.Write2D(r2, O, yc);\n";
c += " args.dst_tensor3.Write2D(r3, O, yc);\n";
c += "}\n";
} else {
AddDstTensor("dst_tensor", definition_.dst_tensors[0]);
c += " int OUTPUT_GROUP_SIZE = " +
std::to_string(weights_desc_.GetOutputGroupSize()) + ";\n";
c += " int d_index = (O * 4) / (OUTPUT_GROUP_SIZE * 4);\n";
c += " int k_index = ((O * 4) % (OUTPUT_GROUP_SIZE * 4)) / 4;\n";
std::string index;
if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 ||
weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) {
index =
"(d_index * args.in_ch_x4_groups + I) * args.kernel_spatial_size + "
"spatial_linear";
} else if (weights_desc_.layout == WeightsLayout::kOSpatialIOGroupI4O4 ||
weights_desc_.layout == WeightsLayout::kOSpatialIOGroupO4I4) {
index =
"(d_index * args.kernel_spatial_size + spatial_linear) * "
"args.in_ch_x4_groups + I";
}
c += " int dst_offset = (" + index + ") * OUTPUT_GROUP_SIZE + k_index;\n";
c += " args.dst_tensor.WriteLinear(r0, dst_offset * 4 + 0);\n";
c += " args.dst_tensor.WriteLinear(r1, dst_offset * 4 + 1);\n";
c += " args.dst_tensor.WriteLinear(r2, dst_offset * 4 + 2);\n";
c += " args.dst_tensor.WriteLinear(r3, dst_offset * 4 + 3);\n";
c += "}\n";
}
return c;
}
OHWI ConverterToConvWeights::GetWeightsSize() const {
int output_channels = 0;
int input_channels = 0;
int kernel_width = 0;
int kernel_height = 0;
if (input_layout_ == Layout::HWIO) {
output_channels = src_[0]->Channels();
input_channels = src_[0]->Width();
kernel_width = src_[0]->Height();
kernel_height = src_[0]->Batch();
} else if (input_layout_ == Layout::OHWI) {
output_channels = src_[0]->Batch();
input_channels = src_[0]->Channels();
kernel_width = src_[0]->Width();
kernel_height = src_[0]->Height();
}
return OHWI(output_channels, kernel_height, kernel_width, input_channels);
}
absl::Status ConverterToConvWeights::BindArguments(ArgumentsBinder* args) {
const auto& weights_shape = GetWeightsSize();
const int output_channels_x4_groups = DivideRoundUp(
AlignByN(weights_shape.o, 4 * weights_desc_.GetOutputGroupSize()), 4);
RETURN_IF_ERROR(args->SetInt("out_ch", weights_shape.o));
RETURN_IF_ERROR(args->SetInt("out_ch_x4_groups", output_channels_x4_groups));
RETURN_IF_ERROR(args->SetInt("in_ch", weights_shape.i));
RETURN_IF_ERROR(
args->SetInt("in_ch_x4_groups", DivideRoundUp(weights_shape.i, 4)));
RETURN_IF_ERROR(args->SetInt("kernel_width", weights_shape.w));
RETURN_IF_ERROR(args->SetInt("kernel_height", weights_shape.h));
RETURN_IF_ERROR(
args->SetInt("kernel_spatial_size", weights_shape.w * weights_shape.h));
float4 mask = GetMaskForLastPlane(src_[0]->Channels());
RETURN_IF_ERROR(args->SetFloat("mask_x", mask.x));
RETURN_IF_ERROR(args->SetFloat("mask_y", mask.y));
RETURN_IF_ERROR(args->SetFloat("mask_z", mask.z));
return args->SetFloat("mask_w", mask.w);
}
int3 ConverterToConvWeights::GetGridSize() const {
const auto& weights_shape = GetWeightsSize();
const int out_group_size = weights_desc_.GetOutputGroupSize();
const int grid_x =
DivideRoundUp(AlignByN(weights_shape.o, 4 * out_group_size), 4);
const int grid_y = DivideRoundUp(weights_shape.i, 4);
const int grid_z = weights_shape.w * weights_shape.h;
return int3(grid_x, grid_y, grid_z);
}
ConverterToConvWeights CreateConverterToConvWeights(
const OperationDef& definition, const WeightsDescription& weights_desc,
Layout input_layout) {
return ConverterToConvWeights(definition, weights_desc, input_layout);
}
}
} | #include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX4) {
const auto status = ConverterToConvWeights1x1OutX4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX4Unaligned) {
const auto status = ConverterToConvWeights1x1OutX4UnalignedTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX2) {
const auto status = ConverterToConvWeights1x1OutX2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeightsOutX2) {
const auto status = ConverterToConvWeightsOutX2Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvTransposedWeights4x4) {
const auto status = ConverterToConvTransposedWeights4x4Test(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, ConverterToConvWeights4xTextures) {
const auto status = ConverterToConvWeights4xTexturesTest(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/conv_weights_converter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
da2a6451-f55a-441f-80f9-ef5b7e5838ea | cpp | google/cel-cpp | compiler_constant_step | eval/eval/compiler_constant_step.cc | eval/eval/compiler_constant_step_test.cc | #include "eval/eval/compiler_constant_step.h"
#include "absl/status/status.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/evaluator_core.h"
namespace google::api::expr::runtime {
using ::cel::Value;
absl::Status DirectCompilerConstantStep::Evaluate(
ExecutionFrameBase& frame, Value& result, AttributeTrail& attribute) const {
result = value_;
return absl::OkStatus();
}
absl::Status CompilerConstantStep::Evaluate(ExecutionFrame* frame) const {
frame->value_stack().Push(value_);
return absl::OkStatus();
}
} | #include "eval/eval/compiler_constant_step.h"
#include <memory>
#include "base/type_provider.h"
#include "common/native_type.h"
#include "common/type_factory.h"
#include "common/type_manager.h"
#include "common/value.h"
#include "common/value_manager.h"
#include "common/values/legacy_value_manager.h"
#include "eval/eval/evaluator_core.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "runtime/activation.h"
#include "runtime/runtime_options.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::extensions::ProtoMemoryManagerRef;
class CompilerConstantStepTest : public testing::Test {
public:
CompilerConstantStepTest()
: value_factory_(ProtoMemoryManagerRef(&arena_),
cel::TypeProvider::Builtin()),
state_(2, 0, cel::TypeProvider::Builtin(),
ProtoMemoryManagerRef(&arena_)) {}
protected:
google::protobuf::Arena arena_;
cel::common_internal::LegacyValueManager value_factory_;
FlatExpressionEvaluatorState state_;
cel::Activation empty_activation_;
cel::RuntimeOptions options_;
};
TEST_F(CompilerConstantStepTest, Evaluate) {
ExecutionPath path;
path.push_back(std::make_unique<CompilerConstantStep>(
value_factory_.CreateIntValue(42), -1, false));
ExecutionFrame frame(path, empty_activation_, options_, state_);
ASSERT_OK_AND_ASSIGN(cel::Value result, frame.Evaluate());
EXPECT_EQ(result.GetInt().NativeValue(), 42);
}
TEST_F(CompilerConstantStepTest, TypeId) {
CompilerConstantStep step(value_factory_.CreateIntValue(42), -1, false);
ExpressionStep& abstract_step = step;
EXPECT_EQ(abstract_step.GetNativeTypeId(),
cel::NativeTypeId::For<CompilerConstantStep>());
}
TEST_F(CompilerConstantStepTest, Value) {
CompilerConstantStep step(value_factory_.CreateIntValue(42), -1, false);
EXPECT_EQ(step.value().GetInt().NativeValue(), 42);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/compiler_constant_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/compiler_constant_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
ec3a6617-a451-44bc-8bdd-d99c7dc86203 | cpp | google/quiche | quic_spdy_session | quiche/quic/core/http/quic_spdy_session.cc | quiche/quic/core/http/quic_spdy_session_test.cc | #include "quiche/quic/core/http/quic_spdy_session.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/core/http2_frame_decoder_adapter.h"
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/core/http/http_decoder.h"
#include "quiche/quic/core/http/http_frames.h"
#include "quiche/quic/core/http/quic_headers_stream.h"
#include "quiche/quic/core/http/quic_spdy_stream.h"
#include "quiche/quic/core/http/web_transport_http3.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_exported_stats.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/platform/api/quic_stack_trace.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
using http2::Http2DecoderAdapter;
using quiche::HttpHeaderBlock;
using spdy::Http2WeightToSpdy3Priority;
using spdy::Spdy3PriorityToHttp2Weight;
using spdy::SpdyErrorCode;
using spdy::SpdyFramer;
using spdy::SpdyFramerDebugVisitorInterface;
using spdy::SpdyFramerVisitorInterface;
using spdy::SpdyFrameType;
using spdy::SpdyHeadersHandlerInterface;
using spdy::SpdyHeadersIR;
using spdy::SpdyPingId;
using spdy::SpdyPriority;
using spdy::SpdyPriorityIR;
using spdy::SpdySerializedFrame;
using spdy::SpdySettingsId;
using spdy::SpdyStreamId;
namespace quic {
// Cap on WebTransport streams not yet associated with a session.  The name
// suggests a buffering limit for "unassociated" streams -- usage is outside
// this chunk; confirm at the call sites.
ABSL_CONST_INIT const size_t kMaxUnassociatedWebTransportStreams = 24;
namespace {
// Upper bound imposed on the HPACK encoder dynamic table size.
// NOTE(review): applied elsewhere in this file -- presumably to the headers
// stream's HPACK encoder; verify at the use site.
constexpr uint64_t kHpackEncoderDynamicTableSizeLimit = 16384;
// Default limit on the number of concurrent WebTransport sessions (used
// elsewhere in this file).
constexpr QuicStreamCount kDefaultMaxWebTransportSessions = 16;
// Log-message prefix identifying whether this endpoint is acting as the
// client or the server.
#define ENDPOINT \
  (perspective() == Perspective::IS_SERVER ? "Server: " : "Client: ")
// HttpDecoder::Visitor that decodes HTTP/3 frames received out-of-band via
// the ALPS (Application-Layer Protocol Settings) TLS extension.  As the
// error strings below show, only SETTINGS (at most one), ACCEPT_CH, ORIGIN
// and unknown frame types are accepted; every other known frame type stops
// decoding and records a human-readable reason in |error_detail_|.
class AlpsFrameDecoder : public HttpDecoder::Visitor {
 public:
  explicit AlpsFrameDecoder(QuicSpdySession* session) : session_(session) {}
  ~AlpsFrameDecoder() override = default;

  // HttpDecoder::Visitor implementation.

  // Intentionally empty: the caller retrieves decoding errors from the
  // decoder itself, not through this callback.
  void OnError(HttpDecoder* /*decoder*/) override {}
  bool OnMaxPushIdFrame() override {
    error_detail_ = "MAX_PUSH_ID frame forbidden";
    return false;
  }
  bool OnGoAwayFrame(const GoAwayFrame& /*frame*/) override {
    error_detail_ = "GOAWAY frame forbidden";
    return false;
  }
  bool OnSettingsFrameStart(QuicByteCount /*header_length*/) override {
    return true;
  }
  bool OnSettingsFrame(const SettingsFrame& frame) override {
    // At most one SETTINGS frame may be received via ALPS.
    if (settings_frame_received_via_alps_) {
      error_detail_ = "multiple SETTINGS frames";
      return false;
    }
    settings_frame_received_via_alps_ = true;
    // The session validates and applies the settings; a non-nullopt return
    // value is an error description and aborts decoding.
    error_detail_ = session_->OnSettingsFrameViaAlps(frame);
    return !error_detail_;
  }
  bool OnDataFrameStart(QuicByteCount /*header_length*/, QuicByteCount
                        /*payload_length*/) override {
    error_detail_ = "DATA frame forbidden";
    return false;
  }
  // The payload/end callbacks for forbidden frame types below should never
  // fire, since decoding is aborted at the corresponding frame-start
  // callback; reaching them indicates a decoder bug.
  bool OnDataFramePayload(absl::string_view /*payload*/) override {
    QUICHE_NOTREACHED();
    return false;
  }
  bool OnDataFrameEnd() override {
    QUICHE_NOTREACHED();
    return false;
  }
  bool OnHeadersFrameStart(QuicByteCount /*header_length*/,
                           QuicByteCount /*payload_length*/) override {
    error_detail_ = "HEADERS frame forbidden";
    return false;
  }
  bool OnHeadersFramePayload(absl::string_view /*payload*/) override {
    QUICHE_NOTREACHED();
    return false;
  }
  bool OnHeadersFrameEnd() override {
    QUICHE_NOTREACHED();
    return false;
  }
  bool OnPriorityUpdateFrameStart(QuicByteCount /*header_length*/) override {
    error_detail_ = "PRIORITY_UPDATE frame forbidden";
    return false;
  }
  bool OnPriorityUpdateFrame(const PriorityUpdateFrame& /*frame*/) override {
    QUICHE_NOTREACHED();
    return false;
  }
  bool OnAcceptChFrameStart(QuicByteCount /*header_length*/) override {
    return true;
  }
  // ACCEPT_CH frames are accepted and forwarded to the session.
  bool OnAcceptChFrame(const AcceptChFrame& frame) override {
    session_->OnAcceptChFrameReceivedViaAlps(frame);
    return true;
  }
  bool OnOriginFrameStart(QuicByteCount /*header_length*/) override {
    // NOTE(review): marked unreachable yet returns true, i.e. tolerated in
    // release builds -- unlike the frames rejected above.  Confirm intent.
    QUICHE_NOTREACHED();
    return true;
  }
  bool OnOriginFrame(const OriginFrame& /*frame*/) override { return true; }
  void OnWebTransportStreamFrameType(
      QuicByteCount /*header_length*/,
      WebTransportSessionId /*session_id*/) override {
    QUICHE_NOTREACHED();
  }
  bool OnMetadataFrameStart(QuicByteCount /*header_length*/,
                            QuicByteCount /*payload_length*/) override {
    error_detail_ = "METADATA frame forbidden";
    return false;
  }
  bool OnMetadataFramePayload(absl::string_view /*payload*/) override {
    QUICHE_NOTREACHED();
    return false;
  }
  bool OnMetadataFrameEnd() override {
    QUICHE_NOTREACHED();
    return false;
  }
  // Unknown frame types are ignored (payload consumed, decoding continues).
  bool OnUnknownFrameStart(uint64_t /*frame_type*/,
                           QuicByteCount
                           /*header_length*/,
                           QuicByteCount /*payload_length*/) override {
    return true;
  }
  bool OnUnknownFramePayload(absl::string_view /*payload*/) override {
    return true;
  }
  bool OnUnknownFrameEnd() override { return true; }

  // Human-readable reason decoding was aborted, or nullopt if no forbidden
  // or duplicate frame has been seen.
  const std::optional<std::string>& error_detail() const {
    return error_detail_;
  }

 private:
  QuicSpdySession* const session_;
  std::optional<std::string> error_detail_;

  // True once a SETTINGS frame has been processed; used to reject duplicates.
  bool settings_frame_received_via_alps_ = false;
};
// Returns the default QPACK dynamic table capacity for |perspective|.
// Servers may disable the dynamic table entirely via a runtime flag.
uint64_t GetDefaultQpackMaximumDynamicTableCapacity(Perspective perspective) {
  const bool server_table_disabled =
      perspective == Perspective::IS_SERVER &&
      GetQuicFlag(quic_server_disable_qpack_dynamic_table);
  return server_table_disabled ? 0 : kDefaultQpackMaxDynamicTableCapacity;
}
// Accumulates decoded headers into a QuicHeaderList while tracking an
// estimated size (name + value + per-entry overhead).  Entries are no longer
// recorded once the running estimate reaches the limit, and the whole list is
// discarded at block end if the limit was exceeded.
class SizeLimitingHeaderList : public spdy::SpdyHeadersHandlerInterface {
 public:
  ~SizeLimitingHeaderList() override = default;

  void OnHeaderBlockStart() override {
    QUIC_BUG_IF(quic_bug_12518_1, current_header_list_size_ != 0)
        << "OnHeaderBlockStart called more than once!";
  }

  void OnHeader(absl::string_view name, absl::string_view value) override {
    // Once over the limit, stop recording; OnHeaderBlockEnd() will Clear().
    if (current_header_list_size_ >= max_header_list_size_) {
      return;
    }
    current_header_list_size_ +=
        name.size() + value.size() + kQpackEntrySizeOverhead;
    header_list_.OnHeader(name, value);
  }

  void OnHeaderBlockEnd(size_t uncompressed_header_bytes,
                        size_t compressed_header_bytes) override {
    header_list_.OnHeaderBlockEnd(uncompressed_header_bytes,
                                  compressed_header_bytes);
    // Drop everything if the block exceeded the configured limit.
    if (current_header_list_size_ > max_header_list_size_) {
      Clear();
    }
  }

  void set_max_header_list_size(size_t max_header_list_size) {
    max_header_list_size_ = max_header_list_size;
  }

  void Clear() {
    header_list_.Clear();
    current_header_list_size_ = 0;
  }

  const QuicHeaderList& header_list() const { return header_list_; }

 private:
  QuicHeaderList header_list_;
  // Defaults to "no limit" until set_max_header_list_size() is called.
  size_t max_header_list_size_ = std::numeric_limits<size_t>::max();
  size_t current_header_list_size_ = 0;
};
}
// Visitor for the HTTP/2 framer that parses the headers stream when the
// session does not use HTTP/3 framing.  Most HTTP/2 frame types are not
// allowed on the QUIC headers stream and close the connection.
class QuicSpdySession::SpdyFramerVisitor
    : public SpdyFramerVisitorInterface,
      public SpdyFramerDebugVisitorInterface {
 public:
  explicit SpdyFramerVisitor(QuicSpdySession* session) : session_(session) {}
  SpdyFramerVisitor(const SpdyFramerVisitor&) = delete;
  SpdyFramerVisitor& operator=(const SpdyFramerVisitor&) = delete;
  // Decoded header fields are collected into |header_list_|, subject to its
  // size limit.
  SpdyHeadersHandlerInterface* OnHeaderFrameStart(
      SpdyStreamId ) override {
    QUICHE_DCHECK(!VersionUsesHttp3(session_->transport_version()));
    return &header_list_;
  }
  void OnHeaderFrameEnd(SpdyStreamId ) override {
    QUICHE_DCHECK(!VersionUsesHttp3(session_->transport_version()));
    LogHeaderCompressionRatioHistogram(
        false,
        false,
        header_list_.header_list().compressed_header_bytes(),
        header_list_.header_list().uncompressed_header_bytes());
    // Headers following a PUSH_PROMISE are dropped (push is rejected below).
    if (session_->IsConnected() && !expecting_pushed_headers_) {
      session_->OnHeaderList(header_list_.header_list());
    }
    expecting_pushed_headers_ = false;
    header_list_.Clear();
  }
  void OnStreamFrameData(SpdyStreamId , const char* ,
                         size_t ) override {
    QUICHE_DCHECK(!VersionUsesHttp3(session_->transport_version()));
    CloseConnection("SPDY DATA frame received.",
                    QUIC_INVALID_HEADERS_STREAM_DATA);
  }
  void OnStreamEnd(SpdyStreamId ) override {
  }
  void OnStreamPadding(SpdyStreamId , size_t ) override {
    CloseConnection("SPDY frame padding received.",
                    QUIC_INVALID_HEADERS_STREAM_DATA);
  }
  // Maps HPACK/framer errors to the corresponding QUIC error codes so the
  // connection close carries a precise reason.
  void OnError(Http2DecoderAdapter::SpdyFramerError error,
               std::string detailed_error) override {
    QuicErrorCode code;
    switch (error) {
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_INDEX_VARINT_ERROR:
        code = QUIC_HPACK_INDEX_VARINT_ERROR;
        break;
      case Http2DecoderAdapter::SpdyFramerError::
          SPDY_HPACK_NAME_LENGTH_VARINT_ERROR:
        code = QUIC_HPACK_NAME_LENGTH_VARINT_ERROR;
        break;
      case Http2DecoderAdapter::SpdyFramerError::
          SPDY_HPACK_VALUE_LENGTH_VARINT_ERROR:
        code = QUIC_HPACK_VALUE_LENGTH_VARINT_ERROR;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_NAME_TOO_LONG:
        code = QUIC_HPACK_NAME_TOO_LONG;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_VALUE_TOO_LONG:
        code = QUIC_HPACK_VALUE_TOO_LONG;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_NAME_HUFFMAN_ERROR:
        code = QUIC_HPACK_NAME_HUFFMAN_ERROR;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_VALUE_HUFFMAN_ERROR:
        code = QUIC_HPACK_VALUE_HUFFMAN_ERROR;
        break;
      case Http2DecoderAdapter::SpdyFramerError::
          SPDY_HPACK_MISSING_DYNAMIC_TABLE_SIZE_UPDATE:
        code = QUIC_HPACK_MISSING_DYNAMIC_TABLE_SIZE_UPDATE;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_INVALID_INDEX:
        code = QUIC_HPACK_INVALID_INDEX;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_INVALID_NAME_INDEX:
        code = QUIC_HPACK_INVALID_NAME_INDEX;
        break;
      case Http2DecoderAdapter::SpdyFramerError::
          SPDY_HPACK_DYNAMIC_TABLE_SIZE_UPDATE_NOT_ALLOWED:
        code = QUIC_HPACK_DYNAMIC_TABLE_SIZE_UPDATE_NOT_ALLOWED;
        break;
      case Http2DecoderAdapter::SpdyFramerError::
          SPDY_HPACK_INITIAL_DYNAMIC_TABLE_SIZE_UPDATE_IS_ABOVE_LOW_WATER_MARK:
        code = QUIC_HPACK_INITIAL_TABLE_SIZE_UPDATE_IS_ABOVE_LOW_WATER_MARK;
        break;
      case Http2DecoderAdapter::SpdyFramerError::
          SPDY_HPACK_DYNAMIC_TABLE_SIZE_UPDATE_IS_ABOVE_ACKNOWLEDGED_SETTING:
        code = QUIC_HPACK_TABLE_SIZE_UPDATE_IS_ABOVE_ACKNOWLEDGED_SETTING;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_TRUNCATED_BLOCK:
        code = QUIC_HPACK_TRUNCATED_BLOCK;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_HPACK_FRAGMENT_TOO_LONG:
        code = QUIC_HPACK_FRAGMENT_TOO_LONG;
        break;
      case Http2DecoderAdapter::SpdyFramerError::
          SPDY_HPACK_COMPRESSED_HEADER_SIZE_EXCEEDS_LIMIT:
        code = QUIC_HPACK_COMPRESSED_HEADER_SIZE_EXCEEDS_LIMIT;
        break;
      case Http2DecoderAdapter::SpdyFramerError::SPDY_DECOMPRESS_FAILURE:
        code = QUIC_HEADERS_STREAM_DATA_DECOMPRESS_FAILURE;
        break;
      default:
        code = QUIC_INVALID_HEADERS_STREAM_DATA;
    }
    CloseConnection(
        absl::StrCat("SPDY framing error: ", detailed_error,
                     Http2DecoderAdapter::SpdyFramerErrorToString(error)),
        code);
  }
  void OnDataFrameHeader(SpdyStreamId , size_t ,
                         bool ) override {
    QUICHE_DCHECK(!VersionUsesHttp3(session_->transport_version()));
    CloseConnection("SPDY DATA frame received.",
                    QUIC_INVALID_HEADERS_STREAM_DATA);
  }
  void OnRstStream(SpdyStreamId ,
                   SpdyErrorCode ) override {
    CloseConnection("SPDY RST_STREAM frame received.",
                    QUIC_INVALID_HEADERS_STREAM_DATA);
  }
  void OnSetting(SpdySettingsId id, uint32_t value) override {
    QUICHE_DCHECK(!VersionUsesHttp3(session_->transport_version()));
    session_->OnSetting(id, value);
  }
  void OnSettingsEnd() override {
    QUICHE_DCHECK(!VersionUsesHttp3(session_->transport_version()));
  }
  void OnPing(SpdyPingId , bool ) override {
    CloseConnection("SPDY PING frame received.",
                    QUIC_INVALID_HEADERS_STREAM_DATA);
  }
  void OnGoAway(SpdyStreamId ,
                SpdyErrorCode ) override {
    CloseConnection("SPDY GOAWAY frame received.",
                    QUIC_INVALID_HEADERS_STREAM_DATA);
  }
  void OnHeaders(SpdyStreamId stream_id, size_t ,
                 bool has_priority, int weight,
                 SpdyStreamId , bool ,
                 bool fin, bool ) override {
    if (!session_->IsConnected()) {
      return;
    }
    // HEADERS on the headers stream only exists pre-HTTP/3.
    if (VersionUsesHttp3(session_->transport_version())) {
      CloseConnection("HEADERS frame not allowed on headers stream.",
                      QUIC_INVALID_HEADERS_STREAM_DATA);
      return;
    }
    // Use-after-free canary; see QuicSpdySession::destruction_indicator_.
    QUIC_BUG_IF(quic_bug_12477_1,
                session_->destruction_indicator() != 123456789)
        << "QuicSpdyStream use after free. "
        << session_->destruction_indicator() << QuicStackTrace();
    SpdyPriority priority =
        has_priority ? Http2WeightToSpdy3Priority(weight) : 0;
    session_->OnHeaders(stream_id, has_priority,
                        spdy::SpdyStreamPrecedence(priority), fin);
  }
  void OnWindowUpdate(SpdyStreamId ,
                      int ) override {
    CloseConnection("SPDY WINDOW_UPDATE frame received.",
                    QUIC_INVALID_HEADERS_STREAM_DATA);
  }
  void OnPushPromise(SpdyStreamId ,
                     SpdyStreamId promised_stream_id, bool ) override {
    QUICHE_DCHECK(!VersionUsesHttp3(session_->transport_version()));
    if (session_->perspective() != Perspective::IS_CLIENT) {
      CloseConnection("PUSH_PROMISE not supported.",
                      QUIC_INVALID_HEADERS_STREAM_DATA);
      return;
    }
    // Server push is not supported: refuse the promised stream and discard
    // the header block that follows this PUSH_PROMISE.
    session_->MaybeSendRstStreamFrame(
        promised_stream_id,
        QuicResetStreamError::FromInternal(QUIC_REFUSED_STREAM),
        0);
    QUICHE_DCHECK(!expecting_pushed_headers_);
    expecting_pushed_headers_ = true;
  }
  void OnContinuation(SpdyStreamId , size_t ,
                      bool ) override {}
  void OnPriority(SpdyStreamId stream_id, SpdyStreamId ,
                  int weight, bool ) override {
    QUICHE_DCHECK(!VersionUsesHttp3(session_->transport_version()));
    if (!session_->IsConnected()) {
      return;
    }
    SpdyPriority priority = Http2WeightToSpdy3Priority(weight);
    session_->OnPriority(stream_id, spdy::SpdyStreamPrecedence(priority));
  }
  void OnPriorityUpdate(SpdyStreamId ,
                        absl::string_view ) override {}
  bool OnUnknownFrame(SpdyStreamId ,
                      uint8_t ) override {
    CloseConnection("Unknown frame type received.",
                    QUIC_INVALID_HEADERS_STREAM_DATA);
    return false;
  }
  void OnUnknownFrameStart(SpdyStreamId , size_t ,
                           uint8_t , uint8_t ) override {}
  void OnUnknownFramePayload(SpdyStreamId ,
                             absl::string_view ) override {}
  // Debug-visitor hook: records outbound header compression ratio.
  void OnSendCompressedFrame(SpdyStreamId , SpdyFrameType ,
                             size_t payload_len, size_t frame_len) override {
    if (payload_len == 0) {
      QUIC_BUG(quic_bug_10360_1) << "Zero payload length.";
      return;
    }
    int compression_pct = 100 - (100 * frame_len) / payload_len;
    QUIC_DVLOG(1) << "Net.QuicHpackCompressionPercentage: " << compression_pct;
  }
  void OnReceiveCompressedFrame(SpdyStreamId ,
                                SpdyFrameType ,
                                size_t frame_len) override {
    if (session_->IsConnected()) {
      session_->OnCompressedFrameSize(frame_len);
    }
  }
  void set_max_header_list_size(size_t max_header_list_size) {
    header_list_.set_max_header_list_size(max_header_list_size);
  }
 private:
  void CloseConnection(const std::string& details, QuicErrorCode code) {
    if (session_->IsConnected()) {
      session_->CloseConnectionWithDetails(code, details);
    }
  }
  QuicSpdySession* session_;
  SizeLimitingHeaderList header_list_;
  // True while discarding headers that follow a rejected PUSH_PROMISE.
  bool expecting_pushed_headers_ = false;
};
Http3DebugVisitor::Http3DebugVisitor() = default;
Http3DebugVisitor::~Http3DebugVisitor() = default;
// Constructs a session over |connection|.  For HTTP/3 versions, the static
// unidirectional stream count is reserved for control/QPACK streams; the
// streams themselves are created later in Initialize().
QuicSpdySession::QuicSpdySession(
    QuicConnection* connection, QuicSession::Visitor* visitor,
    const QuicConfig& config, const ParsedQuicVersionVector& supported_versions)
    : QuicSession(connection, visitor, config, supported_versions,
                  VersionUsesHttp3(connection->transport_version())
                      ? static_cast<QuicStreamCount>(
                            kHttp3StaticUnidirectionalStreamCount)
                      : 0u,
                  std::make_unique<DatagramObserver>(this)),
      send_control_stream_(nullptr),
      receive_control_stream_(nullptr),
      qpack_encoder_receive_stream_(nullptr),
      qpack_decoder_receive_stream_(nullptr),
      qpack_encoder_send_stream_(nullptr),
      qpack_decoder_send_stream_(nullptr),
      qpack_maximum_dynamic_table_capacity_(
          GetDefaultQpackMaximumDynamicTableCapacity(perspective())),
      qpack_maximum_blocked_streams_(kDefaultMaximumBlockedStreams),
      max_inbound_header_list_size_(kDefaultMaxUncompressedHeaderSize),
      max_outbound_header_list_size_(std::numeric_limits<size_t>::max()),
      stream_id_(
          QuicUtils::GetInvalidStreamId(connection->transport_version())),
      frame_len_(0),
      fin_(false),
      spdy_framer_(SpdyFramer::ENABLE_COMPRESSION),
      spdy_framer_visitor_(new SpdyFramerVisitor(this)),
      debug_visitor_(nullptr),
      // Sentinel value checked by QUIC_BUG_IF to detect use-after-free.
      destruction_indicator_(123456789),
      // Extended CONNECT (RFC 9220) is only advertised by HTTP/3 servers.
      allow_extended_connect_(perspective() == Perspective::IS_SERVER &&
                              VersionUsesHttp3(transport_version())),
      force_buffer_requests_until_settings_(false) {
  h2_deframer_.set_visitor(spdy_framer_visitor_.get());
  h2_deframer_.set_debug_visitor(spdy_framer_visitor_.get());
  spdy_framer_.set_debug_visitor(spdy_framer_visitor_.get());
}
QuicSpdySession::~QuicSpdySession() {
  // Detect double-destruction / use-after-free: the sentinel must still hold
  // the live value set in the constructor.
  QUIC_BUG_IF(quic_bug_12477_2, destruction_indicator_ != 123456789)
      << "QuicSpdySession use after free. " << destruction_indicator_
      << QuicStackTrace();
  // Overwrite the sentinel so later access through a dangling pointer trips
  // the matching QUIC_BUG_IF checks elsewhere in this file.
  destruction_indicator_ = 987654321;
}
// Performs version-dependent setup: the HTTP/2-style headers stream for
// pre-HTTP/3 versions, or the QPACK encoder/decoder and unidirectional
// streams for HTTP/3.
void QuicSpdySession::Initialize() {
  QuicSession::Initialize();
  FillSettingsFrame();
  if (!VersionUsesHttp3(transport_version())) {
    if (perspective() == Perspective::IS_SERVER) {
      set_largest_peer_created_stream_id(
          QuicUtils::GetHeadersStreamId(transport_version()));
    } else {
      // The client's first outgoing bidirectional stream must be the
      // headers stream.
      QuicStreamId headers_stream_id = GetNextOutgoingBidirectionalStreamId();
      QUICHE_DCHECK_EQ(headers_stream_id,
                       QuicUtils::GetHeadersStreamId(transport_version()));
    }
    auto headers_stream = std::make_unique<QuicHeadersStream>((this));
    QUICHE_DCHECK_EQ(QuicUtils::GetHeadersStreamId(transport_version()),
                     headers_stream->id());
    headers_stream_ = headers_stream.get();
    ActivateStream(std::move(headers_stream));
  } else {
    qpack_encoder_ = std::make_unique<QpackEncoder>(this, huffman_encoding_,
                                                    cookie_crumbling_);
    qpack_decoder_ =
        std::make_unique<QpackDecoder>(qpack_maximum_dynamic_table_capacity_,
                                       qpack_maximum_blocked_streams_, this);
    MaybeInitializeHttp3UnidirectionalStreams();
  }
  spdy_framer_visitor_->set_max_header_list_size(max_inbound_header_list_size_);
  // Limit the HPACK decode buffer; 2x leaves headroom for expansion during
  // decoding of a header block up to the inbound list limit.
  h2_deframer_.GetHpackDecoder().set_max_decode_buffer_size_bytes(
      2 * max_inbound_header_list_size_);
}
// Populates |settings_| with the values this endpoint will advertise:
// QPACK limits, header list size, HTTP datagram support, WebTransport
// support, and extended CONNECT.
void QuicSpdySession::FillSettingsFrame() {
  settings_.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] =
      qpack_maximum_dynamic_table_capacity_;
  settings_.values[SETTINGS_QPACK_BLOCKED_STREAMS] =
      qpack_maximum_blocked_streams_;
  settings_.values[SETTINGS_MAX_FIELD_SECTION_SIZE] =
      max_inbound_header_list_size_;
  if (version().UsesHttp3()) {
    // Advertise the supported HTTP datagram draft/RFC variants.
    switch (LocalHttpDatagramSupport()) {
      case HttpDatagramSupport::kNone:
        break;
      case HttpDatagramSupport::kDraft04:
        settings_.values[SETTINGS_H3_DATAGRAM_DRAFT04] = 1;
        break;
      case HttpDatagramSupport::kRfc:
        settings_.values[SETTINGS_H3_DATAGRAM] = 1;
        break;
      case HttpDatagramSupport::kRfcAndDraft04:
        settings_.values[SETTINGS_H3_DATAGRAM] = 1;
        settings_.values[SETTINGS_H3_DATAGRAM_DRAFT04] = 1;
        break;
    }
  }
  if (WillNegotiateWebTransport()) {
    WebTransportHttp3VersionSet versions =
        LocallySupportedWebTransportVersions();
    if (versions.IsSet(WebTransportHttp3Version::kDraft02)) {
      settings_.values[SETTINGS_WEBTRANS_DRAFT00] = 1;
    }
    if (versions.IsSet(WebTransportHttp3Version::kDraft07)) {
      // Draft-07 WebTransport requires extended CONNECT on the server.
      QUICHE_BUG_IF(
          WT_enabled_extended_connect_disabled,
          perspective() == Perspective::IS_SERVER && !allow_extended_connect())
          << "WebTransport enabled, but extended CONNECT is not";
      settings_.values[SETTINGS_WEBTRANS_MAX_SESSIONS_DRAFT07] =
          kDefaultMaxWebTransportSessions;
    }
  }
  if (allow_extended_connect()) {
    settings_.values[SETTINGS_ENABLE_CONNECT_PROTOCOL] = 1;
  }
}
// A QPACK decoder stream error is fatal to the whole connection.
void QuicSpdySession::OnDecoderStreamError(QuicErrorCode error_code,
                                           absl::string_view error_message) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  const std::string details =
      absl::StrCat("Decoder stream error: ", error_message);
  CloseConnectionWithDetails(error_code, details);
}
// A QPACK encoder stream error is fatal to the whole connection.
void QuicSpdySession::OnEncoderStreamError(QuicErrorCode error_code,
                                           absl::string_view error_message) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  const std::string details =
      absl::StrCat("Encoder stream error: ", error_message);
  CloseConnectionWithDetails(error_code, details);
}
// Forwards a headers-carried priority to the stream; a null stream (e.g.
// already closed) silently drops the update.
void QuicSpdySession::OnStreamHeadersPriority(
    QuicStreamId stream_id, const spdy::SpdyStreamPrecedence& precedence) {
  if (QuicSpdyStream* stream = GetOrCreateSpdyDataStream(stream_id)) {
    stream->OnStreamHeadersPriority(precedence);
  }
}
// Delivers a complete decoded header list to the target stream.  If the
// stream no longer exists, trailers may still carry the final byte offset
// needed for flow-control accounting.
void QuicSpdySession::OnStreamHeaderList(QuicStreamId stream_id, bool fin,
                                         size_t frame_len,
                                         const QuicHeaderList& header_list) {
  if (IsStaticStream(stream_id)) {
    connection()->CloseConnection(
        QUIC_INVALID_HEADERS_STREAM_DATA, "stream is static",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  QuicSpdyStream* stream = GetOrCreateSpdyDataStream(stream_id);
  if (stream == nullptr) {
    // The stream is already closed; scan the trailers for the special
    // final-offset key so flow control can still be reconciled.
    size_t final_byte_offset = 0;
    for (const auto& header : header_list) {
      const std::string& header_key = header.first;
      const std::string& header_value = header.second;
      if (header_key == kFinalOffsetHeaderKey) {
        if (!absl::SimpleAtoi(header_value, &final_byte_offset)) {
          connection()->CloseConnection(
              QUIC_INVALID_HEADERS_STREAM_DATA,
              "Trailers are malformed (no final offset)",
              ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
          return;
        }
        QUIC_DVLOG(1) << ENDPOINT
                      << "Received final byte offset in trailers for stream "
                      << stream_id << ", which no longer exists.";
        OnFinalByteOffsetReceived(stream_id, final_byte_offset);
      }
    }
    return;
  }
  stream->OnStreamHeaderList(fin, frame_len, header_list);
}
// Forwards an HTTP/2 PRIORITY frame to the stream; dropped if the stream
// cannot be found or created.
void QuicSpdySession::OnPriorityFrame(
    QuicStreamId stream_id, const spdy::SpdyStreamPrecedence& precedence) {
  if (QuicSpdyStream* stream = GetOrCreateSpdyDataStream(stream_id)) {
    stream->OnPriorityFrame(precedence);
  }
}
// Handles a PRIORITY_UPDATE frame targeting a request stream.  Returns false
// (after closing the connection) on protocol violations; otherwise applies
// the priority immediately or buffers it until the stream is created.
bool QuicSpdySession::OnPriorityUpdateForRequestStream(
    QuicStreamId stream_id, HttpStreamPriority priority) {
  // Only servers act on PRIORITY_UPDATE, and only for client-initiated
  // bidirectional (request) streams.
  if (perspective() == Perspective::IS_CLIENT ||
      !QuicUtils::IsBidirectionalStreamId(stream_id, version()) ||
      !QuicUtils::IsClientInitiatedStreamId(transport_version(), stream_id)) {
    return true;
  }
  // Reject stream IDs beyond what this endpoint has advertised.
  QuicStreamCount advertised_max_incoming_bidirectional_streams =
      GetAdvertisedMaxIncomingBidirectionalStreams();
  if (advertised_max_incoming_bidirectional_streams == 0 ||
      stream_id > QuicUtils::GetFirstBidirectionalStreamId(
                      transport_version(), Perspective::IS_CLIENT) +
                      QuicUtils::StreamIdDelta(transport_version()) *
                          (advertised_max_incoming_bidirectional_streams - 1)) {
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID,
        "PRIORITY_UPDATE frame received for invalid stream.",
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  if (MaybeSetStreamPriority(stream_id, QuicStreamPriority(priority))) {
    return true;
  }
  // Updates for already-closed streams are ignored.
  if (IsClosedStream(stream_id)) {
    return true;
  }
  // Stream not created yet: buffer the priority, bounded to protect memory.
  buffered_stream_priorities_[stream_id] = priority;
  if (buffered_stream_priorities_.size() >
      10 * max_open_incoming_bidirectional_streams()) {
    std::string error_message =
        absl::StrCat("Too many stream priority values buffered: ",
                     buffered_stream_priorities_.size(),
                     ", which should not exceed the incoming stream limit of ",
                     max_open_incoming_bidirectional_streams());
    QUIC_BUG(quic_bug_10360_2) << error_message;
    connection()->CloseConnection(
        QUIC_INTERNAL_ERROR, error_message,
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return false;
  }
  return true;
}
// Feeds raw headers-stream bytes to the HTTP/2 deframer; returns the number
// of bytes consumed.
size_t QuicSpdySession::ProcessHeaderData(const struct iovec& iov) {
  // Use-after-free canary; see destruction_indicator_ in the destructor.
  QUIC_BUG_IF(quic_bug_12477_4, destruction_indicator_ != 123456789)
      << "QuicSpdyStream use after free. " << destruction_indicator_
      << QuicStackTrace();
  return h2_deframer_.ProcessInput(static_cast<char*>(iov.iov_base),
                                   iov.iov_len);
}
// Serializes |headers| as an HTTP/2 HEADERS frame on the headers stream,
// translating the SPDY3 priority to an HTTP/2 weight.  Returns the frame
// size in bytes.  Pre-HTTP/3 versions only.
size_t QuicSpdySession::WriteHeadersOnHeadersStream(
    QuicStreamId id, HttpHeaderBlock headers, bool fin,
    const spdy::SpdyStreamPrecedence& precedence,
    quiche::QuicheReferenceCountedPointer<QuicAckListenerInterface>
        ack_listener) {
  QUICHE_DCHECK(!VersionUsesHttp3(transport_version()));
  return WriteHeadersOnHeadersStreamImpl(
      id, std::move(headers), fin,
      /* parent_stream_id = */ 0,
      Spdy3PriorityToHttp2Weight(precedence.spdy3_priority()),
      /* exclusive = */ false, std::move(ack_listener));
}
// Serializes and writes an HTTP/2 PRIORITY frame on the headers stream.
// Returns the serialized frame size.  Pre-HTTP/3 versions only.
size_t QuicSpdySession::WritePriority(QuicStreamId stream_id,
                                      QuicStreamId parent_stream_id, int weight,
                                      bool exclusive) {
  QUICHE_DCHECK(!VersionUsesHttp3(transport_version()));
  SpdyPriorityIR priority_frame(stream_id, parent_stream_id, weight, exclusive);
  SpdySerializedFrame frame = spdy_framer_.SerializeFrame(priority_frame);
  const size_t frame_size = frame.size();
  headers_stream()->WriteOrBufferData(
      absl::string_view(frame.data(), frame_size), false, nullptr);
  return frame_size;
}
// Writes an HTTP/3 PRIORITY_UPDATE frame for |stream_id| on the send control
// stream.  HTTP/3 versions only.
void QuicSpdySession::WriteHttp3PriorityUpdate(QuicStreamId stream_id,
                                               HttpStreamPriority priority) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  send_control_stream_->WritePriorityUpdate(stream_id, priority);
}
// Handles a received HTTP/3 GOAWAY frame.  GOAWAY IDs must be
// non-increasing across frames; clients additionally validate the stream ID
// and notify WebTransport sessions of graceful shutdown.
void QuicSpdySession::OnHttp3GoAway(uint64_t id) {
  QUIC_BUG_IF(quic_bug_12477_5, !version().UsesHttp3())
      << "HTTP/3 GOAWAY received on version " << version();
  if (last_received_http3_goaway_id_.has_value() &&
      id > *last_received_http3_goaway_id_) {
    CloseConnectionWithDetails(
        QUIC_HTTP_GOAWAY_ID_LARGER_THAN_PREVIOUS,
        absl::StrCat("GOAWAY received with ID ", id,
                     " greater than previously received ID ",
                     *last_received_http3_goaway_id_));
    return;
  }
  last_received_http3_goaway_id_ = id;
  if (perspective() == Perspective::IS_SERVER) {
    return;
  }
  // On the client, the GOAWAY ID must identify a client-initiated
  // bidirectional (request) stream.
  QuicStreamId stream_id = static_cast<QuicStreamId>(id);
  if (!QuicUtils::IsBidirectionalStreamId(stream_id, version()) ||
      IsIncomingStream(stream_id)) {
    CloseConnectionWithDetails(QUIC_HTTP_GOAWAY_INVALID_STREAM_ID,
                               "GOAWAY with invalid stream ID");
    return;
  }
  if (SupportsWebTransport()) {
    // Notify every active WebTransport session running over a
    // client-initiated bidirectional stream.
    PerformActionOnActiveStreams([](QuicStream* stream) {
      if (!QuicUtils::IsBidirectionalStreamId(stream->id(),
                                              stream->version()) ||
          !QuicUtils::IsClientInitiatedStreamId(
              stream->version().transport_version, stream->id())) {
        return true;
      }
      QuicSpdyStream* spdy_stream = static_cast<QuicSpdyStream*>(stream);
      WebTransportHttp3* web_transport = spdy_stream->web_transport();
      if (web_transport == nullptr) {
        return true;
      }
      web_transport->OnGoAwayReceived();
      return true;
    });
  }
}
// Handles STREAMS_BLOCKED; a server seeing the peer blocked at the maximum
// stream count responds with GOAWAY so the client can wind down gracefully.
bool QuicSpdySession::OnStreamsBlockedFrame(
    const QuicStreamsBlockedFrame& frame) {
  if (!QuicSession::OnStreamsBlockedFrame(frame)) {
    return false;
  }
  const bool peer_at_max_streams =
      perspective() == Perspective::IS_SERVER &&
      frame.stream_count >= QuicUtils::GetMaxStreamCount();
  if (peer_at_max_streams) {
    QUICHE_DCHECK_EQ(frame.stream_count, QuicUtils::GetMaxStreamCount());
    SendHttp3GoAway(QUIC_PEER_GOING_AWAY, "stream count too large");
  }
  return true;
}
// Sends an HTTP/3 GOAWAY frame.  If encryption is not yet established the
// connection is closed instead, since GOAWAY cannot be delivered safely.
// GOAWAY IDs must be non-increasing, so a second call with a larger or equal
// ID is a no-op.
void QuicSpdySession::SendHttp3GoAway(QuicErrorCode error_code,
                                      const std::string& reason) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  if (!IsEncryptionEstablished()) {
    QUIC_CODE_COUNT(quic_h3_goaway_before_encryption_established);
    connection()->CloseConnection(
        error_code, reason,
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return;
  }
  // Stop raising the incoming stream limit once we are going away.
  ietf_streamid_manager().StopIncreasingIncomingMaxStreams();
  QuicStreamId stream_id =
      QuicUtils::GetMaxClientInitiatedBidirectionalStreamId(
          transport_version());
  if (last_sent_http3_goaway_id_.has_value() &&
      *last_sent_http3_goaway_id_ <= stream_id) {
    return;
  }
  send_control_stream_->SendGoAway(stream_id);
  last_sent_http3_goaway_id_ = stream_id;
}
// Sends the SETTINGS frame and any version-specific follow-up data.  Only
// HTTP/3 sessions have initial control-stream data to send.
void QuicSpdySession::SendInitialData() {
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  // Bundle SETTINGS and follow-on writes into as few packets as possible.
  QuicConnection::ScopedPacketFlusher flusher(connection());
  send_control_stream_->MaybeSendSettingsFrame();
  SendInitialDataAfterSettings();
}
// The QPACK decoder send stream is always considered writable; other streams
// defer to the base-class check.
bool QuicSpdySession::CheckStreamWriteBlocked(QuicStream* stream) const {
  const bool is_qpack_decoder_send_stream =
      qpack_decoder_send_stream_ != nullptr &&
      stream->id() == qpack_decoder_send_stream_->id();
  if (is_qpack_decoder_send_stream) {
    return true;
  }
  return QuicSession::CheckStreamWriteBlocked(stream);
}
// Returns the QPACK encoder; only valid for HTTP/3 sessions, where it is
// created during Initialize().
QpackEncoder* QuicSpdySession::qpack_encoder() {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  return qpack_encoder_.get();
}
// Returns the QPACK decoder; only valid for HTTP/3 sessions, where it is
// created during Initialize().
QpackDecoder* QuicSpdySession::qpack_decoder() {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  return qpack_decoder_.get();
}
// Applies a buffered PRIORITY_UPDATE (one that arrived before the stream
// existed) to the newly created stream, if any.
void QuicSpdySession::OnStreamCreated(QuicSpdyStream* stream) {
  const auto buffered = buffered_stream_priorities_.find(stream->id());
  if (buffered == buffered_stream_priorities_.end()) {
    return;
  }
  stream->SetPriority(QuicStreamPriority(buffered->second));
  buffered_stream_priorities_.erase(buffered);
}
// Returns the data stream for |stream_id|, creating it if necessary.
// Returns nullptr (and closes the connection) if the ID names a static
// stream, which must never be treated as a data stream.
QuicSpdyStream* QuicSpdySession::GetOrCreateSpdyDataStream(
    const QuicStreamId stream_id) {
  QuicStream* stream = GetOrCreateStream(stream_id);
  if (stream && stream->is_static()) {
    QUIC_BUG(quic_bug_10360_5)
        << "GetOrCreateSpdyDataStream returns static stream " << stream_id
        << " in version " << transport_version() << "\n"
        << QuicStackTrace();
    connection()->CloseConnection(
        QUIC_INVALID_STREAM_ID,
        absl::StrCat("stream ", stream_id, " is static"),
        ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
    return nullptr;
  }
  return static_cast<QuicSpdyStream*>(stream);
}
// Installs the new key via the base class, then sends initial data (e.g.
// SETTINGS) as soon as encryption is established.
void QuicSpdySession::OnNewEncryptionKeyAvailable(
    EncryptionLevel level, std::unique_ptr<QuicEncrypter> encrypter) {
  QuicSession::OnNewEncryptionKeyAvailable(level, std::move(encrypter));
  if (IsEncryptionEstablished()) {
    SendInitialData();
  }
}
// WebTransport is negotiated iff at least one version is locally supported.
bool QuicSpdySession::ShouldNegotiateWebTransport() const {
  return LocallySupportedWebTransportVersions().Any();
}
// Base class supports no WebTransport versions; subclasses override to
// advertise support.
WebTransportHttp3VersionSet
QuicSpdySession::LocallySupportedWebTransportVersions() const {
  return WebTransportHttp3VersionSet();
}
// WebTransport requires HTTP/3 plus some form of HTTP datagram support.
bool QuicSpdySession::WillNegotiateWebTransport() {
  if (LocalHttpDatagramSupport() == HttpDatagramSupport::kNone) {
    return false;
  }
  return version().UsesHttp3() && ShouldNegotiateWebTransport();
}
// Keeps the connection alive while any active or pending stream exists.
bool QuicSpdySession::ShouldKeepConnectionAlive() const {
  // Pending streams only exist for HTTP/3 versions.
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()) ||
                0u == pending_streams_size());
  return GetNumActiveStreams() + pending_streams_size() > 0;
}
// HTTP/3 read-unidirectional streams buffer STREAM and RST_STREAM frames in
// a pending stream until the stream type byte has been read.
bool QuicSpdySession::UsesPendingStreamForFrame(QuicFrameType type,
                                                QuicStreamId stream_id) const {
  if (!VersionUsesHttp3(transport_version())) {
    return false;
  }
  if (type != STREAM_FRAME && type != RST_STREAM_FRAME) {
    return false;
  }
  return QuicUtils::GetStreamType(stream_id, perspective(),
                                  IsIncomingStream(stream_id),
                                  version()) == READ_UNIDIRECTIONAL;
}
// Serializes and writes an HTTP/2 HEADERS frame on the headers stream and
// logs the compression ratio.  Returns the serialized frame size.
// Pre-HTTP/3 versions only.
size_t QuicSpdySession::WriteHeadersOnHeadersStreamImpl(
    QuicStreamId id, quiche::HttpHeaderBlock headers, bool fin,
    QuicStreamId parent_stream_id, int weight, bool exclusive,
    quiche::QuicheReferenceCountedPointer<QuicAckListenerInterface>
        ack_listener) {
  QUICHE_DCHECK(!VersionUsesHttp3(transport_version()));
  const QuicByteCount uncompressed_size = headers.TotalBytesUsed();
  SpdyHeadersIR headers_frame(id, std::move(headers));
  headers_frame.set_fin(fin);
  // Clients attach HTTP/2 priority fields to the HEADERS frame.
  if (perspective() == Perspective::IS_CLIENT) {
    headers_frame.set_has_priority(true);
    headers_frame.set_parent_stream_id(parent_stream_id);
    headers_frame.set_weight(weight);
    headers_frame.set_exclusive(exclusive);
  }
  SpdySerializedFrame frame(spdy_framer_.SerializeFrame(headers_frame));
  headers_stream()->WriteOrBufferData(
      absl::string_view(frame.data(), frame.size()), false,
      std::move(ack_listener));
  // Compare only the payloads: subtract the frame header, and for clients
  // the 5 priority bytes (presumably 4-byte stream dependency + 1-byte
  // weight) added above — NOTE(review): confirm against the HTTP/2 framing.
  QuicByteCount compressed_size = frame.size();
  compressed_size -= spdy::kFrameHeaderSize;
  if (perspective() == Perspective::IS_CLIENT) {
    compressed_size -= 5;
  }
  LogHeaderCompressionRatioHistogram(
      false,
      true, compressed_size, uncompressed_size);
  return frame.size();
}
// Restores the server's SETTINGS from a cached serialization (0-RTT
// resumption on the client).  Returns false if the cached bytes do not
// decode as a SETTINGS frame.
bool QuicSpdySession::ResumeApplicationState(ApplicationState* cached_state) {
  QUICHE_DCHECK_EQ(perspective(), Perspective::IS_CLIENT);
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  SettingsFrame out;
  if (!HttpDecoder::DecodeSettings(
          reinterpret_cast<char*>(cached_state->data()), cached_state->size(),
          &out)) {
    return false;
  }
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnSettingsFrameResumed(out);
  }
  QUICHE_DCHECK(streams_waiting_for_settings_.empty());
  // Apply each cached setting as if it were freshly received.
  for (const auto& setting : out.values) {
    OnSetting(setting.first, setting.second);
  }
  return true;
}
// Decodes HTTP/3 frames carried in the TLS ALPS extension.  Returns an error
// string on failure, std::nullopt on success.  Error precedence: frame-level
// errors first, then decoder errors, then an incomplete trailing frame.
std::optional<std::string> QuicSpdySession::OnAlpsData(const uint8_t* alps_data,
                                                       size_t alps_length) {
  AlpsFrameDecoder alps_frame_decoder(this);
  HttpDecoder decoder(&alps_frame_decoder);
  decoder.ProcessInput(reinterpret_cast<const char*>(alps_data), alps_length);
  if (alps_frame_decoder.error_detail()) {
    return alps_frame_decoder.error_detail();
  }
  if (decoder.error() != QUIC_NO_ERROR) {
    return decoder.error_detail();
  }
  // The ALPS payload must contain whole frames only.
  if (!decoder.AtFrameBoundary()) {
    return "incomplete HTTP/3 frame";
  }
  return std::nullopt;
}
// Notifies the debug visitor (if any) about an ACCEPT_CH frame received via
// the TLS ALPS extension.
void QuicSpdySession::OnAcceptChFrameReceivedViaAlps(
    const AcceptChFrame& frame) {
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnAcceptChFrameReceivedViaAlps(frame);
  }
}
// Handles the peer's SETTINGS frame from the control stream.  Applies each
// setting, validates WebTransport consistency, then unblocks any request
// streams that were buffered waiting for SETTINGS.
bool QuicSpdySession::OnSettingsFrame(const SettingsFrame& frame) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnSettingsFrameReceived(frame);
  }
  for (const auto& setting : frame.values) {
    if (!OnSetting(setting.first, setting.second)) {
      return false;
    }
  }
  if (!ValidateWebTransportSettingsConsistency()) {
    return false;
  }
  // Only one SETTINGS frame may ever be processed per connection.
  QUICHE_DCHECK(!settings_received_);
  settings_received_ = true;
  // Wake streams whose request processing was deferred until SETTINGS.
  for (QuicStreamId stream_id : streams_waiting_for_settings_) {
    QUICHE_RELOADABLE_FLAG_COUNT_N(quic_block_until_settings_received_copt, 4,
                                   4);
    QUICHE_DCHECK(ShouldBufferRequestsUntilSettings());
    QuicSpdyStream* stream = GetOrCreateSpdyDataStream(stream_id);
    if (stream == nullptr) {
      continue;
    }
    stream->OnDataAvailable();
  }
  streams_waiting_for_settings_.clear();
  return true;
}
// Verifies that a negotiated WebTransport version (draft-07 or later) is
// accompanied by extended CONNECT and RFC-style HTTP datagram support.
// Closes the connection and returns false on any inconsistency.
bool QuicSpdySession::ValidateWebTransportSettingsConsistency() {
  std::optional<WebTransportHttp3Version> version =
      NegotiatedWebTransportVersion();
  // Draft-02 (or no WebTransport at all) has no extra requirements.
  if (!version.has_value() || *version == WebTransportHttp3Version::kDraft02) {
    return true;
  }
  if (!allow_extended_connect_) {
    CloseConnectionWithDetails(
        QUIC_HTTP_INVALID_SETTING_VALUE,
        "Negotiated use of WebTransport over HTTP/3 (draft-07 or later), but "
        "failed to negotiate extended CONNECT");
    return false;
  }
  if (http_datagram_support_ == HttpDatagramSupport::kDraft04) {
    CloseConnectionWithDetails(
        QUIC_HTTP_INVALID_SETTING_VALUE,
        "WebTransport over HTTP/3 version draft-07 and beyond requires the "
        "RFC version of HTTP datagrams");
    return false;
  }
  if (http_datagram_support_ != HttpDatagramSupport::kRfc) {
    CloseConnectionWithDetails(
        QUIC_HTTP_INVALID_SETTING_VALUE,
        "WebTransport over HTTP/3 requires HTTP datagrams support");
    return false;
  }
  return true;
}
// Applies a SETTINGS frame received via the TLS ALPS extension.  Returns an
// error string if any setting is rejected, std::nullopt on success.
std::optional<std::string> QuicSpdySession::OnSettingsFrameViaAlps(
    const SettingsFrame& frame) {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  if (debug_visitor_ != nullptr) {
    debug_visitor_->OnSettingsFrameReceivedViaAlps(frame);
  }
  for (const auto& [id, value] : frame.values) {
    if (!OnSetting(id, value)) {
      return "error parsing setting";
    }
  }
  return std::nullopt;
}
// Validates a boolean-valued setting.  Any value other than 0 or 1 closes
// the connection and returns false.
bool QuicSpdySession::VerifySettingIsZeroOrOne(uint64_t id, uint64_t value) {
  if (value == 0 || value == 1) {
    return true;
  }
  std::string error_details = absl::StrCat(
      "Received ",
      H3SettingsToString(static_cast<Http3AndQpackSettingsIdentifiers>(id)),
      " with invalid value ", value);
  QUIC_PEER_BUG(bad received setting) << ENDPOINT << error_details;
  CloseConnectionWithDetails(QUIC_HTTP_INVALID_SETTING_VALUE, error_details);
  return false;
}
// Applies one SETTINGS entry from the peer. The first switch handles HTTP/3
// settings identifiers; the trailing switch handles HTTP/2-over-Google-QUIC
// settings. Returns false iff the setting is invalid and the connection has
// been closed as a result.
bool QuicSpdySession::OnSetting(uint64_t id, uint64_t value) {
  if (VersionUsesHttp3(transport_version())) {
    switch (id) {
      case SETTINGS_QPACK_MAX_TABLE_CAPACITY: {
        QUIC_DVLOG(1)
            << ENDPOINT
            << "SETTINGS_QPACK_MAX_TABLE_CAPACITY received with value "
            << value;
        // A failure here means the value regressed relative to what was
        // assumed during 0-RTT; that is fatal for the resumption attempt.
        if (!qpack_encoder_->SetMaximumDynamicTableCapacity(value)) {
          CloseConnectionWithDetails(
              was_zero_rtt_rejected()
                  ? QUIC_HTTP_ZERO_RTT_REJECTION_SETTINGS_MISMATCH
                  : QUIC_HTTP_ZERO_RTT_RESUMPTION_SETTINGS_MISMATCH,
              absl::StrCat(was_zero_rtt_rejected()
                               ? "Server rejected 0-RTT, aborting because "
                               : "",
                           "Server sent an SETTINGS_QPACK_MAX_TABLE_CAPACITY: ",
                           value, " while current value is: ",
                           qpack_encoder_->MaximumDynamicTableCapacity()));
          return false;
        }
        // Use the smaller of the peer's limit and our own configured cap.
        qpack_encoder_->SetDynamicTableCapacity(
            std::min(value, qpack_maximum_dynamic_table_capacity_));
        break;
      }
      case SETTINGS_MAX_FIELD_SECTION_SIZE:
        QUIC_DVLOG(1) << ENDPOINT
                      << "SETTINGS_MAX_FIELD_SECTION_SIZE received with value "
                      << value;
        // The limit may never shrink below a previously advertised value
        // during 0-RTT resumption.
        if (max_outbound_header_list_size_ !=
                std::numeric_limits<size_t>::max() &&
            max_outbound_header_list_size_ > value) {
          CloseConnectionWithDetails(
              was_zero_rtt_rejected()
                  ? QUIC_HTTP_ZERO_RTT_REJECTION_SETTINGS_MISMATCH
                  : QUIC_HTTP_ZERO_RTT_RESUMPTION_SETTINGS_MISMATCH,
              absl::StrCat(was_zero_rtt_rejected()
                               ? "Server rejected 0-RTT, aborting because "
                               : "",
                           "Server sent an SETTINGS_MAX_FIELD_SECTION_SIZE: ",
                           value, " which reduces current value: ",
                           max_outbound_header_list_size_));
          return false;
        }
        max_outbound_header_list_size_ = value;
        break;
      case SETTINGS_QPACK_BLOCKED_STREAMS: {
        QUIC_DVLOG(1) << ENDPOINT
                      << "SETTINGS_QPACK_BLOCKED_STREAMS received with value "
                      << value;
        if (!qpack_encoder_->SetMaximumBlockedStreams(value)) {
          CloseConnectionWithDetails(
              was_zero_rtt_rejected()
                  ? QUIC_HTTP_ZERO_RTT_REJECTION_SETTINGS_MISMATCH
                  : QUIC_HTTP_ZERO_RTT_RESUMPTION_SETTINGS_MISMATCH,
              absl::StrCat(was_zero_rtt_rejected()
                               ? "Server rejected 0-RTT, aborting because "
                               : "",
                           "Server sent an SETTINGS_QPACK_BLOCKED_STREAMS: ",
                           value, " which reduces current value: ",
                           qpack_encoder_->maximum_blocked_streams()));
          return false;
        }
        break;
      }
      case SETTINGS_ENABLE_CONNECT_PROTOCOL: {
        QUIC_DVLOG(1) << ENDPOINT
                      << "SETTINGS_ENABLE_CONNECT_PROTOCOL received with value "
                      << value;
        if (!VerifySettingIsZeroOrOne(id, value)) {
          return false;
        }
        // Only the client reacts: the server advertises this setting and the
        // client records whether extended CONNECT is permitted.
        if (perspective() == Perspective::IS_CLIENT) {
          allow_extended_connect_ = value != 0;
        }
        break;
      }
      // HTTP/2-only settings are forbidden in an HTTP/3 session.
      case spdy::SETTINGS_ENABLE_PUSH:
        ABSL_FALLTHROUGH_INTENDED;
      case spdy::SETTINGS_MAX_CONCURRENT_STREAMS:
        ABSL_FALLTHROUGH_INTENDED;
      case spdy::SETTINGS_INITIAL_WINDOW_SIZE:
        ABSL_FALLTHROUGH_INTENDED;
      case spdy::SETTINGS_MAX_FRAME_SIZE:
        CloseConnectionWithDetails(
            QUIC_HTTP_RECEIVE_SPDY_SETTING,
            absl::StrCat("received HTTP/2 specific setting in HTTP/3 session: ",
                         id));
        return false;
      case SETTINGS_H3_DATAGRAM_DRAFT04: {
        // Ignored unless we locally support the draft-04 datagram flavor.
        HttpDatagramSupport local_http_datagram_support =
            LocalHttpDatagramSupport();
        if (local_http_datagram_support != HttpDatagramSupport::kDraft04 &&
            local_http_datagram_support !=
                HttpDatagramSupport::kRfcAndDraft04) {
          break;
        }
        QUIC_DVLOG(1) << ENDPOINT
                      << "SETTINGS_H3_DATAGRAM_DRAFT04 received with value "
                      << value;
        if (!version().UsesHttp3()) {
          break;
        }
        if (!VerifySettingIsZeroOrOne(id, value)) {
          return false;
        }
        // The RFC flavor takes precedence if it was already negotiated.
        if (value && http_datagram_support_ != HttpDatagramSupport::kRfc) {
          http_datagram_support_ = HttpDatagramSupport::kDraft04;
        }
        break;
      }
      case SETTINGS_H3_DATAGRAM: {
        // Ignored unless we locally support the RFC datagram flavor.
        HttpDatagramSupport local_http_datagram_support =
            LocalHttpDatagramSupport();
        if (local_http_datagram_support != HttpDatagramSupport::kRfc &&
            local_http_datagram_support !=
                HttpDatagramSupport::kRfcAndDraft04) {
          break;
        }
        QUIC_DVLOG(1) << ENDPOINT << "SETTINGS_H3_DATAGRAM received with value "
                      << value;
        if (!version().UsesHttp3()) {
          break;
        }
        if (!VerifySettingIsZeroOrOne(id, value)) {
          return false;
        }
        if (value) {
          http_datagram_support_ = HttpDatagramSupport::kRfc;
        }
        break;
      }
      case SETTINGS_WEBTRANS_DRAFT00:
        if (!WillNegotiateWebTransport()) {
          break;
        }
        QUIC_DVLOG(1) << ENDPOINT
                      << "SETTINGS_ENABLE_WEBTRANSPORT(02) received with value "
                      << value;
        if (!VerifySettingIsZeroOrOne(id, value)) {
          return false;
        }
        if (value == 1) {
          peer_web_transport_versions_.Set(WebTransportHttp3Version::kDraft02);
          // Draft-02 WebTransport implies extended CONNECT on the client.
          if (perspective() == Perspective::IS_CLIENT) {
            allow_extended_connect_ = true;
          }
        }
        break;
      case SETTINGS_WEBTRANS_MAX_SESSIONS_DRAFT07:
        if (!WillNegotiateWebTransport()) {
          break;
        }
        QUIC_DVLOG(1)
            << ENDPOINT
            << "SETTINGS_WEBTRANS_MAX_SESSIONS_DRAFT07 received with value "
            << value;
        // A non-zero session limit signals draft-07 support.
        if (value > 0) {
          peer_web_transport_versions_.Set(WebTransportHttp3Version::kDraft07);
          if (perspective() == Perspective::IS_CLIENT) {
            max_webtransport_sessions_[WebTransportHttp3Version::kDraft07] =
                value;
          }
        }
        break;
      default:
        // Unknown settings must be ignored per the protocol.
        QUIC_DVLOG(1) << ENDPOINT << "Unknown setting identifier " << id
                      << " received with value " << value;
        break;
    }
    return true;
  }
  // Google QUIC (HTTP/2 framing over QUIC) settings.
  switch (id) {
    case spdy::SETTINGS_HEADER_TABLE_SIZE:
      QUIC_DVLOG(1) << ENDPOINT
                    << "SETTINGS_HEADER_TABLE_SIZE received with value "
                    << value;
      spdy_framer_.UpdateHeaderEncoderTableSize(
          std::min<uint64_t>(value, kHpackEncoderDynamicTableSizeLimit));
      break;
    case spdy::SETTINGS_ENABLE_PUSH:
      if (perspective() == Perspective::IS_SERVER) {
        // See rfc7540, Section 6.5.2.
        if (value > 1) {
          QUIC_DLOG(ERROR) << ENDPOINT << "Invalid value " << value
                           << " received for SETTINGS_ENABLE_PUSH.";
          if (IsConnected()) {
            CloseConnectionWithDetails(
                QUIC_INVALID_HEADERS_STREAM_DATA,
                absl::StrCat("Invalid value for SETTINGS_ENABLE_PUSH: ",
                             value));
          }
          return true;
        }
        QUIC_DVLOG(1) << ENDPOINT << "SETTINGS_ENABLE_PUSH received with value "
                      << value << ", ignoring.";
        break;
      } else {
        // Only a server may legitimately receive ENABLE_PUSH.
        QUIC_DLOG(ERROR)
            << ENDPOINT
            << "Invalid SETTINGS_ENABLE_PUSH received by client with value "
            << value;
        if (IsConnected()) {
          CloseConnectionWithDetails(
              QUIC_INVALID_HEADERS_STREAM_DATA,
              absl::StrCat("Unsupported field of HTTP/2 SETTINGS frame: ", id));
        }
      }
      break;
    case spdy::SETTINGS_MAX_HEADER_LIST_SIZE:
      QUIC_DVLOG(1) << ENDPOINT
                    << "SETTINGS_MAX_HEADER_LIST_SIZE received with value "
                    << value;
      max_outbound_header_list_size_ = value;
      break;
    default:
      QUIC_DLOG(ERROR) << ENDPOINT << "Unknown setting identifier " << id
                       << " received with value " << value;
      if (IsConnected()) {
        CloseConnectionWithDetails(
            QUIC_INVALID_HEADERS_STREAM_DATA,
            absl::StrCat("Unsupported field of HTTP/2 SETTINGS frame: ", id));
      }
  }
  return true;
}
// This session keeps the headers stream sequencer buffer allocated for its
// whole lifetime; subclasses may override to release it when idle.
bool QuicSpdySession::ShouldReleaseHeadersStreamSequencerBuffer() {
  return false;
}
// Invoked by the SPDY framer when a HEADERS frame header is parsed on the
// headers stream (Google QUIC only). Records the stream ID and FIN flag;
// the decoded header fields arrive later via OnHeaderList().
void QuicSpdySession::OnHeaders(SpdyStreamId stream_id, bool has_priority,
                                const spdy::SpdyStreamPrecedence& precedence,
                                bool fin) {
  if (has_priority) {
    // Only a client may attach priority information to request headers.
    if (perspective() == Perspective::IS_CLIENT) {
      CloseConnectionWithDetails(QUIC_INVALID_HEADERS_STREAM_DATA,
                                 "Server must not send priorities.");
      return;
    }
    OnStreamHeadersPriority(stream_id, precedence);
  } else {
    // Conversely, a server requires every client HEADERS to carry priority.
    if (perspective() == Perspective::IS_SERVER) {
      CloseConnectionWithDetails(QUIC_INVALID_HEADERS_STREAM_DATA,
                                 "Client must send priorities.");
      return;
    }
  }
  // At most one header block may be in flight at a time.
  QUICHE_DCHECK_EQ(QuicUtils::GetInvalidStreamId(transport_version()),
                   stream_id_);
  stream_id_ = stream_id;
  fin_ = fin;
}
// Handles a PRIORITY frame from the headers stream. PRIORITY is only valid
// in the client-to-server direction; a client receiving one tears down the
// connection.
void QuicSpdySession::OnPriority(SpdyStreamId stream_id,
                                 const spdy::SpdyStreamPrecedence& precedence) {
  const bool is_client = perspective() == Perspective::IS_CLIENT;
  if (is_client) {
    CloseConnectionWithDetails(QUIC_INVALID_HEADERS_STREAM_DATA,
                               "Server must not send PRIORITY frames.");
    return;
  }
  OnPriorityFrame(stream_id, precedence);
}
// Delivers a fully decoded header list for the header block announced by the
// preceding OnHeaders() call, then resets the per-block parsing state.
void QuicSpdySession::OnHeaderList(const QuicHeaderList& header_list) {
  QUIC_DVLOG(1) << ENDPOINT << "Received header list for stream " << stream_id_
                << ": " << header_list.DebugString();
  // HTTP/3 headers flow through QPACK, not through this path.
  QUICHE_DCHECK(!VersionUsesHttp3(transport_version()));
  OnStreamHeaderList(stream_id_, fin_, frame_len_, header_list);
  // Clear state so the next HEADERS frame starts fresh.
  fin_ = false;
  frame_len_ = 0;
  stream_id_ = QuicUtils::GetInvalidStreamId(transport_version());
}
// Accumulates the compressed (wire) size of the header block currently being
// parsed; the total is reported and reset in OnHeaderList().
void QuicSpdySession::OnCompressedFrameSize(size_t frame_len) {
  frame_len_ += frame_len;
}
// Convenience wrapper: closes the connection with the given error code and
// detail string, always sending a CONNECTION_CLOSE packet to the peer.
void QuicSpdySession::CloseConnectionWithDetails(QuicErrorCode error,
                                                 const std::string& details) {
  connection()->CloseConnection(
      error, details, ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
}
// Returns true if any request stream is still open or draining.
bool QuicSpdySession::HasActiveRequestStreams() const {
  const auto open_or_draining = GetNumActiveStreams() + num_draining_streams();
  return open_or_draining > 0;
}
// Examines a peer-initiated unidirectional stream once its first bytes are
// readable, reads the varint stream-type prefix, and promotes the pending
// stream into the matching concrete stream (control, QPACK encoder/decoder,
// or WebTransport). Returns the activated stream, or nullptr if the type is
// not yet readable, is a duplicate, or is unknown (in which case reading is
// stopped and STOP_SENDING is sent).
QuicStream* QuicSpdySession::ProcessReadUnidirectionalPendingStream(
    PendingStream* pending) {
  struct iovec iov;
  if (!pending->sequencer()->GetReadableRegion(&iov)) {
    // No contiguous data at the read offset yet; retry on the next event.
    return nullptr;
  }
  QuicDataReader reader(static_cast<char*>(iov.iov_base), iov.iov_len);
  uint8_t stream_type_length = reader.PeekVarInt62Length();
  uint64_t stream_type = 0;
  if (!reader.ReadVarInt62(&stream_type)) {
    // The varint type is incomplete. If the stream has already been received
    // in full, consume it all so the pending stream can be cleaned up.
    if (pending->sequencer()->NumBytesBuffered() ==
        pending->sequencer()->close_offset()) {
      pending->MarkConsumed(pending->sequencer()->close_offset());
    }
    return nullptr;
  }
  // Consume only the type prefix; remaining payload belongs to the stream.
  pending->MarkConsumed(stream_type_length);
  switch (stream_type) {
    case kControlStream: {
      // HTTP/3 allows exactly one control stream per peer.
      if (receive_control_stream_) {
        CloseConnectionOnDuplicateHttp3UnidirectionalStreams("Control");
        return nullptr;
      }
      auto receive_stream =
          std::make_unique<QuicReceiveControlStream>(pending, this);
      receive_control_stream_ = receive_stream.get();
      ActivateStream(std::move(receive_stream));
      QUIC_DVLOG(1) << ENDPOINT << "Receive Control stream is created";
      if (debug_visitor_ != nullptr) {
        debug_visitor_->OnPeerControlStreamCreated(
            receive_control_stream_->id());
      }
      return receive_control_stream_;
    }
    case kServerPushStream: {
      // Server push is not supported in HTTP/3 here; treat as fatal.
      CloseConnectionWithDetails(QUIC_HTTP_RECEIVE_SERVER_PUSH,
                                 "Received server push stream");
      return nullptr;
    }
    case kQpackEncoderStream: {
      if (qpack_encoder_receive_stream_) {
        CloseConnectionOnDuplicateHttp3UnidirectionalStreams("QPACK encoder");
        return nullptr;
      }
      auto encoder_receive = std::make_unique<QpackReceiveStream>(
          pending, this, qpack_decoder_->encoder_stream_receiver());
      qpack_encoder_receive_stream_ = encoder_receive.get();
      ActivateStream(std::move(encoder_receive));
      QUIC_DVLOG(1) << ENDPOINT << "Receive QPACK Encoder stream is created";
      if (debug_visitor_ != nullptr) {
        debug_visitor_->OnPeerQpackEncoderStreamCreated(
            qpack_encoder_receive_stream_->id());
      }
      return qpack_encoder_receive_stream_;
    }
    case kQpackDecoderStream: {
      if (qpack_decoder_receive_stream_) {
        CloseConnectionOnDuplicateHttp3UnidirectionalStreams("QPACK decoder");
        return nullptr;
      }
      auto decoder_receive = std::make_unique<QpackReceiveStream>(
          pending, this, qpack_encoder_->decoder_stream_receiver());
      qpack_decoder_receive_stream_ = decoder_receive.get();
      ActivateStream(std::move(decoder_receive));
      QUIC_DVLOG(1) << ENDPOINT << "Receive QPACK Decoder stream is created";
      if (debug_visitor_ != nullptr) {
        debug_visitor_->OnPeerQpackDecoderStreamCreated(
            qpack_decoder_receive_stream_->id());
      }
      return qpack_decoder_receive_stream_;
    }
    case kWebTransportUnidirectionalStream: {
      // Ignore unless WebTransport may be negotiated on this session.
      if (!WillNegotiateWebTransport()) {
        break;
      }
      QUIC_DVLOG(1) << ENDPOINT << "Created an incoming WebTransport stream "
                    << pending->id();
      auto stream_owned =
          std::make_unique<WebTransportHttp3UnidirectionalStream>(pending,
                                                                  this);
      WebTransportHttp3UnidirectionalStream* stream = stream_owned.get();
      ActivateStream(std::move(stream_owned));
      return stream;
    }
    default:
      break;
  }
  // Unknown stream type: refuse it without closing the connection.
  MaybeSendStopSendingFrame(
      pending->id(),
      QuicResetStreamError::FromInternal(QUIC_STREAM_STREAM_CREATION_ERROR));
  pending->StopReading();
  return nullptr;
}
// Lazily creates the three locally-initiated HTTP/3 unidirectional streams
// (control, QPACK decoder, QPACK encoder) as stream-limit credit becomes
// available. Safe to call repeatedly; each stream is created at most once.
void QuicSpdySession::MaybeInitializeHttp3UnidirectionalStreams() {
  QUICHE_DCHECK(VersionUsesHttp3(transport_version()));
  if (!send_control_stream_ && CanOpenNextOutgoingUnidirectionalStream()) {
    auto send_control = std::make_unique<QuicSendControlStream>(
        GetNextOutgoingUnidirectionalStreamId(), this, settings_);
    send_control_stream_ = send_control.get();
    ActivateStream(std::move(send_control));
    if (debug_visitor_) {
      debug_visitor_->OnControlStreamCreated(send_control_stream_->id());
    }
  }
  if (!qpack_decoder_send_stream_ &&
      CanOpenNextOutgoingUnidirectionalStream()) {
    auto decoder_send = std::make_unique<QpackSendStream>(
        GetNextOutgoingUnidirectionalStreamId(), this, kQpackDecoderStream);
    qpack_decoder_send_stream_ = decoder_send.get();
    ActivateStream(std::move(decoder_send));
    // Wire the decoder's output to its dedicated send stream.
    qpack_decoder_->set_qpack_stream_sender_delegate(
        qpack_decoder_send_stream_);
    if (debug_visitor_) {
      debug_visitor_->OnQpackDecoderStreamCreated(
          qpack_decoder_send_stream_->id());
    }
  }
  if (!qpack_encoder_send_stream_ &&
      CanOpenNextOutgoingUnidirectionalStream()) {
    auto encoder_send = std::make_unique<QpackSendStream>(
        GetNextOutgoingUnidirectionalStreamId(), this, kQpackEncoderStream);
    qpack_encoder_send_stream_ = encoder_send.get();
    ActivateStream(std::move(encoder_send));
    // Wire the encoder's output to its dedicated send stream.
    qpack_encoder_->set_qpack_stream_sender_delegate(
        qpack_encoder_send_stream_);
    if (debug_visitor_) {
      debug_visitor_->OnQpackEncoderStreamCreated(
          qpack_encoder_send_stream_->id());
    }
  }
}
// Sends an HTTP/3 GOAWAY just before a CONNECTION_CLOSE goes out, so the
// peer learns which of its requests may have been processed.
void QuicSpdySession::BeforeConnectionCloseSent() {
  if (!VersionUsesHttp3(transport_version()) || !IsEncryptionEstablished()) {
    return;
  }
  QUICHE_DCHECK_EQ(perspective(), Perspective::IS_SERVER);
  // Pick one past the largest peer-created stream ID (presumably
  // bidirectional, given the `false` argument — confirm against the
  // GetLargestPeerCreatedStreamId contract), or 0 if none was created.
  QuicStreamId stream_id =
      GetLargestPeerCreatedStreamId( false);
  if (stream_id == QuicUtils::GetInvalidStreamId(transport_version())) {
    stream_id = 0;
  } else {
    stream_id += QuicUtils::StreamIdDelta(transport_version());
  }
  // GOAWAY IDs must not increase; skip if an equal or smaller ID was sent.
  if (last_sent_http3_goaway_id_.has_value() &&
      *last_sent_http3_goaway_id_ <= stream_id) {
    return;
  }
  send_control_stream_->SendGoAway(stream_id);
  last_sent_http3_goaway_id_ = stream_id;
}
// Flushes pending QPACK decoder-stream data so it can be bundled with other
// outgoing frames in the same packet.
void QuicSpdySession::MaybeBundleOpportunistically() {
  if (qpack_decoder_ == nullptr) {
    return;
  }
  qpack_decoder_->FlushDecoderStream();
}
// Called when stream-limit credit frees up; uses the opportunity to create
// any missing HTTP/3 unidirectional streams (control / QPACK).
void QuicSpdySession::OnCanCreateNewOutgoingStream(bool unidirectional) {
  if (!unidirectional || !VersionUsesHttp3(transport_version())) {
    return;
  }
  MaybeInitializeHttp3UnidirectionalStreams();
}
// Whether a GOAWAY has been received, accounting for the fact that HTTP/3
// uses its own GOAWAY frame while Google QUIC uses the transport's.
bool QuicSpdySession::goaway_received() const {
  if (VersionUsesHttp3(transport_version())) {
    return last_received_http3_goaway_id_.has_value();
  }
  return transport_goaway_received();
}
// Whether a GOAWAY has been sent; mirrors goaway_received() for the send
// direction.
bool QuicSpdySession::goaway_sent() const {
  if (VersionUsesHttp3(transport_version())) {
    return last_sent_http3_goaway_id_.has_value();
  }
  return transport_goaway_sent();
}
// Closes the connection after the peer opened a second stream of a kind
// that HTTP/3 permits only once (control, QPACK encoder/decoder).
void QuicSpdySession::CloseConnectionOnDuplicateHttp3UnidirectionalStreams(
    absl::string_view type) {
  QUIC_PEER_BUG(quic_peer_bug_10360_9) << absl::StrCat(
      "Received a duplicate ", type, " stream: Closing connection.");
  const std::string details = absl::StrCat(type, " stream is received twice.");
  CloseConnectionWithDetails(QUIC_HTTP_DUPLICATE_UNIDIRECTIONAL_STREAM,
                             details);
}
void QuicSpdySession::LogHeaderCompressionRatioHistogram(
bool using_qpack, bool is_sent, QuicByteCount compressed,
QuicByteCount uncompressed) {
if (compressed <= 0 || uncompressed <= 0) {
return;
}
int ratio = 100 * (compressed) / (uncompressed);
if (ratio < 1) {
ratio = 1;
} else if (ratio > 200) {
ratio = 200;
}
if (using_qpack) {
if (is_sent) {
QUIC_HISTOGRAM_COUNTS("QuicSession.HeaderCompressionRatioQpackSent",
ratio, 1, 200, 200,
"Header compression ratio as percentage for sent "
"headers using QPACK.");
} else {
QUIC_HISTOGRAM_COUNTS("QuicSession.HeaderCompressionRatioQpackReceived",
ratio, 1, 200, 200,
"Header compression ratio as percentage for "
"received headers using QPACK.");
}
} else {
if (is_sent) {
QUIC_HISTOGRAM_COUNTS("QuicSession.HeaderCompressionRatioHpackSent",
ratio, 1, 200, 200,
"Header compression ratio as percentage for sent "
"headers using HPACK.");
} else {
QUIC_HISTOGRAM_COUNTS("QuicSession.HeaderCompressionRatioHpackReceived",
ratio, 1, 200, 200,
"Header compression ratio as percentage for "
"received headers using HPACK.");
}
}
}
// Sends an HTTP/3 datagram associated with `stream_id`: the payload is
// prefixed with the quarter-stream-ID varint and handed to the datagram
// queue. Returns a MessageStatus describing the outcome.
MessageStatus QuicSpdySession::SendHttp3Datagram(QuicStreamId stream_id,
                                                 absl::string_view payload) {
  if (!SupportsH3Datagram()) {
    // Distinguish "disabled locally", "SETTINGS not yet received", and
    // "peer did not advertise support" for better diagnostics.
    if (LocalHttpDatagramSupport() == HttpDatagramSupport::kNone) {
      QUIC_BUG(http datagram disabled locally)
          << "Cannot send HTTP Datagram when disabled locally";
      return MESSAGE_STATUS_UNSUPPORTED;
    } else if (!settings_received_) {
      QUIC_DLOG(INFO)
          << "Refusing to send HTTP Datagram before SETTINGS received";
      return MESSAGE_STATUS_SETTINGS_NOT_RECEIVED;
    } else {
      QUIC_DLOG(INFO) << "Refusing to send HTTP Datagram without peer support";
      return MESSAGE_STATUS_UNSUPPORTED;
    }
  }
  // The wire format carries the "quarter stream ID" (stream ID / 4).
  uint64_t stream_id_to_write = stream_id / kHttpDatagramStreamIdDivisor;
  size_t slice_length =
      QuicDataWriter::GetVarInt62Len(stream_id_to_write) + payload.length();
  quiche::QuicheBuffer buffer(
      connection()->helper()->GetStreamSendBufferAllocator(), slice_length);
  QuicDataWriter writer(slice_length, buffer.data());
  if (!writer.WriteVarInt62(stream_id_to_write)) {
    QUIC_BUG(h3 datagram stream ID write fail)
        << "Failed to write HTTP/3 datagram stream ID";
    return MESSAGE_STATUS_INTERNAL_ERROR;
  }
  if (!writer.WriteBytes(payload.data(), payload.length())) {
    QUIC_BUG(h3 datagram payload write fail)
        << "Failed to write HTTP/3 datagram payload";
    return MESSAGE_STATUS_INTERNAL_ERROR;
  }
  quiche::QuicheMemSlice slice(std::move(buffer));
  return datagram_queue()->SendOrQueueDatagram(std::move(slice));
}
// Sets the maximum time a datagram may wait in the send queue. The stream ID
// is currently ignored: the deadline applies to the whole queue.
void QuicSpdySession::SetMaxDatagramTimeInQueueForStreamId(
    QuicStreamId , QuicTime::Delta max_time_in_queue) {
  datagram_queue()->SetMaxTimeInQueue(max_time_in_queue);
}
// Handles an incoming QUIC DATAGRAM frame: parses the quarter-stream-ID
// prefix and routes the remaining payload to the owning request stream.
void QuicSpdySession::OnMessageReceived(absl::string_view message) {
  QuicSession::OnMessageReceived(message);
  if (!SupportsH3Datagram()) {
    QUIC_DLOG(INFO) << "Ignoring unexpected received HTTP/3 datagram";
    return;
  }
  QuicDataReader reader(message);
  uint64_t stream_id64;
  if (!reader.ReadVarInt62(&stream_id64)) {
    QUIC_DLOG(ERROR) << "Failed to parse stream ID in received HTTP/3 datagram";
    return;
  }
  // Reject quarter IDs that would overflow QuicStreamId once multiplied.
  if (stream_id64 >
      std::numeric_limits<QuicStreamId>::max() / kHttpDatagramStreamIdDivisor) {
    CloseConnectionWithDetails(
        QUIC_HTTP_FRAME_ERROR,
        absl::StrCat("Received HTTP Datagram with invalid quarter stream ID ",
                     stream_id64));
    return;
  }
  // Convert the quarter stream ID back to a full stream ID.
  stream_id64 *= kHttpDatagramStreamIdDivisor;
  QuicStreamId stream_id = static_cast<QuicStreamId>(stream_id64);
  QuicSpdyStream* stream =
      static_cast<QuicSpdyStream*>(GetActiveStream(stream_id));
  if (stream == nullptr) {
    // Datagrams for unknown/closed streams are dropped, not fatal.
    QUIC_DLOG(INFO) << "Received HTTP/3 datagram for unknown stream ID "
                    << stream_id;
    return;
  }
  stream->OnDatagramReceived(&reader);
}
// True only when every WebTransport prerequisite holds: local willingness,
// HTTP datagram support, a negotiated version, and extended CONNECT.
bool QuicSpdySession::SupportsWebTransport() {
  if (!WillNegotiateWebTransport() || !SupportsH3Datagram()) {
    return false;
  }
  return NegotiatedWebTransportVersion().has_value() && allow_extended_connect_;
}
// Returns the negotiated WebTransport version, but only once WebTransport
// support is fully established; std::nullopt otherwise.
std::optional<WebTransportHttp3Version>
QuicSpdySession::SupportedWebTransportVersion() {
  return SupportsWebTransport() ? NegotiatedWebTransportVersion()
                                : std::nullopt;
}
// True once any flavor of HTTP datagram support (draft-04 or RFC) has been
// negotiated with the peer.
bool QuicSpdySession::SupportsH3Datagram() const {
  return http_datagram_support_ != HttpDatagramSupport::kNone;
}
// Looks up the WebTransport session keyed by the ID of its CONNECT stream.
// Returns nullptr when WebTransport is unsupported, the ID is not a valid
// session ID, or no such stream/session exists.
WebTransportHttp3* QuicSpdySession::GetWebTransportSession(
    WebTransportSessionId id) {
  if (!SupportsWebTransport() || !IsValidWebTransportSessionId(id, version())) {
    return nullptr;
  }
  QuicSpdyStream* const connect_stream = GetOrCreateSpdyDataStream(id);
  return connect_stream == nullptr ? nullptr : connect_stream->web_transport();
}
// When request buffering is enabled, incoming requests may only be processed
// after the client's SETTINGS frame has arrived; otherwise always process.
bool QuicSpdySession::ShouldProcessIncomingRequests() {
  if (ShouldBufferRequestsUntilSettings()) {
    QUICHE_RELOADABLE_FLAG_COUNT_N(quic_block_until_settings_received_copt, 2,
                                   4);
    return settings_received_;
  }
  return true;
}
// Registers a request stream that must wait for the client's SETTINGS frame
// before being processed; the set is drained once SETTINGS arrives.
void QuicSpdySession::OnStreamWaitingForClientSettings(QuicStreamId id) {
  QUICHE_DCHECK(ShouldBufferRequestsUntilSettings());
  QUICHE_DCHECK(QuicUtils::IsBidirectionalStreamId(id, version()));
  QUICHE_RELOADABLE_FLAG_COUNT_N(quic_block_until_settings_received_copt, 3, 4);
  streams_waiting_for_settings_.insert(id);
}
// Associates an incoming WebTransport data stream with its session. If the
// session does not exist yet (its CONNECT stream may still be in flight),
// the stream is buffered; the buffer is bounded, and the oldest entries are
// reset to make room.
void QuicSpdySession::AssociateIncomingWebTransportStreamWithSession(
    WebTransportSessionId session_id, QuicStreamId stream_id) {
  if (QuicUtils::IsOutgoingStreamId(version(), stream_id, perspective())) {
    QUIC_BUG(AssociateIncomingWebTransportStreamWithSession got outgoing stream)
        << ENDPOINT
        << "AssociateIncomingWebTransportStreamWithSession() got an outgoing "
           "stream ID: "
        << stream_id;
    return;
  }
  WebTransportHttp3* session = GetWebTransportSession(session_id);
  if (session != nullptr) {
    QUIC_DVLOG(1) << ENDPOINT
                  << "Successfully associated incoming WebTransport stream "
                  << stream_id << " with session ID " << session_id;
    session->AssociateStream(stream_id);
    return;
  }
  // Session not ready yet: buffer the stream, evicting the oldest buffered
  // streams if the cap is reached.
  while (buffered_streams_.size() >= kMaxUnassociatedWebTransportStreams) {
    QUIC_DVLOG(1) << ENDPOINT << "Removing stream "
                  << buffered_streams_.front().stream_id
                  << " from buffered streams as the queue is full.";
    ResetStream(buffered_streams_.front().stream_id,
                QUIC_STREAM_WEBTRANSPORT_BUFFERED_STREAMS_LIMIT_EXCEEDED);
    buffered_streams_.pop_front();
  }
  QUIC_DVLOG(1) << ENDPOINT << "Received a WebTransport stream " << stream_id
                << " for session ID " << session_id
                << " but cannot associate it; buffering instead.";
  buffered_streams_.push_back(
      BufferedWebTransportStream{session_id, stream_id});
}
// Drains buffered WebTransport streams that belong to the newly available
// session, associating each with it and removing it from the buffer.
void QuicSpdySession::ProcessBufferedWebTransportStreamsForSession(
    WebTransportHttp3* session) {
  const WebTransportSessionId session_id = session->id();
  QUIC_DVLOG(1) << "Processing buffered WebTransport streams for "
                << session_id;
  for (auto iter = buffered_streams_.begin();
       iter != buffered_streams_.end();) {
    if (iter->session_id != session_id) {
      ++iter;
      continue;
    }
    QUIC_DVLOG(1) << "Unbuffered and associated WebTransport stream "
                  << iter->stream_id << " with session " << iter->session_id;
    session->AssociateStream(iter->stream_id);
    iter = buffered_streams_.erase(iter);
  }
}
// Opens a locally-initiated unidirectional WebTransport stream for the given
// session, writes its preamble, and registers it with the session. Returns
// nullptr when the stream limit does not permit a new stream.
WebTransportHttp3UnidirectionalStream*
QuicSpdySession::CreateOutgoingUnidirectionalWebTransportStream(
    WebTransportHttp3* session) {
  if (!CanOpenNextOutgoingUnidirectionalStream()) {
    return nullptr;
  }
  const QuicStreamId new_stream_id = GetNextOutgoingUnidirectionalStreamId();
  auto owned_stream = std::make_unique<WebTransportHttp3UnidirectionalStream>(
      new_stream_id, this, session->id());
  WebTransportHttp3UnidirectionalStream* const raw_stream = owned_stream.get();
  ActivateStream(std::move(owned_stream));
  raw_stream->WritePreamble();
  session->AssociateStream(new_stream_id);
  return raw_stream;
}
// Opens a locally-initiated bidirectional stream, converts it into a
// WebTransport data stream for the given session, and registers it. Returns
// nullptr when the stream cannot be created or converted.
QuicSpdyStream* QuicSpdySession::CreateOutgoingBidirectionalWebTransportStream(
    WebTransportHttp3* session) {
  QuicSpdyStream* const new_stream = CreateOutgoingBidirectionalStream();
  if (new_stream == nullptr) {
    return nullptr;
  }
  const QuicStreamId new_stream_id = new_stream->id();
  new_stream->ConvertToWebTransportDataStream(session->id());
  if (new_stream->web_transport_stream() == nullptr) {
    // Conversion failed; the stream is not usable for WebTransport.
    return nullptr;
  }
  session->AssociateStream(new_stream_id);
  return new_stream;
}
// Hook invoked when a queued datagram has been processed; the default
// implementation ignores the status. Subclasses may override to observe it.
void QuicSpdySession::OnDatagramProcessed(
    std::optional<MessageStatus> ) {
}
// Forwards datagram-queue completion notifications to the owning session.
void QuicSpdySession::DatagramObserver::OnDatagramProcessed(
    std::optional<MessageStatus> status) {
  session_->OnDatagramProcessed(status);
}
// By default this session advertises the RFC flavor of HTTP datagrams;
// subclasses may override to change or disable local support.
HttpDatagramSupport QuicSpdySession::LocalHttpDatagramSupport() {
  return HttpDatagramSupport::kRfc;
}
// Converts an HttpDatagramSupport value to a human-readable string for
// logging; unexpected values are rendered as "Unknown(<n>)".
std::string HttpDatagramSupportToString(
    HttpDatagramSupport http_datagram_support) {
  switch (http_datagram_support) {
    case HttpDatagramSupport::kNone:
      return "None";
    case HttpDatagramSupport::kDraft04:
      return "Draft04";
    case HttpDatagramSupport::kRfc:
      return "Rfc";
    case HttpDatagramSupport::kRfcAndDraft04:
      return "RfcAndDraft04";
  }
  return absl::StrCat("Unknown(", static_cast<int>(http_datagram_support), ")");
}
// Streams the human-readable form of an HttpDatagramSupport value.
std::ostream& operator<<(std::ostream& os,
                         const HttpDatagramSupport& http_datagram_support) {
  return os << HttpDatagramSupportToString(http_datagram_support);
}
// Server-side toggle for extended CONNECT (HTTP/3 only). When WebTransport
// is to be negotiated, extended CONNECT is mandatory and cannot be disabled,
// so the request is ignored in that case.
void QuicSpdySession::set_allow_extended_connect(bool allow_extended_connect) {
  QUIC_BUG_IF(extended connect wrong version,
              !VersionUsesHttp3(transport_version()))
      << "Try to enable/disable extended CONNECT in Google QUIC";
  QUIC_BUG_IF(extended connect on client,
              perspective() == Perspective::IS_CLIENT)
      << "Enabling/disabling extended CONNECT on the client side has no effect";
  if (ShouldNegotiateWebTransport()) {
    QUIC_BUG_IF(disable extended connect, !allow_extended_connect)
        << "Disabling extended CONNECT with web transport enabled has no "
           "effect.";
    return;
  }
  allow_extended_connect_ = allow_extended_connect;
}
// Extends base config negotiation: if the client sent the kBSUS connection
// option (and the reloadable flag is on), the server will buffer incoming
// requests until the client's SETTINGS frame arrives.
void QuicSpdySession::OnConfigNegotiated() {
  QuicSession::OnConfigNegotiated();
  if (GetQuicReloadableFlag(quic_block_until_settings_received_copt) &&
      perspective() == Perspective::IS_SERVER &&
      config()->HasClientSentConnectionOption(kBSUS, Perspective::IS_SERVER)) {
    QUICHE_RELOADABLE_FLAG_COUNT_N(quic_block_until_settings_received_copt, 1,
                                   4);
    force_buffer_requests_until_settings_ = true;
  }
}
#undef ENDPOINT
} | #include "quiche/quic/core/http/quic_spdy_session.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include "absl/base/macros.h"
#include "absl/memory/memory.h"
#include "absl/strings/escaping.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/http2/core/spdy_framer.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/frames/quic_stream_frame.h"
#include "quiche/quic/core/frames/quic_streams_blocked_frame.h"
#include "quiche/quic/core/http/http_constants.h"
#include "quiche/quic/core/http/http_encoder.h"
#include "quiche/quic/core/http/quic_header_list.h"
#include "quiche/quic/core/http/web_transport_http3.h"
#include "quiche/quic/core/qpack/qpack_header_table.h"
#include "quiche/quic/core/quic_config.h"
#include "quiche/quic/core/quic_crypto_stream.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_packets.h"
#include "quiche/quic/core/quic_stream.h"
#include "quiche/quic/core/quic_stream_priority.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_expect_bug.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/qpack/qpack_encoder_peer.h"
#include "quiche/quic/test_tools/qpack/qpack_test_utils.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_flow_controller_peer.h"
#include "quiche/quic/test_tools/quic_session_peer.h"
#include "quiche/quic/test_tools/quic_spdy_session_peer.h"
#include "quiche/quic/test_tools/quic_stream_peer.h"
#include "quiche/quic/test_tools/quic_stream_send_buffer_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
#include "quiche/common/quiche_endian.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
using quiche::HttpHeaderBlock;
using spdy::kV3HighestPriority;
using spdy::Spdy3PriorityToHttp2Weight;
using spdy::SpdyFramer;
using spdy::SpdyPriority;
using spdy::SpdyPriorityIR;
using spdy::SpdySerializedFrame;
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::AtLeast;
using ::testing::ElementsAre;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrictMock;
namespace quic {
namespace test {
namespace {
// Test helper: expects `frame` to be a STOP_SENDING frame, then releases any
// resources it holds via ClearControlFrame().
bool VerifyAndClearStopSendingFrame(const QuicFrame& frame) {
  EXPECT_EQ(STOP_SENDING_FRAME, frame.type);
  return ClearControlFrame(frame);
}
// Test double for the crypto stream: simulates a completed handshake on
// OnHandshakeMessage() (negotiating config and enabling forward-secure keys)
// without performing any real cryptography.
class TestCryptoStream : public QuicCryptoStream, public QuicCryptoHandshaker {
 public:
  explicit TestCryptoStream(QuicSession* session)
      : QuicCryptoStream(session),
        QuicCryptoHandshaker(this, session),
        encryption_established_(false),
        one_rtt_keys_available_(false),
        params_(new QuicCryptoNegotiatedParameters) {
    // Simulate a negotiated cipher suite.
    params_->cipher_suite = 1;
  }
  // Pretends that 0-RTT keys are available so early data can be "sent".
  void EstablishZeroRttEncryption() {
    encryption_established_ = true;
    session()->connection()->SetEncrypter(
        ENCRYPTION_ZERO_RTT,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_ZERO_RTT));
  }
  // Drives the fake handshake to completion: fills in flow-control config,
  // processes transport parameters (TLS) or a handshake message (QUIC
  // crypto), installs forward-secure keys, and notifies the session.
  void OnHandshakeMessage(const CryptoHandshakeMessage& ) override {
    encryption_established_ = true;
    one_rtt_keys_available_ = true;
    QuicErrorCode error;
    std::string error_details;
    session()->config()->SetInitialStreamFlowControlWindowToSend(
        kInitialStreamFlowControlWindowForTest);
    session()->config()->SetInitialSessionFlowControlWindowToSend(
        kInitialSessionFlowControlWindowForTest);
    if (session()->version().UsesTls()) {
      if (session()->perspective() == Perspective::IS_CLIENT) {
        session()->config()->SetOriginalConnectionIdToSend(
            session()->connection()->connection_id());
        session()->config()->SetInitialSourceConnectionIdToSend(
            session()->connection()->connection_id());
      } else {
        session()->config()->SetInitialSourceConnectionIdToSend(
            session()->connection()->client_connection_id());
      }
      TransportParameters transport_parameters;
      EXPECT_TRUE(
          session()->config()->FillTransportParameters(&transport_parameters));
      error = session()->config()->ProcessTransportParameters(
          transport_parameters, false, &error_details);
    } else {
      CryptoHandshakeMessage msg;
      session()->config()->ToHandshakeMessage(&msg, transport_version());
      error =
          session()->config()->ProcessPeerHello(msg, CLIENT, &error_details);
    }
    EXPECT_THAT(error, IsQuicNoError());
    session()->OnNewEncryptionKeyAvailable(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
    session()->OnConfigNegotiated();
    if (session()->connection()->version().handshake_protocol ==
        PROTOCOL_TLS1_3) {
      session()->OnTlsHandshakeComplete();
    } else {
      session()->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    }
    session()->DiscardOldEncryptionKey(ENCRYPTION_INITIAL);
  }
  // --- Trivial QuicCryptoStream interface stubs below. ---
  ssl_early_data_reason_t EarlyDataReason() const override {
    return ssl_early_data_unknown;
  }
  bool encryption_established() const override {
    return encryption_established_;
  }
  bool one_rtt_keys_available() const override {
    return one_rtt_keys_available_;
  }
  HandshakeState GetHandshakeState() const override {
    return one_rtt_keys_available() ? HANDSHAKE_COMPLETE : HANDSHAKE_START;
  }
  void SetServerApplicationStateForResumption(
      std::unique_ptr<ApplicationState> ) override {}
  std::unique_ptr<QuicDecrypter> AdvanceKeysAndCreateCurrentOneRttDecrypter()
      override {
    return nullptr;
  }
  std::unique_ptr<QuicEncrypter> CreateCurrentOneRttEncrypter() override {
    return nullptr;
  }
  const QuicCryptoNegotiatedParameters& crypto_negotiated_params()
      const override {
    return *params_;
  }
  CryptoMessageParser* crypto_message_parser() override {
    return QuicCryptoHandshaker::crypto_message_parser();
  }
  void OnPacketDecrypted(EncryptionLevel ) override {}
  void OnOneRttPacketAcknowledged() override {}
  void OnHandshakePacketSent() override {}
  void OnHandshakeDoneReceived() override {}
  void OnNewTokenReceived(absl::string_view ) override {}
  std::string GetAddressToken(
      const CachedNetworkParameters* ) const override {
    return "";
  }
  bool ValidateAddressToken(absl::string_view ) const override {
    return true;
  }
  const CachedNetworkParameters* PreviousCachedNetworkParams() const override {
    return nullptr;
  }
  void SetPreviousCachedNetworkParams(
      CachedNetworkParameters ) override {}
  MOCK_METHOD(void, OnCanWrite, (), (override));
  bool HasPendingCryptoRetransmission() const override { return false; }
  MOCK_METHOD(bool, HasPendingRetransmission, (), (const, override));
  void OnConnectionClosed(const QuicConnectionCloseFrame& ,
                          ConnectionCloseSource ) override {}
  SSL* GetSsl() const override { return nullptr; }
  bool IsCryptoFrameExpectedForEncryptionLevel(
      EncryptionLevel level) const override {
    return level != ENCRYPTION_ZERO_RTT;
  }
  EncryptionLevel GetEncryptionLevelToSendCryptoDataOfSpace(
      PacketNumberSpace space) const override {
    switch (space) {
      case INITIAL_DATA:
        return ENCRYPTION_INITIAL;
      case HANDSHAKE_DATA:
        return ENCRYPTION_HANDSHAKE;
      case APPLICATION_DATA:
        return ENCRYPTION_FORWARD_SECURE;
      default:
        QUICHE_DCHECK(false);
        return NUM_ENCRYPTION_LEVELS;
    }
  }
  bool ExportKeyingMaterial(absl::string_view ,
                            absl::string_view ,
                            size_t , std::string*
                            ) override {
    return false;
  }

 private:
  using QuicCryptoStream::session;
  bool encryption_established_;
  bool one_rtt_keys_available_;
  quiche::QuicheReferenceCountedPointer<QuicCryptoNegotiatedParameters> params_;
};
// Headers stream test double that lets tests set expectations on
// OnCanWrite().
class TestHeadersStream : public QuicHeadersStream {
 public:
  explicit TestHeadersStream(QuicSpdySession* session)
      : QuicHeadersStream(session) {}
  MOCK_METHOD(void, OnCanWrite, (), (override));
};
// Minimal QuicSpdyStream test double: ignores body data, accepts any headers
// as valid, and mocks write/retransmission hooks for expectation-setting.
class TestStream : public QuicSpdyStream {
 public:
  TestStream(QuicStreamId id, QuicSpdySession* session, StreamType type)
      : QuicSpdyStream(id, session, type) {}
  TestStream(PendingStream* pending, QuicSpdySession* session)
      : QuicSpdyStream(pending, session) {}
  using QuicStream::CloseWriteSide;
  void OnBodyAvailable() override {}
  MOCK_METHOD(void, OnCanWrite, (), (override));
  MOCK_METHOD(bool, RetransmitStreamData,
              (QuicStreamOffset, QuicByteCount, bool, TransmissionType),
              (override));
  MOCK_METHOD(bool, HasPendingRetransmission, (), (const, override));

 protected:
  // Accept all header lists so tests control validation elsewhere.
  bool ValidateReceivedHeaders(const QuicHeaderList& ) override {
    return true;
  }
};
// QuicSpdySession test double.  Creates TestStream instances, can be told to
// pretend that WritevData() consumed everything, and exposes setters for the
// locally advertised WebTransport / HTTP-datagram support.
class TestSession : public QuicSpdySession {
 public:
  explicit TestSession(QuicConnection* connection)
      : QuicSpdySession(connection, nullptr, DefaultQuicConfig(),
                        CurrentSupportedVersions()),
        crypto_stream_(this),
        writev_consumes_all_data_(false) {
    this->connection()->SetEncrypter(
        ENCRYPTION_FORWARD_SECURE,
        std::make_unique<TaggingEncrypter>(ENCRYPTION_FORWARD_SECURE));
    if (this->connection()->version().SupportsAntiAmplificationLimit()) {
      // Mark the peer address validated so sending is not amplification-limited.
      QuicConnectionPeer::SetAddressValidated(this->connection());
    }
  }

  ~TestSession() override { DeleteConnection(); }

  TestCryptoStream* GetMutableCryptoStream() override {
    return &crypto_stream_;
  }

  const TestCryptoStream* GetCryptoStream() const override {
    return &crypto_stream_;
  }

  TestStream* CreateOutgoingBidirectionalStream() override {
    TestStream* stream = new TestStream(GetNextOutgoingBidirectionalStreamId(),
                                        this, BIDIRECTIONAL);
    ActivateStream(absl::WrapUnique(stream));
    return stream;
  }

  TestStream* CreateOutgoingUnidirectionalStream() override {
    TestStream* stream = new TestStream(GetNextOutgoingUnidirectionalStreamId(),
                                        this, WRITE_UNIDIRECTIONAL);
    ActivateStream(absl::WrapUnique(stream));
    return stream;
  }

  TestStream* CreateIncomingStream(QuicStreamId id) override {
    // Pre-IETF versions enforce the incoming stream limit here; IETF QUIC
    // versions delegate limit enforcement elsewhere.
    if (!VersionHasIetfQuicFrames(connection()->transport_version()) &&
        stream_id_manager().num_open_incoming_streams() + 1 >
            max_open_incoming_bidirectional_streams()) {
      connection()->CloseConnection(
          QUIC_TOO_MANY_OPEN_STREAMS, "Too many streams!",
          ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
      return nullptr;
    } else {
      TestStream* stream = new TestStream(
          id, this,
          DetermineStreamType(id, connection()->version(), perspective(),
                              true, BIDIRECTIONAL));
      ActivateStream(absl::WrapUnique(stream));
      return stream;
    }
  }

  TestStream* CreateIncomingStream(PendingStream* pending) override {
    TestStream* stream = new TestStream(pending, this);
    ActivateStream(absl::WrapUnique(stream));
    return stream;
  }

  // Stream creation is always permitted in these tests.
  bool ShouldCreateIncomingStream(QuicStreamId ) override { return true; }
  bool ShouldCreateOutgoingBidirectionalStream() override { return true; }
  bool ShouldCreateOutgoingUnidirectionalStream() override { return true; }

  // Re-exposes the protected QuicSession helpers for test access.
  bool IsClosedStream(QuicStreamId id) {
    return QuicSession::IsClosedStream(id);
  }

  QuicStream* GetOrCreateStream(QuicStreamId stream_id) {
    return QuicSpdySession::GetOrCreateStream(stream_id);
  }

  // Like QuicSession::WritevData(), but when writev_consumes_all_data_ is set
  // it reports the full length (plus FIN) as consumed without writing.
  QuicConsumedData WritevData(QuicStreamId id, size_t write_length,
                              QuicStreamOffset offset, StreamSendingState state,
                              TransmissionType type,
                              EncryptionLevel level) override {
    bool fin = state != NO_FIN;
    QuicConsumedData consumed(write_length, fin);
    if (!writev_consumes_all_data_) {
      consumed =
          QuicSession::WritevData(id, write_length, offset, state, type, level);
    }
    QuicSessionPeer::GetWriteBlockedStreams(this)->UpdateBytesForStream(
        id, consumed.bytes_consumed);
    return consumed;
  }

  void set_writev_consumes_all_data(bool val) {
    writev_consumes_all_data_ = val;
  }

  // Buffers a small payload on |stream| and writes it with FIN, first raising
  // the default encryption level to forward-secure for non-crypto streams.
  QuicConsumedData SendStreamData(QuicStream* stream) {
    if (!QuicUtils::IsCryptoStreamId(connection()->transport_version(),
                                     stream->id()) &&
        connection()->encryption_level() != ENCRYPTION_FORWARD_SECURE) {
      this->connection()->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
    }
    QuicStreamPeer::SendBuffer(stream).SaveStreamData("not empty");
    QuicConsumedData consumed =
        WritevData(stream->id(), 9, 0, FIN, NOT_RETRANSMISSION,
                   GetEncryptionLevelToSendApplicationData());
    QuicStreamPeer::SendBuffer(stream).OnStreamDataConsumed(
        consumed.bytes_consumed);
    return consumed;
  }

  // Pretends to write |bytes| of data with FIN; only valid when writes are
  // configured to consume everything.
  QuicConsumedData SendLargeFakeData(QuicStream* stream, int bytes) {
    QUICHE_DCHECK(writev_consumes_all_data_);
    return WritevData(stream->id(), bytes, 0, FIN, NOT_RETRANSMISSION,
                      GetEncryptionLevelToSendApplicationData());
  }

  WebTransportHttp3VersionSet LocallySupportedWebTransportVersions()
      const override {
    return locally_supported_web_transport_versions_;
  }

  void set_supports_webtransport(bool value) {
    locally_supported_web_transport_versions_ =
        value ? kDefaultSupportedWebTransportVersions
              : WebTransportHttp3VersionSet();
  }

  void set_locally_supported_web_transport_versions(
      WebTransportHttp3VersionSet versions) {
    locally_supported_web_transport_versions_ = std::move(versions);
  }

  HttpDatagramSupport LocalHttpDatagramSupport() override {
    return local_http_datagram_support_;
  }

  void set_local_http_datagram_support(HttpDatagramSupport value) {
    local_http_datagram_support_ = value;
  }

  MOCK_METHOD(void, OnAcceptChFrame, (const AcceptChFrame&), (override));

  // Protected base members re-exposed for tests.
  using QuicSession::closed_streams;
  using QuicSession::pending_streams_size;
  using QuicSession::ShouldKeepConnectionAlive;
  using QuicSpdySession::settings;
  using QuicSpdySession::UsesPendingStreamForFrame;

 private:
  StrictMock<TestCryptoStream> crypto_stream_;
  bool writev_consumes_all_data_;
  WebTransportHttp3VersionSet locally_supported_web_transport_versions_;
  HttpDatagramSupport local_http_datagram_support_ = HttpDatagramSupport::kNone;
};
// Version-parameterized base fixture.  Owns a strict mock connection and an
// optional TestSession, which is created and configured by Initialize().
class QuicSpdySessionTestBase : public QuicTestWithParam<ParsedQuicVersion> {
 public:
  // Gmock action: frees a captured MAX_STREAMS frame so it does not leak.
  bool ClearMaxStreamsControlFrame(const QuicFrame& frame) {
    if (frame.type == MAX_STREAMS_FRAME) {
      DeleteFrame(&const_cast<QuicFrame&>(frame));
      return true;
    }
    return false;
  }

 protected:
  explicit QuicSpdySessionTestBase(Perspective perspective,
                                   bool allow_extended_connect)
      : connection_(new StrictMock<MockQuicConnection>(
            &helper_, &alarm_factory_, perspective,
            SupportedVersions(GetParam()))),
        allow_extended_connect_(allow_extended_connect) {}

  // Creates session_ and walks it through config negotiation with the
  // flow-control windows and stream limits the tests below rely on.
  void Initialize() {
    session_.emplace(connection_);
    if (qpack_maximum_dynamic_table_capacity_.has_value()) {
      session_->set_qpack_maximum_dynamic_table_capacity(
          *qpack_maximum_dynamic_table_capacity_);
    }
    if (connection_->perspective() == Perspective::IS_SERVER &&
        VersionUsesHttp3(transport_version())) {
      session_->set_allow_extended_connect(allow_extended_connect_);
    }
    session_->Initialize();
    session_->config()->SetInitialStreamFlowControlWindowToSend(
        kInitialStreamFlowControlWindowForTest);
    session_->config()->SetInitialSessionFlowControlWindowToSend(
        kInitialSessionFlowControlWindowForTest);
    if (VersionUsesHttp3(transport_version())) {
      QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(
          session_->config(), kHttp3StaticUnidirectionalStreamCount);
    }
    QuicConfigPeer::SetReceivedInitialSessionFlowControlWindow(
        session_->config(), kMinimumFlowControlSendWindow);
    QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesUnidirectional(
        session_->config(), kMinimumFlowControlSendWindow);
    QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesIncomingBidirectional(
        session_->config(), kMinimumFlowControlSendWindow);
    QuicConfigPeer::SetReceivedInitialMaxStreamDataBytesOutgoingBidirectional(
        session_->config(), kMinimumFlowControlSendWindow);
    session_->OnConfigNegotiated();
    connection_->AdvanceTime(QuicTime::Delta::FromSeconds(1));
    TestCryptoStream* crypto_stream = session_->GetMutableCryptoStream();
    // The crypto stream may be polled for retransmissions any number of times.
    EXPECT_CALL(*crypto_stream, HasPendingRetransmission())
        .Times(testing::AnyNumber());
    writer_ = static_cast<MockPacketWriter*>(
        QuicConnectionPeer::GetWriter(session_->connection()));
  }

  // Verifies that exactly the IDs recorded in closed_streams_ are reported
  // closed by the session.
  void CheckClosedStreams() {
    QuicStreamId first_stream_id = QuicUtils::GetFirstBidirectionalStreamId(
        transport_version(), Perspective::IS_CLIENT);
    if (!QuicVersionUsesCryptoFrames(transport_version())) {
      first_stream_id = QuicUtils::GetCryptoStreamId(transport_version());
    }
    for (QuicStreamId i = first_stream_id; i < 100; i++) {
      if (closed_streams_.find(i) == closed_streams_.end()) {
        EXPECT_FALSE(session_->IsClosedStream(i)) << " stream id: " << i;
      } else {
        EXPECT_TRUE(session_->IsClosedStream(i)) << " stream id: " << i;
      }
    }
  }

  // Resets stream |id| and records it as closed.  IETF versions emit two
  // control frames (RESET_STREAM and STOP_SENDING); gQUIC emits one.
  void CloseStream(QuicStreamId id) {
    if (!VersionHasIetfQuicFrames(transport_version())) {
      EXPECT_CALL(*connection_, SendControlFrame(_))
          .WillOnce(Invoke(&ClearControlFrame));
    } else {
      EXPECT_CALL(*connection_, SendControlFrame(_))
          .Times(2)
          .WillRepeatedly(Invoke(&ClearControlFrame));
    }
    EXPECT_CALL(*connection_, OnStreamReset(id, _));
    session_->set_writev_consumes_all_data(true);
    session_->ResetStream(id, QUIC_STREAM_CANCELLED);
    closed_streams_.insert(id);
  }

  ParsedQuicVersion version() const { return connection_->version(); }

  QuicTransportVersion transport_version() const {
    return connection_->transport_version();
  }

  QuicStreamId GetNthClientInitiatedBidirectionalId(int n) {
    return GetNthClientInitiatedBidirectionalStreamId(transport_version(), n);
  }

  QuicStreamId GetNthServerInitiatedBidirectionalId(int n) {
    return GetNthServerInitiatedBidirectionalStreamId(transport_version(), n);
  }

  QuicStreamId IdDelta() {
    return QuicUtils::StreamIdDelta(transport_version());
  }

  // Converts a stream count into the ID of the last stream of that kind: the
  // low ID bits encode directionality (0x2) and server-initiation (0x1).
  QuicStreamId StreamCountToId(QuicStreamCount stream_count,
                               Perspective perspective, bool bidirectional) {
    QuicStreamId id =
        ((stream_count - 1) * QuicUtils::StreamIdDelta(transport_version()));
    if (!bidirectional) {
      id |= 0x2;
    }
    if (perspective == Perspective::IS_SERVER) {
      id |= 0x1;
    }
    return id;
  }

  // Drives the handshake to completion by delivering a crypto handshake
  // message, absorbing the packets/frames that negotiation produces.
  void CompleteHandshake() {
    if (VersionHasIetfQuicFrames(transport_version())) {
      EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
          .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
    }
    if (connection_->version().UsesTls() &&
        connection_->perspective() == Perspective::IS_SERVER) {
      // HANDSHAKE_DONE frame sent by TLS servers.
      EXPECT_CALL(*connection_, SendControlFrame(_))
          .WillOnce(Invoke(&ClearControlFrame));
    }
    CryptoHandshakeMessage message;
    session_->GetMutableCryptoStream()->OnHandshakeMessage(message);
    testing::Mock::VerifyAndClearExpectations(writer_);
    testing::Mock::VerifyAndClearExpectations(connection_);
  }

  // Simulates receipt of a peer SETTINGS frame advertising the given
  // WebTransport versions (plus HTTP datagrams and extended CONNECT).
  void ReceiveWebTransportSettings(WebTransportHttp3VersionSet versions =
                                       kDefaultSupportedWebTransportVersions) {
    SettingsFrame settings;
    settings.values[SETTINGS_H3_DATAGRAM] = 1;
    if (versions.IsSet(WebTransportHttp3Version::kDraft02)) {
      settings.values[SETTINGS_WEBTRANS_DRAFT00] = 1;
    }
    if (versions.IsSet(WebTransportHttp3Version::kDraft07)) {
      settings.values[SETTINGS_WEBTRANS_MAX_SESSIONS_DRAFT07] = 16;
    }
    settings.values[SETTINGS_ENABLE_CONNECT_PROTOCOL] = 1;
    std::string data = std::string(1, kControlStream) +
                       HttpEncoder::SerializeSettingsFrame(settings);
    QuicStreamId control_stream_id =
        session_->perspective() == Perspective::IS_SERVER
            ? GetNthClientInitiatedUnidirectionalStreamId(transport_version(),
                                                          3)
            : GetNthServerInitiatedUnidirectionalStreamId(transport_version(),
                                                          3);
    QuicStreamFrame frame(control_stream_id, false, 0, data);
    session_->OnStreamFrame(frame);
  }

  // Simulates an incoming WebTransport CONNECT request on |session_id| and
  // completes its header exchange.
  void ReceiveWebTransportSession(WebTransportSessionId session_id) {
    QuicStreamFrame frame(session_id, false, 0,
                          absl::string_view());
    session_->OnStreamFrame(frame);
    QuicSpdyStream* stream =
        static_cast<QuicSpdyStream*>(session_->GetOrCreateStream(session_id));
    QuicHeaderList headers;
    headers.OnHeader(":method", "CONNECT");
    headers.OnHeader(":protocol", "webtransport");
    stream->OnStreamHeaderList(true, 0, headers);
    WebTransportHttp3* web_transport =
        session_->GetWebTransportSession(session_id);
    ASSERT_TRUE(web_transport != nullptr);
    quiche::HttpHeaderBlock header_block;
    web_transport->HeadersReceived(header_block);
  }

  // Simulates an incoming WebTransport unidirectional stream carrying the
  // stream-type preamble, the session ID, and a small payload.
  void ReceiveWebTransportUnidirectionalStream(WebTransportSessionId session_id,
                                               QuicStreamId stream_id) {
    char buffer[256];
    QuicDataWriter data_writer(sizeof(buffer), buffer);
    ASSERT_TRUE(data_writer.WriteVarInt62(kWebTransportUnidirectionalStream));
    ASSERT_TRUE(data_writer.WriteVarInt62(session_id));
    ASSERT_TRUE(data_writer.WriteStringPiece("test data"));
    std::string data(buffer, data_writer.length());
    QuicStreamFrame frame(stream_id, false, 0, data);
    session_->OnStreamFrame(frame);
  }

  // Shared driver for the HTTP-datagram negotiation tests; defined elsewhere.
  void TestHttpDatagramSetting(HttpDatagramSupport local_support,
                               HttpDatagramSupport remote_support,
                               HttpDatagramSupport expected_support,
                               bool expected_datagram_supported);

  MockQuicConnectionHelper helper_;
  MockAlarmFactory alarm_factory_;
  StrictMock<MockQuicConnection>* connection_;  // Owned by session_.
  bool allow_extended_connect_;
  std::optional<TestSession> session_;
  std::set<QuicStreamId> closed_streams_;  // IDs closed via CloseStream().
  std::optional<uint64_t> qpack_maximum_dynamic_table_capacity_;
  MockPacketWriter* writer_;  // Not owned.
};
// Server-perspective fixture with extended CONNECT allowed.
class QuicSpdySessionTestServer : public QuicSpdySessionTestBase {
 protected:
  QuicSpdySessionTestServer()
      : QuicSpdySessionTestBase(Perspective::IS_SERVER, true) {}
};

// Run every test below once per supported QUIC version.
INSTANTIATE_TEST_SUITE_P(Tests, QuicSpdySessionTestServer,
                         ::testing::ValuesIn(AllSupportedVersions()),
                         ::testing::PrintToStringParamName());
// Pending streams are used only for peer-initiated unidirectional streams,
// and only for frame types that can arrive before the stream type is known.
TEST_P(QuicSpdySessionTestServer, UsesPendingStreamsForFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  EXPECT_TRUE(session_->UsesPendingStreamForFrame(
      STREAM_FRAME, QuicUtils::GetFirstUnidirectionalStreamId(
                        transport_version(), Perspective::IS_CLIENT)));
  EXPECT_TRUE(session_->UsesPendingStreamForFrame(
      RST_STREAM_FRAME, QuicUtils::GetFirstUnidirectionalStreamId(
                            transport_version(), Perspective::IS_CLIENT)));
  // Self-initiated streams, STOP_SENDING, and bidirectional streams do not
  // go through pending streams.
  EXPECT_FALSE(session_->UsesPendingStreamForFrame(
      RST_STREAM_FRAME, QuicUtils::GetFirstUnidirectionalStreamId(
                            transport_version(), Perspective::IS_SERVER)));
  EXPECT_FALSE(session_->UsesPendingStreamForFrame(
      STOP_SENDING_FRAME, QuicUtils::GetFirstUnidirectionalStreamId(
                              transport_version(), Perspective::IS_CLIENT)));
  EXPECT_FALSE(session_->UsesPendingStreamForFrame(
      RST_STREAM_FRAME, QuicUtils::GetFirstBidirectionalStreamId(
                            transport_version(), Perspective::IS_CLIENT)));
}
// The session reports the mock connection's default peer address.
TEST_P(QuicSpdySessionTestServer, PeerAddress) {
  Initialize();
  EXPECT_EQ(QuicSocketAddress(QuicIpAddress::Loopback4(), kTestPort),
            session_->peer_address());
}

TEST_P(QuicSpdySessionTestServer, SelfAddress) {
  Initialize();
  EXPECT_TRUE(session_->self_address().IsInitialized());
}

// 1-RTT keys become available only after the handshake completes.
TEST_P(QuicSpdySessionTestServer, OneRttKeysAvailable) {
  Initialize();
  EXPECT_FALSE(session_->OneRttKeysAvailable());
  CompleteHandshake();
  EXPECT_TRUE(session_->OneRttKeysAvailable());
}
// With no activity, no stream ID is considered closed.
TEST_P(QuicSpdySessionTestServer, IsClosedStreamDefault) {
  Initialize();
  QuicStreamId first_stream_id = QuicUtils::GetFirstBidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  if (!QuicVersionUsesCryptoFrames(transport_version())) {
    first_stream_id = QuicUtils::GetCryptoStreamId(transport_version());
  }
  for (QuicStreamId i = first_stream_id; i < 100; i++) {
    EXPECT_FALSE(session_->IsClosedStream(i)) << "stream id: " << i;
  }
}

// Creating client stream #2 implicitly makes streams #0 and #1 available.
TEST_P(QuicSpdySessionTestServer, AvailableStreams) {
  Initialize();
  ASSERT_TRUE(session_->GetOrCreateStream(
                  GetNthClientInitiatedBidirectionalId(2)) != nullptr);
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &*session_, GetNthClientInitiatedBidirectionalId(0)));
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &*session_, GetNthClientInitiatedBidirectionalId(1)));
  ASSERT_TRUE(session_->GetOrCreateStream(
                  GetNthClientInitiatedBidirectionalId(1)) != nullptr);
  ASSERT_TRUE(session_->GetOrCreateStream(
                  GetNthClientInitiatedBidirectionalId(0)) != nullptr);
}

// Closing locally-created (server-initiated) streams marks them closed.
TEST_P(QuicSpdySessionTestServer, IsClosedStreamLocallyCreated) {
  Initialize();
  CompleteHandshake();
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  EXPECT_EQ(GetNthServerInitiatedBidirectionalId(0), stream2->id());
  QuicSpdyStream* stream4 = session_->CreateOutgoingBidirectionalStream();
  EXPECT_EQ(GetNthServerInitiatedBidirectionalId(1), stream4->id());
  CheckClosedStreams();
  CloseStream(GetNthServerInitiatedBidirectionalId(0));
  CheckClosedStreams();
  CloseStream(GetNthServerInitiatedBidirectionalId(1));
  CheckClosedStreams();
}

// Closing peer-created (client-initiated) streams marks them closed.
TEST_P(QuicSpdySessionTestServer, IsClosedStreamPeerCreated) {
  Initialize();
  CompleteHandshake();
  QuicStreamId stream_id1 = GetNthClientInitiatedBidirectionalId(0);
  QuicStreamId stream_id2 = GetNthClientInitiatedBidirectionalId(1);
  session_->GetOrCreateStream(stream_id1);
  session_->GetOrCreateStream(stream_id2);
  CheckClosedStreams();
  CloseStream(stream_id1);
  CheckClosedStreams();
  CloseStream(stream_id2);
  // A stream created beyond the closed ones can still be closed normally.
  QuicStream* stream3 = session_->GetOrCreateStream(stream_id2 + 4);
  CheckClosedStreams();
  CloseStream(stream3->id());
  CheckClosedStreams();
}
// Opening exactly the maximum allowed stream succeeds; one past it closes the
// connection (IETF) or is rejected per the legacy availability rules (gQUIC).
TEST_P(QuicSpdySessionTestServer, MaximumAvailableOpenedStreams) {
  Initialize();
  if (VersionHasIetfQuicFrames(transport_version())) {
    QuicStreamId stream_id = StreamCountToId(
        QuicSessionPeer::ietf_streamid_manager(&*session_)
            ->max_incoming_bidirectional_streams(),
        Perspective::IS_CLIENT,
        true);
    EXPECT_NE(nullptr, session_->GetOrCreateStream(stream_id));
    stream_id =
        StreamCountToId(QuicSessionPeer::ietf_streamid_manager(&*session_)
                            ->max_incoming_unidirectional_streams(),
                        Perspective::IS_CLIENT,
                        false);
    EXPECT_NE(nullptr, session_->GetOrCreateStream(stream_id));
    // One stream past each limit must close the connection.
    EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(2);
    stream_id =
        StreamCountToId(QuicSessionPeer::ietf_streamid_manager(&*session_)
                                ->max_incoming_bidirectional_streams() +
                            1,
                        Perspective::IS_CLIENT,
                        true);
    EXPECT_EQ(nullptr, session_->GetOrCreateStream(stream_id));
    stream_id =
        StreamCountToId(QuicSessionPeer::ietf_streamid_manager(&*session_)
                                ->max_incoming_unidirectional_streams() +
                            1,
                        Perspective::IS_CLIENT,
                        false);
    EXPECT_EQ(nullptr, session_->GetOrCreateStream(stream_id));
  } else {
    QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
    session_->GetOrCreateStream(stream_id);
    EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
    EXPECT_NE(
        nullptr,
        session_->GetOrCreateStream(
            stream_id +
            IdDelta() *
                (session_->max_open_incoming_bidirectional_streams() - 1)));
  }
}

// Requesting a stream far beyond the available range closes the connection.
TEST_P(QuicSpdySessionTestServer, TooManyAvailableStreams) {
  Initialize();
  QuicStreamId stream_id1 = GetNthClientInitiatedBidirectionalId(0);
  QuicStreamId stream_id2;
  EXPECT_NE(nullptr, session_->GetOrCreateStream(stream_id1));
  stream_id2 = GetNthClientInitiatedBidirectionalId(
      2 * session_->MaxAvailableBidirectionalStreams() + 4);
  if (VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_STREAM_ID, _, _));
  } else {
    EXPECT_CALL(*connection_,
                CloseConnection(QUIC_TOO_MANY_AVAILABLE_STREAMS, _, _));
  }
  EXPECT_EQ(nullptr, session_->GetOrCreateStream(stream_id2));
}

// With a raised limit, a large (but in-range) stream ID is accepted.
TEST_P(QuicSpdySessionTestServer, ManyAvailableStreams) {
  Initialize();
  if (VersionHasIetfQuicFrames(transport_version())) {
    QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(&*session_, 200);
  } else {
    QuicSessionPeer::SetMaxOpenIncomingStreams(&*session_, 200);
  }
  QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
  session_->GetOrCreateStream(stream_id);
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  EXPECT_NE(nullptr, session_->GetOrCreateStream(
                         GetNthClientInitiatedBidirectionalId(198)));
}
// Marking an already-closed stream write-blocked must trip a QUIC_BUG.
TEST_P(QuicSpdySessionTestServer,
       DebugDFatalIfMarkingClosedStreamWriteBlocked) {
  Initialize();
  CompleteHandshake();
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillRepeatedly(Return(WriteResult(WRITE_STATUS_OK, 0)));
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  QuicStreamId closed_stream_id = stream2->id();
  // Close the stream.
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(closed_stream_id, _));
  stream2->Reset(QUIC_BAD_APPLICATION_PAYLOAD);
  std::string msg =
      absl::StrCat("Marking unknown stream ", closed_stream_id, " blocked.");
  EXPECT_QUIC_BUG(session_->MarkConnectionLevelWriteBlocked(closed_stream_id),
                  msg);
}

// A STREAMS_BLOCKED frame at the maximum stream count triggers a GOAWAY.
TEST_P(QuicSpdySessionTestServer, TooLargeStreamBlocked) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(
      static_cast<QuicSession*>(&*session_), QuicUtils::GetMaxStreamCount());
  QuicStreamsBlockedFrame frame;
  frame.stream_count = QuicUtils::GetMaxStreamCount();
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(_));
  session_->OnStreamsBlockedFrame(frame);
}
// OnCanWrite drains all blocked streams into a single packet when the
// congestion window allows it.
TEST_P(QuicSpdySessionTestServer, OnCanWriteBundlesStreams) {
  Initialize();
  CompleteHandshake();
  // Encryption needs to be established before data can be sent.
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_->connection(), send_algorithm);
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_->CreateOutgoingBidirectionalStream();
  session_->MarkConnectionLevelWriteBlocked(stream2->id());
  session_->MarkConnectionLevelWriteBlocked(stream6->id());
  session_->MarkConnectionLevelWriteBlocked(stream4->id());
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*send_algorithm, GetCongestionWindow())
      .WillRepeatedly(Return(kMaxOutgoingPacketSize * 10));
  EXPECT_CALL(*send_algorithm, InRecovery()).WillRepeatedly(Return(false));
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_->SendStreamData(stream2);
  }));
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_->SendStreamData(stream4);
  }));
  EXPECT_CALL(*stream6, OnCanWrite()).WillOnce(Invoke([this, stream6]() {
    session_->SendStreamData(stream6);
  }));
  // All three streams' data fit into (and are flushed as) one packet.
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  EXPECT_CALL(*send_algorithm, OnPacketSent(_, _, _, _, _));
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_->OnCanWrite();
  EXPECT_FALSE(session_->WillingAndAbleToWrite());
}

// OnCanWrite stops draining streams as soon as congestion control blocks,
// and resumes from where it left off on the next call.
TEST_P(QuicSpdySessionTestServer, OnCanWriteCongestionControlBlocks) {
  Initialize();
  CompleteHandshake();
  session_->set_writev_consumes_all_data(true);
  InSequence s;
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_->connection(), send_algorithm);
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_->CreateOutgoingBidirectionalStream();
  session_->MarkConnectionLevelWriteBlocked(stream2->id());
  session_->MarkConnectionLevelWriteBlocked(stream6->id());
  session_->MarkConnectionLevelWriteBlocked(stream4->id());
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_->SendStreamData(stream2);
  }));
  EXPECT_CALL(*send_algorithm, GetCongestionWindow()).Times(AnyNumber());
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream6, OnCanWrite()).WillOnce(Invoke([this, stream6]() {
    session_->SendStreamData(stream6);
  }));
  // stream4 is left blocked: congestion control says no.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(false));
  session_->OnCanWrite();
  EXPECT_TRUE(session_->WillingAndAbleToWrite());
  // Still blocked.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(false));
  session_->OnCanWrite();
  EXPECT_TRUE(session_->WillingAndAbleToWrite());
  // Unblocked: stream4 finally writes.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_->SendStreamData(stream4);
  }));
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_->OnCanWrite();
  EXPECT_FALSE(session_->WillingAndAbleToWrite());
}

// A blocked packet writer prevents any stream from writing.
TEST_P(QuicSpdySessionTestServer, OnCanWriteWriterBlocks) {
  Initialize();
  CompleteHandshake();
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_->connection(), send_algorithm);
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));
  EXPECT_CALL(*writer_, IsWriteBlocked()).WillRepeatedly(Return(true));
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _)).Times(0);
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  session_->MarkConnectionLevelWriteBlocked(stream2->id());
  EXPECT_CALL(*stream2, OnCanWrite()).Times(0);
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_)).Times(0);
  session_->OnCanWrite();
  EXPECT_TRUE(session_->WillingAndAbleToWrite());
}
// When the crypto stream is write-blocked, it is flushed first and ahead of
// data streams regardless of blocking order.  Only applies to versions whose
// handshake runs on a dedicated stream (no CRYPTO frames).
TEST_P(QuicSpdySessionTestServer, BufferedHandshake) {
  Initialize();
  // This test is testing behavior of crypto stream flow control, but when
  // CRYPTO frames are used, there is no flow control for the crypto handshake.
  if (QuicVersionUsesCryptoFrames(transport_version())) {
    return;
  }
  session_->set_writev_consumes_all_data(true);
  EXPECT_FALSE(session_->HasPendingHandshake());  // Default value.

  // Test that blocking other streams does not change our status.
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  session_->MarkConnectionLevelWriteBlocked(stream2->id());
  EXPECT_FALSE(session_->HasPendingHandshake());

  TestStream* stream3 = session_->CreateOutgoingBidirectionalStream();
  session_->MarkConnectionLevelWriteBlocked(stream3->id());
  EXPECT_FALSE(session_->HasPendingHandshake());

  // Blocking (due to buffering of) the Crypto stream is detected.
  session_->MarkConnectionLevelWriteBlocked(
      QuicUtils::GetCryptoStreamId(transport_version()));
  EXPECT_TRUE(session_->HasPendingHandshake());

  TestStream* stream4 = session_->CreateOutgoingBidirectionalStream();
  session_->MarkConnectionLevelWriteBlocked(stream4->id());
  EXPECT_TRUE(session_->HasPendingHandshake());

  InSequence s;
  // Force most streams to re-register, which is common scenario when we block
  // the Crypto stream, and only the crypto stream can "really" write.

  // Due to prioritization, we *should* be asked to write the crypto stream
  // first.
  TestCryptoStream* crypto_stream = session_->GetMutableCryptoStream();
  EXPECT_CALL(*crypto_stream, OnCanWrite());

  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_->SendStreamData(stream2);
  }));
  EXPECT_CALL(*stream3, OnCanWrite()).WillOnce(Invoke([this, stream3]() {
    session_->SendStreamData(stream3);
  }));
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_->SendStreamData(stream4);
    // stream4 re-blocks itself, so the session stays willing to write.
    session_->MarkConnectionLevelWriteBlocked(stream4->id());
  }));
  session_->OnCanWrite();
  EXPECT_TRUE(session_->WillingAndAbleToWrite());
  EXPECT_FALSE(session_->HasPendingHandshake());  // Crypto stream wrote.
}

// Streams closed while blocked are simply skipped by OnCanWrite.
TEST_P(QuicSpdySessionTestServer, OnCanWriteWithClosedStream) {
  Initialize();
  CompleteHandshake();
  session_->set_writev_consumes_all_data(true);
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_->CreateOutgoingBidirectionalStream();
  session_->MarkConnectionLevelWriteBlocked(stream2->id());
  session_->MarkConnectionLevelWriteBlocked(stream6->id());
  session_->MarkConnectionLevelWriteBlocked(stream4->id());
  CloseStream(stream6->id());
  InSequence s;
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*stream2, OnCanWrite()).WillOnce(Invoke([this, stream2]() {
    session_->SendStreamData(stream2);
  }));
  EXPECT_CALL(*stream4, OnCanWrite()).WillOnce(Invoke([this, stream4]() {
    session_->SendStreamData(stream4);
  }));
  session_->OnCanWrite();
  EXPECT_FALSE(session_->WillingAndAbleToWrite());
}

// When connection-level flow control is blocked, only the crypto and headers
// streams may write; ordinary data streams must not be asked to.
TEST_P(QuicSpdySessionTestServer,
       OnCanWriteLimitsNumWritesIfFlowControlBlocked) {
  Initialize();
  CompleteHandshake();
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_->connection(), send_algorithm);
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(true));
  // Block the connection-level flow controller.
  QuicFlowControllerPeer::SetSendWindowOffset(session_->flow_controller(), 0);
  EXPECT_TRUE(session_->flow_controller()->IsBlocked());
  EXPECT_TRUE(session_->IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_->IsStreamFlowControlBlocked());
  if (!QuicVersionUsesCryptoFrames(transport_version())) {
    session_->MarkConnectionLevelWriteBlocked(
        QuicUtils::GetCryptoStreamId(transport_version()));
  }
  TestStream* stream = session_->CreateOutgoingBidirectionalStream();
  session_->MarkConnectionLevelWriteBlocked(stream->id());
  EXPECT_CALL(*stream, OnCanWrite()).Times(0);
  if (!QuicVersionUsesCryptoFrames(transport_version())) {
    TestCryptoStream* crypto_stream = session_->GetMutableCryptoStream();
    EXPECT_CALL(*crypto_stream, OnCanWrite());
  }
  if (!VersionUsesHttp3(transport_version())) {
    // Swap in a mockable headers stream to observe its OnCanWrite().
    TestHeadersStream* headers_stream;
    QuicSpdySessionPeer::SetHeadersStream(&*session_, nullptr);
    headers_stream = new TestHeadersStream(&*session_);
    QuicSpdySessionPeer::SetHeadersStream(&*session_, headers_stream);
    session_->MarkConnectionLevelWriteBlocked(
        QuicUtils::GetHeadersStreamId(transport_version()));
    EXPECT_CALL(*headers_stream, OnCanWrite());
  }
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_->OnCanWrite();
  EXPECT_FALSE(session_->WillingAndAbleToWrite());
}
// gQUIC GOAWAY: sent once, and streams can still be opened afterwards
// without triggering resets.
TEST_P(QuicSpdySessionTestServer, SendGoAway) {
  Initialize();
  CompleteHandshake();
  if (VersionHasIetfQuicFrames(transport_version())) {
    // In IETF QUIC, GOAWAY lives at the HTTP/3 layer (see SendHttp3GoAway).
    return;
  }
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(
          Invoke(connection_, &MockQuicConnection::ReallySendControlFrame));
  session_->SendGoAway(QUIC_PEER_GOING_AWAY, "Going Away.");
  EXPECT_TRUE(session_->goaway_sent());
  const QuicStreamId kTestStreamId = 5u;
  EXPECT_CALL(*connection_, SendControlFrame(_)).Times(0);
  EXPECT_CALL(*connection_,
              OnStreamReset(kTestStreamId, QUIC_STREAM_PEER_GOING_AWAY))
      .Times(0);
  EXPECT_TRUE(session_->GetOrCreateStream(kTestStreamId));
}

// Sending GOAWAY before encryption is established closes the connection
// instead of emitting the frame.
TEST_P(QuicSpdySessionTestServer, SendGoAwayWithoutEncryption) {
  Initialize();
  if (VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_PEER_GOING_AWAY, "Going Away.",
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  EXPECT_CALL(*connection_, SendControlFrame(_)).Times(0);
  session_->SendGoAway(QUIC_PEER_GOING_AWAY, "Going Away.");
  EXPECT_FALSE(session_->goaway_sent());
}

// HTTP/3 GOAWAY: advertises the largest stream ID, is sent once, and does
// not prevent subsequent stream creation.
TEST_P(QuicSpdySessionTestServer, SendHttp3GoAway) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  // 0xfffffffc is the largest client-initiated bidirectional stream ID.
  EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(0xfffffffc));
  session_->SendHttp3GoAway(QUIC_PEER_GOING_AWAY, "Goaway");
  EXPECT_TRUE(session_->goaway_sent());
  const QuicStreamId kTestStreamId =
      GetNthClientInitiatedBidirectionalStreamId(transport_version(), 0);
  EXPECT_CALL(*connection_, OnStreamReset(kTestStreamId, _)).Times(0);
  EXPECT_TRUE(session_->GetOrCreateStream(kTestStreamId));
  // A second GOAWAY is a no-op (no further OnGoAwayFrameSent expected).
  session_->SendHttp3GoAway(QUIC_PEER_GOING_AWAY, "Goaway");
}
// After an HTTP/3 GOAWAY, closing streams must not raise the peer's stream
// limit (no MAX_STREAMS frames are sent).
TEST_P(QuicSpdySessionTestServer, SendHttp3GoAwayAndNoMoreMaxStreams) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  // 0xfffffffc is the largest client-initiated bidirectional stream ID.
  EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(0xfffffffc));
  session_->SendHttp3GoAway(QUIC_PEER_GOING_AWAY, "Goaway");
  EXPECT_TRUE(session_->goaway_sent());
  // No MAX_STREAMS frame may be sent past this point.
  EXPECT_CALL(*connection_, SendControlFrame(_)).Times(0);
  // Open and close the maximum number of streams.
  const QuicStreamCount max_streams =
      QuicSessionPeer::ietf_streamid_manager(&*session_)
          ->max_incoming_bidirectional_streams();
  for (QuicStreamCount i = 0; i < max_streams; ++i) {
    QuicStreamId stream_id = StreamCountToId(
        i + 1,
        Perspective::IS_CLIENT,
        true);
    EXPECT_NE(nullptr, session_->GetOrCreateStream(stream_id));
    CloseStream(stream_id);
    QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_id,
                                 QUIC_STREAM_CANCELLED,
                                 0);
    session_->OnRstStream(rst_frame);
  }
  // The stream limit must be unchanged.
  EXPECT_EQ(max_streams, QuicSessionPeer::ietf_streamid_manager(&*session_)
                             ->max_incoming_bidirectional_streams());
}

// Sending an HTTP/3 GOAWAY before encryption closes the connection instead.
TEST_P(QuicSpdySessionTestServer, SendHttp3GoAwayWithoutEncryption) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_PEER_GOING_AWAY, "Goaway",
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_->SendHttp3GoAway(QUIC_PEER_GOING_AWAY, "Goaway");
  EXPECT_FALSE(session_->goaway_sent());
}

// GOAWAY sent after a stream exists still advertises the maximum ID and does
// not reset the existing stream.
TEST_P(QuicSpdySessionTestServer, SendHttp3GoAwayAfterStreamIsCreated) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  const QuicStreamId kTestStreamId =
      GetNthClientInitiatedBidirectionalStreamId(transport_version(), 0);
  EXPECT_TRUE(session_->GetOrCreateStream(kTestStreamId));
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  // 0xfffffffc is the largest client-initiated bidirectional stream ID.
  EXPECT_CALL(debug_visitor, OnGoAwayFrameSent(0xfffffffc));
  session_->SendHttp3GoAway(QUIC_PEER_GOING_AWAY, "Goaway");
  EXPECT_TRUE(session_->goaway_sent());
  // A second GOAWAY is a no-op.
  session_->SendHttp3GoAway(QUIC_PEER_GOING_AWAY, "Goaway");
}
// gQUIC-only: a repeated SendGoAway after goaway_sent() is true must not emit
// a second GOAWAY control frame (SendControlFrame is expected exactly once).
TEST_P(QuicSpdySessionTestServer, DoNotSendGoAwayTwice) {
  Initialize();
  CompleteHandshake();
  if (VersionHasIetfQuicFrames(transport_version())) {
    // HTTP/3 GOAWAY deduplication is covered by the HTTP/3-specific tests.
    return;
  }
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&ClearControlFrame));
  session_->SendGoAway(QUIC_PEER_GOING_AWAY, "Going Away.");
  EXPECT_TRUE(session_->goaway_sent());
  session_->SendGoAway(QUIC_PEER_GOING_AWAY, "Going Away.");
}
// gQUIC-only: receiving a GOAWAY whose last-good-stream ID names a stream the
// peer could not have known about must be tolerated without crashing (no
// expectations set; the test passes if OnGoAway handles it gracefully).
TEST_P(QuicSpdySessionTestServer, InvalidGoAway) {
  Initialize();
  if (VersionHasIetfQuicFrames(transport_version())) {
    return;
  }
  QuicGoAwayFrame go_away(kInvalidControlFrameId, QUIC_PEER_GOING_AWAY,
                          session_->next_outgoing_bidirectional_stream_id(),
                          "");
  session_->OnGoAway(go_away);
}
// RFC 9114 requires each successive GOAWAY to carry an ID no larger than the
// previous one; a GOAWAY with a larger ID must close the connection with
// QUIC_HTTP_GOAWAY_ID_LARGER_THAN_PREVIOUS.
TEST_P(QuicSpdySessionTestServer, Http3GoAwayLargerIdThanBefore) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  EXPECT_FALSE(session_->goaway_received());
  session_->OnHttp3GoAway( 0);
  EXPECT_TRUE(session_->goaway_received());
  EXPECT_CALL(
      *connection_,
      CloseConnection(
          QUIC_HTTP_GOAWAY_ID_LARGER_THAN_PREVIOUS,
          "GOAWAY received with ID 1 greater than previously received ID 0",
          _));
  session_->OnHttp3GoAway( 1);
}
// gQUIC-only (and only with the gquic-probing flag off): a connectivity probe
// from a new peer address must be answered with a probing packet to that new
// address while the session's recorded peer address stays unchanged.
// NOTE(review): test name contains a typo ("Connecitivity"); left as-is since
// renaming would change the registered test identity.
TEST_P(QuicSpdySessionTestServer, ServerReplyToConnecitivityProbe) {
  Initialize();
  if (VersionHasIetfQuicFrames(transport_version()) ||
      GetQuicReloadableFlag(quic_ignore_gquic_probing)) {
    return;
  }
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  QuicSocketAddress old_peer_address =
      QuicSocketAddress(QuicIpAddress::Loopback4(), kTestPort);
  EXPECT_EQ(old_peer_address, session_->peer_address());
  QuicSocketAddress new_peer_address =
      QuicSocketAddress(QuicIpAddress::Loopback4(), kTestPort + 1);
  EXPECT_CALL(*connection_,
              SendConnectivityProbingPacket(nullptr, new_peer_address));
  session_->OnPacketReceived(session_->self_address(), new_peer_address,
                             true);
  // Probing must not migrate the session's peer address.
  EXPECT_EQ(old_peer_address, session_->peer_address());
}
// The network idle timeout must grow from the pre-handshake initial value to
// the post-handshake maximum once the crypto handshake completes (+3s is the
// implementation's timeout slack).
TEST_P(QuicSpdySessionTestServer, IncreasedTimeoutAfterCryptoHandshake) {
  Initialize();
  EXPECT_EQ(kInitialIdleTimeoutSecs + 3,
            QuicConnectionPeer::GetNetworkTimeout(connection_).ToSeconds());
  CompleteHandshake();
  EXPECT_EQ(kMaximumIdleTimeoutSecs + 3,
            QuicConnectionPeer::GetNetworkTimeout(connection_).ToSeconds());
}
// A RESET_STREAM arriving before the stream's headers are decompressed must
// close the stream cleanly (open-stream count drops to zero) without killing
// the connection. For IETF QUIC, a matching STOP_SENDING is also needed to
// fully close both directions.
TEST_P(QuicSpdySessionTestServer, RstStreamBeforeHeadersDecompressed) {
  Initialize();
  CompleteHandshake();
  QuicStreamFrame data1(GetNthClientInitiatedBidirectionalId(0), false, 0,
                        absl::string_view("HT"));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(1u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
  if (!VersionHasIetfQuicFrames(transport_version())) {
    // gQUIC responds to an incoming RST with its own RST.
    EXPECT_CALL(*connection_,
                OnStreamReset(GetNthClientInitiatedBidirectionalId(0), _));
  }
  EXPECT_CALL(*connection_, SendControlFrame(_));
  QuicRstStreamFrame rst1(kInvalidControlFrameId,
                          GetNthClientInitiatedBidirectionalId(0),
                          QUIC_ERROR_PROCESSING_STREAM, 0);
  session_->OnRstStream(rst1);
  if (VersionHasIetfQuicFrames(transport_version())) {
    // In IETF QUIC the peer's STOP_SENDING triggers our RESET_STREAM for the
    // send direction.
    QuicStopSendingFrame stop_sending(kInvalidControlFrameId,
                                      GetNthClientInitiatedBidirectionalId(0),
                                      QUIC_ERROR_PROCESSING_STREAM);
    EXPECT_CALL(*connection_,
                OnStreamReset(GetNthClientInitiatedBidirectionalId(0),
                              QUIC_ERROR_PROCESSING_STREAM));
    session_->OnStopSendingFrame(stop_sending);
  }
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
  EXPECT_TRUE(connection_->connected());
}
// A FIN on a static stream (HTTP/3 control stream, or the gQUIC headers
// stream) is illegal and must close the connection with
// QUIC_INVALID_STREAM_ID.
TEST_P(QuicSpdySessionTestServer, OnStreamFrameFinStaticStreamId) {
  Initialize();
  QuicStreamId id;
  if (VersionUsesHttp3(transport_version())) {
    CompleteHandshake();
    id = GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
    // First byte establishes the stream as the peer's control stream,
    // making it static.
    char type[] = {kControlStream};
    QuicStreamFrame data1(id, false, 0, absl::string_view(type, 1));
    session_->OnStreamFrame(data1);
  } else {
    id = QuicUtils::GetHeadersStreamId(transport_version());
  }
  QuicStreamFrame data1(id, true, 0, absl::string_view("HT"));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_STREAM_ID, "Attempt to close a static stream",
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_->OnStreamFrame(data1);
}
// A RESET_STREAM targeting a static stream must close the connection; the
// expected error code and message differ between HTTP/3 (critical control
// stream) and gQUIC (headers stream).
TEST_P(QuicSpdySessionTestServer, OnRstStreamStaticStreamId) {
  Initialize();
  QuicStreamId id;
  QuicErrorCode expected_error;
  std::string error_message;
  if (VersionUsesHttp3(transport_version())) {
    CompleteHandshake();
    id = GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
    // Establish the peer's control stream so the RST targets it.
    char type[] = {kControlStream};
    QuicStreamFrame data1(id, false, 0, absl::string_view(type, 1));
    session_->OnStreamFrame(data1);
    expected_error = QUIC_HTTP_CLOSED_CRITICAL_STREAM;
    error_message = "RESET_STREAM received for receive control stream";
  } else {
    id = QuicUtils::GetHeadersStreamId(transport_version());
    expected_error = QUIC_INVALID_STREAM_ID;
    error_message = "Attempt to reset headers stream";
  }
  QuicRstStreamFrame rst1(kInvalidControlFrameId, id,
                          QUIC_ERROR_PROCESSING_STREAM, 0);
  EXPECT_CALL(
      *connection_,
      CloseConnection(expected_error, error_message,
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_->OnRstStream(rst1);
}
// A stream frame carrying the reserved invalid stream ID must close the
// connection with QUIC_INVALID_STREAM_ID.
TEST_P(QuicSpdySessionTestServer, OnStreamFrameInvalidStreamId) {
  Initialize();
  QuicStreamFrame data1(QuicUtils::GetInvalidStreamId(transport_version()),
                        true, 0, absl::string_view("HT"));
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_STREAM_ID, "Received data for an invalid stream",
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_->OnStreamFrame(data1);
}
// A RESET_STREAM carrying the reserved invalid stream ID must likewise close
// the connection with QUIC_INVALID_STREAM_ID.
TEST_P(QuicSpdySessionTestServer, OnRstStreamInvalidStreamId) {
  Initialize();
  QuicRstStreamFrame rst1(kInvalidControlFrameId,
                          QuicUtils::GetInvalidStreamId(transport_version()),
                          QUIC_ERROR_PROCESSING_STREAM, 0);
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_INVALID_STREAM_ID, "Received data for an invalid stream",
                  ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_->OnRstStream(rst1);
}
// A stream blocked by the small pre-handshake flow-control window must be
// unblocked (and re-queued for writing) once the handshake completes and
// larger negotiated windows take effect. QUIC-crypto only; TLS 1.3 sessions
// negotiate windows differently and are excluded.
TEST_P(QuicSpdySessionTestServer, HandshakeUnblocksFlowControlBlockedStream) {
  Initialize();
  if (connection_->version().handshake_protocol == PROTOCOL_TLS1_3) {
    return;
  }
  session_->GetMutableCryptoStream()->EstablishZeroRttEncryption();
  session_->set_writev_consumes_all_data(true);
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  // Exactly one full initial window of data: writing it blocks the stream.
  std::string body(kMinimumFlowControlSendWindow, '.');
  EXPECT_FALSE(stream2->IsFlowControlBlocked());
  EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_->IsStreamFlowControlBlocked());
  EXPECT_CALL(*connection_, SendControlFrame(_)).Times(AtLeast(1));
  stream2->WriteOrBufferBody(body, false);
  EXPECT_TRUE(stream2->IsFlowControlBlocked());
  EXPECT_TRUE(session_->IsConnectionFlowControlBlocked());
  EXPECT_TRUE(session_->IsStreamFlowControlBlocked());
  CompleteHandshake();
  // Handshake raised the windows: stream is unblocked and queued to write.
  EXPECT_TRUE(QuicSessionPeer::IsStreamWriteBlocked(&*session_, stream2->id()));
  EXPECT_FALSE(stream2->IsFlowControlBlocked());
  EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_->IsStreamFlowControlBlocked());
}
#if !defined(OS_IOS)
// Same unblocking property as above, but for the gQUIC headers stream: fill
// its flow-control window with HEADERS frames until it blocks, then verify
// the completed handshake unblocks it while its buffered data stays queued.
// Excluded on iOS (presumably too slow/flaky there — see build history).
// Only runs for versions that use neither CRYPTO frames nor HTTP/3.
TEST_P(QuicSpdySessionTestServer,
       HandshakeUnblocksFlowControlBlockedHeadersStream) {
  Initialize();
  if (QuicVersionUsesCryptoFrames(transport_version())) {
    return;
  }
  if (VersionUsesHttp3(transport_version())) {
    return;
  }
  session_->GetMutableCryptoStream()->EstablishZeroRttEncryption();
  session_->set_writev_consumes_all_data(true);
  TestCryptoStream* crypto_stream = session_->GetMutableCryptoStream();
  EXPECT_FALSE(crypto_stream->IsFlowControlBlocked());
  EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_->IsStreamFlowControlBlocked());
  QuicHeadersStream* headers_stream =
      QuicSpdySessionPeer::GetHeadersStream(&*session_);
  EXPECT_FALSE(headers_stream->IsFlowControlBlocked());
  EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_->IsStreamFlowControlBlocked());
  QuicStreamId stream_id = 5;
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&ClearControlFrame));
  HttpHeaderBlock headers;
  SimpleRandom random;
  // Random header values defeat HPACK compression so the window actually
  // fills; the 2000-ID cap bounds the loop if blocking never happens.
  while (!headers_stream->IsFlowControlBlocked() && stream_id < 2000) {
    EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
    EXPECT_FALSE(session_->IsStreamFlowControlBlocked());
    headers["header"] = absl::StrCat(random.RandUint64(), random.RandUint64(),
                                     random.RandUint64());
    session_->WriteHeadersOnHeadersStream(stream_id, headers.Clone(), true,
                                          spdy::SpdyStreamPrecedence(0),
                                          nullptr);
    stream_id += IdDelta();
  }
  // One more write while blocked: it must be buffered, not sent.
  session_->WriteHeadersOnHeadersStream(stream_id, std::move(headers), true,
                                        spdy::SpdyStreamPrecedence(0), nullptr);
  EXPECT_TRUE(headers_stream->HasBufferedData());
  EXPECT_TRUE(headers_stream->IsFlowControlBlocked());
  EXPECT_FALSE(crypto_stream->IsFlowControlBlocked());
  EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
  EXPECT_TRUE(session_->IsStreamFlowControlBlocked());
  EXPECT_FALSE(session_->HasDataToWrite());
  CompleteHandshake();
  // Unblocked by the new window, but the buffered data is still pending and
  // the headers stream remains write-blocked until OnCanWrite runs.
  EXPECT_FALSE(headers_stream->IsFlowControlBlocked());
  EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_->IsStreamFlowControlBlocked());
  EXPECT_TRUE(headers_stream->HasBufferedData());
  EXPECT_TRUE(QuicSessionPeer::IsStreamWriteBlocked(
      &*session_, QuicUtils::GetHeadersStreamId(transport_version())));
}
#endif
// When a RESET_STREAM arrives with a final byte offset beyond what was
// received, connection-level flow control must account for the full final
// offset (bytes_consumed == kByteOffset), not just bytes actually delivered.
TEST_P(QuicSpdySessionTestServer,
       ConnectionFlowControlAccountingRstOutOfOrder) {
  Initialize();
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  CompleteHandshake();
  TestStream* stream = session_->CreateOutgoingBidirectionalStream();
  // Final offset past the data received so far (out-of-order RST).
  const QuicStreamOffset kByteOffset =
      1 + kInitialSessionFlowControlWindowForTest / 2;
  if (!VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
    EXPECT_CALL(*connection_, SendControlFrame(_));
  }
  QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream->id(),
                               QUIC_STREAM_CANCELLED, kByteOffset);
  session_->OnRstStream(rst_frame);
  if (VersionHasIetfQuicFrames(transport_version())) {
    // IETF QUIC needs STOP_SENDING to close the send side as well.
    QuicStopSendingFrame stop_sending(kInvalidControlFrameId, stream->id(),
                                      QUIC_STREAM_CANCELLED);
    EXPECT_CALL(*connection_,
                OnStreamReset(stream->id(), QUIC_STREAM_CANCELLED));
    EXPECT_CALL(*connection_, SendControlFrame(_));
    session_->OnStopSendingFrame(stop_sending);
  }
  EXPECT_EQ(kByteOffset, session_->flow_controller()->bytes_consumed());
}
// A negotiated initial stream flow-control window below the protocol minimum
// must close the connection with QUIC_FLOW_CONTROL_INVALID_WINDOW.
// QUIC-crypto only; TLS 1.3 handles transport parameters differently.
TEST_P(QuicSpdySessionTestServer, InvalidStreamFlowControlWindowInHandshake) {
  Initialize();
  if (GetParam().handshake_protocol == PROTOCOL_TLS1_3) {
    return;
  }
  const uint32_t kInvalidWindow = kMinimumFlowControlSendWindow - 1;
  QuicConfigPeer::SetReceivedInitialStreamFlowControlWindow(session_->config(),
                                                            kInvalidWindow);
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_FLOW_CONTROL_INVALID_WINDOW, _, _));
  session_->OnConfigNegotiated();
}
// HTTP/3 requires at least 3 peer-allowed unidirectional streams (control,
// QPACK encoder, QPACK decoder). A negotiated limit of 2 — below the current
// limit of 3 — must close the connection.
TEST_P(QuicSpdySessionTestServer, TooLowUnidirectionalStreamLimitHttp3) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_->GetMutableCryptoStream()->EstablishZeroRttEncryption();
  QuicConfigPeer::SetReceivedMaxUnidirectionalStreams(session_->config(), 2u);
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  EXPECT_CALL(
      *connection_,
      CloseConnection(
          _, "new unidirectional limit 2 decreases the current limit: 3", _));
  session_->OnConfigNegotiated();
}
// The kIFW7 connection option must set the session flow-control receive
// window to 192 KB after config negotiation.
TEST_P(QuicSpdySessionTestServer, CustomFlowControlWindow) {
  Initialize();
  QuicTagVector copt;
  copt.push_back(kIFW7);
  QuicConfigPeer::SetReceivedConnectionOptions(session_->config(), copt);
  connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
  session_->OnConfigNegotiated();
  EXPECT_EQ(192 * 1024u, QuicFlowControllerPeer::ReceiveWindowSize(
                             session_->flow_controller()));
}
// gQUIC-only: a WINDOW_UPDATE for the flow-control-blocked headers stream
// must clear both the stream-level and session-level blocked state.
TEST_P(QuicSpdySessionTestServer, WindowUpdateUnblocksHeadersStream) {
  Initialize();
  if (VersionUsesHttp3(transport_version())) {
    return;
  }
  QuicHeadersStream* headers_stream =
      QuicSpdySessionPeer::GetHeadersStream(&*session_);
  // Force the headers stream into the blocked state by zeroing its window.
  QuicStreamPeer::SetSendWindowOffset(headers_stream, 0);
  EXPECT_TRUE(headers_stream->IsFlowControlBlocked());
  EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
  EXPECT_TRUE(session_->IsStreamFlowControlBlocked());
  QuicWindowUpdateFrame window_update_frame(kInvalidControlFrameId,
                                            headers_stream->id(),
                                            2 * kMinimumFlowControlSendWindow);
  session_->OnWindowUpdateFrame(window_update_frame);
  EXPECT_FALSE(headers_stream->IsFlowControlBlocked());
  EXPECT_FALSE(session_->IsConnectionFlowControlBlocked());
  EXPECT_FALSE(session_->IsStreamFlowControlBlocked());
}
// Opening one stream beyond the incoming-stream limit: gQUIC refuses the
// stream with QUIC_REFUSED_STREAM, while IETF QUIC treats the over-limit
// stream ID as a connection error.
TEST_P(QuicSpdySessionTestServer,
       TooManyUnfinishedStreamsCauseServerRejectStream) {
  Initialize();
  CompleteHandshake();
  const QuicStreamId kMaxStreams = 5;
  if (VersionHasIetfQuicFrames(transport_version())) {
    QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(&*session_,
                                                            kMaxStreams);
  } else {
    QuicSessionPeer::SetMaxOpenIncomingStreams(&*session_, kMaxStreams);
  }
  const QuicStreamId kFirstStreamId = GetNthClientInitiatedBidirectionalId(0);
  const QuicStreamId kFinalStreamId =
      GetNthClientInitiatedBidirectionalId(kMaxStreams);
  const QuicStreamId kNextId = QuicUtils::StreamIdDelta(transport_version());
  // Open and immediately close kMaxStreams streams without a FIN or RST from
  // the peer, leaving them "unfinished" for accounting purposes.
  for (QuicStreamId i = kFirstStreamId; i < kFinalStreamId; i += kNextId) {
    QuicStreamFrame data1(i, false, 0, absl::string_view("HT"));
    session_->OnStreamFrame(data1);
    CloseStream(i);
  }
  if (!VersionHasIetfQuicFrames(transport_version())) {
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(1);
    EXPECT_CALL(*connection_,
                OnStreamReset(kFinalStreamId, QUIC_REFUSED_STREAM))
        .Times(1);
  } else {
    EXPECT_CALL(
        *connection_,
        CloseConnection(QUIC_INVALID_STREAM_ID,
                        testing::MatchesRegex(
                            "Stream id \\d+ would exceed stream count limit 5"),
                        _));
  }
  QuicStreamFrame data1(kFinalStreamId, false, 0, absl::string_view("HT"));
  session_->OnStreamFrame(data1);
}
// Streams that received a FIN and were moved to the draining state must not
// count against the open-stream limit, so more than kMaxStreams streams can
// be processed sequentially without any being refused.
TEST_P(QuicSpdySessionTestServer, DrainingStreamsDoNotCountAsOpened) {
  Initialize();
  CompleteHandshake();
  if (VersionHasIetfQuicFrames(transport_version())) {
    // IETF QUIC replenishes credit via a MAX_STREAMS frame as streams drain.
    QuicSessionPeer::set_is_configured(&*session_, true);
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(1);
  } else {
    EXPECT_CALL(*connection_, SendControlFrame(_)).Times(0);
  }
  // No stream may ever be refused in this test.
  EXPECT_CALL(*connection_, OnStreamReset(_, QUIC_REFUSED_STREAM)).Times(0);
  const QuicStreamId kMaxStreams = 5;
  if (VersionHasIetfQuicFrames(transport_version())) {
    QuicSessionPeer::SetMaxOpenIncomingBidirectionalStreams(&*session_,
                                                            kMaxStreams);
  } else {
    QuicSessionPeer::SetMaxOpenIncomingStreams(&*session_, kMaxStreams);
  }
  const QuicStreamId kFirstStreamId = GetNthClientInitiatedBidirectionalId(0);
  const QuicStreamId kFinalStreamId =
      GetNthClientInitiatedBidirectionalId(kMaxStreams + 1);
  // One more stream than the limit: each is opened with a FIN and drained
  // before the next, so the open count never exceeds one.
  for (QuicStreamId i = kFirstStreamId; i < kFinalStreamId; i += IdDelta()) {
    QuicStreamFrame data1(i, true, 0, absl::string_view("HT"));
    session_->OnStreamFrame(data1);
    EXPECT_EQ(1u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
    session_->StreamDraining(i, false);
    EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
  }
}
// Client-perspective variant of the test fixture; the second constructor
// argument (false) is forwarded to QuicSpdySessionTestBase.
class QuicSpdySessionTestClient : public QuicSpdySessionTestBase {
 protected:
  QuicSpdySessionTestClient()
      : QuicSpdySessionTestBase(Perspective::IS_CLIENT, false) {}
};

// Run every client test against all supported QUIC versions.
INSTANTIATE_TEST_SUITE_P(Tests, QuicSpdySessionTestClient,
                         ::testing::ValuesIn(AllSupportedVersions()),
                         ::testing::PrintToStringParamName());
// Pending streams are used only for STREAM and RESET_STREAM frames on
// peer-initiated (server) unidirectional streams — never for self-initiated
// streams, STOP_SENDING, or bidirectional streams.
TEST_P(QuicSpdySessionTestClient, UsesPendingStreamsForFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  EXPECT_TRUE(session_->UsesPendingStreamForFrame(
      STREAM_FRAME, QuicUtils::GetFirstUnidirectionalStreamId(
                        transport_version(), Perspective::IS_SERVER)));
  EXPECT_TRUE(session_->UsesPendingStreamForFrame(
      RST_STREAM_FRAME, QuicUtils::GetFirstUnidirectionalStreamId(
                            transport_version(), Perspective::IS_SERVER)));
  // Self-initiated unidirectional stream: no pending stream.
  EXPECT_FALSE(session_->UsesPendingStreamForFrame(
      RST_STREAM_FRAME, QuicUtils::GetFirstUnidirectionalStreamId(
                            transport_version(), Perspective::IS_CLIENT)));
  EXPECT_FALSE(session_->UsesPendingStreamForFrame(
      STOP_SENDING_FRAME, QuicUtils::GetFirstUnidirectionalStreamId(
                              transport_version(), Perspective::IS_SERVER)));
  // Bidirectional streams never go through the pending-stream path.
  EXPECT_FALSE(session_->UsesPendingStreamForFrame(
      RST_STREAM_FRAME, QuicUtils::GetFirstBidirectionalStreamId(
                            transport_version(), Perspective::IS_SERVER)));
}
// An empty, non-FIN stream frame on a would-be pending (server-initiated
// unidirectional) stream must be handled without opening a dynamic stream or
// crashing.
TEST_P(QuicSpdySessionTestClient, BadStreamFramePendingStream) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
  QuicStreamId stream_id1 =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  // Zero-length frame at offset 0 with no FIN.
  QuicStreamFrame data1(stream_id1, false, 0, 0);
  session_->OnStreamFrame(data1);
}
// A pending stream (created by data at a non-zero offset before the stream
// type byte arrives) must count as activity that keeps the connection alive.
TEST_P(QuicSpdySessionTestClient, PendingStreamKeepsConnectionAlive) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_SERVER);
  // Offset 1 skips the stream type byte, so the stream stays pending.
  QuicStreamFrame frame(stream_id, false, 1, "test");
  EXPECT_FALSE(session_->ShouldKeepConnectionAlive());
  session_->OnStreamFrame(frame);
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&*session_, stream_id));
  EXPECT_TRUE(session_->ShouldKeepConnectionAlive());
}
// Creating server-initiated stream #2 implicitly marks #0 and #1 as
// "available" (openable later); client-initiated IDs are unaffected.
TEST_P(QuicSpdySessionTestClient, AvailableStreamsClient) {
  Initialize();
  ASSERT_TRUE(session_->GetOrCreateStream(
                  GetNthServerInitiatedBidirectionalId(2)) != nullptr);
  // Lower-numbered peer streams become available, not open.
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &*session_, GetNthServerInitiatedBidirectionalId(0)));
  EXPECT_TRUE(QuicSessionPeer::IsStreamAvailable(
      &*session_, GetNthServerInitiatedBidirectionalId(1)));
  ASSERT_TRUE(session_->GetOrCreateStream(
                  GetNthServerInitiatedBidirectionalId(0)) != nullptr);
  ASSERT_TRUE(session_->GetOrCreateStream(
                  GetNthServerInitiatedBidirectionalId(1)) != nullptr);
  EXPECT_FALSE(QuicSessionPeer::IsStreamAvailable(
      &*session_, GetNthClientInitiatedBidirectionalId(0)));
}
// gQUIC-only regression test: response headers exceeding the size limit reset
// the stream with QUIC_HEADERS_TOO_LARGE, and no further writes may occur on
// the stream after that reset.
TEST_P(QuicSpdySessionTestClient, TooLargeHeadersMustNotCauseWriteAfterReset) {
  Initialize();
  if (VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  TestStream* stream = session_->CreateOutgoingBidirectionalStream();
  // Send the request (the single allowed packet write).
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillOnce(Return(WriteResult(WRITE_STATUS_OK, 0)));
  stream->WriteHeaders(HttpHeaderBlock(), true, nullptr);
  // Deliver response headers flagged as exceeding the limit.
  QuicHeaderList headers;
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_,
              OnStreamReset(stream->id(), QUIC_HEADERS_TOO_LARGE));
  stream->OnStreamHeaderList( true,
                             headers.uncompressed_header_bytes(), headers);
}
// A FIN received after the read side was closed must still be recorded so
// that resetting the stream later leaves no stale entry in the session's
// locally-closed-streams-highest-offset map.
TEST_P(QuicSpdySessionTestClient, RecordFinAfterReadSideClosed) {
  Initialize();
  CompleteHandshake();
  TestStream* stream = session_->CreateOutgoingBidirectionalStream();
  QuicStreamId stream_id = stream->id();
  // Close the read side first, then deliver an empty FIN.
  QuicStreamPeer::CloseReadSide(stream);
  QuicStreamFrame frame(stream_id, true, 0, absl::string_view());
  session_->OnStreamFrame(frame);
  EXPECT_TRUE(stream->fin_received());
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream->id(), _));
  stream->Reset(QUIC_STREAM_CANCELLED);
  EXPECT_TRUE(QuicStreamPeer::read_side_closed(stream));
  EXPECT_TRUE(connection_->connected());
  EXPECT_TRUE(QuicSessionPeer::IsStreamClosed(&*session_, stream_id));
  EXPECT_FALSE(QuicSessionPeer::IsStreamCreated(&*session_, stream_id));
  // The recorded FIN offset means no entry is left pending final-offset
  // accounting.
  EXPECT_EQ(
      0u,
      QuicSessionPeer::GetLocallyClosedStreamsHighestOffset(&*session_).size());
}
// gQUIC-only: WritePriority must serialize exactly one SPDY PRIORITY frame
// onto the headers stream, byte-identical to what SpdyFramer produces for the
// same parameters.
TEST_P(QuicSpdySessionTestClient, WritePriority) {
  Initialize();
  if (VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  // Swap in a TestHeadersStream so its send buffer can be inspected.
  TestHeadersStream* headers_stream;
  QuicSpdySessionPeer::SetHeadersStream(&*session_, nullptr);
  headers_stream = new TestHeadersStream(&*session_);
  QuicSpdySessionPeer::SetHeadersStream(&*session_, headers_stream);
  // Block the writer so the frame stays buffered for inspection.
  EXPECT_CALL(*writer_, IsWriteBlocked()).WillRepeatedly(Return(true));
  const QuicStreamId id = 4;
  const QuicStreamId parent_stream_id = 9;
  const SpdyPriority priority = kV3HighestPriority;
  const bool exclusive = true;
  session_->WritePriority(id, parent_stream_id,
                          Spdy3PriorityToHttp2Weight(priority), exclusive);
  QuicStreamSendBuffer& send_buffer =
      QuicStreamPeer::SendBuffer(headers_stream);
  ASSERT_EQ(1u, send_buffer.size());
  // Reference serialization of the same PRIORITY frame for comparison.
  SpdyPriorityIR priority_frame(
      id, parent_stream_id, Spdy3PriorityToHttp2Weight(priority), exclusive);
  SpdyFramer spdy_framer(SpdyFramer::ENABLE_COMPRESSION);
  SpdySerializedFrame frame = spdy_framer.SerializeFrame(priority_frame);
  const quiche::QuicheMemSlice& slice =
      QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer)->slice;
  EXPECT_EQ(absl::string_view(frame.data(), frame.size()),
            absl::string_view(slice.data(), slice.length()));
}
// Server push was removed from HTTP/3: a server-initiated unidirectional
// stream whose first byte is the push-stream type (0x01) must close the
// connection with QUIC_HTTP_RECEIVE_SERVER_PUSH.
TEST_P(QuicSpdySessionTestClient, Http3ServerPush) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
  // "01" is the HTTP/3 push stream type byte.
  std::string frame_type1;
  ASSERT_TRUE(absl::HexStringToBytes("01", &frame_type1));
  QuicStreamId stream_id1 =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_HTTP_RECEIVE_SERVER_PUSH, _, _))
      .Times(1);
  session_->OnStreamFrame(QuicStreamFrame(stream_id1, false,
                                          0, frame_type1));
}
// Same rejection as Http3ServerPush, but with the push-ID bytes arriving
// before the stream type byte: the stream stays pending until the type byte
// shows up, at which point the connection is closed.
TEST_P(QuicSpdySessionTestClient, Http3ServerPushOutofOrderFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
  // "01" = push stream type; "4000" = varint-encoded push ID 0.
  std::string frame_type;
  ASSERT_TRUE(absl::HexStringToBytes("01", &frame_type));
  std::string push_id;
  ASSERT_TRUE(absl::HexStringToBytes("4000", &push_id));
  QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  QuicStreamFrame data1(stream_id,
                        false, 0, frame_type);
  QuicStreamFrame data2(stream_id,
                        false, frame_type.size(),
                        push_id);
  // Deliver the push ID first: no stream type yet, so nothing opens.
  session_->OnStreamFrame(data2);
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_HTTP_RECEIVE_SERVER_PUSH, _, _))
      .Times(1);
  // The type byte arrives: push stream is recognized and rejected.
  session_->OnStreamFrame(data1);
}
// With the server-side disable flag set on a CLIENT session, the peer's
// QPACK_MAX_TABLE_CAPACITY setting still takes full effect on the encoder
// (the flag only affects servers), and the client's own advertised capacity
// remains the default.
TEST_P(QuicSpdySessionTestClient, ServerDisableQpackDynamicTable) {
  SetQuicFlag(quic_server_disable_qpack_dynamic_table, true);
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  // Open the peer's control stream.
  QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  QuicStreamFrame data1(stream_id, false, 0, absl::string_view(type, 1));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  // Peer advertises a 512-byte QPACK dynamic table.
  const uint64_t capacity = 512;
  SettingsFrame settings;
  settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = capacity;
  std::string data = HttpEncoder::SerializeSettingsFrame(settings);
  QuicStreamFrame frame(stream_id, false, 1, data);
  session_->OnStreamFrame(frame);
  QpackEncoder* qpack_encoder = session_->qpack_encoder();
  EXPECT_EQ(capacity, qpack_encoder->MaximumDynamicTableCapacity());
  QpackEncoderHeaderTable* encoder_header_table =
      QpackEncoderPeer::header_table(qpack_encoder);
  // Dynamic table is actually used (capacity == 512, not clamped to 0).
  EXPECT_EQ(capacity, encoder_header_table->dynamic_table_capacity());
  EXPECT_EQ(capacity, encoder_header_table->maximum_dynamic_table_capacity());
  // Client's outgoing SETTINGS still advertises the default capacity.
  SettingsFrame outgoing_settings = session_->settings();
  EXPECT_EQ(kDefaultQpackMaxDynamicTableCapacity,
            outgoing_settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY]);
}
// With the session's own maximum QPACK dynamic table capacity set to 0, the
// encoder must not actually use a dynamic table (capacity stays 0) even when
// the peer permits 512 bytes, and the outgoing SETTINGS must advertise 0.
TEST_P(QuicSpdySessionTestClient, DisableQpackDynamicTable) {
  SetQuicFlag(quic_server_disable_qpack_dynamic_table, false);
  // Disable via session configuration rather than the server-only flag.
  qpack_maximum_dynamic_table_capacity_ = 0;
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  // Open the peer's control stream.
  QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  QuicStreamFrame data1(stream_id, false, 0, absl::string_view(type, 1));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  // Peer permits a 512-byte dynamic table.
  const uint64_t capacity = 512;
  SettingsFrame settings;
  settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = capacity;
  std::string data = HttpEncoder::SerializeSettingsFrame(settings);
  QuicStreamFrame frame(stream_id, false, 1, data);
  session_->OnStreamFrame(frame);
  QpackEncoder* qpack_encoder = session_->qpack_encoder();
  EXPECT_EQ(capacity, qpack_encoder->MaximumDynamicTableCapacity());
  QpackEncoderHeaderTable* encoder_header_table =
      QpackEncoderPeer::header_table(qpack_encoder);
  // Locally disabled: the table the encoder actually uses stays empty.
  EXPECT_EQ(0, encoder_header_table->dynamic_table_capacity());
  EXPECT_EQ(capacity, encoder_header_table->maximum_dynamic_table_capacity());
  SettingsFrame outgoing_settings = session_->settings();
  EXPECT_EQ(0, outgoing_settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY]);
}
// Lost-frame handling: streams with pending retransmissions are written in
// loss order (crypto first, then the loss-notification order) before
// regularly write-blocked streams, and the session reports
// WillingAndAbleToWrite correctly throughout. InSequence makes expectation
// order binding.
TEST_P(QuicSpdySessionTestServer, OnStreamFrameLost) {
  Initialize();
  CompleteHandshake();
  InSequence s;
  // Connection takes ownership of the mock send algorithm.
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_->connection(), send_algorithm);
  TestCryptoStream* crypto_stream = session_->GetMutableCryptoStream();
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_->CreateOutgoingBidirectionalStream();
  QuicStreamFrame frame2(stream2->id(), false, 0, 9);
  QuicStreamFrame frame3(stream4->id(), false, 0, 9);
  // Mark losses: stream4 first, then crypto (pre-CRYPTO-frame versions only),
  // then stream2.
  EXPECT_CALL(*stream4, HasPendingRetransmission()).WillOnce(Return(true));
  if (!QuicVersionUsesCryptoFrames(transport_version())) {
    EXPECT_CALL(*crypto_stream, HasPendingRetransmission())
        .WillOnce(Return(true));
  }
  EXPECT_CALL(*stream2, HasPendingRetransmission()).WillOnce(Return(true));
  session_->OnFrameLost(QuicFrame(frame3));
  if (!QuicVersionUsesCryptoFrames(transport_version())) {
    QuicStreamFrame frame1(QuicUtils::GetCryptoStreamId(transport_version()),
                           false, 0, 1300);
    session_->OnFrameLost(QuicFrame(frame1));
  } else {
    QuicCryptoFrame crypto_frame(ENCRYPTION_INITIAL, 0, 1300);
    session_->OnFrameLost(QuicFrame(&crypto_frame));
  }
  session_->OnFrameLost(QuicFrame(frame2));
  EXPECT_TRUE(session_->WillingAndAbleToWrite());
  session_->MarkConnectionLevelWriteBlocked(stream2->id());
  session_->MarkConnectionLevelWriteBlocked(stream4->id());
  // Crypto retransmission is not gated on CanSend.
  EXPECT_CALL(*send_algorithm, CanSend(_)).Times(0);
  if (!QuicVersionUsesCryptoFrames(transport_version())) {
    EXPECT_CALL(*crypto_stream, OnCanWrite());
    EXPECT_CALL(*crypto_stream, HasPendingRetransmission())
        .WillOnce(Return(false));
  }
  // First OnCanWrite: congestion allows exactly one write (stream4), then the
  // send algorithm blocks further sending.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream4, OnCanWrite());
  EXPECT_CALL(*stream4, HasPendingRetransmission()).WillOnce(Return(false));
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillRepeatedly(Return(false));
  session_->OnCanWrite();
  EXPECT_TRUE(session_->WillingAndAbleToWrite());
  // Second OnCanWrite: stream2's retransmission, then both regularly blocked
  // streams, draining everything.
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream2, OnCanWrite());
  EXPECT_CALL(*stream2, HasPendingRetransmission()).WillOnce(Return(false));
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream2, OnCanWrite());
  EXPECT_CALL(*send_algorithm, CanSend(_)).WillOnce(Return(true));
  EXPECT_CALL(*stream4, OnCanWrite());
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_->OnCanWrite();
  EXPECT_FALSE(session_->WillingAndAbleToWrite());
}
// A stream that is reset after its frames were marked lost must be skipped
// during retransmission: only stream6 and stream2 get OnCanWrite, never
// stream4. (Test name typo "Donot" left as-is to preserve test identity.)
TEST_P(QuicSpdySessionTestServer, DonotRetransmitDataOfClosedStreams) {
  Initialize();
  CompleteHandshake();
  // QPACK decoder stream writes must go somewhere harmless during cleanup.
  NoopQpackStreamSenderDelegate qpack_stream_sender_delegate;
  if (VersionUsesHttp3(transport_version())) {
    session_->qpack_decoder()->set_qpack_stream_sender_delegate(
        &qpack_stream_sender_delegate);
  }
  InSequence s;
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_->CreateOutgoingBidirectionalStream();
  QuicStreamFrame frame1(stream2->id(), false, 0, 9);
  QuicStreamFrame frame2(stream4->id(), false, 0, 9);
  QuicStreamFrame frame3(stream6->id(), false, 0, 9);
  // Mark all three streams' frames lost.
  EXPECT_CALL(*stream6, HasPendingRetransmission()).WillOnce(Return(true));
  EXPECT_CALL(*stream4, HasPendingRetransmission()).WillOnce(Return(true));
  EXPECT_CALL(*stream2, HasPendingRetransmission()).WillOnce(Return(true));
  session_->OnFrameLost(QuicFrame(frame3));
  session_->OnFrameLost(QuicFrame(frame2));
  session_->OnFrameLost(QuicFrame(frame1));
  session_->MarkConnectionLevelWriteBlocked(stream2->id());
  session_->MarkConnectionLevelWriteBlocked(stream4->id());
  session_->MarkConnectionLevelWriteBlocked(stream6->id());
  // Reset stream4 before retransmission happens.
  EXPECT_CALL(*connection_, SendControlFrame(_));
  EXPECT_CALL(*connection_, OnStreamReset(stream4->id(), _));
  stream4->Reset(QUIC_STREAM_CANCELLED);
  // Retransmission pass: stream4 must not appear.
  EXPECT_CALL(*stream6, OnCanWrite());
  EXPECT_CALL(*stream6, HasPendingRetransmission()).WillOnce(Return(false));
  EXPECT_CALL(*stream2, OnCanWrite());
  EXPECT_CALL(*stream2, HasPendingRetransmission()).WillOnce(Return(false));
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*stream2, OnCanWrite());
  EXPECT_CALL(*stream6, OnCanWrite());
  session_->OnCanWrite();
}
// RetransmitFrames must replay a mixed batch (stream data interleaved with a
// window update) in the exact order given, then report application-limited.
TEST_P(QuicSpdySessionTestServer, RetransmitFrames) {
  Initialize();
  CompleteHandshake();
  // Connection takes ownership of the mock send algorithm.
  MockSendAlgorithm* send_algorithm = new StrictMock<MockSendAlgorithm>;
  QuicConnectionPeer::SetSendAlgorithm(session_->connection(), send_algorithm);
  InSequence s;
  TestStream* stream2 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream4 = session_->CreateOutgoingBidirectionalStream();
  TestStream* stream6 = session_->CreateOutgoingBidirectionalStream();
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&ClearControlFrame));
  session_->SendWindowUpdate(stream2->id(), 9);
  QuicStreamFrame frame1(stream2->id(), false, 0, 9);
  QuicStreamFrame frame2(stream4->id(), false, 0, 9);
  QuicStreamFrame frame3(stream6->id(), false, 0, 9);
  QuicWindowUpdateFrame window_update(1, stream2->id(), 9);
  // Batch order: stream2 data, window update, stream4 data, stream6 data.
  QuicFrames frames;
  frames.push_back(QuicFrame(frame1));
  frames.push_back(QuicFrame(window_update));
  frames.push_back(QuicFrame(frame2));
  frames.push_back(QuicFrame(frame3));
  EXPECT_FALSE(session_->WillingAndAbleToWrite());
  EXPECT_CALL(*stream2, RetransmitStreamData(_, _, _, _))
      .WillOnce(Return(true));
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&ClearControlFrame));
  EXPECT_CALL(*stream4, RetransmitStreamData(_, _, _, _))
      .WillOnce(Return(true));
  EXPECT_CALL(*stream6, RetransmitStreamData(_, _, _, _))
      .WillOnce(Return(true));
  EXPECT_CALL(*send_algorithm, OnApplicationLimited(_));
  session_->RetransmitFrames(frames, PTO_RETRANSMISSION);
}
// A SPDY PRIORITY frame must update the target stream's priority to the
// given urgency with the default incremental flag.
TEST_P(QuicSpdySessionTestServer, OnPriorityFrame) {
  Initialize();
  QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
  TestStream* stream = session_->CreateIncomingStream(stream_id);
  session_->OnPriorityFrame(stream_id,
                            spdy::SpdyStreamPrecedence(kV3HighestPriority));
  EXPECT_EQ((QuicStreamPriority(HttpStreamPriority{
                kV3HighestPriority, HttpStreamPriority::kDefaultIncremental})),
            stream->priority());
}
// HTTP/3 PRIORITY_UPDATE frames on the control stream must update stream
// priority whether the frame arrives after the stream exists (stream1) or
// before it is created (stream2, where the buffered update applies at
// creation time).
TEST_P(QuicSpdySessionTestServer, OnPriorityUpdateFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  EXPECT_CALL(debug_visitor, OnSettingsFrameSent(_));
  CompleteHandshake();
  // Establish the peer's control stream.
  QuicStreamId receive_control_stream_id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  absl::string_view stream_type(type, 1);
  QuicStreamOffset offset = 0;
  QuicStreamFrame data1(receive_control_stream_id, false, offset, stream_type);
  offset += stream_type.length();
  EXPECT_CALL(debug_visitor,
              OnPeerControlStreamCreated(receive_control_stream_id));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(receive_control_stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  // SETTINGS must precede any other control stream frame.
  std::string serialized_settings = HttpEncoder::SerializeSettingsFrame({});
  QuicStreamFrame data2(receive_control_stream_id, false, offset,
                        serialized_settings);
  offset += serialized_settings.length();
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(_));
  session_->OnStreamFrame(data2);
  // Case 1: stream exists before the PRIORITY_UPDATE ("u=2") arrives.
  const QuicStreamId stream_id1 = GetNthClientInitiatedBidirectionalId(0);
  PriorityUpdateFrame priority_update1{stream_id1, "u=2"};
  std::string serialized_priority_update1 =
      HttpEncoder::SerializePriorityUpdateFrame(priority_update1);
  QuicStreamFrame data3(receive_control_stream_id,
                        false, offset, serialized_priority_update1);
  offset += serialized_priority_update1.size();
  TestStream* stream1 = session_->CreateIncomingStream(stream_id1);
  EXPECT_EQ(QuicStreamPriority(
                HttpStreamPriority{HttpStreamPriority::kDefaultUrgency,
                                   HttpStreamPriority::kDefaultIncremental}),
            stream1->priority());
  EXPECT_CALL(debug_visitor, OnPriorityUpdateFrameReceived(priority_update1));
  session_->OnStreamFrame(data3);
  EXPECT_EQ(QuicStreamPriority(HttpStreamPriority{
                2u, HttpStreamPriority::kDefaultIncremental}),
            stream1->priority());
  // Case 2: PRIORITY_UPDATE ("u=5, i") arrives before the stream is created;
  // the priority must be applied when the stream comes into existence.
  const QuicStreamId stream_id2 = GetNthClientInitiatedBidirectionalId(1);
  PriorityUpdateFrame priority_update2{stream_id2, "u=5, i"};
  std::string serialized_priority_update2 =
      HttpEncoder::SerializePriorityUpdateFrame(priority_update2);
  QuicStreamFrame stream_frame3(receive_control_stream_id,
                                false, offset,
                                serialized_priority_update2);
  EXPECT_CALL(debug_visitor, OnPriorityUpdateFrameReceived(priority_update2));
  session_->OnStreamFrame(stream_frame3);
  TestStream* stream2 = session_->CreateIncomingStream(stream_id2);
  EXPECT_EQ(QuicStreamPriority(HttpStreamPriority{5u, true}),
            stream2->priority());
}
// A PRIORITY_UPDATE frame whose Priority Field Value cannot be parsed ("00")
// must close the connection with QUIC_INVALID_PRIORITY_UPDATE.
TEST_P(QuicSpdySessionTestServer, OnInvalidPriorityUpdateFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  // Establish the peer's control stream and deliver the mandatory SETTINGS
  // frame before sending the PRIORITY_UPDATE under test.
  QuicStreamId receive_control_stream_id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  absl::string_view stream_type(type, 1);
  QuicStreamOffset offset = 0;
  QuicStreamFrame data1(receive_control_stream_id, false, offset, stream_type);
  offset += stream_type.length();
  EXPECT_CALL(debug_visitor,
              OnPeerControlStreamCreated(receive_control_stream_id));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(receive_control_stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  std::string serialized_settings = HttpEncoder::SerializeSettingsFrame({});
  QuicStreamFrame data2(receive_control_stream_id, false, offset,
                        serialized_settings);
  offset += serialized_settings.length();
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(_));
  session_->OnStreamFrame(data2);
  // "00" is not a valid priority field value; the debug visitor is still
  // notified, but the connection must be closed.
  const QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
  PriorityUpdateFrame priority_update{stream_id, "00"};
  EXPECT_CALL(debug_visitor, OnPriorityUpdateFrameReceived(priority_update));
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_INVALID_PRIORITY_UPDATE,
                              "Invalid PRIORITY_UPDATE frame payload.", _));
  std::string serialized_priority_update =
      HttpEncoder::SerializePriorityUpdateFrame(priority_update);
  QuicStreamFrame data3(receive_control_stream_id,
                        false, offset, serialized_priority_update);
  session_->OnStreamFrame(data3);
}
// A PRIORITY_UPDATE with an out-of-range urgency ("u=9") is tolerated: the
// frame is surfaced to the debug visitor but must NOT close the connection.
TEST_P(QuicSpdySessionTestServer, OnPriorityUpdateFrameOutOfBoundsUrgency) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  // Establish the peer's control stream plus the mandatory SETTINGS frame.
  QuicStreamId receive_control_stream_id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  absl::string_view stream_type(type, 1);
  QuicStreamOffset offset = 0;
  QuicStreamFrame data1(receive_control_stream_id, false, offset, stream_type);
  offset += stream_type.length();
  EXPECT_CALL(debug_visitor,
              OnPeerControlStreamCreated(receive_control_stream_id));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(receive_control_stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  std::string serialized_settings = HttpEncoder::SerializeSettingsFrame({});
  QuicStreamFrame data2(receive_control_stream_id, false, offset,
                        serialized_settings);
  offset += serialized_settings.length();
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(_));
  session_->OnStreamFrame(data2);
  const QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
  PriorityUpdateFrame priority_update{stream_id, "u=9"};
  EXPECT_CALL(debug_visitor, OnPriorityUpdateFrameReceived(priority_update));
  // Out-of-bounds urgency must not be treated as a connection error.
  EXPECT_CALL(*connection_, CloseConnection(_, _, _)).Times(0);
  std::string serialized_priority_update =
      HttpEncoder::SerializePriorityUpdateFrame(priority_update);
  QuicStreamFrame data3(receive_control_stream_id,
                        false, offset, serialized_priority_update);
  session_->OnStreamFrame(data3);
}
// A unidirectional stream whose first byte (0x04) is a stream type the
// session does not accept triggers STOP_SENDING with STREAM_CREATION_ERROR.
// Without a FIN the pending stream survives and discards further data; with
// a FIN it is removed entirely.
TEST_P(QuicSpdySessionTestServer, SimplePendingStreamType) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  char input[] = {0x04,
                  'a', 'b', 'c'};
  absl::string_view payload(input, ABSL_ARRAYSIZE(input));
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  for (bool fin : {true, false}) {
    QuicStreamFrame frame(stream_id, fin, 0, payload);
    // The rejected stream type must elicit exactly one STOP_SENDING carrying
    // QUIC_STREAM_STREAM_CREATION_ERROR / H3 STREAM_CREATION_ERROR.
    EXPECT_CALL(*connection_, SendControlFrame(_))
        .WillOnce(Invoke([stream_id](const QuicFrame& frame) {
          EXPECT_EQ(STOP_SENDING_FRAME, frame.type);
          const QuicStopSendingFrame& stop_sending = frame.stop_sending_frame;
          EXPECT_EQ(stream_id, stop_sending.stream_id);
          EXPECT_EQ(QUIC_STREAM_STREAM_CREATION_ERROR, stop_sending.error_code);
          EXPECT_EQ(
              static_cast<uint64_t>(QuicHttp3ErrorCode::STREAM_CREATION_ERROR),
              stop_sending.ietf_error_code);
          return ClearControlFrame(frame);
        }));
    session_->OnStreamFrame(frame);
    PendingStream* pending =
        QuicSessionPeer::GetPendingStream(&*session_, stream_id);
    if (fin) {
      // FIN received: the pending stream is gone.
      EXPECT_FALSE(pending);
    } else {
      // No FIN: the pending stream remains but ignores incoming data.
      ASSERT_TRUE(pending);
      EXPECT_TRUE(pending->sequencer()->ignore_read_data());
    }
    // Move to the next client-initiated unidirectional stream ID.
    stream_id += QuicUtils::StreamIdDelta(transport_version());
  }
}
// Same rejection behavior as SimplePendingStreamType, but the frame carrying
// the stream-type byte arrives after the rest of the payload; STOP_SENDING is
// only sent once the type byte (offset 0) is delivered.
TEST_P(QuicSpdySessionTestServer, SimplePendingStreamTypeOutOfOrderDelivery) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  char input[] = {0x04,
                  'a', 'b', 'c'};
  absl::string_view payload(input, ABSL_ARRAYSIZE(input));
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  for (bool fin : {true, false}) {
    QuicStreamFrame frame1(stream_id, false, 0,
                           payload.substr(0, 1));
    QuicStreamFrame frame2(stream_id, fin, 1, payload.substr(1));
    // Deliver the tail first; nothing can be decided without the type byte.
    session_->OnStreamFrame(frame2);
    EXPECT_CALL(*connection_, SendControlFrame(_))
        .WillOnce(Invoke(&VerifyAndClearStopSendingFrame));
    session_->OnStreamFrame(frame1);
    PendingStream* pending =
        QuicSessionPeer::GetPendingStream(&*session_, stream_id);
    if (fin) {
      EXPECT_FALSE(pending);
    } else {
      ASSERT_TRUE(pending);
      EXPECT_TRUE(pending->sequencer()->ignore_read_data());
    }
    stream_id += QuicUtils::StreamIdDelta(transport_version());
  }
}
// The stream type here is a two-byte varint (0x41 0x00, decoding to 256),
// delivered out of order byte by byte; the session must defer judgement until
// both type bytes have arrived, then reject via STOP_SENDING.
TEST_P(QuicSpdySessionTestServer,
       MultipleBytesPendingStreamTypeOutOfOrderDelivery) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  char input[] = {0x41, 0x00,
                  'a', 'b', 'c'};
  absl::string_view payload(input, ABSL_ARRAYSIZE(input));
  QuicStreamId stream_id = QuicUtils::GetFirstUnidirectionalStreamId(
      transport_version(), Perspective::IS_CLIENT);
  for (bool fin : {true, false}) {
    QuicStreamFrame frame1(stream_id, false, 0,
                           payload.substr(0, 1));
    QuicStreamFrame frame2(stream_id, false, 1,
                           payload.substr(1, 1));
    QuicStreamFrame frame3(stream_id, fin, 2, payload.substr(2));
    // Deliver tail, then first type byte; the type is still incomplete.
    session_->OnStreamFrame(frame3);
    session_->OnStreamFrame(frame1);
    // The second type byte completes the varint and triggers STOP_SENDING.
    EXPECT_CALL(*connection_, SendControlFrame(_))
        .WillOnce(Invoke(&VerifyAndClearStopSendingFrame));
    session_->OnStreamFrame(frame2);
    PendingStream* pending =
        QuicSessionPeer::GetPendingStream(&*session_, stream_id);
    if (fin) {
      EXPECT_FALSE(pending);
    } else {
      ASSERT_TRUE(pending);
      EXPECT_TRUE(pending->sequencer()->ignore_read_data());
    }
    stream_id += QuicUtils::StreamIdDelta(transport_version());
  }
}
// SETTINGS received on the peer's control stream must be applied: QPACK
// dynamic table capacity and blocked-streams limits reach the encoder, and
// the max outbound header list size reaches the session.
TEST_P(QuicSpdySessionTestServer, ReceiveControlStream) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  QuicStreamId stream_id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  QuicStreamFrame data1(stream_id, false, 0, absl::string_view(type, 1));
  EXPECT_CALL(debug_visitor, OnPeerControlStreamCreated(stream_id));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  SettingsFrame settings;
  settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = 512;
  settings.values[SETTINGS_MAX_FIELD_SECTION_SIZE] = 5;
  settings.values[SETTINGS_QPACK_BLOCKED_STREAMS] = 42;
  std::string data = HttpEncoder::SerializeSettingsFrame(settings);
  QuicStreamFrame frame(stream_id, false, 1, data);
  QpackEncoder* qpack_encoder = session_->qpack_encoder();
  QpackEncoderHeaderTable* header_table =
      QpackEncoderPeer::header_table(qpack_encoder);
  // Confirm the values are not yet in effect before the frame is processed.
  EXPECT_NE(512u, header_table->maximum_dynamic_table_capacity());
  EXPECT_NE(5u, session_->max_outbound_header_list_size());
  EXPECT_NE(42u, QpackEncoderPeer::maximum_blocked_streams(qpack_encoder));
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(settings));
  session_->OnStreamFrame(frame);
  EXPECT_EQ(512u, header_table->maximum_dynamic_table_capacity());
  EXPECT_EQ(5u, session_->max_outbound_header_list_size());
  EXPECT_EQ(42u, QpackEncoderPeer::maximum_blocked_streams(qpack_encoder));
}
// With quic_server_disable_qpack_dynamic_table set, the peer's advertised
// capacity (512) is recorded as the maximum, but the encoder's actual dynamic
// table capacity stays 0 and our own SETTINGS advertise 0.
TEST_P(QuicSpdySessionTestServer, ServerDisableQpackDynamicTable) {
  SetQuicFlag(quic_server_disable_qpack_dynamic_table, true);
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  // Open the peer's control stream and deliver SETTINGS with capacity 512.
  QuicStreamId stream_id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  QuicStreamFrame data1(stream_id, false, 0, absl::string_view(type, 1));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  const uint64_t capacity = 512;
  SettingsFrame settings;
  settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = capacity;
  std::string data = HttpEncoder::SerializeSettingsFrame(settings);
  QuicStreamFrame frame(stream_id, false, 1, data);
  session_->OnStreamFrame(frame);
  QpackEncoder* qpack_encoder = session_->qpack_encoder();
  // Maximum is recorded, but no dynamic table is actually used.
  EXPECT_EQ(capacity, qpack_encoder->MaximumDynamicTableCapacity());
  QpackEncoderHeaderTable* encoder_header_table =
      QpackEncoderPeer::header_table(qpack_encoder);
  EXPECT_EQ(capacity, encoder_header_table->maximum_dynamic_table_capacity());
  EXPECT_EQ(0, encoder_header_table->dynamic_table_capacity());
  // Our own SETTINGS advertise a zero QPACK dynamic table.
  SettingsFrame outgoing_settings = session_->settings();
  EXPECT_EQ(0, outgoing_settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY]);
}
// Same outcome as ServerDisableQpackDynamicTable, but the dynamic table is
// disabled via qpack_maximum_dynamic_table_capacity_ = 0 (session config)
// rather than the server-wide flag.
TEST_P(QuicSpdySessionTestServer, DisableQpackDynamicTable) {
  SetQuicFlag(quic_server_disable_qpack_dynamic_table, false);
  qpack_maximum_dynamic_table_capacity_ = 0;
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  // Open the peer's control stream and deliver SETTINGS with capacity 512.
  QuicStreamId stream_id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  QuicStreamFrame data1(stream_id, false, 0, absl::string_view(type, 1));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  const uint64_t capacity = 512;
  SettingsFrame settings;
  settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] = capacity;
  std::string data = HttpEncoder::SerializeSettingsFrame(settings);
  QuicStreamFrame frame(stream_id, false, 1, data);
  session_->OnStreamFrame(frame);
  QpackEncoder* qpack_encoder = session_->qpack_encoder();
  // The peer's maximum is recorded, but no dynamic table capacity is used.
  EXPECT_EQ(capacity, qpack_encoder->MaximumDynamicTableCapacity());
  QpackEncoderHeaderTable* encoder_header_table =
      QpackEncoderPeer::header_table(qpack_encoder);
  EXPECT_EQ(capacity, encoder_header_table->maximum_dynamic_table_capacity());
  EXPECT_EQ(0, encoder_header_table->dynamic_table_capacity());
  SettingsFrame outgoing_settings = session_->settings();
  EXPECT_EQ(0, outgoing_settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY]);
}
// A SETTINGS payload that arrives before the control-stream type byte must
// only take effect once the stream type is known.
TEST_P(QuicSpdySessionTestServer, ReceiveControlStreamOutOfOrderDelivery) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  const QuicStreamId id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
  SettingsFrame settings;
  settings.values[10] = 2;
  settings.values[SETTINGS_MAX_FIELD_SECTION_SIZE] = 5;
  const std::string serialized = HttpEncoder::SerializeSettingsFrame(settings);
  char stream_type[] = {kControlStream};
  QuicStreamFrame settings_frame(id, false, 1, serialized);
  QuicStreamFrame type_frame(id, false, 0, absl::string_view(stream_type, 1));
  // Deliver the SETTINGS bytes first: nothing may be applied yet.
  session_->OnStreamFrame(settings_frame);
  EXPECT_NE(5u, session_->max_outbound_header_list_size());
  // Once the type byte at offset 0 arrives, the buffered SETTINGS apply.
  session_->OnStreamFrame(type_frame);
  EXPECT_EQ(5u, session_->max_outbound_header_list_size());
}
// A stream whose header block references a QPACK dynamic table entry that has
// not arrived yet blocks decoding; closing and cleaning up that stream, then
// delivering the missing entry, must not crash or touch the dead stream.
TEST_P(QuicSpdySessionTestServer, StreamClosedWhileHeaderDecodingBlocked) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  session_->qpack_decoder()->OnSetDynamicTableCapacity(1024);
  QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
  TestStream* stream = session_->CreateIncomingStream(stream_id);
  // Header block referencing a dynamic table entry that is still missing.
  std::string headers_frame_payload;
  ASSERT_TRUE(absl::HexStringToBytes("020080", &headers_frame_payload));
  std::string headers_frame_header =
      HttpEncoder::SerializeHeadersFrameHeader(headers_frame_payload.length());
  std::string headers_frame =
      absl::StrCat(headers_frame_header, headers_frame_payload);
  stream->OnStreamFrame(QuicStreamFrame(stream_id, false, 0, headers_frame));
  // Decoding is blocked on the missing dynamic table entry.
  EXPECT_FALSE(stream->headers_decompressed());
  CloseStream(stream_id);
  session_->CleanUpClosedStreams();
  // The insertion that would unblock decoding arrives after the stream is
  // gone; this must be handled gracefully.
  session_->qpack_decoder()->OnInsertWithoutNameReference("foo", "bar");
}
// Leaves a stream blocked on QPACK header decoding when the test (and thus
// the session) is torn down; destruction with a blocked decoder must be safe.
TEST_P(QuicSpdySessionTestServer, SessionDestroyedWhileHeaderDecodingBlocked) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  session_->qpack_decoder()->OnSetDynamicTableCapacity(1024);
  QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
  TestStream* stream = session_->CreateIncomingStream(stream_id);
  // Header block referencing a dynamic table entry that never arrives.
  std::string headers_frame_payload;
  ASSERT_TRUE(absl::HexStringToBytes("020080", &headers_frame_payload));
  std::string headers_frame_header =
      HttpEncoder::SerializeHeadersFrameHeader(headers_frame_payload.length());
  std::string headers_frame =
      absl::StrCat(headers_frame_header, headers_frame_payload);
  stream->OnStreamFrame(QuicStreamFrame(stream_id, false, 0, headers_frame));
  // Still blocked at end of test; the fixture destructor exercises teardown.
  EXPECT_FALSE(stream->headers_decompressed());
}
// A server-initiated unidirectional stream with a stream type the client does
// not accept (0x3f) is parked as a pending stream that ignores data; a later
// RESET_STREAM from the peer must remove the pending stream.
TEST_P(QuicSpdySessionTestClient, ResetAfterInvalidIncomingStreamType) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  const QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  ASSERT_TRUE(session_->UsesPendingStreamForFrame(STREAM_FRAME, stream_id));
  std::string payload;
  ASSERT_TRUE(absl::HexStringToBytes("3f01", &payload));
  QuicStreamFrame frame(stream_id, false, 0,
                        payload);
  // Rejection is signaled with STOP_SENDING.
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&VerifyAndClearStopSendingFrame));
  session_->OnStreamFrame(frame);
  EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&*session_));
  PendingStream* pending =
      QuicSessionPeer::GetPendingStream(&*session_, stream_id);
  ASSERT_TRUE(pending);
  EXPECT_TRUE(pending->sequencer()->ignore_read_data());
  // Retransmitted data is tolerated while the pending stream lingers.
  session_->OnStreamFrame(frame);
  QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_id,
                               QUIC_STREAM_CANCELLED,
                               payload.size());
  session_->OnRstStream(rst_frame);
  // RESET_STREAM finally disposes of the pending stream.
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&*session_, stream_id));
}
// Same setup as ResetAfterInvalidIncomingStreamType, but the pending stream
// is disposed of by a FIN instead of a RESET_STREAM.
TEST_P(QuicSpdySessionTestClient, FinAfterInvalidIncomingStreamType) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  const QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  ASSERT_TRUE(session_->UsesPendingStreamForFrame(STREAM_FRAME, stream_id));
  std::string payload;
  ASSERT_TRUE(absl::HexStringToBytes("3f01", &payload));
  QuicStreamFrame frame(stream_id, false, 0,
                        payload);
  // The unacceptable stream type elicits STOP_SENDING.
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillOnce(Invoke(&VerifyAndClearStopSendingFrame));
  session_->OnStreamFrame(frame);
  PendingStream* pending =
      QuicSessionPeer::GetPendingStream(&*session_, stream_id);
  EXPECT_TRUE(pending);
  EXPECT_TRUE(pending->sequencer()->ignore_read_data());
  // Retransmitted data is tolerated; an empty frame with FIN then cleans up.
  session_->OnStreamFrame(frame);
  session_->OnStreamFrame(QuicStreamFrame(stream_id, true,
                                          payload.size(), ""));
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&*session_, stream_id));
}
// A RESET_STREAM arriving while only the first byte (0x40) of a two-byte
// varint stream type has been received must discard the pending stream.
TEST_P(QuicSpdySessionTestClient, ResetInMiddleOfStreamType) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  const QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  ASSERT_TRUE(session_->UsesPendingStreamForFrame(STREAM_FRAME, stream_id));
  std::string payload;
  ASSERT_TRUE(absl::HexStringToBytes("40", &payload));
  QuicStreamFrame frame(stream_id, false, 0,
                        payload);
  session_->OnStreamFrame(frame);
  // Type still incomplete, so the stream is merely pending.
  EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&*session_, stream_id));
  QuicRstStreamFrame rst_frame(kInvalidControlFrameId, stream_id,
                               QUIC_STREAM_CANCELLED,
                               payload.size());
  session_->OnRstStream(rst_frame);
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&*session_, stream_id));
}
// A FIN arriving while only part of a multi-byte stream type has been
// received should leave no pending stream behind.
TEST_P(QuicSpdySessionTestClient, FinInMiddleOfStreamType) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  const QuicStreamId id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  ASSERT_TRUE(session_->UsesPendingStreamForFrame(STREAM_FRAME, id));
  // 0x40 is only the first byte of a two-byte varint stream type.
  std::string partial_type;
  ASSERT_TRUE(absl::HexStringToBytes("40", &partial_type));
  QuicStreamFrame fin_frame(id, /*fin=*/true, 0, partial_type);
  session_->OnStreamFrame(fin_frame);
  EXPECT_FALSE(QuicSessionPeer::GetPendingStream(&*session_, id));
}
// Receiving a second control, QPACK encoder, or QPACK decoder stream from the
// peer is a connection error (QUIC_HTTP_DUPLICATE_UNIDIRECTIONAL_STREAM) and
// triggers a QUIC peer bug; the debug visitor is only notified for the first
// instance of each stream type.
TEST_P(QuicSpdySessionTestClient, DuplicateHttp3UnidirectionalStreams) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  // First control stream is accepted.
  QuicStreamId id1 =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  char type1[] = {kControlStream};
  QuicStreamFrame data1(id1, false, 0, absl::string_view(type1, 1));
  EXPECT_CALL(debug_visitor, OnPeerControlStreamCreated(id1));
  session_->OnStreamFrame(data1);
  // Second control stream closes the connection.
  QuicStreamId id2 =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 1);
  QuicStreamFrame data2(id2, false, 0, absl::string_view(type1, 1));
  EXPECT_CALL(debug_visitor, OnPeerControlStreamCreated(id2)).Times(0);
  EXPECT_QUIC_PEER_BUG(
      {
        EXPECT_CALL(*connection_,
                    CloseConnection(QUIC_HTTP_DUPLICATE_UNIDIRECTIONAL_STREAM,
                                    "Control stream is received twice.", _));
        session_->OnStreamFrame(data2);
      },
      "Received a duplicate Control stream: Closing connection.");
  // First QPACK encoder stream is accepted.
  QuicStreamId id3 =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 2);
  char type2[]{kQpackEncoderStream};
  QuicStreamFrame data3(id3, false, 0, absl::string_view(type2, 1));
  EXPECT_CALL(debug_visitor, OnPeerQpackEncoderStreamCreated(id3));
  session_->OnStreamFrame(data3);
  // Second QPACK encoder stream closes the connection.
  QuicStreamId id4 =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  QuicStreamFrame data4(id4, false, 0, absl::string_view(type2, 1));
  EXPECT_CALL(debug_visitor, OnPeerQpackEncoderStreamCreated(id4)).Times(0);
  EXPECT_QUIC_PEER_BUG(
      {
        EXPECT_CALL(
            *connection_,
            CloseConnection(QUIC_HTTP_DUPLICATE_UNIDIRECTIONAL_STREAM,
                            "QPACK encoder stream is received twice.", _));
        session_->OnStreamFrame(data4);
      },
      "Received a duplicate QPACK encoder stream: Closing connection.");
  // First QPACK decoder stream is accepted.
  QuicStreamId id5 =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 4);
  char type3[]{kQpackDecoderStream};
  QuicStreamFrame data5(id5, false, 0, absl::string_view(type3, 1));
  EXPECT_CALL(debug_visitor, OnPeerQpackDecoderStreamCreated(id5));
  session_->OnStreamFrame(data5);
  // Second QPACK decoder stream closes the connection.
  QuicStreamId id6 =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 5);
  QuicStreamFrame data6(id6, false, 0, absl::string_view(type3, 1));
  EXPECT_CALL(debug_visitor, OnPeerQpackDecoderStreamCreated(id6)).Times(0);
  EXPECT_QUIC_PEER_BUG(
      {
        EXPECT_CALL(
            *connection_,
            CloseConnection(QUIC_HTTP_DUPLICATE_UNIDIRECTIONAL_STREAM,
                            "QPACK decoder stream is received twice.", _));
        session_->OnStreamFrame(data6);
      },
      "Received a duplicate QPACK decoder stream: Closing connection.");
}
// A malformed instruction on the peer's QPACK encoder stream (type byte 0x02
// followed by 0x00, which carries an invalid relative index) must close the
// connection with the corresponding fine-grained QPACK error code.
TEST_P(QuicSpdySessionTestClient, EncoderStreamError) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  std::string data;
  ASSERT_TRUE(
      absl::HexStringToBytes("02"
                             "00",
                             &data));
  QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  QuicStreamFrame frame(stream_id, false, 0, data);
  EXPECT_CALL(*connection_,
              CloseConnection(
                  QUIC_QPACK_ENCODER_STREAM_DUPLICATE_INVALID_RELATIVE_INDEX,
                  "Encoder stream error: Invalid relative index.", _));
  session_->OnStreamFrame(frame);
}
// A malformed instruction on the peer's QPACK decoder stream (type byte 0x03
// followed by 0x00, an Insert Count Increment of zero) must close the
// connection with the corresponding fine-grained QPACK error code.
TEST_P(QuicSpdySessionTestClient, DecoderStreamError) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  std::string data;
  ASSERT_TRUE(absl::HexStringToBytes(
      "03"
      "00",
      &data));
  QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  QuicStreamFrame frame(stream_id, false, 0, data);
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_QPACK_DECODER_STREAM_INVALID_ZERO_INCREMENT,
                      "Decoder stream error: Invalid increment value 0.", _));
  session_->OnStreamFrame(frame);
}
// A GOAWAY received by the client must carry a client-initiated bidirectional
// stream ID; any other ID closes the connection.
TEST_P(QuicSpdySessionTestClient, InvalidHttp3GoAway) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  const QuicStreamId invalid_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  EXPECT_CALL(*connection_,
              CloseConnection(QUIC_HTTP_GOAWAY_INVALID_STREAM_ID,
                              "GOAWAY with invalid stream ID", _));
  session_->OnHttp3GoAway(invalid_id);
}
// A second GOAWAY whose stream ID is larger than a previously received one is
// a protocol violation and must close the connection.
TEST_P(QuicSpdySessionTestClient, Http3GoAwayLargerIdThanBefore) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  EXPECT_FALSE(session_->goaway_received());
  // First GOAWAY (ID 0) is accepted.
  QuicStreamId stream_id1 =
      GetNthClientInitiatedBidirectionalStreamId(transport_version(), 0);
  session_->OnHttp3GoAway(stream_id1);
  EXPECT_TRUE(session_->goaway_received());
  // Second GOAWAY (ID 4 > 0) must be rejected.
  EXPECT_CALL(
      *connection_,
      CloseConnection(
          QUIC_HTTP_GOAWAY_ID_LARGER_THAN_PREVIOUS,
          "GOAWAY received with ID 4 greater than previously received ID 0",
          _));
  QuicStreamId stream_id2 =
      GetNthClientInitiatedBidirectionalStreamId(transport_version(), 1);
  session_->OnHttp3GoAway(stream_id2);
}
// Client side: receiving a CANCEL_PUSH frame (type 0x03) on the peer's
// control stream is a frame error and must close the connection.
TEST_P(QuicSpdySessionTestClient, CloseConnectionOnCancelPush) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  // Establish the peer's control stream and the mandatory SETTINGS frame.
  QuicStreamId receive_control_stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  absl::string_view stream_type(type, 1);
  QuicStreamOffset offset = 0;
  QuicStreamFrame data1(receive_control_stream_id, false, offset,
                        stream_type);
  offset += stream_type.length();
  EXPECT_CALL(debug_visitor,
              OnPeerControlStreamCreated(receive_control_stream_id));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(receive_control_stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  std::string serialized_settings = HttpEncoder::SerializeSettingsFrame({});
  QuicStreamFrame data2(receive_control_stream_id, false, offset,
                        serialized_settings);
  offset += serialized_settings.length();
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(_));
  session_->OnStreamFrame(data2);
  // CANCEL_PUSH: frame type 0x03, length 1, push ID 0.
  std::string cancel_push_frame;
  ASSERT_TRUE(
      absl::HexStringToBytes("03"
                             "01"
                             "00",
                             &cancel_push_frame));
  QuicStreamFrame data3(receive_control_stream_id, false, offset,
                        cancel_push_frame);
  EXPECT_CALL(*connection_, CloseConnection(QUIC_HTTP_FRAME_ERROR,
                                            "CANCEL_PUSH frame received.", _))
      .WillOnce(
          Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
  EXPECT_CALL(*connection_,
              SendConnectionClosePacket(QUIC_HTTP_FRAME_ERROR, _,
                                        "CANCEL_PUSH frame received."));
  session_->OnStreamFrame(data3);
}
// Exercises OnSetting on both paths: HTTP/3 (field section size and QPACK
// settings reach the session and QPACK encoder) and HTTP/2-over-QUIC (HPACK
// header table size reaches the SPDY framer's encoder).
TEST_P(QuicSpdySessionTestServer, OnSetting) {
  Initialize();
  CompleteHandshake();
  if (VersionUsesHttp3(transport_version())) {
    // SETTINGS_MAX_FIELD_SECTION_SIZE caps outgoing header lists.
    EXPECT_EQ(std::numeric_limits<size_t>::max(),
              session_->max_outbound_header_list_size());
    session_->OnSetting(SETTINGS_MAX_FIELD_SECTION_SIZE, 5);
    EXPECT_EQ(5u, session_->max_outbound_header_list_size());
    EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
        .WillRepeatedly(Return(WriteResult(WRITE_STATUS_OK, 0)));
    // QPACK settings are forwarded to the encoder.
    QpackEncoder* qpack_encoder = session_->qpack_encoder();
    EXPECT_EQ(0u, QpackEncoderPeer::maximum_blocked_streams(qpack_encoder));
    session_->OnSetting(SETTINGS_QPACK_BLOCKED_STREAMS, 12);
    EXPECT_EQ(12u, QpackEncoderPeer::maximum_blocked_streams(qpack_encoder));
    QpackEncoderHeaderTable* header_table =
        QpackEncoderPeer::header_table(qpack_encoder);
    EXPECT_EQ(0u, header_table->maximum_dynamic_table_capacity());
    session_->OnSetting(SETTINGS_QPACK_MAX_TABLE_CAPACITY, 37);
    EXPECT_EQ(37u, header_table->maximum_dynamic_table_capacity());
    return;
  }
  // Pre-HTTP/3 path: the header table size setting reaches the HPACK encoder.
  EXPECT_EQ(std::numeric_limits<size_t>::max(),
            session_->max_outbound_header_list_size());
  session_->OnSetting(SETTINGS_MAX_FIELD_SECTION_SIZE, 5);
  EXPECT_EQ(5u, session_->max_outbound_header_list_size());
  spdy::HpackEncoder* hpack_encoder =
      QuicSpdySessionPeer::GetSpdyFramer(&*session_)->GetHpackEncoder();
  EXPECT_EQ(4096u, hpack_encoder->CurrentHeaderTableSizeSetting());
  session_->OnSetting(spdy::SETTINGS_HEADER_TABLE_SIZE, 59);
  EXPECT_EQ(59u, hpack_encoder->CurrentHeaderTableSizeSetting());
}
// Pre-HTTP/3 only: an HPACK decoding failure on the headers stream must be
// reported with a fine-grained error code (here QUIC_HPACK_INVALID_INDEX).
TEST_P(QuicSpdySessionTestServer, FineGrainedHpackErrorCodes) {
  Initialize();
  if (VersionUsesHttp3(transport_version())) {
    // HPACK is not used in HTTP/3.
    return;
  }
  QuicStreamId request_stream_id = 5;
  session_->CreateIncomingStream(request_stream_id);
  // Hand-built HTTP/2 HEADERS frame: length 6, type 0x01, flags 0x24
  // (END_HEADERS | PRIORITY), stream 5, then priority fields and an HPACK
  // indexed field (0xfe) whose index is out of range.
  std::string headers_frame;
  ASSERT_TRUE(
      absl::HexStringToBytes("000006"
                             "01"
                             "24"
                             "00000005"
                             "00000000"
                             "10"
                             "fe",
                             &headers_frame));
  QuicStreamId headers_stream_id =
      QuicUtils::GetHeadersStreamId(transport_version());
  QuicStreamFrame data(headers_stream_id, false, 0, headers_frame);
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_HPACK_INVALID_INDEX,
                      "SPDY framing error: HPACK_INVALID_INDEX",
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  session_->OnStreamFrame(data);
}
// RESET_STREAM on any of the peer's critical unidirectional streams (control,
// QPACK encoder, QPACK decoder) must close the connection with
// QUIC_HTTP_CLOSED_CRITICAL_STREAM and a type-specific error message.
TEST_P(QuicSpdySessionTestServer, PeerClosesCriticalReceiveStream) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  // One entry per critical stream type with the expected close reason.
  struct {
    char type;
    const char* error_details;
  } kTestData[] = {
      {kControlStream, "RESET_STREAM received for receive control stream"},
      {kQpackEncoderStream, "RESET_STREAM received for QPACK receive stream"},
      {kQpackDecoderStream, "RESET_STREAM received for QPACK receive stream"},
  };
  for (size_t i = 0; i < ABSL_ARRAYSIZE(kTestData); ++i) {
    QuicStreamId stream_id =
        GetNthClientInitiatedUnidirectionalStreamId(transport_version(), i + 1);
    const QuicByteCount data_length = 1;
    // Establish the critical stream by delivering its type byte.
    QuicStreamFrame data(stream_id, false, 0,
                         absl::string_view(&kTestData[i].type, data_length));
    session_->OnStreamFrame(data);
    EXPECT_CALL(*connection_, CloseConnection(QUIC_HTTP_CLOSED_CRITICAL_STREAM,
                                              kTestData[i].error_details, _));
    QuicRstStreamFrame rst(kInvalidControlFrameId, stream_id,
                           QUIC_STREAM_CANCELLED, data_length);
    session_->OnRstStream(rst);
  }
}
// With the connection-level send window exhausted, even the HTTP/3 send
// control stream must not make the session report itself writable.
TEST_P(QuicSpdySessionTestServer,
       H3ControlStreamsLimitedByConnectionFlowControl) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  // Exhaust the connection-level flow control window.
  QuicFlowControllerPeer::SetSendWindowOffset(session_->flow_controller(), 0);
  EXPECT_TRUE(session_->IsConnectionFlowControlBlocked());
  QuicSendControlStream* const control_stream =
      QuicSpdySessionPeer::GetSendControlStream(&*session_);
  session_->MarkConnectionLevelWriteBlocked(control_stream->id());
  EXPECT_FALSE(session_->WillingAndAbleToWrite());
}
// STOP_SENDING on any of our critical send streams (control, QPACK decoder,
// QPACK encoder) must close the connection with
// QUIC_HTTP_CLOSED_CRITICAL_STREAM.
TEST_P(QuicSpdySessionTestServer, PeerClosesCriticalSendStream) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  // Send control stream.
  QuicSendControlStream* control_stream =
      QuicSpdySessionPeer::GetSendControlStream(&*session_);
  ASSERT_TRUE(control_stream);
  QuicStopSendingFrame stop_sending_control_stream(
      kInvalidControlFrameId, control_stream->id(), QUIC_STREAM_CANCELLED);
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_HTTP_CLOSED_CRITICAL_STREAM,
                      "STOP_SENDING received for send control stream", _));
  session_->OnStopSendingFrame(stop_sending_control_stream);
  // QPACK decoder send stream.
  QpackSendStream* decoder_stream =
      QuicSpdySessionPeer::GetQpackDecoderSendStream(&*session_);
  ASSERT_TRUE(decoder_stream);
  QuicStopSendingFrame stop_sending_decoder_stream(
      kInvalidControlFrameId, decoder_stream->id(), QUIC_STREAM_CANCELLED);
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_HTTP_CLOSED_CRITICAL_STREAM,
                      "STOP_SENDING received for QPACK send stream", _));
  session_->OnStopSendingFrame(stop_sending_decoder_stream);
  // QPACK encoder send stream.
  QpackSendStream* encoder_stream =
      QuicSpdySessionPeer::GetQpackEncoderSendStream(&*session_);
  ASSERT_TRUE(encoder_stream);
  QuicStopSendingFrame stop_sending_encoder_stream(
      kInvalidControlFrameId, encoder_stream->id(), QUIC_STREAM_CANCELLED);
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_HTTP_CLOSED_CRITICAL_STREAM,
                      "STOP_SENDING received for QPACK send stream", _));
  session_->OnStopSendingFrame(stop_sending_encoder_stream);
}
// Server side: receiving a CANCEL_PUSH frame (type 0x03) on the peer's
// control stream is a frame error and must close the connection.
TEST_P(QuicSpdySessionTestServer, CloseConnectionOnCancelPush) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  // Establish the peer's control stream and the mandatory SETTINGS frame.
  QuicStreamId receive_control_stream_id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
  char type[] = {kControlStream};
  absl::string_view stream_type(type, 1);
  QuicStreamOffset offset = 0;
  QuicStreamFrame data1(receive_control_stream_id, false, offset,
                        stream_type);
  offset += stream_type.length();
  EXPECT_CALL(debug_visitor,
              OnPeerControlStreamCreated(receive_control_stream_id));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(receive_control_stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  std::string serialized_settings = HttpEncoder::SerializeSettingsFrame({});
  QuicStreamFrame data2(receive_control_stream_id, false, offset,
                        serialized_settings);
  offset += serialized_settings.length();
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(_));
  session_->OnStreamFrame(data2);
  // CANCEL_PUSH: frame type 0x03, length 1, push ID 0.
  std::string cancel_push_frame;
  ASSERT_TRUE(
      absl::HexStringToBytes("03"
                             "01"
                             "00",
                             &cancel_push_frame));
  QuicStreamFrame data3(receive_control_stream_id, false, offset,
                        cancel_push_frame);
  EXPECT_CALL(*connection_, CloseConnection(QUIC_HTTP_FRAME_ERROR,
                                            "CANCEL_PUSH frame received.", _))
      .WillOnce(
          Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
  EXPECT_CALL(*connection_,
              SendConnectionClosePacket(QUIC_HTTP_FRAME_ERROR, _,
                                        "CANCEL_PUSH frame received."));
  session_->OnStreamFrame(data3);
}
// When the connection is closed locally, the server must first send a GOAWAY
// whose stream ID is one past the largest peer-created stream.
TEST_P(QuicSpdySessionTestServer, Http3GoAwayWhenClosingConnection) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  EXPECT_CALL(debug_visitor, OnSettingsFrameSent(_));
  CompleteHandshake();
  // Open a client-initiated request stream so there is a "largest peer
  // stream" for the GOAWAY to reference.
  QuicStreamId stream_id = GetNthClientInitiatedBidirectionalId(0);
  const QuicByteCount headers_payload_length = 10;
  std::string headers_frame_header =
      HttpEncoder::SerializeHeadersFrameHeader(headers_payload_length);
  EXPECT_CALL(debug_visitor,
              OnHeadersFrameReceived(stream_id, headers_payload_length));
  session_->OnStreamFrame(
      QuicStreamFrame(stream_id, false, 0, headers_frame_header));
  EXPECT_EQ(stream_id, QuicSessionPeer::GetLargestPeerCreatedStreamId(
                           &*session_, false));
  // GOAWAY must carry the next stream ID after the largest peer-created one.
  EXPECT_CALL(debug_visitor,
              OnGoAwayFrameSent(stream_id +
                                QuicUtils::StreamIdDelta(transport_version())));
  EXPECT_CALL(*writer_, WritePacket(_, _, _, _, _, _))
      .WillRepeatedly(Return(WriteResult(WRITE_STATUS_OK, 0)));
  EXPECT_CALL(*connection_, CloseConnection(QUIC_NO_ERROR, _, _))
      .WillOnce(
          Invoke(connection_, &MockQuicConnection::ReallyCloseConnection));
  EXPECT_CALL(*connection_, SendConnectionClosePacket(QUIC_NO_ERROR, _, _))
      .WillOnce(Invoke(connection_,
                       &MockQuicConnection::ReallySendConnectionClosePacket));
  connection_->CloseConnection(
      QUIC_NO_ERROR, "closing connection",
      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET);
}
TEST_P(QuicSpdySessionTestClient, DoNotSendInitialMaxPushIdIfNotSet) {
Initialize();
if (!VersionUsesHttp3(transport_version())) {
return;
}
StrictMock<MockHttp3DebugVisitor> debug_visitor;
session_->set_debug_visitor(&debug_visitor);
InSequence s;
EXPECT_CALL(debug_visitor, OnSettingsFrameSent(_));
CompleteHandshake();
}
TEST_P(QuicSpdySessionTestClient, ReceiveSpdySettingInHttp3) {
Initialize();
if (!VersionUsesHttp3(transport_version())) {
return;
}
SettingsFrame frame;
frame.values[SETTINGS_MAX_FIELD_SECTION_SIZE] = 5;
frame.values[spdy::SETTINGS_INITIAL_WINDOW_SIZE] = 100;
CompleteHandshake();
EXPECT_CALL(*connection_,
CloseConnection(QUIC_HTTP_RECEIVE_SPDY_SETTING, _, _));
session_->OnSettingsFrame(frame);
}
// An ACCEPT_CH frame on the server's control stream is surfaced to both the
// debug visitor and the session.
TEST_P(QuicSpdySessionTestClient, ReceiveAcceptChFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  QuicStreamId receive_control_stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  // Open the control stream, then deliver the mandatory SETTINGS frame.
  char type[] = {kControlStream};
  absl::string_view stream_type(type, 1);
  QuicStreamOffset offset = 0;
  QuicStreamFrame data1(receive_control_stream_id, false, offset,
                        stream_type);
  offset += stream_type.length();
  EXPECT_CALL(debug_visitor,
              OnPeerControlStreamCreated(receive_control_stream_id));
  session_->OnStreamFrame(data1);
  EXPECT_EQ(receive_control_stream_id,
            QuicSpdySessionPeer::GetReceiveControlStream(&*session_)->id());
  std::string serialized_settings = HttpEncoder::SerializeSettingsFrame({});
  QuicStreamFrame data2(receive_control_stream_id, false, offset,
                        serialized_settings);
  offset += serialized_settings.length();
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(_));
  session_->OnStreamFrame(data2);
  // ACCEPT_CH with a single origin/value entry.
  AcceptChFrame accept_ch;
  accept_ch.entries.push_back({"foo", "bar"});
  std::string accept_ch_frame = HttpEncoder::SerializeAcceptChFrame(accept_ch);
  QuicStreamFrame data3(receive_control_stream_id, false, offset,
                        accept_ch_frame);
  EXPECT_CALL(debug_visitor, OnAcceptChFrameReceived(accept_ch));
  EXPECT_CALL(*session_, OnAcceptChFrame(accept_ch));
  session_->OnStreamFrame(data3);
}
// An ACCEPT_CH frame may also arrive via ALPS (TLS extension) before any
// streams exist.
TEST_P(QuicSpdySessionTestClient, AcceptChViaAlps) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  // ACCEPT_CH frame: type 0x4089, length 8, entry ("foo", "bar").
  std::string serialized_accept_ch_frame;
  ASSERT_TRUE(
      absl::HexStringToBytes("4089"
                             "08"
                             "03"
                             "666f6f"
                             "03"
                             "626172",
                             &serialized_accept_ch_frame));
  AcceptChFrame expected_accept_ch_frame{{{"foo", "bar"}}};
  EXPECT_CALL(debug_visitor,
              OnAcceptChFrameReceivedViaAlps(expected_accept_ch_frame));
  auto error = session_->OnAlpsData(
      reinterpret_cast<const uint8_t*>(serialized_accept_ch_frame.data()),
      serialized_accept_ch_frame.size());
  EXPECT_FALSE(error);
}
// DATA frames (type 0x00) are not allowed via ALPS.
TEST_P(QuicSpdySessionTestClient, AlpsForbiddenFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  std::string forbidden_frame;
  ASSERT_TRUE(
      absl::HexStringToBytes("00"
                             "03"
                             "66666f",
                             &forbidden_frame));
  auto error = session_->OnAlpsData(
      reinterpret_cast<const uint8_t*>(forbidden_frame.data()),
      forbidden_frame.size());
  ASSERT_TRUE(error);
  EXPECT_EQ("DATA frame forbidden", error.value());
}
// ALPS data must contain whole frames; a truncated SETTINGS frame is an error.
TEST_P(QuicSpdySessionTestClient, AlpsIncompleteFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  std::string incomplete_frame;
  ASSERT_TRUE(
      absl::HexStringToBytes("04"
                             "03",
                             &incomplete_frame));
  auto error = session_->OnAlpsData(
      reinterpret_cast<const uint8_t*>(incomplete_frame.data()),
      incomplete_frame.size());
  ASSERT_TRUE(error);
  EXPECT_EQ("incomplete HTTP/3 frame", error.value());
}
// SETTINGS received via ALPS take effect immediately; a later SETTINGS frame
// on the control stream may update values (here it raises blocked streams).
TEST_P(QuicSpdySessionTestClient, SettingsViaAlpsThenOnControlStream) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  QpackEncoder* qpack_encoder = session_->qpack_encoder();
  EXPECT_EQ(0u, qpack_encoder->MaximumDynamicTableCapacity());
  EXPECT_EQ(0u, qpack_encoder->maximum_blocked_streams());
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  // SETTINGS: QPACK_MAX_TABLE_CAPACITY = 1024, QPACK_BLOCKED_STREAMS = 32.
  std::string serialized_settings_frame1;
  ASSERT_TRUE(
      absl::HexStringToBytes("04"
                             "05"
                             "01"
                             "4400"
                             "07"
                             "20",
                             &serialized_settings_frame1));
  SettingsFrame expected_settings_frame1{
      {{SETTINGS_QPACK_MAX_TABLE_CAPACITY, 1024},
       {SETTINGS_QPACK_BLOCKED_STREAMS, 32}}};
  EXPECT_CALL(debug_visitor,
              OnSettingsFrameReceivedViaAlps(expected_settings_frame1));
  auto error = session_->OnAlpsData(
      reinterpret_cast<const uint8_t*>(serialized_settings_frame1.data()),
      serialized_settings_frame1.size());
  EXPECT_FALSE(error);
  EXPECT_EQ(1024u, qpack_encoder->MaximumDynamicTableCapacity());
  EXPECT_EQ(32u, qpack_encoder->maximum_blocked_streams());
  const QuicStreamId control_stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  EXPECT_CALL(debug_visitor, OnPeerControlStreamCreated(control_stream_id));
  std::string stream_type;
  ASSERT_TRUE(absl::HexStringToBytes("00", &stream_type));
  session_->OnStreamFrame(QuicStreamFrame(control_stream_id, false,
                                          0, stream_type));
  // Same table capacity, but blocked streams raised from 32 to 48.
  SettingsFrame expected_settings_frame2{
      {{SETTINGS_QPACK_MAX_TABLE_CAPACITY, 1024},
       {SETTINGS_QPACK_BLOCKED_STREAMS, 48}}};
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(expected_settings_frame2));
  std::string serialized_settings_frame2;
  ASSERT_TRUE(
      absl::HexStringToBytes("04"
                             "05"
                             "01"
                             "4400"
                             "07"
                             "30",
                             &serialized_settings_frame2));
  session_->OnStreamFrame(QuicStreamFrame(control_stream_id, false,
                                          stream_type.length(),
                                          serialized_settings_frame2));
  EXPECT_EQ(1024u, qpack_encoder->MaximumDynamicTableCapacity());
  EXPECT_EQ(48u, qpack_encoder->maximum_blocked_streams());
}
// A control-stream SETTINGS value that contradicts the value installed via
// ALPS (as during 0-RTT) must close the connection.
TEST_P(QuicSpdySessionTestClient,
       SettingsViaAlpsConflictsSettingsViaControlStream) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  CompleteHandshake();
  QpackEncoder* qpack_encoder = session_->qpack_encoder();
  EXPECT_EQ(0u, qpack_encoder->MaximumDynamicTableCapacity());
  // ALPS SETTINGS: QPACK_MAX_TABLE_CAPACITY = 1024.
  std::string serialized_settings_frame1;
  ASSERT_TRUE(
      absl::HexStringToBytes("04"
                             "03"
                             "01"
                             "4400",
                             &serialized_settings_frame1));
  auto error = session_->OnAlpsData(
      reinterpret_cast<const uint8_t*>(serialized_settings_frame1.data()),
      serialized_settings_frame1.size());
  EXPECT_FALSE(error);
  EXPECT_EQ(1024u, qpack_encoder->MaximumDynamicTableCapacity());
  const QuicStreamId control_stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 0);
  std::string stream_type;
  ASSERT_TRUE(absl::HexStringToBytes("00", &stream_type));
  session_->OnStreamFrame(QuicStreamFrame(control_stream_id, false,
                                          0, stream_type));
  EXPECT_CALL(
      *connection_,
      CloseConnection(QUIC_HTTP_ZERO_RTT_RESUMPTION_SETTINGS_MISMATCH,
                      "Server sent an SETTINGS_QPACK_MAX_TABLE_CAPACITY: "
                      "32 while current value is: 1024",
                      ConnectionCloseBehavior::SEND_CONNECTION_CLOSE_PACKET));
  // Control-stream SETTINGS: QPACK_MAX_TABLE_CAPACITY = 32 (conflict).
  std::string serialized_settings_frame2;
  ASSERT_TRUE(
      absl::HexStringToBytes("04"
                             "02"
                             "01"
                             "20",
                             &serialized_settings_frame2));
  session_->OnStreamFrame(QuicStreamFrame(control_stream_id, false,
                                          stream_type.length(),
                                          serialized_settings_frame2));
}
// Only a single SETTINGS frame is allowed in ALPS data.
TEST_P(QuicSpdySessionTestClient, AlpsTwoSettingsFrame) {
  Initialize();
  if (!VersionUsesHttp3(transport_version())) {
    return;
  }
  std::string banned_frame;
  ASSERT_TRUE(
      absl::HexStringToBytes("04"
                             "00"
                             "04"
                             "00",
                             &banned_frame));
  auto error = session_->OnAlpsData(
      reinterpret_cast<const uint8_t*>(banned_frame.data()),
      banned_frame.size());
  ASSERT_TRUE(error);
  EXPECT_EQ("multiple SETTINGS frames", error.value());
}
// Shared driver for the HTTP Datagram negotiation matrix below: configures
// the local support level, delivers a peer SETTINGS frame advertising
// remote_support, and checks the negotiated result.
void QuicSpdySessionTestBase::TestHttpDatagramSetting(
    HttpDatagramSupport local_support, HttpDatagramSupport remote_support,
    HttpDatagramSupport expected_support, bool expected_datagram_supported) {
  if (!version().UsesHttp3()) {
    return;
  }
  CompleteHandshake();
  session_->set_local_http_datagram_support(local_support);
  // Before the peer's SETTINGS arrive, datagrams are unsupported.
  EXPECT_FALSE(session_->SupportsH3Datagram());
  EXPECT_EQ(session_->http_datagram_support(), HttpDatagramSupport::kNone);
  // Translate the remote support level into the corresponding SETTINGS keys.
  SettingsFrame settings;
  switch (remote_support) {
    case HttpDatagramSupport::kNone:
      break;
    case HttpDatagramSupport::kDraft04:
      settings.values[SETTINGS_H3_DATAGRAM_DRAFT04] = 1;
      break;
    case HttpDatagramSupport::kRfc:
      settings.values[SETTINGS_H3_DATAGRAM] = 1;
      break;
    case HttpDatagramSupport::kRfcAndDraft04:
      settings.values[SETTINGS_H3_DATAGRAM] = 1;
      settings.values[SETTINGS_H3_DATAGRAM_DRAFT04] = 1;
      break;
  }
  // Deliver the SETTINGS on a freshly opened peer control stream.
  std::string data = std::string(1, kControlStream) +
                     HttpEncoder::SerializeSettingsFrame(settings);
  QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  QuicStreamFrame frame(stream_id, false, 0, data);
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  session_->set_debug_visitor(&debug_visitor);
  EXPECT_CALL(debug_visitor, OnPeerControlStreamCreated(stream_id));
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(settings));
  session_->OnStreamFrame(frame);
  EXPECT_EQ(session_->http_datagram_support(), expected_support);
  EXPECT_EQ(session_->SupportsH3Datagram(), expected_datagram_supported);
}
// Negotiation matrix: matching versions negotiate; mismatched draft-04 vs RFC
// fail; when both sides support both, RFC wins.
TEST_P(QuicSpdySessionTestClient, HttpDatagramSettingLocal04Remote04) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kDraft04,
      HttpDatagramSupport::kDraft04,
      HttpDatagramSupport::kDraft04,
      true);
}
TEST_P(QuicSpdySessionTestClient, HttpDatagramSettingLocal04Remote09) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kDraft04,
      HttpDatagramSupport::kRfc,
      HttpDatagramSupport::kNone,
      false);
}
TEST_P(QuicSpdySessionTestClient, HttpDatagramSettingLocal04Remote04And09) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kDraft04,
      HttpDatagramSupport::kRfcAndDraft04,
      HttpDatagramSupport::kDraft04,
      true);
}
TEST_P(QuicSpdySessionTestClient, HttpDatagramSettingLocal09Remote04) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kRfc,
      HttpDatagramSupport::kDraft04,
      HttpDatagramSupport::kNone,
      false);
}
TEST_P(QuicSpdySessionTestClient, HttpDatagramSettingLocal09Remote09) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kRfc,
      HttpDatagramSupport::kRfc,
      HttpDatagramSupport::kRfc,
      true);
}
TEST_P(QuicSpdySessionTestClient, HttpDatagramSettingLocal09Remote04And09) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kRfc,
      HttpDatagramSupport::kRfcAndDraft04,
      HttpDatagramSupport::kRfc,
      true);
}
TEST_P(QuicSpdySessionTestClient, HttpDatagramSettingLocal04And09Remote04) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kRfcAndDraft04,
      HttpDatagramSupport::kDraft04,
      HttpDatagramSupport::kDraft04,
      true);
}
TEST_P(QuicSpdySessionTestClient, HttpDatagramSettingLocal04And09Remote09) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kRfcAndDraft04,
      HttpDatagramSupport::kRfc,
      HttpDatagramSupport::kRfc,
      true);
}
TEST_P(QuicSpdySessionTestClient,
       HttpDatagramSettingLocal04And09Remote04And09) {
  Initialize();
  TestHttpDatagramSetting(
      HttpDatagramSupport::kRfcAndDraft04,
      HttpDatagramSupport::kRfcAndDraft04,
      HttpDatagramSupport::kRfc,
      true);
}
// WebTransport version negotiation: both sides draft-02 only.
TEST_P(QuicSpdySessionTestClient, WebTransportSettingDraft02OnlyBothSides) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_locally_supported_web_transport_versions(
      WebTransportHttp3VersionSet({WebTransportHttp3Version::kDraft02}));
  EXPECT_FALSE(session_->SupportsWebTransport());
  CompleteHandshake();
  ReceiveWebTransportSettings(
      WebTransportHttp3VersionSet({WebTransportHttp3Version::kDraft02}));
  EXPECT_TRUE(session_->ShouldProcessIncomingRequests());
  EXPECT_TRUE(session_->SupportsWebTransport());
  EXPECT_EQ(session_->SupportedWebTransportVersion(),
            WebTransportHttp3Version::kDraft02);
}
// Both sides draft-07 only.
TEST_P(QuicSpdySessionTestClient, WebTransportSettingDraft07OnlyBothSides) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_locally_supported_web_transport_versions(
      WebTransportHttp3VersionSet({WebTransportHttp3Version::kDraft07}));
  EXPECT_FALSE(session_->SupportsWebTransport());
  CompleteHandshake();
  ReceiveWebTransportSettings(
      WebTransportHttp3VersionSet({WebTransportHttp3Version::kDraft07}));
  EXPECT_TRUE(session_->ShouldProcessIncomingRequests());
  EXPECT_TRUE(session_->SupportsWebTransport());
  EXPECT_EQ(session_->SupportedWebTransportVersion(),
            WebTransportHttp3Version::kDraft07);
}
// When both sides support both drafts, the newer draft-07 is chosen.
TEST_P(QuicSpdySessionTestClient, WebTransportSettingBothDraftsBothSides) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_locally_supported_web_transport_versions(
      WebTransportHttp3VersionSet({WebTransportHttp3Version::kDraft02,
                                   WebTransportHttp3Version::kDraft07}));
  EXPECT_FALSE(session_->SupportsWebTransport());
  CompleteHandshake();
  ReceiveWebTransportSettings(
      WebTransportHttp3VersionSet({WebTransportHttp3Version::kDraft02,
                                   WebTransportHttp3Version::kDraft07}));
  EXPECT_TRUE(session_->ShouldProcessIncomingRequests());
  EXPECT_TRUE(session_->SupportsWebTransport());
  EXPECT_EQ(session_->SupportedWebTransportVersion(),
            WebTransportHttp3Version::kDraft07);
}
// Disjoint version sets: negotiation fails.
TEST_P(QuicSpdySessionTestClient, WebTransportSettingVersionMismatch) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_locally_supported_web_transport_versions(
      WebTransportHttp3VersionSet({WebTransportHttp3Version::kDraft07}));
  EXPECT_FALSE(session_->SupportsWebTransport());
  CompleteHandshake();
  ReceiveWebTransportSettings(
      WebTransportHttp3VersionSet({WebTransportHttp3Version::kDraft02}));
  EXPECT_FALSE(session_->SupportsWebTransport());
  EXPECT_EQ(session_->SupportedWebTransportVersion(), std::nullopt);
}
// SETTINGS_WEBTRANS_DRAFT00 = 0 means the peer does NOT support WebTransport.
TEST_P(QuicSpdySessionTestClient, WebTransportSettingSetToZero) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_supports_webtransport(true);
  EXPECT_FALSE(session_->SupportsWebTransport());
  StrictMock<MockHttp3DebugVisitor> debug_visitor;
  EXPECT_CALL(debug_visitor, OnSettingsFrameSent(_));
  session_->set_debug_visitor(&debug_visitor);
  CompleteHandshake();
  SettingsFrame server_settings;
  server_settings.values[SETTINGS_H3_DATAGRAM_DRAFT04] = 1;
  server_settings.values[SETTINGS_WEBTRANS_DRAFT00] = 0;
  std::string data = std::string(1, kControlStream) +
                     HttpEncoder::SerializeSettingsFrame(server_settings);
  QuicStreamId stream_id =
      GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  QuicStreamFrame frame(stream_id, false, 0, data);
  EXPECT_CALL(debug_visitor, OnPeerControlStreamCreated(stream_id));
  EXPECT_CALL(debug_visitor, OnSettingsFrameReceived(server_settings));
  session_->OnStreamFrame(frame);
  EXPECT_FALSE(session_->SupportsWebTransport());
}
// Server side: WebTransport becomes available only after the client's
// SETTINGS arrive.
TEST_P(QuicSpdySessionTestServer, WebTransportSetting) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_supports_webtransport(true);
  EXPECT_FALSE(session_->SupportsWebTransport());
  EXPECT_FALSE(session_->ShouldProcessIncomingRequests());
  CompleteHandshake();
  ReceiveWebTransportSettings();
  EXPECT_TRUE(session_->SupportsWebTransport());
  EXPECT_TRUE(session_->ShouldProcessIncomingRequests());
}
// A WebTransport data stream arriving before the session's SETTINGS and
// CONNECT is buffered and later associated with the session; closing the
// session resets the associated stream.
TEST_P(QuicSpdySessionTestServer, BufferingIncomingStreams) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_supports_webtransport(true);
  CompleteHandshake();
  QuicStreamId session_id =
      GetNthClientInitiatedBidirectionalStreamId(transport_version(), 1);
  QuicStreamId data_stream_id =
      GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 4);
  // Data stream arrives first, then SETTINGS, then the session CONNECT.
  ReceiveWebTransportUnidirectionalStream(session_id, data_stream_id);
  ReceiveWebTransportSettings();
  ReceiveWebTransportSession(session_id);
  WebTransportHttp3* web_transport =
      session_->GetWebTransportSession(session_id);
  ASSERT_TRUE(web_transport != nullptr);
  EXPECT_EQ(web_transport->NumberOfAssociatedStreams(), 1u);
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*connection_, OnStreamReset(session_id, _));
  EXPECT_CALL(
      *connection_,
      OnStreamReset(data_stream_id, QUIC_STREAM_WEBTRANSPORT_SESSION_GONE));
  session_->ResetStream(session_id, QUIC_STREAM_INTERNAL_ERROR);
}
// Streams beyond kMaxUnassociatedWebTransportStreams are rejected with a
// buffered-streams-limit reset instead of being buffered.
TEST_P(QuicSpdySessionTestServer, BufferingIncomingStreamsLimit) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_supports_webtransport(true);
  CompleteHandshake();
  QuicStreamId session_id =
      GetNthClientInitiatedBidirectionalStreamId(transport_version(), 1);
  const int streams_to_send = kMaxUnassociatedWebTransportStreams + 4;
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  // Only the 4 excess streams are reset.
  EXPECT_CALL(*connection_,
              OnStreamReset(
                  _, QUIC_STREAM_WEBTRANSPORT_BUFFERED_STREAMS_LIMIT_EXCEEDED))
      .Times(4);
  for (int i = 0; i < streams_to_send; i++) {
    QuicStreamId data_stream_id =
        GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 4 + i);
    ReceiveWebTransportUnidirectionalStream(session_id, data_stream_id);
  }
  ReceiveWebTransportSettings();
  ReceiveWebTransportSession(session_id);
  WebTransportHttp3* web_transport =
      session_->GetWebTransportSession(session_id);
  ASSERT_TRUE(web_transport != nullptr);
  EXPECT_EQ(web_transport->NumberOfAssociatedStreams(),
            kMaxUnassociatedWebTransportStreams);
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*connection_, OnStreamReset(_, _))
      .Times(kMaxUnassociatedWebTransportStreams + 1);
  session_->ResetStream(session_id, QUIC_STREAM_INTERNAL_ERROR);
}
// Empty FIN-only unidirectional streams must not accumulate as pending
// streams, and stream-limit credit must be replenished.
TEST_P(QuicSpdySessionTestServer, BufferingIncomingStreamsWithFin) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  CompleteHandshake();
  const UberQuicStreamIdManager& stream_id_manager =
      *QuicSessionPeer::ietf_streamid_manager(&*session_);
  const QuicStreamId initial_advertized_max_streams =
      stream_id_manager.advertised_max_incoming_unidirectional_streams();
  const size_t num_streams_to_open =
      session_->max_open_incoming_unidirectional_streams();
  EXPECT_CALL(*connection_, SendControlFrame(_)).Times(testing::AnyNumber());
  for (size_t i = 0; i < num_streams_to_open; i++) {
    const QuicStreamId stream_id =
        GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 4 + i);
    QuicStreamFrame frame(stream_id, true, 0, "");
    session_->OnStreamFrame(frame);
  }
  EXPECT_LT(initial_advertized_max_streams,
            stream_id_manager.advertised_max_incoming_unidirectional_streams());
  EXPECT_EQ(0, session_->pending_streams_size());
}
// Closing a WebTransport session resets its outgoing streams with
// SESSION_GONE and disassociates them.
TEST_P(QuicSpdySessionTestServer, ResetOutgoingWebTransportStreams) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_supports_webtransport(true);
  CompleteHandshake();
  QuicStreamId session_id =
      GetNthClientInitiatedBidirectionalStreamId(transport_version(), 1);
  ReceiveWebTransportSettings();
  ReceiveWebTransportSession(session_id);
  WebTransportHttp3* web_transport =
      session_->GetWebTransportSession(session_id);
  ASSERT_TRUE(web_transport != nullptr);
  session_->set_writev_consumes_all_data(true);
  EXPECT_TRUE(web_transport->CanOpenNextOutgoingUnidirectionalStream());
  EXPECT_EQ(web_transport->NumberOfAssociatedStreams(), 0u);
  WebTransportStream* stream =
      web_transport->OpenOutgoingUnidirectionalStream();
  EXPECT_EQ(web_transport->NumberOfAssociatedStreams(), 1u);
  ASSERT_TRUE(stream != nullptr);
  QuicStreamId stream_id = stream->GetStreamId();
  EXPECT_CALL(*connection_, SendControlFrame(_))
      .WillRepeatedly(Invoke(&ClearControlFrame));
  EXPECT_CALL(*connection_, OnStreamReset(session_id, _));
  EXPECT_CALL(*connection_,
              OnStreamReset(stream_id, QUIC_STREAM_WEBTRANSPORT_SESSION_GONE));
  session_->ResetStream(session_id, QUIC_STREAM_INTERNAL_ERROR);
  EXPECT_EQ(web_transport->NumberOfAssociatedStreams(), 0u);
}
// A client accepts the peer's WebTransport setting even without the extended
// CONNECT setting (only servers must advertise extended CONNECT).
TEST_P(QuicSpdySessionTestClient, WebTransportWithoutExtendedConnect) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  session_->set_local_http_datagram_support(
      HttpDatagramSupport::kRfcAndDraft04);
  session_->set_supports_webtransport(true);
  EXPECT_FALSE(session_->SupportsWebTransport());
  CompleteHandshake();
  SettingsFrame settings;
  settings.values[SETTINGS_H3_DATAGRAM_DRAFT04] = 1;
  settings.values[SETTINGS_WEBTRANS_DRAFT00] = 1;
  std::string data = std::string(1, kControlStream) +
                     HttpEncoder::SerializeSettingsFrame(settings);
  QuicStreamId control_stream_id =
      session_->perspective() == Perspective::IS_SERVER
          ? GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3)
          : GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  QuicStreamFrame frame(control_stream_id, false, 0, data);
  session_->OnStreamFrame(frame);
  EXPECT_TRUE(session_->SupportsWebTransport());
}
// Regardless of a huge SETTINGS_HEADER_TABLE_SIZE from the peer, the HPACK
// encoder caps its dynamic table size (verified against the serialized
// HEADERS bytes on the headers stream). HTTP/2-over-QUIC (gQUIC) only.
TEST_P(QuicSpdySessionTestClient, LimitEncoderDynamicTableSize) {
  Initialize();
  if (version().UsesHttp3()) {
    return;
  }
  CompleteHandshake();
  // Swap in a strict-mock headers stream to capture what gets written.
  QuicSpdySessionPeer::SetHeadersStream(&*session_, nullptr);
  TestHeadersStream* headers_stream =
      new StrictMock<TestHeadersStream>(&*session_);
  QuicSpdySessionPeer::SetHeadersStream(&*session_, headers_stream);
  session_->MarkConnectionLevelWriteBlocked(headers_stream->id());
  session_->OnSetting(spdy::SETTINGS_HEADER_TABLE_SIZE, 1024 * 1024 * 1024);
  TestStream* stream = session_->CreateOutgoingBidirectionalStream();
  EXPECT_CALL(*writer_, IsWriteBlocked()).WillRepeatedly(Return(true));
  HttpHeaderBlock headers;
  headers[":method"] = "GET";
  stream->WriteHeaders(std::move(headers), true, nullptr);
  EXPECT_TRUE(headers_stream->HasBufferedData());
  QuicStreamSendBuffer& send_buffer =
      QuicStreamPeer::SendBuffer(headers_stream);
  ASSERT_EQ(1u, send_buffer.size());
  const quiche::QuicheMemSlice& slice =
      QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer)->slice;
  absl::string_view stream_data(slice.data(), slice.length());
  // SPDY frame header: length 9, frame type HEADERS, flags END_STREAM |
  // END_HEADERS | PRIORITY.
  std::string expected_stream_data_1;
  ASSERT_TRUE(
      absl::HexStringToBytes("000009"
                             "01"
                             "25",
                             &expected_stream_data_1));
  EXPECT_EQ(expected_stream_data_1, stream_data.substr(0, 5));
  stream_data.remove_prefix(5);
  stream_data.remove_prefix(4);
  // Exclusive dependency on stream 0, weight 147.
  std::string expected_stream_data_2;
  ASSERT_TRUE(
      absl::HexStringToBytes("00000000"
                             "92",
                             &expected_stream_data_2));
  EXPECT_EQ(expected_stream_data_2, stream_data.substr(0, 5));
  stream_data.remove_prefix(5);
  // HPACK: dynamic table size update to 16383, then indexed ":method: GET".
  std::string expected_stream_data_3;
  ASSERT_TRUE(absl::HexStringToBytes(
      "3fe17f"
      "82",
      &expected_stream_data_3));
  EXPECT_EQ(expected_stream_data_3, stream_data);
}
// Server fixture with extended CONNECT disabled (second ctor argument).
class QuicSpdySessionTestServerNoExtendedConnect
    : public QuicSpdySessionTestBase {
 public:
  QuicSpdySessionTestServerNoExtendedConnect()
      : QuicSpdySessionTestBase(Perspective::IS_SERVER, false) {}
};
INSTANTIATE_TEST_SUITE_P(Tests, QuicSpdySessionTestServerNoExtendedConnect,
                         ::testing::ValuesIn(AllSupportedVersions()),
                         ::testing::PrintToStringParamName());
// Without extended CONNECT, the peer's WebTransport setting has no effect,
// but regular request processing stays enabled.
TEST_P(QuicSpdySessionTestServerNoExtendedConnect,
       WebTransportSettingNoEffect) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  EXPECT_FALSE(session_->SupportsWebTransport());
  EXPECT_TRUE(session_->ShouldProcessIncomingRequests());
  CompleteHandshake();
  ReceiveWebTransportSettings();
  EXPECT_FALSE(session_->allow_extended_connect());
  EXPECT_FALSE(session_->SupportsWebTransport());
  EXPECT_TRUE(session_->ShouldProcessIncomingRequests());
}
// SETTINGS_ENABLE_CONNECT_PROTOCOL only allows values 0 and 1; value 2 must
// trigger a peer bug and close the connection.
TEST_P(QuicSpdySessionTestServerNoExtendedConnect, BadExtendedConnectSetting) {
  Initialize();
  if (!version().UsesHttp3()) {
    return;
  }
  SetQuicReloadableFlag(quic_act_upon_invalid_header, true);
  EXPECT_FALSE(session_->SupportsWebTransport());
  EXPECT_TRUE(session_->ShouldProcessIncomingRequests());
  CompleteHandshake();
  SettingsFrame settings;
  settings.values[SETTINGS_ENABLE_CONNECT_PROTOCOL] = 2;
  std::string data = std::string(1, kControlStream) +
                     HttpEncoder::SerializeSettingsFrame(settings);
  QuicStreamId control_stream_id =
      session_->perspective() == Perspective::IS_SERVER
          ? GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3)
          : GetNthServerInitiatedUnidirectionalStreamId(transport_version(), 3);
  QuicStreamFrame frame(control_stream_id, false, 0, data);
  EXPECT_QUIC_PEER_BUG(
      {
        EXPECT_CALL(*connection_,
                    CloseConnection(QUIC_HTTP_INVALID_SETTING_VALUE, _, _));
        session_->OnStreamFrame(frame);
      },
      "Received SETTINGS_ENABLE_CONNECT_PROTOCOL with invalid value");
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_spdy_session.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/quic_spdy_session_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include <cstddef>
#include "tensorflow/core/framework/dataset_metadata.pb.h"
#include "tensorflow/core/framework/device_base.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/platform/strcat.h"
namespace tensorflow {
namespace grappler {
namespace graph_utils {
namespace {
// Op name used when synthesizing constant nodes.
constexpr char kConstOpName[] = "Const";
// Op representing a function return value in a graph.
constexpr char kRetValOp[] = "_Retval";
// Common dataset node attribute names.
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kToutputTypes[] = "Toutput_types";
// Returns the positions (in iteration order) of all elements of `collection`
// for which `predicate` returns true.
template <typename Predicate, typename Collection>
std::vector<int> GetElementIndicesWithPredicate(const Predicate& predicate,
                                                const Collection& collection) {
  std::vector<int> matching_positions;
  int position = 0;
  for (const auto& element : collection) {
    if (predicate(element)) {
      matching_positions.push_back(position);
    }
    ++position;
  }
  return matching_positions;
}
// Returns node positions ordered by node name, so two graphs can be compared
// node-by-node independently of the order nodes appear in.
std::vector<int> CreateNameIndex(const GraphDef& graph) {
  std::map<string, int> name_to_position;
  for (int position = 0; position < graph.node_size(); ++position) {
    name_to_position[graph.node(position).name()] = position;
  }
  std::vector<int> index(graph.node_size());
  int out = 0;
  for (const auto& [name, position] : name_to_position) {
    index[out++] = position;
  }
  return index;
}
// Returns input positions ordered by input string, so two nodes' inputs can
// be compared independently of their order.
std::vector<int> CreateInputIndex(const NodeDef& node) {
  std::map<string, int> input_to_position;
  for (int position = 0; position < node.input_size(); ++position) {
    input_to_position[node.input(position)] = position;
  }
  std::vector<int> index(node.input_size());
  int out = 0;
  for (const auto& [input, position] : input_to_position) {
    index[out++] = position;
  }
  return index;
}
// Adds a scalar Const node of the given dtype to `graph`. `add_value` is
// responsible for writing the scalar payload into the tensor proto.
NodeDef* AddScalarConstNodeHelper(
    DataType dtype, const std::function<void(TensorProto*)>& add_value,
    MutableGraphView* graph) {
  NodeDef node;
  node.set_op(kConstOpName);
  SetUniqueGraphNodeName(kConstOpName, graph->graph(), &node);
  (*node.mutable_attr())["dtype"].set_type(dtype);
  // Build the "value" tensor attribute in place via the generated mutable_*
  // accessors instead of hand-allocating the TensorProto/TensorShapeProto and
  // transferring ownership with release()/set_allocated_*().
  TensorProto* tensor = (*node.mutable_attr())["value"].mutable_tensor();
  tensor->mutable_tensor_shape();  // Scalar: empty (rank-0) shape.
  tensor->set_dtype(dtype);
  add_value(tensor);
  return graph->AddNode(std::move(node));
}
}
// Adds a scalar (known-rank) Placeholder node of the given dtype to `graph`.
NodeDef* AddScalarPlaceholder(DataType dtype, MutableGraphView* graph) {
  NodeDef node;
  node.set_op("Placeholder");
  SetUniqueGraphNodeName(node.op(), graph->graph(), &node);
  auto& attrs = *node.mutable_attr();
  attrs["dtype"].set_type(dtype);
  // A scalar placeholder has a known rank (zero dimensions).
  TensorShapeProto* shape = attrs["shape"].mutable_shape();
  shape->set_unknown_rank(false);
  return graph->AddNode(std::move(node));
}
// Adds a node with the given op, inputs and attributes to `graph`. If `name`
// is empty, a unique name derived from `op` is generated.
NodeDef* AddNode(StringPiece name, StringPiece op,
                 const std::vector<string>& inputs,
                 const std::vector<std::pair<string, AttrValue>>& attributes,
                 MutableGraphView* graph) {
  NodeDef node;
  if (name.empty()) {
    SetUniqueGraphNodeName(op, graph->graph(), &node);
  } else {
    node.set_name(string(name));
  }
  node.set_op(string(op));
  for (const string& input : inputs) {
    node.add_input(input);
  }
  for (const auto& [attr_name, attr_value] : attributes) {
    (*node.mutable_attr())[attr_name] = attr_value;
  }
  return graph->AddNode(std::move(node));
}
// Typed specializations that add a scalar Const node holding `v`. Each picks
// the matching DataType and TensorProto value field.
template <>
NodeDef* AddScalarConstNode(bool v, MutableGraphView* graph) {
  return AddScalarConstNodeHelper(
      DT_BOOL, [v](TensorProto* proto) { proto->add_bool_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(double v, MutableGraphView* graph) {
  return AddScalarConstNodeHelper(
      DT_DOUBLE, [v](TensorProto* proto) { proto->add_double_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(float v, MutableGraphView* graph) {
  return AddScalarConstNodeHelper(
      DT_FLOAT, [v](TensorProto* proto) { proto->add_float_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(int v, MutableGraphView* graph) {
  return AddScalarConstNodeHelper(
      DT_INT32, [v](TensorProto* proto) { proto->add_int_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(int64_t v, MutableGraphView* graph) {
  return AddScalarConstNodeHelper(
      DT_INT64, [v](TensorProto* proto) { proto->add_int64_val(v); }, graph);
}
template <>
NodeDef* AddScalarConstNode(StringPiece v, MutableGraphView* graph) {
  return AddScalarConstNodeHelper(
      DT_STRING,
      [v](TensorProto* proto) { proto->add_string_val(v.data(), v.size()); },
      graph);
}
// Validates that `node` is a scalar Const of the expected dtype and hands the
// decoded tensor to `get_value`. Returns InvalidArgument on any mismatch.
Status GetScalarConstNodeValueHelper(
    const NodeDef& node, DataType dtype,
    const std::function<void(const Tensor&)>& get_value) {
  if (node.op() != kConstOpName)
    return errors::InvalidArgument("Node ", node.name(),
                                   " is not a Const node. Op: ", node.op());
  Tensor tensor;
  TF_RETURN_IF_ERROR(GetNodeAttr(node, "value", &tensor));
  if (!TensorShapeUtils::IsScalar(tensor.shape())) {
    return errors::InvalidArgument(
        "Node ", node.name(),
        " should be a scalar but has shape: ", tensor.shape());
  }
  if (tensor.dtype() != dtype) {
    return errors::InvalidArgument(
        "Node ", node.name(), " should have type ", DataTypeString(dtype),
        " but has type: ", DataTypeString(tensor.dtype()));
  }
  get_value(tensor);
  return absl::OkStatus();
}
// Typed specializations that extract the scalar value from a Const node.
template <>
Status GetScalarConstNodeValue(const NodeDef& node, int64_t* value) {
  return GetScalarConstNodeValueHelper(
      node, DT_INT64,
      [value](const Tensor& tensor) { *value = tensor.scalar<int64_t>()(); });
}
template <>
Status GetScalarConstNodeValue(const NodeDef& node, bool* value) {
  return GetScalarConstNodeValueHelper(
      node, DT_BOOL,
      [value](const Tensor& tensor) { *value = tensor.scalar<bool>()(); });
}
// Returns true if the two graphs are equivalent up to node order and input
// order: nodes are paired by sorted name, inputs by sorted input string.
bool Compare(const GraphDef& g1, const GraphDef& g2) {
  if (g1.node_size() != g2.node_size()) {
    return false;
  }
  // name_index[i] is the position in the graph of the i-th node by name.
  std::vector<int> name_index1 = CreateNameIndex(g1);
  std::vector<int> name_index2 = CreateNameIndex(g2);
  for (int i = 0; i < g1.node_size(); ++i) {
    int idx1 = name_index1[i];
    int idx2 = name_index2[i];
    if (g1.node(idx1).op() != g2.node(idx2).op()) {
      return false;
    }
    if (g1.node(idx1).name() != g2.node(idx2).name()) {
      return false;
    }
    if (g1.node(idx1).input_size() != g2.node(idx2).input_size()) {
      return false;
    }
    // Compare inputs pairwise after sorting by input string.
    std::vector<int> input_index1 = CreateInputIndex(g1.node(idx1));
    std::vector<int> input_index2 = CreateInputIndex(g2.node(idx2));
    for (int j = 0; j < g1.node(idx1).input_size(); ++j) {
      if (!IsSameInput(g1.node(idx1).input(input_index1[j]),
                       g2.node(idx2).input(input_index2[j]))) {
        return false;
      }
    }
  }
  return true;
}
bool ContainsGraphFunctionWithName(StringPiece name,
const FunctionDefLibrary& library) {
return FindGraphFunctionWithName(name, library) != -1;
}
bool ContainsGraphNodeWithName(StringPiece name, const GraphDef& graph) {
return FindGraphNodeWithName(name, graph) != -1;
}
bool ContainsNodeWithOp(StringPiece op, const GraphDef& graph) {
return FindGraphNodeWithOp(op, graph) != -1;
}
int FindGraphFunctionWithName(StringPiece name,
const FunctionDefLibrary& library) {
return GetFirstElementIndexWithPredicate(
[&name](const FunctionDef& function) {
return function.signature().name() == name;
},
library.function());
}
int FindGraphNodeWithName(StringPiece name, const GraphDef& graph) {
return GetFirstElementIndexWithPredicate(
[&name](const NodeDef& node) { return node.name() == name; },
graph.node());
}
int FindGraphNodeWithOp(StringPiece op, const GraphDef& graph) {
return GetFirstElementIndexWithPredicate(
[&op](const NodeDef& node) { return node.op() == op; }, graph.node());
}
std::vector<int> FindAllGraphNodesWithOp(const string& op,
const GraphDef& graph) {
return GetElementIndicesWithPredicate(
[&op](const NodeDef& node) { return node.op() == op; }, graph.node());
}
NodeDef* GetInputNode(const NodeDef& node, const MutableGraphView& graph) {
if (node.input_size() == 0) return nullptr;
MutableGraphView::InputPort input_port = graph.GetInputPort(node.name(), 0);
return graph.GetRegularFanin(input_port).node;
}
NodeDef* GetInputNode(const NodeDef& node, const MutableGraphView& graph,
int64_t i) {
if (node.input_size() <= i) return nullptr;
MutableGraphView::InputPort input_port = graph.GetInputPort(node.name(), i);
return graph.GetRegularFanin(input_port).node;
}
Status GetDatasetOutputTypesAttr(const NodeDef& node,
DataTypeVector* output_types) {
for (const string& attr_name : {"output_types", "Toutput_types"}) {
if (node.attr().contains(attr_name)) {
return GetNodeAttr(node, attr_name, output_types);
}
}
return errors::InvalidArgument("Could not find output_types attr for node: ",
node.name(), " with op: ", node.op());
}
void SetUniqueGraphNodeName(StringPiece prefix, GraphDef* graph,
NodeDef* node) {
string name = string(prefix);
int id = graph->node_size();
while (ContainsGraphNodeWithName(name, *graph)) {
if (name.rfind("_generated") != string::npos &&
(name.rfind("_generated") == (name.size() - strlen("_generated")))) {
name.insert(name.rfind("_generated"), strings::StrCat("/_", id));
} else {
name = strings::StrCat(prefix, "/_", id);
}
++id;
}
node->set_name(std::move(name));
}
void SetUniqueGraphFunctionName(StringPiece prefix,
const FunctionDefLibrary* library,
FunctionDef* function) {
string name = string(prefix);
int id = library->function_size();
while (ContainsGraphFunctionWithName(name, *library)) {
name = strings::StrCat(prefix, "/_", id);
++id;
}
function->mutable_signature()->set_name(std::move(name));
}
void CopyAttribute(const string& attribute_name, const NodeDef& from,
NodeDef* to_node) {
(*to_node->mutable_attr())[attribute_name] = from.attr().at(attribute_name);
}
void ConcatAttributeList(const string& attribute_name, const NodeDef& first,
const NodeDef& second, NodeDef* to_node) {
CopyAttribute(attribute_name, first, to_node);
(*to_node->mutable_attr())
.at(attribute_name)
.mutable_list()
->MergeFrom(second.attr().at(attribute_name).list());
}
Status EnsureNodeNamesUnique(Graph* g) {
std::unordered_map<string, int> name_map;
for (auto node : g->op_nodes()) {
const string& prefix = node->name();
if (auto entry = gtl::FindOrNull(name_map, prefix)) {
string unique_name;
do {
unique_name = strings::StrCat(prefix, "_", ++(*entry));
} while (name_map.find(unique_name) != name_map.end());
name_map.insert({unique_name, 0});
node->set_name(std::move(unique_name));
} else {
name_map.insert({node->name(), 0});
}
}
return absl::OkStatus();
}
Status GetFetchNode(const MutableGraphView& graph, const GrapplerItem& item,
NodeDef** fetch_node) {
if (item.fetch.size() != 1) {
return errors::InvalidArgument(
"Expected only one fetch node but there were ", item.fetch.size(), ": ",
absl::StrJoin(item.fetch, ", "));
}
*fetch_node = graph.GetNode(item.fetch.at(0));
return absl::OkStatus();
}
bool IsItemDerivedFromFunctionDef(const GrapplerItem& item,
const MutableGraphView& graph_view) {
for (const auto& fetch_name : item.fetch) {
auto fetch = graph_view.GetNode(fetch_name);
if (fetch != nullptr && fetch->op() != kRetValOp) {
return false;
}
}
return true;
}
void MaybeSetFusedMetadata(const NodeDef& node1, const NodeDef& node2,
NodeDef* fused_node) {
data::Metadata metadata1;
if (node1.attr().contains("metadata")) {
metadata1.ParseFromString(node1.attr().at("metadata").s());
}
data::Metadata metadata2;
if (node2.attr().contains("metadata")) {
metadata2.ParseFromString(node2.attr().at("metadata").s());
}
data::Metadata fused_metadata;
auto normalize_name = [](const string& name) {
return name.empty() ? "?" : name;
};
*fused_metadata.mutable_name() =
strings::StrCat("fused(", normalize_name(metadata1.name()), ",",
normalize_name(metadata2.name()), ")");
fused_metadata.SerializeToString(
(*fused_node->mutable_attr())["metadata"].mutable_s());
}
bool CopyShapesAndTypesAttrs(const NodeDef& from, NodeDef* to_node) {
auto* attr = gtl::FindOrNull(from.attr(), kOutputTypes);
attr = (attr == nullptr ? gtl::FindOrNull(from.attr(), kToutputTypes) : attr);
if (attr == nullptr) return false;
(*to_node->mutable_attr())[kOutputTypes] = *attr;
attr = gtl::FindOrNull(from.attr(), kOutputShapes);
if (attr == nullptr) return false;
(*to_node->mutable_attr())[kOutputShapes] = *attr;
return true;
}
namespace {
const auto* kSloppyAttrOps = new absl::flat_hash_set<string>{
"ParallelInterleaveDatasetV2",
"ParallelMapDataset",
"ParseExampleDataset",
};
const auto* kReplicateOnSplitAttrOps = new absl::flat_hash_set<string>{
"TensorSliceDataset",
"RangeDataset",
};
const auto* kDeterministicAttrOps = new absl::flat_hash_set<string>{
"LegacyParallelInterleaveDatasetV2",
"ParallelInterleaveDatasetV3",
"ParallelInterleaveDatasetV4",
"ParallelMapDatasetV2",
"ParallelBatchDataset",
};
}
bool HasSloppyAttr(const string& op) { return kSloppyAttrOps->contains(op); }
bool HasReplicateOnSplitAttr(const string& op) {
return kReplicateOnSplitAttrOps->contains(op);
}
bool HasDeterministicAttr(const string& op) {
return kDeterministicAttrOps->contains(op);
}
Status SetMetadataName(const std::string& name, NodeDef* node) {
data::Metadata metadata;
if (node->attr().contains("metadata")) {
metadata.ParseFromString(node->attr().at("metadata").s());
}
if (!metadata.name().empty()) {
return errors::InvalidArgument("Node ", node->name(),
" already has a metadata name \"",
metadata.name(), "\".");
}
*metadata.mutable_name() = name;
metadata.SerializeToString((*node->mutable_attr())["metadata"].mutable_s());
return absl::OkStatus();
}
}
}
} | #include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/framework/dataset_metadata.pb.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace graph_utils {
namespace {
using test::function::NDef;
constexpr char kOutputShapes[] = "output_shapes";
constexpr char kOutputTypes[] = "output_types";
constexpr char kToutputTypes[] = "Toutput_types";
TEST(GraphUtilsTest, GetFirstElementIndexWithPredicate) {
std::vector<int> vec({1, 2, 3, 4, 5, 6});
auto result = GetFirstElementIndexWithPredicate(
[](int elem) { return elem % 3 == 0; }, vec);
EXPECT_EQ(result, 2);
result = GetFirstElementIndexWithPredicate(
[](int elem) { return elem % 7 == 0; }, vec);
EXPECT_EQ(result, -1);
}
TEST(GraphUtilsTest, AddScalarConstNodeBool) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* bool_node = AddScalarConstNode<bool>(true, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(bool_node->name(), *graph.graph()));
EXPECT_EQ(bool_node->attr().at("value").tensor().bool_val(0), true);
}
TEST(GraphUtilsTest, AddScalarConstNodeDouble) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* double_node = AddScalarConstNode<double>(3.14, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(double_node->name(), *graph.graph()));
EXPECT_FLOAT_EQ(double_node->attr().at("value").tensor().double_val(0), 3.14);
}
TEST(GraphUtilsTest, AddScalarConstNodeFloat) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* float_node = AddScalarConstNode<float>(3.14, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(float_node->name(), *graph.graph()));
EXPECT_FLOAT_EQ(float_node->attr().at("value").tensor().float_val(0), 3.14);
}
TEST(GraphUtilsTest, AddScalarConstNodeInt) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int_node = AddScalarConstNode<int>(42, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(int_node->name(), *graph.graph()));
EXPECT_EQ(int_node->attr().at("value").tensor().int_val(0), 42);
}
TEST(GraphUtilsTest, AddScalarConstNodeInt64) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(42, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(int64_node->name(), *graph.graph()));
EXPECT_EQ(int64_node->attr().at("value").tensor().int64_val(0), 42);
}
TEST(GraphUtilsTest, AddScalarConstNodeString) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* string_node = AddScalarConstNode<StringPiece>("hello", &graph);
EXPECT_TRUE(ContainsGraphNodeWithName(string_node->name(), *graph.graph()));
EXPECT_EQ(string_node->attr().at("value").tensor().string_val(0), "hello");
}
TEST(GraphUtilsTest, GetScalarConstNodeInt64) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(128, &graph);
int64_t result;
EXPECT_TRUE(GetScalarConstNodeValue<int64_t>(*int64_node, &result).ok());
EXPECT_EQ(result, 128);
}
TEST(GraphUtilsTest, GetScalarConstNodeBool) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* bool_node = AddScalarConstNode<bool>(true, &graph);
bool result;
EXPECT_TRUE(GetScalarConstNodeValue<bool>(*bool_node, &result).ok());
EXPECT_EQ(result, true);
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithNonConst) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* non_const = AddScalarPlaceholder(DT_INT64, &graph);
int64_t result;
Status s = GetScalarConstNodeValue<int64_t>(*non_const, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Node Placeholder is not a Const node. Op: Placeholder");
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithType) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* int64_node = AddScalarConstNode<int64_t>(128, &graph);
bool result;
Status s = GetScalarConstNodeValue<bool>(*int64_node, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(),
"Node Const should have type bool but has type: int64");
}
TEST(GraphUtilsTest, GetScalarConstNodeErrorWithVector) {
NodeDef node;
node.set_name("Const");
node.set_op("Const");
(*node.mutable_attr())["dtype"].set_type(DT_INT64);
auto tensor = (*node.mutable_attr())["value"].mutable_tensor();
tensor->set_dtype(DT_INT64);
tensor->mutable_tensor_shape()->mutable_dim()->Add()->set_size(1);
tensor->add_int64_val(128);
int64_t result;
Status s = GetScalarConstNodeValue<int64_t>(node, &result);
EXPECT_FALSE(s.ok());
EXPECT_EQ(s.message(), "Node Const should be a scalar but has shape: [1]");
}
TEST(GraphUtilsTest, Compare) {
GraphDef graph_def_a;
MutableGraphView graph_a(&graph_def_a);
GraphDef graph_def_b;
MutableGraphView graph_b(&graph_def_b);
EXPECT_TRUE(Compare(graph_def_a, graph_def_b));
AddNode("A", "OpA", {}, {}, &graph_a);
AddNode("B", "OpB", {"A"}, {}, &graph_a);
EXPECT_FALSE(Compare(graph_def_a, graph_def_b));
graph_def_b.mutable_node()->CopyFrom(graph_def_a.node());
EXPECT_TRUE(Compare(graph_def_a, graph_def_b));
}
TEST(GraphUtilsTest, ContainsGraphNodeWithName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_TRUE(!ContainsGraphNodeWithName("A", *graph.graph()));
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_TRUE(ContainsGraphNodeWithName("A", *graph.graph()));
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_TRUE(!ContainsGraphNodeWithName("A", *graph.graph()));
}
TEST(GraphUtilsTest, ContainsGraphFunctionWithName) {
FunctionDefLibrary library;
EXPECT_FALSE(ContainsGraphFunctionWithName("new_function", library));
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
EXPECT_TRUE(
ContainsGraphFunctionWithName(new_function->signature().name(), library));
}
TEST(GraphUtilsTest, ContainsNodeWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_TRUE(!ContainsNodeWithOp("OpA", *graph.graph()));
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_TRUE(ContainsNodeWithOp("OpA", *graph.graph()));
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_TRUE(!ContainsNodeWithOp("OpA", *graph.graph()));
}
TEST(GraphUtilsTest, FindGraphNodeWithName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithName("A", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
EXPECT_NE(FindGraphNodeWithName("A", *graph.graph()), -1);
EXPECT_TRUE(graph.DeleteNodes({"A"}).ok());
EXPECT_EQ(FindGraphNodeWithName("A", *graph.graph()), -1);
}
TEST(GraphUtilsTest, FindGraphFunctionWithName) {
FunctionDefLibrary library;
EXPECT_EQ(FindGraphFunctionWithName("new_function", library), -1);
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
EXPECT_NE(
FindGraphFunctionWithName(new_function->signature().name(), library), -1);
}
TEST(GraphUtilsTest, FindGraphNodeWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
AddNode("B", "OpB", {"A"}, {}, &graph);
AddNode("A2", "OpA", {"A"}, {}, &graph);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), 0);
EXPECT_TRUE(graph.DeleteNodes({"B"}).ok());
EXPECT_EQ(FindGraphNodeWithOp("OpB", *graph.graph()), -1);
EXPECT_EQ(FindGraphNodeWithName("A2", *graph.graph()), 1);
}
TEST(GraphUtilsTest, FindAllGraphNodesWithOp) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
EXPECT_EQ(FindGraphNodeWithOp("OpA", *graph.graph()), -1);
AddNode("A", "OpA", {}, {}, &graph);
AddNode("B", "OpB", {"A"}, {}, &graph);
AddNode("A2", "OpA", {"B"}, {}, &graph);
std::vector<int> result_indices =
FindAllGraphNodesWithOp("OpA", *graph.graph());
EXPECT_EQ(result_indices.size(), 2);
EXPECT_EQ(result_indices.at(0), 0);
EXPECT_EQ(result_indices.at(1), 2);
EXPECT_TRUE(graph.DeleteNodes({"A2"}).ok());
std::vector<int> result_indices_new =
FindAllGraphNodesWithOp("OpA", *graph.graph());
EXPECT_EQ(result_indices_new.size(), 1);
EXPECT_EQ(result_indices_new.at(0), 0);
}
TEST(GraphUtilsTest, SetUniqueGraphNodeName) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {}, {}, &graph);
EXPECT_NE(node1->name(), node2->name());
EXPECT_TRUE(graph.DeleteNodes({node1->name()}).ok());
NodeDef* node3 = AddNode("", "A", {}, {}, &graph);
EXPECT_NE(node2->name(), node3->name());
}
TEST(GraphUtilsTest, SetUniqueGraphFunctionName) {
FunctionDefLibrary library;
FunctionDef* new_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, new_function);
FunctionDef* other_function = library.add_function();
SetUniqueGraphFunctionName("new_function", &library, other_function);
EXPECT_NE(new_function->signature().name(),
other_function->signature().name());
}
TEST(GraphUtilsTest, GetInputNode) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {node1->name()}, {}, &graph);
EXPECT_EQ(GetInputNode(*node2, graph), node1);
EXPECT_EQ(GetInputNode(*node1, graph), nullptr);
}
TEST(GraphUtilsTest, GetIthInputNode) {
GraphDef graph_def;
MutableGraphView graph(&graph_def);
NodeDef* node1 = AddNode("", "A", {}, {}, &graph);
NodeDef* node2 = AddNode("", "A", {}, {}, &graph);
NodeDef* node3 = AddNode("", "A", {node1->name(), node2->name()}, {}, &graph);
EXPECT_EQ(GetInputNode(*node3, graph), node1);
EXPECT_EQ(GetInputNode(*node3, graph, 1), node2);
EXPECT_EQ(GetInputNode(*node3, graph, 0), node1);
EXPECT_EQ(GetInputNode(*node3, graph, 2), nullptr);
EXPECT_EQ(GetInputNode(*node1, graph), nullptr);
}
TEST(GraphUtilsTest, EnsureNodeNamesUnique) {
Graph g(OpRegistry::Global());
Node *const_0, *const_1, *const_2;
Tensor tensor(DT_INT32, {});
tensor.scalar<int32>()() = 5;
for (auto node : {&const_0, &const_1}) {
TF_EXPECT_OK(NodeBuilder("Const", "Const")
.Attr("value", tensor)
.Attr("dtype", DT_INT32)
.Finalize(&g, node));
}
TF_EXPECT_OK(NodeBuilder("Const_1", "Const")
.Attr("value", tensor)
.Attr("dtype", DT_INT32)
.Finalize(&g, &const_2));
TF_EXPECT_OK(EnsureNodeNamesUnique(&g));
EXPECT_NE(const_0->name(), const_1->name());
EXPECT_NE(const_1->name(), const_2->name());
EXPECT_NE(const_0->name(), const_2->name());
}
TEST(GraphUtilsTest, TestGetFetchNode) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
NodeDef* node3 = AddNode("node3", "Identity", {node2->name()}, {}, &graph);
item.fetch.push_back(node3->name());
NodeDef* sink_node;
TF_EXPECT_OK(GetFetchNode(graph, item, &sink_node));
EXPECT_EQ(sink_node->name(), node3->name());
}
TEST(GraphUtilsTest, TestFindSinkNodeMultipleFetches) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
NodeDef* node3 = AddNode("node3", "Identity", {node2->name()}, {}, &graph);
item.fetch.push_back(node2->name());
item.fetch.push_back(node3->name());
NodeDef* sink_node;
Status s = GetFetchNode(graph, item, &sink_node);
EXPECT_FALSE(s.ok());
}
TEST(GraphUtilsTest, TestFindSinkNodeNoFetches) {
GrapplerItem item;
MutableGraphView graph(&item.graph);
NodeDef* node1 = AddNode("node1", "Identity", {}, {}, &graph);
NodeDef* node2 = AddNode("node2", "Identity", {node1->name()}, {}, &graph);
AddNode("node3", "Identity", {node2->name()}, {}, &graph);
NodeDef* sink_node;
Status s = GetFetchNode(graph, item, &sink_node);
EXPECT_FALSE(s.ok());
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsNoShapes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputTypes, absl::Span<const DataType>{}}});
NodeDef to_node;
EXPECT_FALSE(CopyShapesAndTypesAttrs(from, &to_node));
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsNoTypes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputShapes, absl::Span<const TensorShape>{}}});
NodeDef to_node;
EXPECT_FALSE(CopyShapesAndTypesAttrs(from, &to_node));
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsOutputTypes) {
NodeDef from = NDef("range", "RangeDataset", {},
{{kOutputShapes, 666}, {kOutputTypes, 888}});
NodeDef to_node;
EXPECT_TRUE(CopyShapesAndTypesAttrs(from, &to_node));
EXPECT_EQ(to_node.attr().at(kOutputShapes).i(), 666);
EXPECT_EQ(to_node.attr().at(kOutputTypes).i(), 888);
}
TEST(GraphUtilsTest, TestCopyShapesAndTypesAttrsToutputTypes) {
NodeDef from = NDef("tensor", "TensorDataset", {},
{{kOutputShapes, 666}, {kToutputTypes, 888}});
NodeDef to_node;
EXPECT_TRUE(CopyShapesAndTypesAttrs(from, &to_node));
EXPECT_EQ(to_node.attr().at(kOutputShapes).i(), 666);
EXPECT_EQ(to_node.attr().at(kOutputTypes).i(), 888);
}
TEST(GraphUtilsTest, TestSetMetadataName) {
NodeDef node = NDef("range", "RangeDataset", {},
{{kOutputShapes, 666}, {kOutputTypes, 888}});
EXPECT_TRUE(SetMetadataName("metadata_name", &node).ok());
EXPECT_TRUE(node.attr().contains("metadata"));
data::Metadata metadata;
metadata.ParseFromString(node.attr().at("metadata").s());
EXPECT_EQ("metadata_name", metadata.name());
EXPECT_FALSE(SetMetadataName("new_metadata_name", &node).ok());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/graph_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/graph_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
241e305d-2df4-4bfa-a29d-7db00655992a | cpp | tensorflow/tensorflow | quantization_util | tensorflow/compiler/mlir/lite/kernels/internal/quantization_util.cc | tensorflow/lite/delegates/xnnpack/quantization_util_test.cc | #include "tensorflow/compiler/mlir/lite/kernels/internal/quantization_util.h"
#include <algorithm>
#include <cmath>
#include <limits>
#include "tensorflow/compiler/mlir/lite/kernels/internal/compatibility_macros.h"
#include "tensorflow/compiler/mlir/lite/kernels/internal/cppmath.h"
namespace tflite_migration {
namespace {
constexpr uint64_t kSignMask = 0x8000000000000000LL;
constexpr uint64_t kExponentMask = 0x7ff0000000000000LL;
constexpr int32_t kExponentShift = 52;
constexpr int32_t kExponentBias = 1023;
constexpr uint32_t kExponentIsBadNum = 0x7ff;
constexpr uint64_t kFractionMask = 0x000fffffffc00000LL;
constexpr uint32_t kFractionShift = 22;
constexpr uint32_t kFractionRoundingMask = 0x003fffff;
constexpr uint32_t kFractionRoundingThreshold = 0x00200000;
}
void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
int* shift) {
#if TFLITE_SINGLE_ROUNDING
#endif
if (double_multiplier == 0.) {
*quantized_multiplier = 0;
*shift = 0;
return;
}
#ifdef TFLITE_EMULATE_FLOAT
int64_t q_fixed = IntegerFrExp(double_multiplier, shift);
#else
const double q = std::frexp(double_multiplier, shift);
auto q_fixed = static_cast<int64_t>(TfLiteRound(q * (1LL << 31)));
#endif
TFLITE_DCHECK(q_fixed <= (1LL << 31));
if (q_fixed == (1LL << 31)) {
q_fixed /= 2;
++*shift;
}
TFLITE_DCHECK_LE(q_fixed, std::numeric_limits<int32_t>::max());
if (*shift < -31) {
*shift = 0;
q_fixed = 0;
}
#if TFLITE_SINGLE_ROUNDING
if (*shift > 30) {
*shift = 30;
q_fixed = (1LL << 31) - 1;
}
#endif
*quantized_multiplier = static_cast<int32_t>(q_fixed);
}
void QuantizeMultiplierGreaterThanOne(double double_multiplier,
int32_t* quantized_multiplier,
int* left_shift) {
TFLITE_DCHECK_GT(double_multiplier, 1.);
QuantizeMultiplier(double_multiplier, quantized_multiplier, left_shift);
TFLITE_DCHECK_GE(*left_shift, 0);
}
int64_t IntegerFrExp(double input, int* shift) {
TFLITE_DCHECK_EQ(8, sizeof(double));
union {
double double_value;
uint64_t double_as_uint;
} cast_union;
cast_union.double_value = input;
const uint64_t u = cast_union.double_as_uint;
if ((u & ~kSignMask) == 0) {
*shift = 0;
return 0;
}
const uint32_t exponent_part = ((u & kExponentMask) >> kExponentShift);
if (exponent_part == kExponentIsBadNum) {
*shift = std::numeric_limits<int>::max();
if (u & kFractionMask) {
return 0;
} else {
if (u & kSignMask) {
return std::numeric_limits<int64_t>::min();
} else {
return std::numeric_limits<int64_t>::max();
}
}
}
*shift = (exponent_part - kExponentBias) + 1;
int64_t fraction = 0x40000000 + ((u & kFractionMask) >> kFractionShift);
if ((u & kFractionRoundingMask) > kFractionRoundingThreshold) {
fraction += 1;
}
if (u & kSignMask) {
fraction *= -1;
}
return fraction;
}
double DoubleFromFractionAndShift(int64_t fraction, int shift) {
union {
double double_value;
uint64_t double_as_uint;
} result;
if (shift == std::numeric_limits<int>::max()) {
if (fraction == 0) {
return std::numeric_limits<double>::quiet_NaN();
} else if (fraction > 0) {
return std::numeric_limits<double>::infinity();
} else {
return -std::numeric_limits<double>::infinity();
}
}
if (fraction == 0) {
result.double_as_uint = 0;
return result.double_value;
}
bool is_negative = (fraction < 0);
int64_t encoded_fraction = is_negative ? -fraction : fraction;
int64_t encoded_shift = (shift - 1);
while (encoded_fraction < 0x40000000) {
encoded_fraction *= 2;
encoded_shift -= 1;
}
while (encoded_fraction > 0x80000000) {
encoded_fraction /= 2;
encoded_shift += 1;
}
encoded_fraction -= 0x40000000;
if (encoded_shift < -1022) {
encoded_shift = -1023;
} else if (encoded_shift > 1022) {
encoded_shift = 1023;
}
encoded_shift += kExponentBias;
uint64_t encoded_sign = is_negative ? kSignMask : 0;
result.double_as_uint = encoded_sign | (encoded_shift << kExponentShift) |
(encoded_fraction << kFractionShift);
return result.double_value;
}
double IntegerDoubleMultiply(double a, double b) {
int a_shift;
const int64_t a_fraction = IntegerFrExp(a, &a_shift);
int b_shift;
const int64_t b_fraction = IntegerFrExp(b, &b_shift);
if (a_shift == std::numeric_limits<int>::max() ||
(b_shift == std::numeric_limits<int>::max())) {
return std::numeric_limits<double>::quiet_NaN();
}
const int result_shift = a_shift + b_shift + 1;
const int64_t result_fraction = (a_fraction * b_fraction) >> 32;
return DoubleFromFractionAndShift(result_fraction, result_shift);
}
int IntegerDoubleCompare(double a, double b) {
int a_shift;
const int64_t a_fraction = IntegerFrExp(a, &a_shift);
int b_shift;
const int64_t b_fraction = IntegerFrExp(b, &b_shift);
if (a_shift == std::numeric_limits<int>::max() ||
(b_shift == std::numeric_limits<int>::max())) {
return 1;
}
if ((a_fraction == 0) && (b_fraction < 0)) {
return 1;
} else if ((a_fraction < 0) && (b_fraction == 0)) {
return -1;
} else if (a_shift < b_shift) {
return -1;
} else if (a_shift > b_shift) {
return 1;
} else if (a_fraction < b_fraction) {
return -1;
} else if (a_fraction > b_fraction) {
return 1;
} else {
return 0;
}
}
void PreprocessSoftmaxScaling(double beta, double input_scale,
int input_integer_bits,
int32_t* quantized_multiplier, int* left_shift) {
#if TFLITE_SINGLE_ROUNDING
const double max_real_multiplier = (1LL << 30) - 1.0;
#else
const double max_real_multiplier = (1LL << 31) - 1.0;
#endif
#ifdef TFLITE_EMULATE_FLOAT
const double input_beta = IntegerDoubleMultiply(beta, input_scale);
int shift;
int64_t fraction = IntegerFrExp(input_beta, &shift);
shift += (31 - input_integer_bits);
double input_beta_real_multiplier =
DoubleFromFractionAndShift(fraction, shift);
if (IntegerDoubleCompare(input_beta_real_multiplier, max_real_multiplier) >
0) {
input_beta_real_multiplier = max_real_multiplier;
}
#else
const double input_beta_real_multiplier =
std::min<double>(beta * input_scale * (1 << (31 - input_integer_bits)),
max_real_multiplier);
#endif
QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier,
quantized_multiplier, left_shift);
}
int CalculateInputRadius(int input_integer_bits, int input_left_shift,
int total_signed_bits) {
#ifdef TFLITE_EMULATE_FLOAT
int64_t result = (1 << input_integer_bits) - 1;
result <<= (total_signed_bits - input_integer_bits);
result >>= input_left_shift;
return result;
#else
const double max_input_rescaled =
1.0 * ((1 << input_integer_bits) - 1) *
(1LL << (total_signed_bits - input_integer_bits)) /
(1LL << input_left_shift);
return static_cast<int>(std::floor(max_input_rescaled));
#endif
}
} | #include "tensorflow/lite/delegates/xnnpack/quantization_util.h"
#include <stdint.h>
#include <limits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/internal/runtime_shape.h"
using ::testing::FloatNear;
using ::testing::Pointwise;
namespace tflite {
namespace xnnpack {
namespace {
template <typename T>
inline double ScaleFromMinMax(const float min, const float max) {
return (max - min) / ((std::numeric_limits<T>::max() * 1.0) -
std::numeric_limits<T>::min());
}
template <typename T>
inline int32_t ZeroPointFromMinMax(const float min, const float max) {
return static_cast<int32_t>(std::numeric_limits<T>::min()) +
static_cast<int32_t>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
}
TEST(Dequantize, Int8) {
std::vector<int8_t> quantized_data = {-3, -2, -1, 1, 2, 3};
std::vector<float> dequantized_data(quantized_data.size());
RuntimeShape tensor_shape(1, quantized_data.size());
const float min = -12.8f;
const float max = 12.7f;
const double scale = ScaleFromMinMax<int8_t>(min, max);
const int32_t zero_point = ZeroPointFromMinMax<int8_t>(min, max);
DequantizeInt8(quantized_data.data(), dequantized_data.data(), tensor_shape,
zero_point, scale);
EXPECT_THAT(dequantized_data,
Pointwise(FloatNear(1e-6), {-0.3, -0.2, -0.1, 0.1, 0.2, 0.3}));
}
TEST(Dequantize, PerChannelInt8) {
const std::vector<float> scales = {0.5, 0.25};
const std::vector<int> zero_points = {-1, -1};
const int quantized_dimension = 0;
const RuntimeShape shape({2, 5});
const std::vector<int8_t> input = {-128, -127, -126, -125, -124,
123, 124, 125, 126, 127};
std::vector<float> output(10, -1);
PerChannelDequantizeInt8(input.data(), output.data(), shape,
zero_points.data(), scales.data(),
quantized_dimension);
EXPECT_THAT(output,
Pointwise(FloatNear(1e-6), {-63.5, -63., -62.5, -62., -61.5, 31.,
31.25, 31.5, 31.75, 32.}));
}
TEST(Dequantize, Float16) {
std::vector<uint16_t> quantized_data = {
UINT16_C(0x3000),
UINT16_C(0x3400),
UINT16_C(0x3800),
UINT16_C(0x3C00),
UINT16_C(0x4000),
UINT16_C(0x4400)
};
std::vector<float> dequantized_data(quantized_data.size());
DequantizeFloat16(quantized_data.data(), dequantized_data.data(),
quantized_data.size());
EXPECT_THAT(dequantized_data,
Pointwise(FloatNear(1e-6), {0.125, 0.25, 0.5, 1., 2., 4.}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/kernels/internal/quantization_util.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/quantization_util_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2d30b848-631b-4668-8f6c-fe02c615407f | cpp | tensorflow/tensorflow | literal_comparison | third_party/xla/xla/literal_comparison.cc | third_party/xla/xla/literal_comparison_test.cc | #include "xla/literal_comparison.h"
#include <complex>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <array>
#include <cmath>
#include <cstdint>
#include <limits>
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/error_spec.h"
#include "xla/fp_util.h"
#include "xla/index_util.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
using absl::StrAppend;
using absl::StrAppendFormat;
using absl::StrCat;
namespace xla {
namespace literal_comparison {
namespace {
// Returns true iff `lhs` and `rhs` have identical bit patterns. Unlike
// operator==, this treats two NaNs with the same payload as equal and
// distinguishes +0.0 from -0.0 (their bit patterns differ).
// `multi_index` is accepted for signature parity with other comparators but
// is not used here.
template <typename FloatT, typename UnsignedT>
bool CompareFloatsBitwiseEqual(FloatT lhs, FloatT rhs,
                               absl::Span<const int64_t> multi_index) {
  const auto lhs_bits = Eigen::numext::bit_cast<UnsignedT>(lhs);
  const auto rhs_bits = Eigen::numext::bit_cast<UnsignedT>(rhs);
  return lhs_bits == rhs_bits;
}
// Compares two scalar values of the same type for equality.
// Floating-point types are compared bitwise (see CompareFloatsBitwiseEqual);
// complex types compare the real and imaginary components independently;
// all remaining types fall back to operator==. `multi_index` identifies the
// array position being compared and is forwarded to the helpers.
template <typename NativeT>
bool CompareEqual(NativeT lhs, NativeT rhs,
                  absl::Span<const int64_t> multi_index) {
  if constexpr (is_specialized_floating_point_v<NativeT>) {
    using BitT = UnsignedIntegerTypeForSizeType<sizeof(NativeT)>;
    return CompareFloatsBitwiseEqual<NativeT, BitT>(lhs, rhs, multi_index);
  } else if constexpr (is_complex_v<NativeT>) {
    using ComponentT = typename NativeT::value_type;
    const bool real_equal =
        CompareEqual<ComponentT>(lhs.real(), rhs.real(), multi_index);
    return real_equal &&
           CompareEqual<ComponentT>(lhs.imag(), rhs.imag(), multi_index);
  } else {
    return lhs == rhs;
  }
}
// Builds the InvalidArgument error for a failed bitwise floating-point
// comparison. Each value is rendered three ways for debuggability: its raw
// bits in hex, a round-trippable decimal string, and a C99 hex-float (%a),
// followed by the failing array index.
template <typename NativeT, typename UnsignedT>
absl::Status MakeBitwiseErrorStatus(NativeT lhs, NativeT rhs,
                                    absl::Span<const int64_t> multi_index) {
  const auto lhs_bits = Eigen::numext::bit_cast<UnsignedT>(lhs);
  const auto rhs_bits = Eigen::numext::bit_cast<UnsignedT>(rhs);
  // %a requires a double; widen explicitly (NativeT may be a narrow float).
  const double lhs_as_double = static_cast<double>(lhs);
  const double rhs_as_double = static_cast<double>(rhs);
  return InvalidArgument(
      "floating values are not bitwise-equal; and equality testing "
      "was requested: %s=%s=%a vs %s=%s=%a at array index %s",
      StrCat(absl::Hex(lhs_bits)), RoundTripFpToString(lhs), lhs_as_double,
      StrCat(absl::Hex(rhs_bits)), RoundTripFpToString(rhs), rhs_as_double,
      LiteralUtil::MultiIndexAsString(multi_index));
}
// Produces the mismatch error for one element pair. Integral values get a
// plain expected/actual message, floating-point values a bitwise diagnostic,
// and complex values report whichever component differs (real part first).
template <typename NativeT>
absl::Status MakeErrorStatus(NativeT lhs, NativeT rhs,
                             absl::Span<const int64_t> multi_index) {
  if constexpr (is_specialized_integral_v<NativeT>) {
    return InvalidArgument(
        "first mismatch at array index %s:\n expected value: %s\n actual "
        "value: %s",
        LiteralUtil::MultiIndexAsString(multi_index), StrCat(lhs), StrCat(rhs));
  }
  if constexpr (is_specialized_floating_point_v<NativeT>) {
    using BitT = UnsignedIntegerTypeForSizeType<sizeof(NativeT)>;
    return MakeBitwiseErrorStatus<NativeT, BitT>(lhs, rhs, multi_index);
  }
  if constexpr (is_complex_v<NativeT>) {
    using ComponentT = typename NativeT::value_type;
    const bool real_parts_equal =
        CompareEqual<ComponentT>(lhs.real(), rhs.real(), multi_index);
    return real_parts_equal
               ? MakeErrorStatus(lhs.imag(), rhs.imag(), multi_index)
               : MakeErrorStatus(lhs.real(), rhs.real(), multi_index);
  }
}
// Recursively walks one array dimension per call and compares elements
// exactly once `dimension` reaches the rank. When `mismatched` is non-null,
// every element is visited and errors are accumulated (so the PRED mask is
// fully populated); otherwise the walk aborts on the first mismatch.
template <typename NativeT>
absl::Status Equal(LiteralSlice expected, LiteralSlice actual,
                   absl::Span<int64_t> multi_index, int64_t dimension,
                   Literal* mismatched = nullptr) {
  if (dimension == expected.shape().dimensions_size()) {
    // Base case: multi_index is fully specified; compare a single element.
    NativeT expected_value = expected.Get<NativeT>(multi_index);
    NativeT actual_value = actual.Get<NativeT>(multi_index);
    bool result =
        CompareEqual<NativeT>(expected_value, actual_value, multi_index);
    if (mismatched) {
      mismatched->Set<bool>(multi_index, !result);
    }
    return result ? absl::OkStatus()
                  : MakeErrorStatus<NativeT>(expected_value, actual_value,
                                             multi_index);
  }
  absl::Status result;
  // Dynamic dimensions are bounded by their runtime size, not the declared
  // maximum.
  int64_t upper_bound = expected.shape().dimensions(dimension);
  if (expected.shape().is_dynamic_dimension(dimension)) {
    upper_bound = expected.GetDynamicSize(dimension);
  }
  for (int64_t i = 0; i < upper_bound; ++i) {
    multi_index[dimension] = i;
    if (mismatched != nullptr) {
      // Accumulate so the mismatch mask covers all elements.
      result.Update(Equal<NativeT>(expected, actual, multi_index, dimension + 1,
                                   mismatched));
    } else {
      // Fast path: bail out on the first mismatching element.
      TF_RETURN_IF_ERROR(Equal<NativeT>(expected, actual, multi_index,
                                        dimension + 1, mismatched));
    }
  }
  return result;
}
// Counts array elements across an arbitrarily nested shape: tuples
// contribute the sum over their members, arrays their element count, and
// anything else (e.g. tokens) zero.
int64_t RecursiveElementCount(const Shape& shape) {
  if (shape.IsArray()) {
    return ShapeUtil::ElementsIn(shape);
  }
  if (shape.IsTuple()) {
    const int64_t num_members = ShapeUtil::TupleElementCount(shape);
    int64_t total = 0;
    for (int64_t i = 0; i < num_members; ++i) {
      total += RecursiveElementCount(ShapeUtil::GetTupleElementShape(shape, i));
    }
    return total;
  }
  return 0;
}
// True iff `value` is positive or negative infinity.
template <typename NativeT>
bool IsInf(NativeT value) {
  return Eigen::numext::isinf(value);
}
// True iff `val` is a NaN of any payload.
template <typename NativeT>
bool IsNan(NativeT val) {
  return Eigen::numext::isnan(val);
}
// Renders a floating-point (or complex) value with enough significant digits
// to round-trip, padded to a fixed width so columns of printed values align.
template <typename NativeT>
std::string FpValueToString(NativeT value) {
  if constexpr (is_complex_v<NativeT>) {
    return absl::StrCat(FpValueToString(value.real()), " + ",
                        FpValueToString(value.imag()));
  }
  if constexpr (is_specialized_floating_point_v<NativeT>) {
    constexpr int kPrecisionDigits = std::numeric_limits<NativeT>::max_digits10;
    const int kExponentDigits =
        std::ceil(std::log10(std::numeric_limits<NativeT>::max_exponent10));
    constexpr int kExtraChars = 4;
    const int kTotalChars = kPrecisionDigits * kExponentDigits + kExtraChars;
    return absl::StrFormat("%*.*g", kTotalChars, kPrecisionDigits,
                           static_cast<double>(value));
  }
}
// Magnitude of `value` as a double (complex inputs yield the modulus).
template <typename NativeT>
double FpAbsoluteValue(NativeT value) {
  const auto magnitude = Eigen::numext::abs(value);
  return static_cast<double>(magnitude);
}
// Compares one pair of floating-point (or complex) array literals element by
// element under an ErrorSpec, gathering statistics (histograms, top
// mismatches, a PRED mismatch mask) used to build a detailed error message.
// One instance performs exactly one comparison; use the static Compare().
template <typename NativeT>
class NearComparator {
 public:
  // Returns OK iff every element of `actual` is within `error` of the
  // corresponding element of `expected`; otherwise returns InvalidArgument
  // carrying the summary from ErrorMessage(). If `miscompare_callback` is
  // set it receives the mismatch mask and error-bucket histograms.
  static absl::Status Compare(const LiteralSlice& expected,
                              const LiteralSlice& actual,
                              const ShapeIndex& shape_index, ErrorSpec error,
                              bool detailed_message,
                              const MiscompareCallback& miscompare_callback) {
    NearComparator<NativeT> comparator(expected, actual, shape_index, error,
                                       detailed_message, miscompare_callback);
    return comparator.Run();
  }

 private:
  // Bookkeeping for one mismatching element. Ordered by relative error so a
  // multiset naturally keeps the worst offenders.
  struct Mismatch {
    NativeT actual;
    NativeT expected;
    double rel_error;
    double abs_error;
    // Linear index of the element within the literal.
    int64_t linear_index;
    // ULP-style distance in the configured low-precision type; -1 when that
    // comparison mode is disabled.
    int float_distance = -1;
    bool operator<(const Mismatch& other) const {
      return rel_error < other.rel_error;
    }
    std::string ToString(const Shape& shape) const {
      auto s = absl::StrFormat(
          "actual %s, expected %s, index %s, rel error %8.3g, abs error "
          "%8.3g",
          FpValueToString(actual), FpValueToString(expected),
          LiteralUtil::MultiIndexAsString(
              IndexUtil::LinearIndexToMultidimensionalIndex(shape,
                                                            linear_index)),
          rel_error, abs_error);
      if (float_distance >= 0) {
        StrAppendFormat(&s, ", float distance %d", float_distance);
      }
      return s;
    }
  };

  NearComparator(const LiteralSlice& expected, const LiteralSlice& actual,
                 const ShapeIndex& shape_index, ErrorSpec error,
                 bool detailed_message,
                 const MiscompareCallback& miscompare_callback)
      : expected_(expected),
        actual_(actual),
        shape_index_(shape_index),
        error_(error),
        detailed_message_(detailed_message),
        miscompare_callback_(miscompare_callback),
        abs_value_buckets_(kAbsValueBucketBounds.size() - 1, {0, 0}),
        abs_error_buckets_(kErrorBucketBounds.size(), 0),
        rel_error_buckets_(kErrorBucketBounds.size(), 0) {}

  // Runs the comparison and constructs the resulting status.
  absl::Status Run() {
    TF_RETURN_IF_ERROR(EqualShapes(expected_.shape(), actual_.shape()));
    // Tuples are decomposed by the caller (NearHelper); only arrays are
    // handled here.
    if (!expected_.shape().IsArray()) {
      return InvalidArgument("Expected array shape; got %s.",
                             ShapeUtil::HumanString(expected_.shape()));
    }
    // PRED-shaped mask marking which elements mismatched.
    mismatches_ = Literal(ShapeUtil::ChangeElementType(actual_.shape(), PRED));
    mismatches_.PopulateWithValue(false);
    CompareLiterals();
    if (num_mismatches_ == 0) {
      return absl::OkStatus();
    } else if (!VLOG_IS_ON(1) && miscompare_callback_ != nullptr) {
      // NOTE(review): the callback is skipped when VLOG(1) is enabled —
      // presumably to avoid duplicate reporting; confirm before relying on
      // this interaction.
      miscompare_callback_(
          expected_, actual_, mismatches_, shape_index_,
          ErrorBuckets(abs_error_buckets_, rel_error_buckets_));
    }
    return InvalidArgument("%s", ErrorMessage());
  }

  // Tallies `value` into the |value|-magnitude histogram. The final bucket
  // catches anything not matched by an earlier range check.
  void UpdateAbsValueBucket(NativeT value, bool is_mismatch) {
    const double abs_value = FpAbsoluteValue(value);
    for (int i = 0; i < abs_value_buckets_.size(); ++i) {
      if (i == abs_value_buckets_.size() - 1 ||
          (abs_value >= kAbsValueBucketBounds[i] &&
           abs_value < kAbsValueBucketBounds[i + 1])) {
        abs_value_buckets_[i].first++;
        if (is_mismatch) {
          abs_value_buckets_[i].second++;
        }
        return;
      }
    }
  }

  // Cumulative histogram: increments every bucket whose lower bound the
  // error meets or exceeds.
  void UpdateErrorBucket(double error, absl::Span<int64_t> error_buckets) {
    CHECK_EQ(error_buckets.size(), kErrorBucketBounds.size());
    for (int i = 0; i < error_buckets.size(); ++i) {
      if (error >= kErrorBucketBounds[i]) {
        error_buckets[i]++;
      }
    }
  }

  // Distance between the two values after rounding both into the configured
  // low-precision type; -1 when no such type is configured.
  template <typename T>
  int CalculateFloatDistance(T expected, T actual) {
    if (error_.low_precision_fp_error_spec.type ==
        PrimitiveType::PRIMITIVE_TYPE_INVALID)
      return -1;
    return primitive_util::FloatingPointTypeSwitch<int>(
        [&](const auto kType) -> int {
          using NarrowNativeT = primitive_util::NativeTypeOf<kType>;
          if constexpr (std::is_same_v<NarrowNativeT, tsl::float8_e3m4>) {
            // e3m4 is converted via half first — presumably because a direct
            // conversion is unavailable; TODO confirm.
            return CalculateDistanceInFloats(NarrowNativeT(half(expected)),
                                             NarrowNativeT(half(actual)));
          } else {
            return CalculateDistanceInFloats(NarrowNativeT(expected),
                                             NarrowNativeT(actual));
          }
        },
        error_.low_precision_fp_error_spec.type);
  }

  // Classifies one element pair, computing abs/rel error (infinity when the
  // comparison is categorically wrong, e.g. NaN vs non-NaN), updating all
  // histograms, and recording the mismatch if it exceeds BOTH bounds.
  template <typename T>
  void CompareValues(T expected, T actual, int64_t linear_index) {
    double abs_error;
    double rel_error;
    int float_distance = -1;
    if (CompareEqual<T>(expected, actual, {linear_index})) {
      abs_error = 0;
      rel_error = 0;
    } else if (IsNan(expected) || IsNan(actual)) {
      // NaN handling depends on the spec: NaN==NaN may be allowed outright,
      // or only actual-side NaNs may be forgiven (relaxed_nans).
      if (error_.all_nans_are_equivalent && IsNan(expected) && IsNan(actual)) {
        abs_error = 0;
        rel_error = 0;
      } else if ((!error_.relaxed_nans && IsNan(expected) != IsNan(actual)) ||
                 (error_.relaxed_nans && !IsNan(expected) && IsNan(actual))) {
        num_nan_mismatches_++;
        abs_error = std::numeric_limits<double>::infinity();
        rel_error = std::numeric_limits<double>::infinity();
      } else {
        abs_error = 0;
        rel_error = 0;
      }
    } else if (IsInf(actual) && !IsInf(expected) && error_.fewer_infs_ok) {
      // fewer_infs_ok: treat an unexpected inf as the largest finite value
      // of matching sign and measure the error from there.
      T actual_finite = actual > T{0} ? std::numeric_limits<T>::max()
                                      : std::numeric_limits<T>::lowest();
      abs_error = FpAbsoluteValue(actual_finite - expected);
      if (expected != T{0}) {
        rel_error = abs_error / FpAbsoluteValue(expected);
      } else {
        rel_error = std::numeric_limits<double>::infinity();
      }
    } else if (IsInf(expected) || IsInf(actual)) {
      // Bitwise equality above already handled matching infinities.
      CHECK(!CompareEqual(expected, actual, {linear_index}));
      abs_error = std::numeric_limits<double>::infinity();
      rel_error = std::numeric_limits<double>::infinity();
    } else {
      float_distance = CalculateFloatDistance<T>(expected, actual);
      abs_error = FpAbsoluteValue(actual - expected);
      if (expected != T{0}) {
        rel_error = abs_error / FpAbsoluteValue(expected);
      } else {
        rel_error = std::numeric_limits<double>::infinity();
      }
    }
    // When a low-precision spec is active, values within N representable
    // steps of each other are accepted regardless of abs/rel error.
    bool is_within_n_floats = false;
    bool should_use_float_error_spec =
        error_.low_precision_fp_error_spec.type !=
        PrimitiveType::PRIMITIVE_TYPE_INVALID;
    if (should_use_float_error_spec &&
        error_.low_precision_fp_error_spec.within_n_values >= 0) {
      is_within_n_floats =
          float_distance <= error_.low_precision_fp_error_spec.within_n_values;
    }
    const bool is_abs_mismatch =
        (should_use_float_error_spec && is_within_n_floats)
            ? false
            : (abs_error > error_.abs);
    const bool is_rel_mismatch =
        (should_use_float_error_spec && is_within_n_floats)
            ? false
            : (rel_error > error_.rel);
    // An element only counts as mismatched when it violates BOTH bounds.
    const bool is_mismatch = is_abs_mismatch && is_rel_mismatch;
    if (is_abs_mismatch) {
      num_abs_mismatches_++;
      UpdateErrorBucket(rel_error, absl::MakeSpan(rel_error_buckets_));
    }
    if (is_rel_mismatch) {
      num_rel_mismatches_++;
      UpdateErrorBucket(abs_error, absl::MakeSpan(abs_error_buckets_));
    }
    UpdateAbsValueBucket(actual, is_mismatch);
    if (!is_mismatch) {
      return;
    }
    num_mismatches_++;
    // Keep only the kTopRelativeErrorCount worst mismatches by rel error.
    if (top_rel_mismatches_.size() < kTopRelativeErrorCount ||
        rel_error > top_rel_mismatches_.begin()->rel_error) {
      Mismatch mismatch = {actual,
                           expected,
                           rel_error,
                           abs_error,
                           linear_index,
                           float_distance};
      top_rel_mismatches_.insert(mismatch);
      if (top_rel_mismatches_.size() > kTopRelativeErrorCount) {
        top_rel_mismatches_.erase(top_rel_mismatches_.begin());
      }
    }
    mismatches_.data<bool>()[linear_index] = true;
  }

  // Complex overload: components are compared independently, but an element
  // whose real AND imaginary parts both mismatch is counted only once.
  template <typename T>
  void CompareValues(std::complex<T> expected, std::complex<T> actual,
                     int64_t linear_index) {
    const auto both_parts_mismatch = num_mismatches_ + 2;
    CompareValues<T>(expected.real(), actual.real(), linear_index);
    CompareValues<T>(expected.imag(), actual.imag(), linear_index);
    if (num_mismatches_ == both_parts_mismatch) {
      num_mismatches_--;
    }
  }

  // Fast linear scan when both literals share a layout and are static;
  // otherwise falls back to the index-by-index walk.
  void CompareLiterals() {
    if (LayoutUtil::Equal(actual_.shape().layout(),
                          expected_.shape().layout()) &&
        expected_.shape().is_static() && actual_.shape().is_static()) {
      absl::Span<const NativeT> expected_data = expected_.data<NativeT>();
      absl::Span<const NativeT> actual_data = actual_.data<NativeT>();
      const int64_t len = expected_data.size();
      for (int64_t i = 0; i < len; ++i) {
        CompareValues(expected_data[i], actual_data[i], i);
      }
      return;
    }
    std::vector<int64_t> multi_index(actual_.shape().rank(), 0);
    CompareLiteralsSlow(0, &multi_index);
  }

  // Recursive multidimensional walk; honors dynamic dimension sizes.
  void CompareLiteralsSlow(int64_t dimension,
                           std::vector<int64_t>* multi_index) {
    if (dimension == multi_index->size()) {
      CompareValues(expected_.Get<NativeT>(*multi_index),
                    actual_.Get<NativeT>(*multi_index),
                    IndexUtil::MultidimensionalIndexToLinearIndex(
                        actual_.shape(), *multi_index));
    } else {
      int64_t upper_bound = expected_.shape().dimensions(dimension);
      if (expected_.shape().is_dynamic_dimension(dimension)) {
        upper_bound = expected_.GetDynamicSize(dimension);
      }
      for (int64_t i = 0; i < upper_bound; ++i) {
        (*multi_index)[dimension] = i;
        CompareLiteralsSlow(dimension + 1, multi_index);
      }
    }
  }

  // Builds the multi-line summary: totals, top mismatches, and (when
  // detailed_message_ is set) magnitude/error-bucket breakdowns.
  std::string ErrorMessage() {
    std::string out;
    int64_t element_count = ShapeUtil::ElementsIn(actual_.shape());
    auto percent_string = [](double a, double b) {
      double pct = b == 0.0 ? 0.0 : 100.0 * a / b;
      return absl::StrFormat("%0.4f%%", pct);
    };
    StrAppendFormat(
        &out,
        "\nMismatch count %d (%s) in shape %s (%d elements), abs bound "
        "%g, rel bound %g\n",
        num_mismatches_, percent_string(num_mismatches_, element_count),
        ShapeUtil::HumanString(actual_.shape()),
        ShapeUtil::ElementsIn(actual_.shape()), error_.abs, error_.rel);
    if (num_nan_mismatches_ > 0) {
      StrAppend(&out, "nan mismatches ", num_nan_mismatches_, "\n");
    }
    StrAppendFormat(&out, "Top relative error mismatches:\n");
    // Reverse iteration: worst relative error first.
    for (auto it = top_rel_mismatches_.rbegin();
         it != top_rel_mismatches_.rend(); ++it) {
      StrAppend(&out, "  ", it->ToString(actual_.shape()), "\n");
    }
    if (!detailed_message_) {
      return out;
    }
    StrAppend(&out, "Absolute magnitude breakdown of actual values:\n");
    CHECK_EQ(abs_value_buckets_.size() + 1, kAbsValueBucketBounds.size());
    for (int i = 0; i < abs_value_buckets_.size(); ++i) {
      const int64_t bucket_size = abs_value_buckets_[i].first;
      const int64_t bucket_mismatches = abs_value_buckets_[i].second;
      std::string mismatch_str =
          bucket_mismatches > 0
              ? absl::StrFormat(", mismatches %d", bucket_mismatches)
              : "";
      StrAppendFormat(&out, "  %-6g <= x < %-6g : %7d (%9s)%s\n",
                      kAbsValueBucketBounds[i], kAbsValueBucketBounds[i + 1],
                      bucket_size, percent_string(bucket_size, element_count),
                      mismatch_str);
    }
    auto print_accum_buckets = [&](const std::string& header, int64_t total,
                                   absl::Span<const int64_t> buckets) {
      StrAppend(&out, header, ":\n");
      StrAppendFormat(&out, "  <  %-6g : %7d (%s)\n", kErrorBucketBounds[0],
                      total - buckets[0],
                      percent_string(total - buckets[0], total));
      CHECK_EQ(buckets.size(), kErrorBucketBounds.size());
      for (int i = 0; i < kErrorBucketBounds.size(); ++i) {
        StrAppendFormat(&out, "  >= %-6g : %7d (%s)\n", kErrorBucketBounds[i],
                        buckets[i], percent_string(buckets[i], total));
      }
    };
    StrAppendFormat(&out, "Elements exceeding abs error bound %g: %d (%s)\n",
                    error_.abs, num_abs_mismatches_,
                    percent_string(num_abs_mismatches_, element_count));
    print_accum_buckets(
        "Relative error breakdown of elements exceeding abs error bound",
        num_abs_mismatches_, rel_error_buckets_);
    StrAppendFormat(&out, "Elements exceeding rel error bound %g: %d (%s)\n",
                    error_.rel, num_rel_mismatches_,
                    percent_string(num_rel_mismatches_, element_count));
    print_accum_buckets(
        "Absolute error breakdown of elements exceeding rel error bound",
        num_rel_mismatches_, abs_error_buckets_);
    return out;
  }

  LiteralSlice expected_;
  LiteralSlice actual_;
  ShapeIndex shape_index_;
  ErrorSpec error_;
  bool detailed_message_;
  MiscompareCallback miscompare_callback_;
  // Aggregate counters populated by CompareValues().
  int64_t num_mismatches_ = 0;
  int64_t num_nan_mismatches_ = 0;
  int64_t num_abs_mismatches_ = 0;
  int64_t num_rel_mismatches_ = 0;
  // PRED-shaped mask of mismatching element positions.
  Literal mismatches_;
  // How many of the worst mismatches to retain for the error message.
  static constexpr int64_t kTopRelativeErrorCount = 5;
  std::multiset<Mismatch> top_rel_mismatches_;
  // Bucket bounds for the |actual| magnitude histogram (half-open ranges).
  static inline constexpr std::array<double, 7> kAbsValueBucketBounds = {
      0.0, 0.0001, 0.001, 0.01, 0.1, 1, std::numeric_limits<double>::infinity(),
  };
  std::vector<std::pair<int64_t, int64_t>> abs_value_buckets_;
  // Lower bounds for the cumulative abs/rel error histograms.
  static inline constexpr std::array<double, 5> kErrorBucketBounds = {
      0.0001, 0.001, 0.01, 0.1, 1};
  std::vector<int64_t> abs_error_buckets_;
  std::vector<int64_t> rel_error_buckets_;
};
// Recursive implementation behind Equal(): verifies shapes (including
// dynamic dimensions), walks tuples element-by-element, and dispatches
// arrays to the typed Equal<NativeT>() comparison. With a
// `miscompare_callback`, all elements are compared (errors accumulated) and
// the callback receives a PRED mask of mismatching positions; without one,
// comparison stops at the first failure.
absl::Status EqualHelper(const LiteralSlice& expected,
                         const LiteralSlice& actual,
                         const ShapeIndex& shape_index,
                         const MiscompareCallback& miscompare_callback) {
  if (expected.shape().is_static() && actual.shape().is_static()) {
    TF_RETURN_IF_ERROR(EqualShapes(expected.shape(), actual.shape()));
  } else {
    TF_RETURN_IF_ERROR(EqualDynamicShapesAndDimensions(expected, actual));
  }
  absl::Status result;
  if (expected.shape().IsTuple()) {
    ShapeIndex next_index = shape_index;
    for (int i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) {
      next_index.push_back(i);
      absl::Status tuple_result =
          EqualHelper(LiteralSlice(expected, {i}), LiteralSlice(actual, {i}),
                      next_index, miscompare_callback);
      if (miscompare_callback) {
        // Keep going so the callback observes every mismatching element.
        result.Update(tuple_result);
      } else {
        TF_RETURN_IF_ERROR(tuple_result);
      }
      next_index.pop_back();
    }
  } else {
    std::vector<int64_t> multi_index(expected.shape().dimensions_size(), 0);
    auto index = absl::MakeSpan(multi_index);
    Shape unequal_shape = ShapeUtil::MakeShape(PrimitiveType::PRED,
                                               expected.shape().dimensions());
    Literal miscompared(unequal_shape);
    // The mismatch mask is only materialized when a callback wants it.
    Literal* miscompared_ptr =
        (miscompare_callback == nullptr ? nullptr : &miscompared);
    primitive_util::PrimitiveTypeSwitch<void>(
        [&](auto primitive_type_constant) -> void {
          if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
            using NativeT =
                primitive_util::NativeTypeOf<primitive_type_constant>;
            result =
                Equal<NativeT>(expected, actual, index, 0, miscompared_ptr);
            return;
          }
          if constexpr (primitive_type_constant == TOKEN) {
            // Tokens carry no data; they trivially compare equal.
            return;
          }
          LOG(FATAL) << "Unsupported primitive type: "
                     << PrimitiveType_Name(expected.shape().element_type());
        },
        expected.shape().element_type());
    if (!result.ok() && miscompare_callback) {
      miscompare_callback(expected, actual, LiteralSlice(miscompared),
                          shape_index, ErrorBuckets());
    }
  }
  return result;
}
// Recursive implementation behind Near(): tuples are decomposed element by
// element (errors from all elements are concatenated), floating-point and
// complex arrays go through NearComparator, and everything else falls back
// to exact equality via EqualHelper.
absl::Status NearHelper(const LiteralSlice& expected,
                        const LiteralSlice& actual,
                        const ShapeIndex& shape_index, const ErrorSpec& error,
                        std::optional<bool> detailed_message,
                        const MiscompareCallback& miscompare_callback) {
  if (expected.shape().is_static() && actual.shape().is_static()) {
    TF_RETURN_IF_ERROR(EqualShapes(expected.shape(), actual.shape()));
  } else {
    TF_RETURN_IF_ERROR(EqualDynamicShapesAndDimensions(expected, actual));
  }
  if (expected.shape().IsTuple()) {
    absl::Status return_status;
    for (int64_t i = 0; i < ShapeUtil::TupleElementCount(expected.shape());
         ++i) {
      const auto expected_element = LiteralSlice(expected, {i});
      const auto actual_element = LiteralSlice(actual, {i});
      ShapeIndex element_index = shape_index;
      element_index.push_back(i);
      absl::Status element_result =
          NearHelper(expected_element, actual_element, element_index, error,
                     detailed_message, miscompare_callback);
      if (!element_result.ok()) {
        // Prefix each failure with the tuple index it occurred at, then
        // concatenate failures from all elements into one status.
        element_result =
            InvalidArgument("Array at shape index %s, %s",
                            element_index.ToString(), element_result.message());
        if (return_status.ok()) {
          return_status = element_result;
        } else {
          return_status = AppendStatus(return_status, element_result.message());
        }
      }
    }
    // Only the outermost call wraps the combined message with a total.
    if (!return_status.ok() && shape_index.empty()) {
      int64_t total_elements = RecursiveElementCount(actual.shape());
      return_status =
          InvalidArgument("\nMismatches in shape %s (%d elements):\n%s",
                          ShapeUtil::HumanString(actual.shape()),
                          total_elements, return_status.message());
    }
    return return_status;
  }
  if (ShapeUtil::ElementIsFloating(expected.shape()) ||
      ShapeUtil::ElementIsComplex(expected.shape())) {
    // Default: emit the detailed breakdown only for non-trivial literals.
    bool use_detailed_message = detailed_message.value_or(
        ShapeUtil::ElementsIn(expected.shape()) >= 64);
    return primitive_util::PrimitiveTypeSwitch<absl::Status>(
        [&](auto primitive_type) -> absl::Status {
          if constexpr (primitive_util::IsFloatingPointType(primitive_type) ||
                        primitive_util::IsComplexType(primitive_type)) {
            using NativeT = primitive_util::NativeTypeOf<primitive_type>;
            return NearComparator<NativeT>::Compare(
                expected, actual, shape_index, error, use_detailed_message,
                miscompare_callback);
          }
          LOG(FATAL) << "Unsupported primitive type in near comparator: "
                     << PrimitiveType_Name(expected.shape().element_type())
                     << ". Must be floating-point type.";
        },
        expected.shape().element_type());
  }
  // Non-floating-point element types are compared exactly.
  return EqualHelper(expected, actual, shape_index, miscompare_callback);
}
}
// Returns OK iff `expected` and `actual` have equal shapes: same element
// type, same tuple structure (recursively), and same rank/dimension sizes
// for arrays. Layouts are not compared.
absl::Status EqualShapes(const Shape& expected, const Shape& actual) {
  if (expected.element_type() != actual.element_type()) {
    return InvalidArgument("element type mismatch, want: %s got %s",
                           ShapeUtil::HumanString(expected),
                           ShapeUtil::HumanString(actual));
  }
  if (expected.IsTuple()) {
    if (ShapeUtil::TupleElementCount(expected) !=
        ShapeUtil::TupleElementCount(actual)) {
      return InvalidArgument(
          "want tuple element count: %d got tuple element count: %d",
          ShapeUtil::TupleElementCount(expected),
          ShapeUtil::TupleElementCount(actual));
    }
    for (int i = 0; i < expected.tuple_shapes_size(); ++i) {
      absl::Status result =
          EqualShapes(expected.tuple_shapes(i), actual.tuple_shapes(i));
      if (!result.ok()) {
        // Annotate the failure with the tuple index it occurred at.
        return AppendStatus(result, StrCat("mismatch in tuple index", i));
      }
    }
  } else if (expected.IsArray()) {
    if (expected.rank() != actual.rank()) {
      return InvalidArgument("want rank of %s got rank of %s",
                             ShapeUtil::HumanString(expected),
                             ShapeUtil::HumanString(actual));
    }
    // NOTE: element types are already known to match from the check at the
    // top of this function, so no per-array re-check is needed here (the
    // previous re-check was unreachable dead code).
    if (expected.dimensions_size() != actual.dimensions_size()) {
      return InvalidArgument("want dimensions_size %d got dimensions_size %d",
                             expected.dimensions_size(),
                             actual.dimensions_size());
    }
    for (int i = 0; i < expected.dimensions_size(); ++i) {
      if (expected.dimensions(i) != actual.dimensions(i)) {
        return InvalidArgument(
            "mismatch in dimension #%d expected: %s actual: %s", i,
            ShapeUtil::HumanString(expected), ShapeUtil::HumanString(actual));
      }
    }
  }
  // Non-tuple, non-array shapes (e.g. tokens) are equal once element types
  // match.
  return absl::OkStatus();
}
// Verifies static shape equality AND, for every subshape, that each
// dimension agrees on dynamism and (when dynamic) on its runtime size.
absl::Status EqualDynamicShapesAndDimensions(const LiteralSlice& expected,
                                             const LiteralSlice& actual) {
  // First check the static skeletons match at all.
  TF_RETURN_IF_ERROR(EqualShapes(expected.shape(), actual.shape()));
  return ShapeUtil::ForEachSubshapeWithStatus(
      expected.shape(),
      [&expected, &actual](const Shape& expected_shape,
                           const ShapeIndex& index) -> absl::Status {
        auto actual_shape = ShapeUtil::GetSubshape(actual.shape(), index);
        for (int i = 0; i < expected_shape.dimensions().size(); ++i) {
          // Fully static dimensions were already validated by EqualShapes.
          if (!expected_shape.is_dynamic_dimension(i) &&
              !actual_shape.is_dynamic_dimension(i)) {
            continue;
          }
          if (expected_shape.is_dynamic_dimension(i) &&
              !actual_shape.is_dynamic_dimension(i)) {
            return InvalidArgument(
                "mismatch at dimension %d. The expected shape %s is dynamic "
                "while "
                "the actual shape %s is not.",
                i, ShapeUtil::HumanString(expected.shape()),
                ShapeUtil::HumanString(actual.shape()));
          }
          if (!expected_shape.is_dynamic_dimension(i) &&
              actual_shape.is_dynamic_dimension(i)) {
            return InvalidArgument(
                "mismatch at dimension %d. The expected shape %s is not "
                "dynamic "
                "while the actual shape %s is dynamic.",
                i, ShapeUtil::HumanString(expected.shape()),
                ShapeUtil::HumanString(actual.shape()));
          }
          // Both dynamic: their runtime sizes must agree too.
          int64_t expected_dynamic_size = expected.GetDynamicSize(i, index);
          int64_t actual_dynamic_size = actual.GetDynamicSize(i, index);
          if (expected_dynamic_size != actual_dynamic_size) {
            return InvalidArgument(
                "mismatch at dimension %d. The expected dynamic size does not "
                "match "
                "the actual dynamic size. %d vs. %d",
                i, expected_dynamic_size, actual_dynamic_size);
          }
        }
        return absl::OkStatus();
      });
}
namespace {
// On failure, augments `result`'s message with (possibly truncated) dumps of
// both literals; OK statuses pass through untouched.
absl::Status EmitLiteralsInErrorMessage(const absl::Status& result,
                                        const LiteralSlice& expected,
                                        const LiteralSlice& actual) {
  if (!result.ok()) {
    return InvalidArgument("%s\n\nExpected literal:\n%s\n\nActual literal:\n%s",
                           result.message(), ToStringTruncated(expected),
                           ToStringTruncated(actual));
  }
  return result;
}
}
// Public entry point: exact (bitwise for floats) comparison of two literals.
// Logs both literals at VLOG(1); on mismatch the error message includes
// truncated dumps of each.
absl::Status Equal(const LiteralSlice& expected, const LiteralSlice& actual) {
  if (VLOG_IS_ON(1)) {
    LOG(INFO) << "expected:";
    XLA_LOG_LINES(INFO, expected.ToString());
    LOG(INFO) << "actual:";
    XLA_LOG_LINES(INFO, actual.ToString());
  }
  const absl::Status compare_result =
      EqualHelper(expected, actual, {}, nullptr);
  return EmitLiteralsInErrorMessage(compare_result, expected, actual);
}
// Public entry point: approximate comparison under `error`. Logs both
// literals at VLOG(1); on mismatch the error message includes truncated
// dumps of each.
absl::Status Near(const LiteralSlice& expected, const LiteralSlice& actual,
                  const ErrorSpec& error, std::optional<bool> detailed_message,
                  const MiscompareCallback& miscompare_callback) {
  if (VLOG_IS_ON(1)) {
    LOG(INFO) << "Expected literal:";
    XLA_LOG_LINES(INFO, expected.ToString());
    LOG(INFO) << "Actual literal:";
    XLA_LOG_LINES(INFO, actual.ToString());
  }
  const absl::Status compare_result = NearHelper(
      expected, actual, {}, error, detailed_message, miscompare_callback);
  return EmitLiteralsInErrorMessage(compare_result, expected, actual);
}
// Stringifies a literal, replacing the dump with a placeholder once the
// (recursively counted) element total reaches 1000.
std::string ToStringTruncated(const LiteralSlice& literal) {
  constexpr int64_t kMaxPrintedElements = 1000;
  if (RecursiveElementCount(literal.shape()) < kMaxPrintedElements) {
    return literal.ToString();
  }
  return "[TRUNCATED, Literal with more than 1000 values]";
}
}
} | #include "xla/literal_comparison.h"
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/literal_util.h"
#include "xla/test_helpers.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/ml_dtypes.h"
namespace xla {
namespace {
// Typed test fixture; each TypeParam below is an FP8 floating-point type.
template <typename T>
class LiteralComparisonTest : public ::testing::Test {};
using TestedTypes =
::testing::Types<tsl::float8_e3m4, tsl::float8_e4m3, tsl::float8_e4m3fn,
tsl::float8_e4m3b11fnuz, tsl::float8_e5m2>;
TYPED_TEST_SUITE(LiteralComparisonTest, TestedTypes);
// Identical values must compare equal even with a zero-tolerance ErrorSpec.
TYPED_TEST(LiteralComparisonTest, CompareNear_Equal) {
  auto actual = LiteralUtil::CreateR0<TypeParam>(TypeParam(8.0));
  auto expected = LiteralUtil::CreateR0<TypeParam>(TypeParam(8.0));
  TF_EXPECT_OK(literal_comparison::Near(expected, actual, ErrorSpec(0.0, 0.0),
                                        /*detailed_message=*/false,
                                        /*miscompare_callback=*/nullptr));
}
// Values exactly one representable step apart fail a zero-tolerance compare
// but pass once within_n_values=1 is set for the type.
TYPED_TEST(LiteralComparisonTest, CompareNear_NotEqual_1ulp) {
  PrimitiveType type = primitive_util::NativeToPrimitiveType<TypeParam>();
  auto actual = LiteralUtil::CreateR0<TypeParam>(TypeParam(8.0));
  // expV is chosen per-type as the next representable value above 8.0.
  float expV = 9.0;
  if (type == F8E5M2)
    expV = 10.0;
  else if (type == F8E3M4)
    expV = 8.5;
  auto expected = LiteralUtil::CreateR0<TypeParam>(TypeParam{expV});
  auto error_spec = ErrorSpec(0.0, 0.0);
  EXPECT_IS_NOT_OK(literal_comparison::Near(expected, actual, error_spec,
                                            /*detailed_message=*/false,
                                            /*miscompare_callback=*/nullptr));
  // Allowing a distance of 1 representable value makes the compare succeed.
  error_spec.low_precision_fp_error_spec.type = type;
  error_spec.low_precision_fp_error_spec.within_n_values = 1;
  EXPECT_IS_OK(literal_comparison::Near(expected, actual, error_spec,
                                        /*detailed_message=*/false,
                                        /*miscompare_callback=*/nullptr));
}
// Values four representable steps apart fail with within_n_values=1 but
// pass with within_n_values=4.
TYPED_TEST(LiteralComparisonTest, CompareNear_NotEqual_4ulps) {
  PrimitiveType type = primitive_util::NativeToPrimitiveType<TypeParam>();
  auto actual = LiteralUtil::CreateR0<TypeParam>(TypeParam(8.0));
  // expV is chosen per-type to be 4 representable values away from 8.0.
  float expV = 12.0;
  if (type == F8E5M2)
    expV = 14.0;
  else if (type == F8E3M4)
    expV = 10.0;
  auto expected = LiteralUtil::CreateR0<TypeParam>(TypeParam{expV});
  auto error_spec = ErrorSpec(0.0, 0.0);
  error_spec.low_precision_fp_error_spec.type = type;
  error_spec.low_precision_fp_error_spec.within_n_values = 1;
  EXPECT_IS_NOT_OK(literal_comparison::Near(expected, actual, error_spec,
                                            /*detailed_message=*/false,
                                            /*miscompare_callback=*/nullptr));
  error_spec.low_precision_fp_error_spec.type = type;
  error_spec.low_precision_fp_error_spec.within_n_values = 4;
  EXPECT_IS_OK(literal_comparison::Near(expected, actual, error_spec,
                                        /*detailed_message=*/false,
                                        /*miscompare_callback=*/nullptr));
}
// Same as above, but the literals themselves are F32; the FP8 TypeParam is
// used only as the low-precision comparison type, so the distance is
// measured after rounding both floats into that FP8 type.
TYPED_TEST(LiteralComparisonTest, FloatUsingCompareNear_NotEqual_4ulps) {
  PrimitiveType type = primitive_util::NativeToPrimitiveType<TypeParam>();
  auto actual = LiteralUtil::CreateR0<float>(8.0);
  // expV rounds to a value 4 representable FP8 steps away from 8.0.
  float expV = 12.1;
  if (type == F8E5M2)
    expV = 13.0;
  else if (type == F8E3M4)
    expV = 10.125;
  auto expected = LiteralUtil::CreateR0<float>(expV);
  auto error_spec = ErrorSpec(0.0, 0.0);
  error_spec.low_precision_fp_error_spec.type = type;
  error_spec.low_precision_fp_error_spec.within_n_values = 1;
  EXPECT_IS_NOT_OK(literal_comparison::Near(expected, actual, error_spec,
                                            /*detailed_message=*/false,
                                            /*miscompare_callback=*/nullptr));
  error_spec.low_precision_fp_error_spec.type = type;
  error_spec.low_precision_fp_error_spec.within_n_values = 4;
  EXPECT_IS_OK(literal_comparison::Near(expected, actual, error_spec,
                                        /*detailed_message=*/false,
                                        /*miscompare_callback=*/nullptr));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/literal_comparison.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/literal_comparison_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
de45acd3-74d9-4eaa-9c40-ea4acfaf2a07 | cpp | tensorflow/tensorflow | tuple_points_to_analysis | third_party/xla/xla/service/tuple_points_to_analysis.cc | third_party/xla/xla/service/tuple_points_to_analysis_test.cc | #include "xla/service/tuple_points_to_analysis.h"
#include <memory>
#include <ostream>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/map_util.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/shape_util.h"
#include "xla/types.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
std::string BufferAlias::ToString() const {
return absl::StrCat("BufferAlias(", instruction_->name(), "[",
absl::StrJoin(index_, ","), "])");
}
// Streams the human-readable form produced by BufferAlias::ToString().
std::ostream& operator<<(std::ostream& out, const BufferAlias& buffer_alias) {
  return out << buffer_alias.ToString();
}
// A points-to set is ambiguous when any position may refer to more than one
// logical buffer.
bool PointsToSet::IsAmbiguous() const {
  bool ambiguous = false;
  ForEachElement(
      [&ambiguous](const ShapeIndex& , const BufferList& points_to) {
        if (points_to.size() > 1) {
          ambiguous = true;
        }
      });
  return ambiguous;
}
// Returns true iff no logical buffer appears at more than one position in
// the set (all pointed-to buffers are pairwise distinct across indices).
bool PointsToSet::IsDistinct() const {
  bool distinct = true;
  absl::flat_hash_set<const LogicalBuffer*> all_points_to;
  ForEachElement([&](const ShapeIndex& , const BufferList& points_to) {
    for (auto& buffer : points_to) {
      // insert().second is false when the buffer was already present. This
      // replaces the original contains() + insert() pair, avoiding a second
      // hash lookup per buffer.
      if (!all_points_to.insert(buffer).second) {
        distinct = false;
      }
    }
  });
  return distinct;
}
size_t PointsToSet::size() const {
return CreateFlattenedSet().size();
}
// Collects all pointed-to buffers from every index into one deduplicated
// set.
PointsToSet::BufferSet PointsToSet::CreateFlattenedSet() const {
  BufferSet flattened;
  ForEachElement(
      [&flattened](const ShapeIndex& , const BufferList& buffers) {
        for (const auto* buffer : buffers) {
          flattened.insert(buffer);
        }
      });
  return flattened;
}
// True iff `buffer` is pointed to at any index of the set.
bool PointsToSet::ContainsBuffer(const LogicalBuffer& buffer) const {
  bool found = false;
  ForEachElement([&found, &buffer](const ShapeIndex& ,
                                   const BufferList& pointed_to_buffers) {
    // Short-circuits once found; later lists are not searched.
    found = found || absl::c_linear_search(pointed_to_buffers, &buffer);
  });
  return found;
}
// True iff `buffer` is pointed to at exactly `index`.
bool PointsToSet::ContainsBufferAtIndex(const LogicalBuffer& buffer,
                                        const ShapeIndex& index) const {
  const auto& buffers_at_index = element(index);
  return absl::c_linear_search(buffers_at_index, &buffer);
}
// Appends `buffer` to the list at `index`, keeping the list duplicate-free.
void PointsToSet::AddPointedToBuffer(const LogicalBuffer& buffer,
                                     const ShapeIndex& index) {
  if (!ContainsBufferAtIndex(buffer, index)) {
    mutable_element(index)->push_back(&buffer);
  }
}
// Accessor for the set of tuple instructions recorded at `index`.
const PointsToSet::SourceSet& PointsToSet::tuple_sources(
    const ShapeIndex& index) const {
  const auto& node = tree_.element(index);
  return node.tuple_sources;
}
// Records `tuple` as a tuple source at `index` (set semantics, so repeats
// are harmless).
void PointsToSet::add_tuple_source(const ShapeIndex& index,
                                   HloInstruction* tuple) {
  auto* node = tree_.mutable_element(index);
  node->tuple_sources.insert(tuple);
}
namespace {
// Appends `instruction` and every kFusion nested inside it to
// `fusion_instructions`, with nested fusions listed before their parent
// (post-order).
void GatherFusionInstructions(
    HloInstruction* instruction,
    std::vector<HloInstruction*>* fusion_instructions) {
  CHECK_EQ(HloOpcode::kFusion, instruction->opcode());
  for (auto* fused : instruction->fused_instructions()) {
    if (fused->opcode() != HloOpcode::kFusion) {
      continue;
    }
    GatherFusionInstructions(fused, fusion_instructions);
  }
  fusion_instructions->push_back(instruction);
}
}
// Factory: builds the logical-buffer analysis for `module`, then runs the
// points-to analysis on top of it.
absl::StatusOr<std::unique_ptr<TuplePointsToAnalysis>>
TuplePointsToAnalysis::Run(const HloModule* module) {
  auto buffer_analysis = LogicalBufferAnalysis::Run(module);
  // Raw `new` retained: the constructor appears non-public, so make_unique
  // cannot be used here — verify against the header.
  std::unique_ptr<TuplePointsToAnalysis> analysis(
      new TuplePointsToAnalysis(module, std::move(buffer_analysis).value()));
  TF_RETURN_IF_ERROR(analysis->Analyze());
  return std::move(analysis);
}
// (Re)runs the analysis over the whole module: non-fusion computations
// first, then fusion computations, with inner fusions processed before the
// fusion instructions containing them.
absl::Status TuplePointsToAnalysis::Analyze() {
  per_instruction_.clear();
  per_instruction_.reserve(module_->instruction_count());
  logical_buffer_aliases_.clear();
  logical_buffer_aliases_.resize(
      logical_buffer_analysis_->num_logical_buffers());
  std::vector<HloInstruction*> fusion_instructions;
  for (auto* computation : module_->MakeNonfusionComputations()) {
    // Accept() drives the Handle*/DefaultAction visitors that build each
    // instruction's points-to set.
    TF_RETURN_IF_ERROR(computation->Accept(this));
    TF_RETURN_IF_ERROR(
        PopulateDefinedBuffersAndAliases(computation->instructions()));
    for (auto* instruction : computation->instructions()) {
      if (instruction->opcode() == HloOpcode::kFusion) {
        GatherFusionInstructions(instruction, &fusion_instructions);
      }
    }
  }
  // GatherFusionInstructions emits nested fusions before their parents, so
  // inner fused computations are analyzed first.
  for (auto* instruction : fusion_instructions) {
    TF_RETURN_IF_ERROR(instruction->fused_expression_root()->Accept(this));
    TF_RETURN_IF_ERROR(
        PopulateDefinedBuffersAndAliases(instruction->fused_instructions()));
  }
  XLA_VLOG_LINES(3, ToString());
  return absl::OkStatus();
}
// For each instruction: records the logical buffers it defines, and
// registers a BufferAlias for every (instruction, index) position that its
// points-to set references.
absl::Status TuplePointsToAnalysis::PopulateDefinedBuffersAndAliases(
    const decltype(std::declval<HloComputation>()
                       .instructions())& instructions) {
  for (auto* instruction : instructions) {
    PerInstruction* pi = PerInst(instruction);
    TF_RETURN_IF_ERROR(GatherBuffersDefinedByInstruction(
        instruction, &pi->instruction_defined_buffers));
    const PointsToSet& points_to_set = GetPointsToSet(instruction);
    points_to_set.ForEachElement(
        [this, &instruction](
            const ShapeIndex& index,
            const PointsToSet::BufferList& pointed_to_buffers) {
          for (const LogicalBuffer* buffer : pointed_to_buffers) {
            // A buffer may be aliased at many positions; record them all,
            // keyed by the buffer's id.
            logical_buffer_aliases_[buffer->id()].emplace_back(instruction,
                                                               index);
          }
        });
  }
  return absl::OkStatus();
}
// Fallback handler: the instruction freshly defines a buffer at every
// position of its output shape and aliases nothing from its operands.
absl::Status TuplePointsToAnalysis::DefaultAction(
    HloInstruction* hlo_instruction) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(hlo_instruction);
  points_to_set.ForEachMutableElement(
      [this, hlo_instruction](const ShapeIndex& index,
                              PointsToSet::BufferList* buffers) {
        buffers->push_back(
            &logical_buffer_analysis_->GetBuffer(hlo_instruction, index));
      });
  // A tuple-shaped result makes this instruction a source of its own
  // top-level tuple.
  if (hlo_instruction->shape().IsTuple()) {
    points_to_set.add_tuple_source({}, hlo_instruction);
  }
  return absl::OkStatus();
}
// GetTupleElement aliases one element of its operand: the points-to data
// at operand index {tuple_index, ...suffix} maps to output index {...suffix}.
absl::Status TuplePointsToAnalysis::HandleGetTupleElement(
    HloInstruction* get_tuple_element) {
  int64_t element_index = get_tuple_element->tuple_index();
  PointsToSet& points_to_set = CreateEmptyPointsToSet(get_tuple_element);
  const PointsToSet& operand_points_to_set =
      *PerInst(get_tuple_element->operand(0))->points_to_set;
  points_to_set.ForEachMutableElement(
      [&](const ShapeIndex& target_index, PointsToSet::BufferList* points_to) {
        // Prepend the selected element index to form the source index in
        // the operand's shape.
        ShapeIndex src_index;
        src_index.push_back(element_index);
        for (auto element : target_index) {
          src_index.push_back(element);
        }
        *points_to = operand_points_to_set.element(src_index);
        // Tuple sources of the aliased subtree carry over as well.
        for (HloInstruction* tuple :
             operand_points_to_set.tuple_sources(src_index)) {
          points_to_set.add_tuple_source(target_index, tuple);
        }
      });
  return absl::OkStatus();
}
// Copy shares all nested buffers with its operand, but defines a fresh
// top-level buffer of its own: start from a verbatim copy of the operand's
// points-to set, then replace the top-level ({}) entry.
absl::Status TuplePointsToAnalysis::HandleCopy(HloInstruction* copy) {
  PointsToSet& points_to_set = CreateCopiedPointsToSet(copy, copy->operand(0));
  points_to_set.mutable_element({})->clear();
  points_to_set.AddPointedToBuffer(
      logical_buffer_analysis_->GetBuffer(copy, {}),
      {});
  return absl::OkStatus();
}
// Bitcast reuses its operand's storage, so its points-to set is an exact
// copy of the operand's.
absl::Status TuplePointsToAnalysis::HandleBitcast(HloInstruction* bitcast) {
  const HloInstruction* source = bitcast->operand(0);
  CreateCopiedPointsToSet(bitcast, source);
  return absl::OkStatus();
}
// Domain is a pass-through annotation: it aliases its operand entirely.
absl::Status TuplePointsToAnalysis::HandleDomain(HloInstruction* domain) {
  const HloInstruction* source = domain->operand(0);
  CreateCopiedPointsToSet(domain, source);
  return absl::OkStatus();
}
// AddDependency forwards its data operand unchanged (operand 0); the token
// operand only orders execution, so the points-to set is copied verbatim.
absl::Status TuplePointsToAnalysis::HandleAddDependency(
    HloInstruction* add_dependency) {
  const HloInstruction* data_operand = add_dependency->operand(0);
  CreateCopiedPointsToSet(add_dependency, data_operand);
  return absl::OkStatus();
}
// RecvDone produces (data, token): it defines fresh buffers for the
// top-level tuple and the token at {1}, while element {0} aliases the
// corresponding subtree of its operand.
absl::Status TuplePointsToAnalysis::HandleRecvDone(HloInstruction* recv_done) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(recv_done);
  points_to_set.AddPointedToBuffer(
      logical_buffer_analysis_->GetBuffer(recv_done, {}),
      {});
  points_to_set.AddPointedToBuffer(
      logical_buffer_analysis_->GetBuffer(recv_done, {1}),
      {1});
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(recv_done->operand(0));
  points_to_set.ForEachMutableElement(
      [&points_to_set, &operand_points_to_set](
          const ShapeIndex& index, PointsToSet::BufferList* buffers) {
        // Only indices under element {0} alias the operand; skip the
        // top-level tuple and the token.
        if (index.empty() || index[0] != 0) {
          return;
        }
        *buffers = operand_points_to_set.element(index);
        for (auto& tuple_source : operand_points_to_set.tuple_sources(index)) {
          points_to_set.add_tuple_source(index, tuple_source);
        }
      });
  return absl::OkStatus();
}
// AsyncStart's output has shape ((operands...), outputs..., context...).
// Elements at index {0, i, ...} alias operand i; everything else (the
// outputs and context) gets freshly defined buffers.
absl::Status TuplePointsToAnalysis::HandleAsyncStart(
    HloInstruction* async_start) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(async_start);
  points_to_set.ForEachMutableElement(
      [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) {
        if (target_index.size() >= 2 && target_index.front() == 0) {
          // {0, i, rest...} -> operand(i) at index {rest...}.
          const PointsToSet& operand_points_to_set =
              GetPointsToSet(async_start->operand(target_index[1]));
          ShapeIndex source_index(target_index.begin() + 2, target_index.end());
          *buffers = operand_points_to_set.element(source_index);
          for (HloInstruction* tuple :
               operand_points_to_set.tuple_sources(source_index)) {
            points_to_set.add_tuple_source(target_index, tuple);
          }
        } else {
          buffers->push_back(
              &logical_buffer_analysis_->GetBuffer(async_start, target_index));
        }
      });
  return absl::OkStatus();
}
// AsyncUpdate passes the async value through unchanged: its points-to set
// aliases its operand at every index. Shapes must match exactly.
absl::Status TuplePointsToAnalysis::HandleAsyncUpdate(
    HloInstruction* async_update) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(async_update);
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(async_update->operand(0));
  CHECK_EQ(async_update->shape(), async_update->operand(0)->shape());
  points_to_set.ForEachMutableElement([&](const ShapeIndex& index,
                                          PointsToSet::BufferList* buffers) {
    *buffers = operand_points_to_set.element(index);
    for (HloInstruction* tuple : operand_points_to_set.tuple_sources(index)) {
      points_to_set.add_tuple_source(index, tuple);
    }
  });
  return absl::OkStatus();
}
// AsyncDone extracts the output portion of the async value: operand
// indices {1, rest...} map to output indices {rest...}; other operand
// elements (wrapped operands, context) are dropped.
absl::Status TuplePointsToAnalysis::HandleAsyncDone(
    HloInstruction* async_done) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(async_done);
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(async_done->operand(0));
  operand_points_to_set.ForEachElement(
      [&points_to_set, &operand_points_to_set](
          const ShapeIndex& src_index,
          const PointsToSet::BufferList& points_to) {
        if (!src_index.empty() && src_index.front() == 1) {
          // Strip the leading '1' to get the output-side index.
          const ShapeIndex target_index(src_index.begin() + 1, src_index.end());
          *points_to_set.mutable_element(target_index) = points_to;
          for (HloInstruction* tuple :
               operand_points_to_set.tuple_sources(src_index)) {
            points_to_set.add_tuple_source(target_index, tuple);
          }
        }
      });
  return absl::OkStatus();
}
// CopyStart produces (dest, src, context): element {1} aliases the operand
// (the source being copied), while the destination and context get fresh
// buffers.
absl::Status TuplePointsToAnalysis::HandleCopyStart(
    HloInstruction* copy_start) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_start);
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(copy_start->operand(0));
  points_to_set.ForEachMutableElement(
      [&](const ShapeIndex& target_index, PointsToSet::BufferList* buffers) {
        if (target_index == ShapeIndex({1})) {
          *buffers = operand_points_to_set.element({});
        } else {
          buffers->push_back(
              &logical_buffer_analysis_->GetBuffer(copy_start, target_index));
        }
      });
  // The operand's tuple sources carry over to the aliased element {1}.
  for (HloInstruction* tuple :
       operand_points_to_set.tuple_sources({})) {
    points_to_set.add_tuple_source({1}, tuple);
  }
  return absl::OkStatus();
}
// CopyDone yields the destination of the copy: its (scalar, top-level)
// output aliases element {0} of the CopyStart operand.
absl::Status TuplePointsToAnalysis::HandleCopyDone(HloInstruction* copy_done) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(copy_done);
  const PointsToSet& operand_points_to_set =
      GetPointsToSet(copy_done->operand(0));
  operand_points_to_set.ForEachElement(
      [&points_to_set, &operand_points_to_set](
          const ShapeIndex& src_index,
          const PointsToSet::BufferList& points_to) {
        // Only the destination element {0} is forwarded to the output root.
        if (src_index == ShapeIndex({0})) {
          const ShapeIndex target_index = {};
          *points_to_set.mutable_element(target_index) = points_to;
          for (HloInstruction* tuple :
               operand_points_to_set.tuple_sources(src_index)) {
            points_to_set.add_tuple_source(target_index, tuple);
          }
        }
      });
  return absl::OkStatus();
}
// Send produces (operand, context, token): fresh buffers are defined for
// the top-level tuple, the context at {1}, and the token at {2}, while the
// subtree under {0} aliases the operand being sent.
absl::Status TuplePointsToAnalysis::HandleSend(HloInstruction* send) {
  PointsToSet& points_to_set = CreateEmptyPointsToSet(send);
  auto top_buffer = points_to_set.mutable_element(ShapeIndex({}));
  top_buffer->push_back(
      &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({})));
  points_to_set.add_tuple_source({}, send);
  auto context_buffer = points_to_set.mutable_element(ShapeIndex({1}));
  context_buffer->push_back(
      &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({1})));
  auto token_buffer = points_to_set.mutable_element(ShapeIndex({2}));
  token_buffer->push_back(
      &logical_buffer_analysis_->GetBuffer(send, ShapeIndex({2})));
  // Alias every operand element into the {0, ...} subtree of the output.
  const PointsToSet& operand_points_to_set = GetPointsToSet(send->operand(0));
  operand_points_to_set.ForEachElement(
      [&points_to_set, &operand_points_to_set](
          const ShapeIndex& src_index,
          const PointsToSet::BufferList& points_to) {
        ShapeIndex target_index({0});
        for (auto element : src_index) {
          target_index.push_back(element);
        }
        *points_to_set.mutable_element(target_index) = points_to;
        for (HloInstruction* tuple :
             operand_points_to_set.tuple_sources(src_index)) {
          points_to_set.add_tuple_source(target_index, tuple);
        }
      });
  return absl::OkStatus();
}
// Tuple defines only its top-level buffer; each element subtree {i, ...}
// aliases the points-to set of operand i.
absl::Status TuplePointsToAnalysis::HandleTuple(HloInstruction* tuple) {
  absl::Span<HloInstruction* const> operands(tuple->operands());
  PointsToSet& points_to_set = CreateEmptyPointsToSet(tuple);
  points_to_set.AddPointedToBuffer(
      logical_buffer_analysis_->GetBuffer(tuple, {}),
      {});
  for (int64_t i = 0; i < operands.size(); ++i) {
    const PointsToSet& operand_points_to_set =
        *PerInst(operands[i])->points_to_set;
    // Copy the operand's points-to entries under output index {i, ...}.
    operand_points_to_set.ForEachElement(
        [&points_to_set, &operand_points_to_set, i](
            const ShapeIndex& src_index,
            const PointsToSet::BufferList& points_to) {
          ShapeIndex target_index;
          target_index.push_back(i);
          for (auto element : src_index) {
            target_index.push_back(element);
          }
          *points_to_set.mutable_element(target_index) = points_to;
          for (HloInstruction* tuple :
               operand_points_to_set.tuple_sources(src_index)) {
            points_to_set.add_tuple_source(target_index, tuple);
          }
        });
  }
  // This instruction is itself the source of its top-level tuple.
  points_to_set.add_tuple_source({}, tuple);
  return absl::OkStatus();
}
// CustomCall: output elements listed in output_to_operand_aliasing alias
// the corresponding operand subtree (only when
// alias_buffer_across_dataflow_ is enabled); all other elements define
// fresh buffers.
absl::Status TuplePointsToAnalysis::HandleCustomCall(
    HloInstruction* custom_call) {
  auto ccall = Cast<HloCustomCallInstruction>(custom_call);
  PointsToSet& points_to_set = CreateEmptyPointsToSet(custom_call);
  // Map: output index -> (operand number, operand index) for aliased outputs.
  absl::flat_hash_map<ShapeIndex, std::pair<int64_t, ShapeIndex>>
      aliased_outputs;
  for (const auto& pair : ccall->output_to_operand_aliasing()) {
    aliased_outputs.emplace(pair.first, pair.second);
  }
  points_to_set.ForEachMutableElement([&](const ShapeIndex& index,
                                          PointsToSet::BufferList* buffers) {
    auto it = aliased_outputs.find(index);
    if (it == aliased_outputs.end() || !alias_buffer_across_dataflow_) {
      points_to_set.AddPointedToBuffer(
          logical_buffer_analysis_->GetBuffer(custom_call, index), index);
    } else {
      const PointsToSet& input_set =
          *PerInst(ccall->operand(it->second.first))->points_to_set;
      for (const LogicalBuffer* input_buffer :
           input_set.element(it->second.second)) {
        points_to_set.AddPointedToBuffer(*input_buffer, index);
      }
      for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) {
        points_to_set.add_tuple_source(index, tuple);
      }
    }
  });
  points_to_set.add_tuple_source({}, custom_call);
  return absl::OkStatus();
}
// Fusion: like HandleCustomCall, output elements named in
// output_to_operand_aliasing alias the corresponding operand subtree
// (unconditionally here); all other elements define fresh buffers.
absl::Status TuplePointsToAnalysis::HandleFusion(HloInstruction* fusion) {
  auto cfusion = Cast<HloFusionInstruction>(fusion);
  PointsToSet& points_to_set = CreateEmptyPointsToSet(fusion);
  // Map: output index -> (operand number, operand index) for aliased outputs.
  absl::flat_hash_map<ShapeIndex, std::pair<int64_t, ShapeIndex>>
      aliased_outputs;
  for (const auto& pair : cfusion->output_to_operand_aliasing()) {
    aliased_outputs.emplace(pair.first, pair.second);
  }
  points_to_set.ForEachMutableElement([&](const ShapeIndex& index,
                                          PointsToSet::BufferList* buffers) {
    auto it = aliased_outputs.find(index);
    if (it == aliased_outputs.end()) {
      points_to_set.AddPointedToBuffer(
          logical_buffer_analysis_->GetBuffer(fusion, index), index);
    } else {
      const PointsToSet& input_set =
          *PerInst(cfusion->operand(it->second.first))->points_to_set;
      for (const LogicalBuffer* input_buffer :
           input_set.element(it->second.second)) {
        points_to_set.AddPointedToBuffer(*input_buffer, index);
      }
      for (HloInstruction* tuple : input_set.tuple_sources(it->second.second)) {
        points_to_set.add_tuple_source(index, tuple);
      }
    }
  });
  points_to_set.add_tuple_source({}, fusion);
  return absl::OkStatus();
}
// An optimization barrier is a semantic no-op on data: the output aliases
// the operand at every index, so the points-to set is copied verbatim.
absl::Status TuplePointsToAnalysis::HandleOptimizationBarrier(
    HloInstruction* barrier) {
  const HloInstruction* source = barrier->operand(0);
  CreateCopiedPointsToSet(barrier, source);
  return absl::OkStatus();
}
// Returns the (already-computed) points-to set of `hlo_instruction`.
const PointsToSet& TuplePointsToAnalysis::GetPointsToSet(
    const HloInstruction* hlo_instruction) const {
  auto* per_instruction = PerInst(hlo_instruction);
  return *per_instruction->points_to_set;
}
// Allocates a fresh, empty PointsToSet for `instruction` and installs it
// in the per-instruction table. The instruction must not already have one.
PointsToSet& TuplePointsToAnalysis::CreateEmptyPointsToSet(
    const HloInstruction* instruction) {
  PerInstruction* pi = PerInst(instruction);
  CHECK(pi->points_to_set == nullptr)
      << "instruction should not have been present in the map.";
  pi->points_to_set = std::make_unique<PointsToSet>(&instruction->shape());
  return *pi->points_to_set;
}
// True iff `instruction` itself is the sole definer of the buffer at
// `index` of its output (exactly one pointed-to buffer, owned by it).
bool TuplePointsToAnalysis::InstructionDefinesBufferAtIndex(
    const HloInstruction* instruction, const ShapeIndex& index) const {
  const auto& pointed_to = GetPointsToSet(instruction).element(index);
  if (pointed_to.size() != 1) {
    return false;
  }
  return pointed_to[0]->instruction() == instruction;
}
// Sanity-checks `buffer` against the analysis: its instruction must define
// it at its index, its id must be in range, and the buffer registered
// under that id must match it.
absl::Status TuplePointsToAnalysis::VerifyBuffer(
    const LogicalBuffer& buffer) const {
  if (!InstructionDefinesBufferAtIndex(buffer.instruction(), buffer.index())) {
    return FailedPrecondition(
        "LogicalBuffer %s is ill-defined: instruction %s does not define a "
        "buffer at that index",
        buffer.ToString(), buffer.instruction()->name());
  }
  if (buffer.id() < 0 ||
      buffer.id() >= logical_buffer_analysis_->num_logical_buffers()) {
    return FailedPrecondition("LogicalBuffer %s is ill-defined: invalid id %d",
                              buffer.ToString(), buffer.id());
  }
  if (GetBuffer(buffer.id()).instruction() != buffer.instruction() ||
      GetBuffer(buffer.id()).index() != buffer.index()) {
    return FailedPrecondition(
        "LogicalBuffer %s is ill-defined: buffer with same id differs: %s",
        buffer.ToString(), GetBuffer(buffer.id()).ToString());
  }
  return absl::OkStatus();
}
// Returns the logical buffer with the given id; the id must be in range.
const LogicalBuffer& TuplePointsToAnalysis::GetBuffer(
    LogicalBuffer::Id id) const {
  const auto num_buffers = logical_buffer_analysis_->num_logical_buffers();
  CHECK_GE(id, 0);
  CHECK_LT(id, num_buffers);
  return logical_buffer_analysis_->GetBuffer(id);
}
// Returns the unique buffer defined by `instruction` at `index`, or a
// FailedPrecondition error if the instruction does not define one there.
absl::StatusOr<const LogicalBuffer*> TuplePointsToAnalysis::GetBufferDefinedAt(
    const HloInstruction* instruction, const ShapeIndex& index) const {
  const auto& candidates = GetPointsToSet(instruction).element(index);
  const bool defined_here =
      candidates.size() == 1 && candidates[0]->instruction() == instruction;
  if (!defined_here) {
    return FailedPrecondition(
        "instruction %s does not define buffer at index {%s}",
        instruction->name(), absl::StrJoin(index, ","));
  }
  return candidates[0];
}
// Returns all (instruction, index) locations that may alias `buffer`.
const TuplePointsToAnalysis::BufferAliasVector&
TuplePointsToAnalysis::GetBufferAliases(const LogicalBuffer& buffer) const {
  const LogicalBuffer::Id buffer_id = buffer.id();
  return logical_buffer_aliases_[buffer_id];
}
// Returns the buffers freshly defined by `instruction` (computed by
// GatherBuffersDefinedByInstruction during analysis).
const TuplePointsToAnalysis::BufferDefinitionVector&
TuplePointsToAnalysis::GetBuffersDefinedByInstruction(
    const HloInstruction* instruction) const {
  auto* per_instruction = PerInst(instruction);
  return per_instruction->instruction_defined_buffers;
}
// Collects into `buffers` every buffer that `instruction` itself defines:
// an index where the points-to list is exactly one buffer owned by the
// instruction. Mixed lists (instruction's own buffer plus aliases) are
// considered ill-formed and checked via DCHECK.
absl::Status TuplePointsToAnalysis::GatherBuffersDefinedByInstruction(
    const HloInstruction* instruction,
    TuplePointsToAnalysis::BufferDefinitionVector* buffers) {
  GetPointsToSet(instruction)
      .ForEachElement([buffers, instruction](
                          const ShapeIndex& index,
                          const PointsToSet::BufferList& source_buffers) {
        CHECK(!source_buffers.empty());
        if (source_buffers.size() == 1 &&
            source_buffers[0]->instruction() == instruction) {
          // Sole buffer owned by this instruction: it is a definition.
          DCHECK(source_buffers[0]->index() == index);
          buffers->push_back(source_buffers[0]);
        } else {
          // Otherwise every pointed-to buffer must belong elsewhere.
          for (const LogicalBuffer* source_buffer : source_buffers) {
            DCHECK(source_buffer->instruction() != instruction);
          }
        }
      });
  return absl::OkStatus();
}
// Gives `instruction` a points-to set that is an element-wise copy of
// `src`'s (same buffers and tuple sources at every index), and returns it.
PointsToSet& TuplePointsToAnalysis::CreateCopiedPointsToSet(
    const HloInstruction* instruction, const HloInstruction* src) {
  PointsToSet& dst_points_to_set = CreateEmptyPointsToSet(instruction);
  const PointsToSet& src_points_to_set = GetPointsToSet(src);
  dst_points_to_set.ForEachMutableElement(
      [&dst_points_to_set, &src_points_to_set](
          const ShapeIndex& index, PointsToSet::BufferList* buffers) {
        *buffers = src_points_to_set.element(index);
        for (auto& tuple_source : src_points_to_set.tuple_sources(index)) {
          dst_points_to_set.add_tuple_source(index, tuple_source);
        }
      });
  return *PerInst(instruction)->points_to_set;
}
// Renders the whole analysis as human-readable text: per-computation
// points-to sets (including fused instructions), then each logical buffer
// with its aliases. Intended for debugging/VLOG output.
std::string TuplePointsToAnalysis::ToString() const {
  std::string output =
      absl::StrFormat("TuplePointsToSet for module %s:\n", module_->name());
  for (const auto* computation : module_->MakeNonfusionComputations()) {
    const char* entry =
        computation == module_->entry_computation() ? "entry " : "";
    absl::StrAppend(&output, entry, "computation ", computation->name(), ":\n");
    for (const HloInstruction* instruction :
         computation->MakeInstructionPostOrder()) {
      InstructionToString(instruction, &output);
      // Fusion interiors were analyzed too; print them inline.
      if (instruction->opcode() == HloOpcode::kFusion) {
        for (auto* fused : instruction->fused_instructions()) {
          InstructionToString(fused, &output);
        }
      }
    }
  }
  absl::StrAppend(&output, "LogicalBuffers:\n");
  for (const auto& b : logical_buffer_analysis_->logical_buffers()) {
    absl::StrAppend(&output, "  buffer ", b->ToString(), ":\n");
    for (const BufferAlias& alias : logical_buffer_aliases_[b->id()]) {
      absl::StrAppend(&output, "    alias ", alias.ToString(), "\n");
    }
  }
  return output;
}
// Appends a one-instruction dump (its points-to list per shape index) to
// `output`; fused instructions get an extra level of indentation.
void TuplePointsToAnalysis::InstructionToString(
    const HloInstruction* instruction, std::string* output) const {
  const std::string prefix = instruction->IsFused() ? "    " : "";
  absl::StrAppend(output, prefix, "  instruction ",
                  instruction->ToShortString(), ":\n");
  const PointsToSet& points_to_set = GetPointsToSet(instruction);
  points_to_set.ForEachElement(
      [&prefix, &output](const ShapeIndex& index,
                         const PointsToSet::BufferList& points_to) {
        absl::StrAppend(
            output, prefix, "    {", absl::StrJoin(index, ","), "}: ",
            absl::StrJoin(points_to, ", ",
                          [](std::string* out, const LogicalBuffer* source) {
                            out->append(source->ToString());
                          }),
            "\n");
      });
}
// Returns true if `user` provably does not read the buffer at `index` of
// `operand`. Two cases are recognized:
//  - a GetTupleElement with a non-empty index only touches the top-level
//    tuple pointer, never nested buffers;
//  - a loop fusion where every user of the corresponding fused parameter's
//    buffer (via its aliases) itself does not use that buffer.
// All other cases conservatively return false.
bool TuplePointsToAnalysis::DoesNotUseOperandBuffer(
    const HloInstruction* operand, const ShapeIndex& index,
    const HloInstruction* user) const {
  CHECK(user->IsUserOf(operand))
      << "user: " << user->ToString() << " operand: " << operand->ToString();
  if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) {
    return true;
  } else if (user->IsLoopFusion()) {
    // Find the fused parameter that corresponds to `operand`.
    auto it = absl::c_find_if(
        user->fused_parameters(), [&](HloInstruction* fused_param) {
          return user->operand(fused_param->parameter_number()) == operand;
        });
    CHECK(it != user->fused_parameters().end());
    const LogicalBuffer* buffer = GetBufferDefinedAt(*it, index).value();
    // The buffer is unused only if every alias's users also don't use it.
    for (const BufferAlias& alias : GetBufferAliases(*buffer)) {
      for (HloInstruction* alias_user : alias.instruction()->users()) {
        if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(),
                                    alias_user)) {
          continue;
        }
        return false;
      }
    }
    return true;
  }
  return false;
}
// Returns every (user, operand_number) pair that actually reads the
// buffer(s) at `index` of `instruction`, found by walking each pointed-to
// buffer's aliases and filtering out non-using users.
std::vector<std::pair<HloInstruction*, int64_t>>
TuplePointsToAnalysis::GetAllUsesOfInstructionAtIndex(
    HloInstruction* instruction, const ShapeIndex& index) const {
  std::vector<std::pair<HloInstruction*, int64_t>> uses;
  const PointsToSet::BufferList& points_to =
      GetPointsToSet(instruction).element(index);
  for (const LogicalBuffer* buffer : points_to) {
    for (const BufferAlias& alias : GetBufferAliases(*buffer)) {
      for (HloInstruction* alias_user : alias.instruction()->users()) {
        // Skip users that provably never touch this buffer.
        if (DoesNotUseOperandBuffer(alias.instruction(), alias.index(),
                                    alias_user)) {
          continue;
        }
        // One use per operand slot the aliased instruction occupies.
        for (int64_t op_idx : alias_user->OperandIndices(alias.instruction())) {
          uses.emplace_back(alias_user, op_idx);
        }
      }
    }
  }
  return uses;
}
// True iff, inside `fusion`, the buffer at `operand_index` of `operand`
// has exactly one use, and that use is by the fused expression root at
// operand slot `use_operand_index`. Used to decide in-place fusion safety.
bool TuplePointsToAnalysis::HasUniqueFusedUseOfOperandAt(
    HloInstruction* operand, const ShapeIndex& operand_index,
    HloInstruction* fusion, const int64_t use_operand_index) const {
  CHECK_EQ(HloOpcode::kFusion, fusion->opcode());
  // The operand must feed the fusion through exactly one operand slot.
  if (fusion->OperandIndices(operand).size() > 1) {
    return false;
  }
  // Locate the fused parameter corresponding to `operand`.
  const auto& fused_params = fusion->fused_parameters();
  auto fused_param_it =
      absl::c_find_if(fused_params, [&](HloInstruction* fused_param) {
        return fusion->operand(fused_param->parameter_number()) == operand;
      });
  if (fused_param_it == fused_params.end()) {
    return false;
  }
  auto* fused_param = *fused_param_it;
  auto fused_param_uses =
      GetAllUsesOfInstructionAtIndex(fused_param, operand_index);
  // Requires exactly one use: the fusion root at the expected operand slot.
  return fused_param_uses.size() == 1 &&
         fused_param_uses[0].first == fusion->fused_expression_root() &&
         fused_param_uses[0].second == use_operand_index;
}
} | #include "xla/service/tuple_points_to_analysis.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/logical_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
// Test fixture: builds an HLO module, runs TuplePointsToAnalysis on it,
// and provides helpers to assert on points-to sets and buffer aliases.
class TuplePointsToAnalysisTest : public HloTestBase {
 protected:
  // Builds a module around `computation` and runs the analysis on it.
  void BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) {
    BuildModule(std::move(computation));
    RunAnalysis();
  }
  // Wraps `computation` as the entry computation of a fresh module.
  void BuildModule(std::unique_ptr<HloComputation> computation) {
    module_ = CreateNewVerifiedModule();
    module_->AddEntryComputation(std::move(computation));
  }
  // Runs TuplePointsToAnalysis on module_ (which must already be built).
  void RunAnalysis() {
    CHECK_NOTNULL(module_.get());
    points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value();
  }
  // Returns the single buffer that `instruction` defines at `index`,
  // CHECK-failing if the points-to set there is not exactly that buffer.
  const LogicalBuffer* GetBuffer(const HloInstruction* instruction,
                                 const ShapeIndex& index) {
    const auto& pointed_to =
        points_to_analysis_->GetPointsToSet(instruction).element(index);
    CHECK_EQ(1, pointed_to.size());
    CHECK_EQ(instruction, pointed_to[0]->instruction());
    CHECK(index == pointed_to[0]->index());
    return pointed_to[0];
  }
  // Asserts `points_to_set` contains exactly `buffers` (order-insensitive).
  void ExpectHasBuffers(const PointsToSet::BufferList& points_to_set,
                        absl::Span<const LogicalBuffer* const> buffers) {
    std::vector<const LogicalBuffer*> vec(buffers.begin(), buffers.end());
    EXPECT_THAT(points_to_set, UnorderedElementsAreArray(vec));
  }
  // Asserts `points_to_set` holds exactly the top-level ({}) buffers of
  // `instructions`.
  void ExpectHasTopLevelBuffers(
      const PointsToSet::BufferList& points_to_set,
      absl::Span<HloInstruction* const> instructions) {
    PointsToSet::BufferList buffers;
    for (auto instruction : instructions) {
      buffers.push_back(GetBuffer(instruction, {}));
    }
    ExpectHasBuffers(points_to_set, buffers);
  }
  // Set-typed overload of the above.
  void ExpectHasTopLevelBuffers(
      const PointsToSet::BufferSet& points_to_set,
      absl::Span<HloInstruction* const> instructions) {
    ExpectHasTopLevelBuffers(
        PointsToSet::BufferList(points_to_set.begin(), points_to_set.end()),
        instructions);
  }
  // Asserts the buffer defined at (instruction, index) has exactly the
  // aliases in `expected`, given as (instruction, index) pairs.
  void ExpectHasBufferAliases(
      const HloInstruction* instruction, const ShapeIndex& index,
      absl::Span<const std::pair<HloInstruction*, ShapeIndex>> expected) {
    const LogicalBuffer* buffer =
        points_to_analysis_->GetBufferDefinedAt(instruction, index).value();
    std::vector<BufferAlias> expected_aliases;
    expected_aliases.reserve(expected.size());
    for (auto& pair : expected) {
      expected_aliases.push_back(BufferAlias(pair.first, pair.second));
    }
    EXPECT_THAT(points_to_analysis_->GetBufferAliases(*buffer),
                UnorderedElementsAreArray(expected_aliases));
  }
  // Module under test and the analysis computed from it.
  std::unique_ptr<HloModule> module_;
  std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_;
};
// A flat tuple of two constants: each constant points to itself, the tuple
// defines its own top-level buffer, and each element aliases one constant.
TEST_F(TuplePointsToAnalysisTest, SimpleTuple) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  BuildModuleAndRunAnalysis(builder.Build());
  EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(constant1).size());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(constant1).element({}), {constant1});
  // Constants are not produced by any tuple instruction.
  EXPECT_TRUE(
      points_to_analysis_->GetPointsToSet(constant1).tuple_sources({}).empty());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(tuple).IsDistinct());
  EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(constant2).size());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(constant2).element({}), {constant2});
  EXPECT_TRUE(
      points_to_analysis_->GetPointsToSet(constant2).tuple_sources({}).empty());
  // Tuple: top-level buffer plus one per element.
  EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(tuple).size());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());
  EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),
              UnorderedElementsAre(tuple));
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
      {constant1, constant2, tuple});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({0}), {constant1});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant2});
  // Membership queries on the tuple's points-to set.
  const PointsToSet& tuple_points_to_set =
      points_to_analysis_->GetPointsToSet(tuple);
  EXPECT_TRUE(tuple_points_to_set.ContainsBufferAtIndex(
      *GetBuffer(constant1, {}), {0}));
  EXPECT_TRUE(tuple_points_to_set.ContainsBufferAtIndex(
      *GetBuffer(constant2, {}), {1}));
  EXPECT_FALSE(tuple_points_to_set.ContainsBufferAtIndex(
      *GetBuffer(constant2, {}), {0}));
  EXPECT_TRUE(tuple_points_to_set.ContainsBuffer(*GetBuffer(constant1, {})));
  EXPECT_TRUE(tuple_points_to_set.ContainsBuffer(*GetBuffer(constant2, {})));
}
// A tuple containing another tuple: inner-tuple buffers appear in the
// outer tuple's set at compound indices, and tuple_sources track which
// tuple instruction produced each subtree.
TEST_F(TuplePointsToAnalysisTest, NestedTuple) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto inner_tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({inner_tuple, constant3}));
  BuildModuleAndRunAnalysis(builder.Build());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(constant1).element({}), {constant1});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(constant2).element({}), {constant2});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(constant3).element({}), {constant3});
  // Inner tuple: its own buffer plus the two constants.
  EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(inner_tuple).size());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(inner_tuple).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(inner_tuple).IsDistinct());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(inner_tuple).CreateFlattenedSet(),
      {constant1, constant2, inner_tuple});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(inner_tuple).element({}),
      {inner_tuple});
  EXPECT_THAT(
      points_to_analysis_->GetPointsToSet(inner_tuple).tuple_sources({}),
      UnorderedElementsAre(inner_tuple));
  // Outer tuple: all five buffers are reachable.
  EXPECT_EQ(5, points_to_analysis_->GetPointsToSet(tuple).size());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
      {constant1, constant2, constant3, inner_tuple, tuple});
  EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),
              UnorderedElementsAre(tuple));
  // The subtree at {0} was produced by the inner tuple; {1} by no tuple.
  EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({0}),
              UnorderedElementsAre(inner_tuple));
  EXPECT_TRUE(
      points_to_analysis_->GetPointsToSet(tuple).tuple_sources({1}).empty());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({0}), {inner_tuple});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({0, 0}), {constant1});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({0, 1}), {constant2});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant3});
}
// GetTupleElement of a nested tuple's first element: the result's
// points-to set matches the inner tuple's, including tuple sources.
TEST_F(TuplePointsToAnalysisTest, GetTupleElement) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto inner_tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto constant3 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(3.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({inner_tuple, constant3}));
  auto get_tuple_element = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(inner_tuple->shape(), tuple, 0));
  BuildModuleAndRunAnalysis(builder.Build());
  // GTE aliases the inner tuple: same buffers, same tuple source.
  auto& points_to_set = points_to_analysis_->GetPointsToSet(get_tuple_element);
  EXPECT_EQ(3, points_to_set.size());
  EXPECT_FALSE(points_to_set.IsAmbiguous());
  EXPECT_TRUE(points_to_set.IsDistinct());
  ExpectHasTopLevelBuffers(points_to_set.CreateFlattenedSet(),
                           {constant1, constant2, inner_tuple});
  ExpectHasTopLevelBuffers(points_to_set.element({}), {inner_tuple});
  EXPECT_THAT(points_to_set.tuple_sources({}),
              UnorderedElementsAre(inner_tuple));
}
// AddDependency forwards its data operand: the output aliases the
// constant, not a fresh buffer.
TEST_F(TuplePointsToAnalysisTest, AddDependency) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto add_dependency = builder.AddInstruction(
      HloInstruction::CreateAddDependency(constant, token));
  BuildModuleAndRunAnalysis(builder.Build());
  auto& points_to_set = points_to_analysis_->GetPointsToSet(add_dependency);
  EXPECT_EQ(1, points_to_set.size());
  EXPECT_FALSE(points_to_set.IsAmbiguous());
  EXPECT_TRUE(points_to_set.IsDistinct());
  ExpectHasTopLevelBuffers(points_to_set.CreateFlattenedSet(), {constant});
}
// A tuple repeating the same constant three times: unambiguous, but not
// distinct (all elements point at the same buffer).
TEST_F(TuplePointsToAnalysisTest, DuplicatedElement) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant, constant, constant}));
  BuildModuleAndRunAnalysis(builder.Build());
  // Only two distinct buffers: the tuple's own and the shared constant's.
  EXPECT_EQ(2, points_to_analysis_->GetPointsToSet(tuple).size());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsDistinct());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
      {constant, tuple});
}
// Copy of a tuple: the copy defines a fresh top-level buffer but shares
// the element buffers (the constants) with the original tuple.
TEST_F(TuplePointsToAnalysisTest, TupleCopy) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto copy = builder.AddInstruction(
      HloInstruction::CreateUnary(tuple->shape(), HloOpcode::kCopy, tuple));
  BuildModuleAndRunAnalysis(builder.Build());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy).IsDistinct());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
      {constant1, constant2, tuple});
  // Top level is the copy's own buffer; the flattened set swaps the
  // tuple's top-level buffer for the copy's.
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(copy).element({}), {copy});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(copy).CreateFlattenedSet(),
      {constant1, constant2, copy});
}
// CopyStart/CopyDone pair: copy-done aliases copy-start's destination
// element {0}, and copy-start's element {1} aliases the source constant.
TEST_F(TuplePointsToAnalysisTest, CopyStartAndCopyDone) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto copy_start = builder.AddInstruction(HloInstruction::CreateCopyStart(
      ShapeUtil::MakeTupleShape({constant->shape(), constant->shape(),
                                 ShapeUtil::MakeShape(U32, {})}),
      constant));
  auto copy_done = builder.AddInstruction(HloInstruction::CreateUnary(
      constant->shape(), HloOpcode::kCopyDone, copy_start));
  BuildModuleAndRunAnalysis(builder.Build());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy_start).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy_start).IsDistinct());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(copy_done).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(copy_done).IsDistinct());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(copy_start).element({}),
      {copy_start});
  ExpectHasBufferAliases(copy_start, {0}, {{copy_start, {0}}, {copy_done, {}}});
  ExpectHasBufferAliases(constant, {}, {{constant, {}}, {copy_start, {1}}});
}
// Checks aliasing across an async custom-call chain (start/update/done): the
// operand aliases into the start/update operand slots, the result buffer is
// threaded from start through update to done, and the context buffer is
// shared between start and update only.
TEST_F(TuplePointsToAnalysisTest, AsyncOps) {
  std::string hlo_str = R"(
  HloModule module

  ENTRY entry {
    p0 = f32[2,3] parameter(0)
    async-start = ((f32[2,3]), f32[2,3], u32[]) custom-call-start(p0), custom_call_target="foo"
    async-update = ((f32[2,3]), f32[2,3], u32[]) custom-call-update(async-start)
    ROOT async-done = f32[2,3] custom-call-done(async-update)
  }
)";
  TF_ASSERT_OK_AND_ASSIGN(
      module_, ParseAndReturnVerifiedModule(hlo_str, GetModuleConfigForTest()));
  HloInstruction* param =
      module_->entry_computation()->parameter_instruction(0);
  HloInstruction* async_start = FindInstruction(module_.get(), "async-start");
  HloInstruction* async_update = FindInstruction(module_.get(), "async-update");
  HloInstruction* async_done = FindInstruction(module_.get(), "async-done");
  RunAnalysis();
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_start).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_start).IsDistinct());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_update).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_update).IsDistinct());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(async_done).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(async_done).IsDistinct());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(async_start).element({}),
      {async_start});
  // The parameter is aliased into the operand slot {0, 0} of start/update.
  ExpectHasBufferAliases(
      param, {}, {{param, {}}, {async_start, {0, 0}}, {async_update, {0, 0}}});
  // The result buffer {1} flows through the whole chain to async-done.
  ExpectHasBufferAliases(
      async_start, {1},
      {{async_start, {1}}, {async_update, {1}}, {async_done, {}}});
  // The context buffer {2} is only shared by start and update.
  ExpectHasBufferAliases(async_start, {2},
                         {{async_start, {2}}, {async_update, {2}}});
}
// Checks that a Send's result tuple element {0} aliases the sent operand,
// while SendDone defines its own (token) buffer.
TEST_F(TuplePointsToAnalysisTest, SendAndSendDone) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto send = builder.AddInstruction(
      HloInstruction::CreateSend(constant, token, 0));
  auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
  BuildModuleAndRunAnalysis(builder.Build());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(send).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(send).IsDistinct());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(send_done).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(send_done).IsDistinct());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(send).element({}), {send});
  // The payload element of the send tuple is the operand's buffer.
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(send).element({0}), {constant});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(send_done).CreateFlattenedSet(),
      {send_done});
  ExpectHasBufferAliases(constant, {}, {{constant, {}}, {send, {0}}});
}
// Checks that a Recv's payload buffer at index {0} is shared with the
// corresponding element of RecvDone's result.
TEST_F(TuplePointsToAnalysisTest, RecvAndRecvDone) {
  auto builder = HloComputation::Builder(TestName());
  auto token = builder.AddInstruction(HloInstruction::CreateToken());
  auto recv = builder.AddInstruction(HloInstruction::CreateRecv(
      ShapeUtil::MakeShape(F32, {1, 2, 3}), token, 0));
  auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
  BuildModuleAndRunAnalysis(builder.Build());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(recv).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(recv).IsDistinct());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(recv_done).IsAmbiguous());
  EXPECT_TRUE(points_to_analysis_->GetPointsToSet(recv_done).IsDistinct());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(recv).element({}), {recv});
  ExpectHasBufferAliases(recv, {0}, {{recv, {0}}, {recv_done, {0}}});
}
// Verifies that a kBitcast is transparent to the analysis: it points at its
// operand's buffer (not a new one), so a tuple containing the bitcast points
// at the bitcast's operand in that element slot.
TEST_F(TuplePointsToAnalysisTest, TupleWithBitcast) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto bitcast = builder.AddInstruction(
      HloInstruction::CreateBitcast(constant2->shape(), constant2));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({constant1, bitcast}));
  BuildModuleAndRunAnalysis(builder.Build());
  // The bitcast introduces no new buffer; it forwards constant2's buffer.
  EXPECT_EQ(1, points_to_analysis_->GetPointsToSet(bitcast).size());
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(bitcast).element({}), {constant2});
  EXPECT_TRUE(
      points_to_analysis_->GetPointsToSet(bitcast).tuple_sources({}).empty());
  EXPECT_EQ(3, points_to_analysis_->GetPointsToSet(tuple).size());
  EXPECT_FALSE(points_to_analysis_->GetPointsToSet(tuple).IsAmbiguous());
  EXPECT_THAT(points_to_analysis_->GetPointsToSet(tuple).tuple_sources({}),
              UnorderedElementsAre(tuple));
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).CreateFlattenedSet(),
      {constant1, constant2, tuple});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({}), {tuple});
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({0}), {constant1});
  // Element {1} resolves through the bitcast to constant2's buffer.
  ExpectHasTopLevelBuffers(
      points_to_analysis_->GetPointsToSet(tuple).element({1}), {constant2});
}
// Verifies that copying a tuple constant creates a new top-level buffer while
// the element slots still point at the constant's element buffers.
TEST_F(TuplePointsToAnalysisTest, PointsToTupleConstantElements) {
  auto builder = HloComputation::Builder(TestName());
  Literal elements[] = {LiteralUtil::CreateR2<float>({{1.0}, {2.0}}),
                        LiteralUtil::CreateR1<float>({2.0, 42})};
  auto tuple_constant = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::MakeTuple({&elements[0], &elements[1]})));
  auto copy = builder.AddInstruction(HloInstruction::CreateUnary(
      tuple_constant->shape(), HloOpcode::kCopy, tuple_constant));
  BuildModuleAndRunAnalysis(builder.Build());
  auto& points_to_set = points_to_analysis_->GetPointsToSet(copy);
  // Top-level buffer is owned by the copy; elements come from the constant.
  ExpectHasBuffers(points_to_set.element({}), {GetBuffer(copy, {})});
  ExpectHasBuffers(points_to_set.element({0}),
                   {GetBuffer(tuple_constant, {0})});
  ExpectHasBuffers(points_to_set.element({1}),
                   {GetBuffer(tuple_constant, {1})});
}
// Exercises GetBufferAliases() with nested tuples: a constant that appears in
// both the inner tuple and the outer tuple must be reported at every shape
// index where its buffer is reachable.
TEST_F(TuplePointsToAnalysisTest, BufferAliases) {
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto inner_tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({inner_tuple, constant2}));
  BuildModuleAndRunAnalysis(builder.Build());
  ExpectHasBufferAliases(
      constant1, {},
      {{constant1, {}}, {inner_tuple, {0}}, {tuple, {0, 0}}});
  // constant2 is reachable both through the inner tuple and directly.
  ExpectHasBufferAliases(
      constant2, {},
      {{constant2, {}}, {inner_tuple, {1}}, {tuple, {0, 1}}, {tuple, {1}}});
  ExpectHasBufferAliases(inner_tuple, {},
                         {{inner_tuple, {}}, {tuple, {0}}});
  ExpectHasBufferAliases(tuple, {}, {{tuple, {}}});
}
// Exercises custom-call output-to-operand aliasing: output index {1} is
// declared to alias operand 0, so the operand's buffer should show up under
// the custom call's result.  NOTE(review): currently DISABLED — presumably
// the analysis does not yet honor this aliasing; confirm before enabling.
TEST_F(TuplePointsToAnalysisTest, DISABLED_CustomCall) {
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  Shape data_shape = ShapeUtil::MakeShape(F32, {});
  auto ccall = builder.AddInstruction(HloInstruction::CreateCustomCall(
      ShapeUtil::MakeTupleShape({data_shape, data_shape}), {constant},
      "TestOp"));
  // Declare that output element {1} aliases operand 0 (whole operand).
  Cast<HloCustomCallInstruction>(ccall)->set_output_to_operand_aliasing(
      {std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>{
          ShapeIndex{1}, std::pair<int64_t, ShapeIndex>(0, {})}});
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(data_shape, ccall, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(data_shape, ccall, 1));
  BuildModuleAndRunAnalysis(builder.Build());
  ExpectHasBufferAliases(ccall, {0}, {{gte0, {}}, {ccall, {0}}});
  ExpectHasBufferAliases(constant, {},
                         {{constant, {}}, {gte1, {}}, {ccall, {1}}});
}
// Fixture for points-to analysis of fusion instructions whose operand 0 is a
// tuple-shaped parameter.  `Run` parses the module, locates the fused
// parameter corresponding to the fusion's first operand, and verifies that
// the parameter's element buffers alias its fused get-tuple-element users.
class FusionPointsToAnalysisTest : public TuplePointsToAnalysisTest {
 protected:
  // Parses `hlo_str` (whose entry root must be a fusion over a 2-tuple
  // parameter), runs the analysis, and checks buffer/alias structure.
  // `expected_num_users` is the expected non-GTE user count of the buffer at
  // index {0} of the fused parameter.
  void Run(const std::string& hlo_str, int64_t expected_num_users) {
    TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_str));
    auto* fusion = module_->entry_computation()->root_instruction();
    auto* tuple_param0 = fusion->operand(0);
    RunAnalysis();
    auto* fusion_param = GetFusionParameterForOperand(fusion, tuple_param0);
    // The fused parameter owns distinct buffers at {}, {0} and {1}.
    ExpectHasBuffers(
        points_to_analysis_->GetPointsToSet(fusion_param).element({}),
        {GetBuffer(fusion_param, {})});
    ExpectHasBuffers(
        points_to_analysis_->GetPointsToSet(fusion_param).element({0}),
        {GetBuffer(fusion_param, {0})});
    ExpectHasBuffers(
        points_to_analysis_->GetPointsToSet(fusion_param).element({1}),
        {GetBuffer(fusion_param, {1})});
    // Each fused GTE points at the corresponding parameter element buffer.
    auto fused_gte0 = GetUniqueFusionParameterUserAt(fusion_param, 0);
    ExpectHasBuffers(
        points_to_analysis_->GetPointsToSet(fused_gte0).element({}),
        {GetBuffer(fusion_param, {0})});
    auto fused_gte1 = GetUniqueFusionParameterUserAt(fusion_param, 1);
    ExpectHasBuffers(
        points_to_analysis_->GetPointsToSet(fused_gte1).element({}),
        {GetBuffer(fusion_param, {1})});
    ExpectHasBufferAliases(fusion_param, {0},
                           {{fusion_param, {0}}, {fused_gte0, {}}});
    ExpectHasBufferAliases(fusion_param, {1},
                           {{fusion_param, {1}}, {fused_gte1, {}}});
    ExpectNumUsersOfAliases(fusion_param, {0}, expected_num_users);
  }
  // Returns the fused parameter inside `fusion` that corresponds to the
  // given external `operand` of the fusion instruction.
  HloInstruction* GetFusionParameterForOperand(HloInstruction* fusion,
                                               const HloInstruction* operand) {
    const auto& fused_instructions = fusion->fused_instructions();
    auto it =
        absl::c_find_if(fused_instructions, [&](const HloInstruction* fused) {
          return fused->opcode() == HloOpcode::kParameter &&
                 fusion->operand(fused->parameter_number()) == operand;
        });
    CHECK(it != fusion->fused_instructions().end());
    return *it;
  }
  // Returns all get-tuple-element users of `fusion_param` that extract
  // `tuple_index`.  Every user is expected to be a GTE.
  std::vector<HloInstruction*> GetFusionParameterUsersAt(
      HloInstruction* fusion_param, int64_t tuple_index) {
    CHECK(fusion_param->shape().IsTuple());
    std::vector<HloInstruction*> users_at_tuple_index;
    for (auto user : fusion_param->users()) {
      CHECK_EQ(HloOpcode::kGetTupleElement, user->opcode());
      if (user->tuple_index() == tuple_index) {
        users_at_tuple_index.push_back(user);
      }
    }
    return users_at_tuple_index;
  }
  // Like GetFusionParameterUsersAt, but CHECKs there is exactly one user.
  HloInstruction* GetUniqueFusionParameterUserAt(HloInstruction* fusion_param,
                                                 int64_t tuple_index) {
    std::vector<HloInstruction*> users =
        GetFusionParameterUsersAt(fusion_param, tuple_index);
    CHECK_EQ(1, users.size());
    return users[0];
  }
  // Counts users of all aliases of the buffer at (`instruction`, `index`),
  // skipping GTE users for non-top-level buffers, and compares the count
  // against `expected_num_users`.
  void ExpectNumUsersOfAliases(const HloInstruction* instruction,
                               const ShapeIndex& index,
                               const int64_t expected_num_users) {
    const auto* buffer = GetBuffer(instruction, index);
    int64_t num_users = 0;
    for (const auto& alias : points_to_analysis_->GetBufferAliases(*buffer)) {
      for (auto user : alias.instruction()->users()) {
        // GTEs of element buffers only forward the buffer; do not count them.
        if (user->opcode() == HloOpcode::kGetTupleElement && !index.empty()) {
          continue;
        }
        ++num_users;
      }
    }
    EXPECT_EQ(expected_num_users, num_users);
  }
};
// Fusion where element {0} of the tuple parameter feeds exactly one fused
// user (the dynamic-update-slice), so the alias user count must be 1.
TEST_F(FusionPointsToAnalysisTest, FusionParam0OneUser) {
  std::string hlo_str = R"(
HloModule FusionParam0OneUser
%fused_computation (param_1.2: (f32[8], f32[3])) -> f32[8] {
  %param_1.2 = (f32[8]{0}, f32[3]{0}) parameter(0)
  %get-tuple-element.1 = f32[8]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=0
  %get-tuple-element.2 = f32[3]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=1
  %constant.3 = f32[3]{0} constant({1, 1, 1})
  %add.1 = f32[3]{0} add(f32[3]{0} %get-tuple-element.2, f32[3]{0} %constant.3)
  %constant.2 = s32[] constant(0)
  ROOT %dynamic-update-slice.1 = f32[8]{0} dynamic-update-slice(f32[8]{0} %get-tuple-element.1, f32[3]{0} %add.1, s32[] %constant.2)
}
ENTRY %FusionParam0OneUser (param0: (f32[8], f32[3])) -> f32[8] {
  %param0 = (f32[8]{0}, f32[3]{0}) parameter(0)
  ROOT %fusion = f32[8]{0} fusion((f32[8]{0}, f32[3]{0}) %param0), kind=kLoop, calls=%fused_computation
}
)";
  Run(hlo_str, 1);
}
// Same fusion shape as above, but element {0} is consumed by two fused users
// (a slice and the dynamic-update-slice), so the alias user count must be 2.
TEST_F(FusionPointsToAnalysisTest, FusionParam0TwoUsers) {
  std::string hlo_str = R"(
HloModule FusionParam0TwoUsers
%fused_computation (param_1.2: (f32[8], f32[3])) -> f32[8] {
  %param_1.2 = (f32[8]{0}, f32[3]{0}) parameter(0)
  %get-tuple-element.1 = f32[8]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=0
  %get-tuple-element.2 = f32[3]{0} get-tuple-element((f32[8]{0}, f32[3]{0}) %param_1.2), index=1
  %constant.3 = f32[3]{0} constant({1, 1, 1})
  %add.1 = f32[3]{0} add(f32[3]{0} %get-tuple-element.2, f32[3]{0} %constant.3)
  %slice = f32[3]{0} slice(f32[8]{0} %get-tuple-element.1), slice={[0:3]}
  %add.2 = f32[3]{0} add(f32[3]{0} %add.1, f32[3]{0} %slice)
  %constant.2 = s32[] constant(0)
  ROOT %dynamic-update-slice.1 = f32[8]{0} dynamic-update-slice(f32[8]{0} %get-tuple-element.1, f32[3]{0} %add.2, s32[] %constant.2)
}
ENTRY %FusionParam0TwoUsers (param0: (f32[8], f32[3])) -> f32[8] {
  %param0 = (f32[8]{0}, f32[3]{0}) parameter(0)
  ROOT %fusion = f32[8]{0} fusion((f32[8]{0}, f32[3]{0}) %param0), kind=kLoop, calls=%fused_computation
}
)";
  Run(hlo_str, 2);
}
// Shared fixture: builds a module around a single entry computation and runs
// TuplePointsToAnalysis over it.
class PointsToAnalysisTestBase : public HloTestBase {
 protected:
  // Wraps `computation` into a fresh verified module as the entry computation.
  void BuildModule(std::unique_ptr<HloComputation> computation) {
    module_ = CreateNewVerifiedModule();
    computation_ = module_->AddEntryComputation(std::move(computation));
  }
  // Runs the analysis; BuildModule() must have been called first.
  void RunAnalysis() {
    CHECK_NOTNULL(module_.get());
    points_to_analysis_ = TuplePointsToAnalysis::Run(module_.get()).value();
  }
  // Convenience wrapper combining BuildModule() and RunAnalysis().
  void BuildModuleAndRunAnalysis(std::unique_ptr<HloComputation> computation) {
    BuildModule(std::move(computation));
    RunAnalysis();
  }
  std::unique_ptr<HloModule> module_;      // Module under analysis.
  HloComputation* computation_ = nullptr;  // Entry computation (not owned).
  std::unique_ptr<TuplePointsToAnalysis> points_to_analysis_;
};
// Fixture for tests of TuplePointsToAnalysis::DoesNotUseOperandBuffer().
class DoesNotUseOperandBufferTest : public PointsToAnalysisTestBase {};
// A GTE uses only the tuple's top-level buffer (the pointer table), not the
// element buffers it does not extract.
TEST_F(DoesNotUseOperandBufferTest, GetTupleElement) {
  auto builder = HloComputation::Builder(TestName());
  Shape elem_shape = ShapeUtil::MakeShape(F32, {8});
  auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeTupleShape({elem_shape, elem_shape}), "tuple"));
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(elem_shape, tuple, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(elem_shape, tuple, 1));
  builder.AddInstruction(
      HloInstruction::CreateBinary(elem_shape, HloOpcode::kAdd, gte0, gte1));
  BuildModuleAndRunAnalysis(builder.Build());
  // GTEs do not read the element buffers themselves...
  EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {0}, gte0));
  EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {1}, gte1));
  // ...but they do read the top-level tuple buffer.
  EXPECT_FALSE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte0));
  EXPECT_FALSE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {}, gte1));
}
// A fusion containing a dynamic-update-slice over tuple element {1} uses that
// element's buffer but not element {0}'s, even though both elements flow into
// the computation's result tuple.
TEST_F(DoesNotUseOperandBufferTest, FusedDynamicUpdateSlice) {
  auto builder = HloComputation::Builder(TestName());
  Shape data_shape = ShapeUtil::MakeShape(F32, {8});
  auto tuple = builder.AddInstruction(HloInstruction::CreateParameter(
      0, ShapeUtil::MakeTupleShape({data_shape, data_shape}), "tuple"));
  auto gte0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(data_shape, tuple, 0));
  auto gte1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(data_shape, tuple, 1));
  auto starts = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(2)));
  auto update = builder.AddInstruction(HloInstruction::CreateConstant(
      LiteralUtil::CreateR1<float>({2.f, 2.f, 2.f})));
  auto dynamic_update_slice =
      builder.AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
          data_shape, gte1, update, {starts}));
  builder.AddInstruction(
      HloInstruction::CreateTuple({gte0, dynamic_update_slice}));
  BuildModule(builder.Build());
  // Fuse the DUS chain (but not gte0) into a single loop fusion.
  auto fusion = computation_->CreateFusionInstruction(
      {dynamic_update_slice, starts, update, gte1},
      HloInstruction::FusionKind::kLoop);
  RunAnalysis();
  // The fusion never touches element {0}, but reads/updates element {1}.
  EXPECT_TRUE(points_to_analysis_->DoesNotUseOperandBuffer(tuple, {0}, fusion));
  EXPECT_FALSE(
      points_to_analysis_->DoesNotUseOperandBuffer(tuple, {1}, fusion));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_points_to_analysis.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/tuple_points_to_analysis_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8ff0df2a-07c8-4646-97be-0b6b24b801e6 | cpp | google/quiche | moqt_outgoing_queue | quiche/quic/moqt/moqt_outgoing_queue.cc | quiche/quic/moqt/moqt_outgoing_queue_test.cc | #include "quiche/quic/moqt/moqt_outgoing_queue.h"
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "quiche/quic/moqt/moqt_cached_object.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/moqt/moqt_publisher.h"
#include "quiche/quic/moqt/moqt_subscribe_windows.h"
#include "quiche/common/platform/api/quiche_bug_tracker.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
namespace moqt {
// Appends `payload` as the next object on the track.  A `key` object starts
// a new group: the previously open group (if any) is terminated with an
// end-of-group marker, and the oldest cached group is evicted once the queue
// already holds kMaxQueuedGroups groups.  The very first object added must
// be a key object, since every object has to belong to some group.
void MoqtOutgoingQueue::AddObject(quiche::QuicheMemSlice payload, bool key) {
  if (!key) {
    if (queue_.empty()) {
      QUICHE_BUG(MoqtOutgoingQueue_AddObject_first_object_not_key)
          << "The first object ever added to the queue must have the \"key\" "
             "flag.";
      return;
    }
    AddRawObject(MoqtObjectStatus::kNormal, std::move(payload));
    return;
  }
  // Starting a new group: close the one that is currently open.
  if (!queue_.empty()) {
    AddRawObject(MoqtObjectStatus::kEndOfGroup, quiche::QuicheMemSlice());
  }
  // Bound the cache: drop the oldest group when at capacity.
  if (queue_.size() == kMaxQueuedGroups) {
    queue_.erase(queue_.begin());
  }
  queue_.emplace_back();
  ++current_group_id_;
  AddRawObject(MoqtObjectStatus::kNormal, std::move(payload));
}
// Appends one object (with the given status) to the currently open group and
// notifies every registered listener of the new sequence number.
void MoqtOutgoingQueue::AddRawObject(MoqtObjectStatus status,
                                     quiche::QuicheMemSlice payload) {
  // The new object's ID is the next free slot in the last (open) group.
  Group& group = queue_.back();
  const FullSequence sequence{current_group_id_, group.size()};
  group.push_back(CachedObject{
      sequence, status,
      std::make_shared<quiche::QuicheMemSlice>(std::move(payload))});
  // Tell subscribers that a new object can now be pulled from the cache.
  for (MoqtObjectListener* const listener : listeners_) {
    listener->OnNewObjectAvailable(sequence);
  }
}
// Looks up `sequence` in the cache.  Returns nullopt when the object may
// still arrive in the future (group not yet started, or open group with the
// object not yet produced); otherwise returns the object, or a synthetic
// status object when the group/object can never exist.
std::optional<PublishedObject> MoqtOutgoingQueue::GetCachedObject(
    FullSequence sequence) const {
  // A group that has not been opened yet might still appear later.
  if (sequence.group > current_group_id_) {
    return std::nullopt;
  }
  // A group evicted from the cache is gone for good.
  if (sequence.group < first_group_in_queue()) {
    return PublishedObject{FullSequence{sequence.group, sequence.object},
                           MoqtObjectStatus::kGroupDoesNotExist,
                           quiche::QuicheMemSlice()};
  }
  const Group& group = queue_[sequence.group - first_group_in_queue()];
  if (sequence.object < group.size()) {
    const CachedObject& cached = group[sequence.object];
    QUICHE_DCHECK(sequence == cached.sequence);
    return CachedObjectToPublishedObject(cached);
  }
  // Object ID past the end of the group: if the group is still open the
  // object may yet be produced; otherwise it never will be.
  if (sequence.group == current_group_id_) {
    return std::nullopt;
  }
  return PublishedObject{FullSequence{sequence.group, sequence.object},
                         MoqtObjectStatus::kObjectDoesNotExist,
                         quiche::QuicheMemSlice()};
}
// Returns the sequence numbers of all cached objects that fall inside the
// [start, end] window, in cache (and hence sequence) order.
std::vector<FullSequence> MoqtOutgoingQueue::GetCachedObjectsInRange(
    FullSequence start, FullSequence end) const {
  const SubscribeWindow window(start, end);
  std::vector<FullSequence> result;
  for (const Group& group : queue_) {
    for (const CachedObject& object : group) {
      if (!window.InWindow(object.sequence)) {
        continue;
      }
      result.push_back(object.sequence);
    }
  }
  return result;
}
// Reports whether the track has started: it is "in progress" as soon as the
// first group has been opened, and "not yet begun" before that.
absl::StatusOr<MoqtTrackStatusCode> MoqtOutgoingQueue::GetTrackStatus() const {
  return queue_.empty() ? MoqtTrackStatusCode::kNotYetBegun
                        : MoqtTrackStatusCode::kInProgress;
}
// Returns the sequence number of the most recently published object.  It is
// a bug to call this before any object has been added.
FullSequence MoqtOutgoingQueue::GetLargestSequence() const {
  if (queue_.empty()) {
    QUICHE_BUG(MoqtOutgoingQueue_GetLargestSequence_not_begun)
        << "Calling GetLargestSequence() on a track that hasn't begun";
    return FullSequence{0, 0};
  }
  // The newest object is the last entry of the most recent group.
  const size_t last_object_id = queue_.back().size() - 1;
  return FullSequence{current_group_id_, last_object_id};
}
} | #include "quiche/quic/moqt/moqt_outgoing_queue.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/moqt/moqt_messages.h"
#include "quiche/quic/moqt/moqt_publisher.h"
#include "quiche/quic/moqt/moqt_subscribe_windows.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/platform/api/quiche_expect_bug.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace moqt {
namespace {
using ::quic::test::MemSliceFromString;
using ::testing::AnyOf;
// Test double that subscribes to itself: every object the queue publishes is
// forwarded to the mock methods PublishObject/CloseStreamForGroup so tests
// can set ordered expectations on the publication stream.
class TestMoqtOutgoingQueue : public MoqtOutgoingQueue,
                              public MoqtObjectListener {
 public:
  TestMoqtOutgoingQueue()
      : MoqtOutgoingQueue(FullTrackName{"test", "track"},
                          MoqtForwardingPreference::kSubgroup) {
    AddObjectListener(this);
  }
  // Pulls the newly available object from the cache and routes it to the
  // mocks: normal objects become PublishObject calls, end-of-group markers
  // become CloseStreamForGroup calls.
  void OnNewObjectAvailable(FullSequence sequence) override {
    std::optional<PublishedObject> object = GetCachedObject(sequence);
    QUICHE_CHECK(object.has_value());
    ASSERT_THAT(object->status, AnyOf(MoqtObjectStatus::kNormal,
                                      MoqtObjectStatus::kEndOfGroup));
    if (object->status == MoqtObjectStatus::kNormal) {
      PublishObject(object->sequence.group, object->sequence.object,
                    object->payload.AsStringView());
    } else {
      CloseStreamForGroup(object->sequence.group);
    }
  }
  // Simulates a late-joining subscriber: replays every cached object that
  // falls inside `window`, from the start of the cache onward.
  void CallSubscribeForPast(const SubscribeWindow& window) {
    std::vector<FullSequence> objects =
        GetCachedObjectsInRange(FullSequence(0, 0), GetLargestSequence());
    for (FullSequence object : objects) {
      if (window.InWindow(object)) {
        OnNewObjectAvailable(object);
      }
    }
  }
  MOCK_METHOD(void, CloseStreamForGroup, (uint64_t group_id), ());
  MOCK_METHOD(void, PublishObject,
              (uint64_t group_id, uint64_t object_id,
               absl::string_view payload),
              ());
};
// Adding a non-key object to an empty queue must trip the QUICHE_BUG.
TEST(MoqtOutgoingQueue, FirstObjectNotKeyframe) {
  TestMoqtOutgoingQueue queue;
  EXPECT_QUICHE_BUG(queue.AddObject(MemSliceFromString("a"), false),
                    "The first object");
}
// A key object followed by deltas publishes objects 0..2 of group 0, in
// order, without closing the (still open) group.
TEST(MoqtOutgoingQueue, SingleGroup) {
  TestMoqtOutgoingQueue queue;
  {
    testing::InSequence seq;
    EXPECT_CALL(queue, PublishObject(0, 0, "a"));
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, PublishObject(0, 2, "c"));
  }
  queue.AddObject(MemSliceFromString("a"), true);
  queue.AddObject(MemSliceFromString("b"), false);
  queue.AddObject(MemSliceFromString("c"), false);
}
// A past-subscribe starting at (0, 0) replays the whole cached group after
// the live publications.
TEST(MoqtOutgoingQueue, SingleGroupPastSubscribeFromZero) {
  TestMoqtOutgoingQueue queue;
  {
    testing::InSequence seq;
    // Live publications...
    EXPECT_CALL(queue, PublishObject(0, 0, "a"));
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, PublishObject(0, 2, "c"));
    // ...followed by the full replay.
    EXPECT_CALL(queue, PublishObject(0, 0, "a"));
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, PublishObject(0, 2, "c"));
  }
  queue.AddObject(MemSliceFromString("a"), true);
  queue.AddObject(MemSliceFromString("b"), false);
  queue.AddObject(MemSliceFromString("c"), false);
  queue.CallSubscribeForPast(SubscribeWindow(0, 0));
}
// A past-subscribe starting mid-group (0, 1) replays only objects at or after
// the window start.
TEST(MoqtOutgoingQueue, SingleGroupPastSubscribeFromMidGroup) {
  TestMoqtOutgoingQueue queue;
  {
    testing::InSequence seq;
    // Live publications...
    EXPECT_CALL(queue, PublishObject(0, 0, "a"));
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, PublishObject(0, 2, "c"));
    // ...then a replay that skips object 0.
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, PublishObject(0, 2, "c"));
  }
  queue.AddObject(MemSliceFromString("a"), true);
  queue.AddObject(MemSliceFromString("b"), false);
  queue.AddObject(MemSliceFromString("c"), false);
  queue.CallSubscribeForPast(SubscribeWindow(0, 1));
}
// A second key object closes group 0 (CloseStreamForGroup) before the first
// object of group 1 is published.
TEST(MoqtOutgoingQueue, TwoGroups) {
  TestMoqtOutgoingQueue queue;
  {
    testing::InSequence seq;
    EXPECT_CALL(queue, PublishObject(0, 0, "a"));
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, PublishObject(0, 2, "c"));
    EXPECT_CALL(queue, CloseStreamForGroup(0));
    EXPECT_CALL(queue, PublishObject(1, 0, "d"));
    EXPECT_CALL(queue, PublishObject(1, 1, "e"));
    EXPECT_CALL(queue, PublishObject(1, 2, "f"));
  }
  queue.AddObject(MemSliceFromString("a"), true);
  queue.AddObject(MemSliceFromString("b"), false);
  queue.AddObject(MemSliceFromString("c"), false);
  queue.AddObject(MemSliceFromString("d"), true);
  queue.AddObject(MemSliceFromString("e"), false);
  queue.AddObject(MemSliceFromString("f"), false);
}
// A past-subscribe from (0, 1) replays the rest of group 0 (including its
// end-of-group marker) and then all of group 1.
TEST(MoqtOutgoingQueue, TwoGroupsPastSubscribe) {
  TestMoqtOutgoingQueue queue;
  {
    testing::InSequence seq;
    // Live publications...
    EXPECT_CALL(queue, PublishObject(0, 0, "a"));
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, PublishObject(0, 2, "c"));
    EXPECT_CALL(queue, CloseStreamForGroup(0));
    EXPECT_CALL(queue, PublishObject(1, 0, "d"));
    EXPECT_CALL(queue, PublishObject(1, 1, "e"));
    EXPECT_CALL(queue, PublishObject(1, 2, "f"));
    // ...then the replay starting at (0, 1).
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, PublishObject(0, 2, "c"));
    EXPECT_CALL(queue, CloseStreamForGroup(0));
    EXPECT_CALL(queue, PublishObject(1, 0, "d"));
    EXPECT_CALL(queue, PublishObject(1, 1, "e"));
    EXPECT_CALL(queue, PublishObject(1, 2, "f"));
  }
  queue.AddObject(MemSliceFromString("a"), true);
  queue.AddObject(MemSliceFromString("b"), false);
  queue.AddObject(MemSliceFromString("c"), false);
  queue.AddObject(MemSliceFromString("d"), true);
  queue.AddObject(MemSliceFromString("e"), false);
  queue.AddObject(MemSliceFromString("f"), false);
  queue.CallSubscribeForPast(SubscribeWindow(0, 1));
}
// Five consecutive groups: each new key object closes the previous group
// before the new group's first object is published.
TEST(MoqtOutgoingQueue, FiveGroups) {
  TestMoqtOutgoingQueue queue;
  {
    testing::InSequence seq;
    EXPECT_CALL(queue, PublishObject(0, 0, "a"));
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, CloseStreamForGroup(0));
    EXPECT_CALL(queue, PublishObject(1, 0, "c"));
    EXPECT_CALL(queue, PublishObject(1, 1, "d"));
    EXPECT_CALL(queue, CloseStreamForGroup(1));
    EXPECT_CALL(queue, PublishObject(2, 0, "e"));
    EXPECT_CALL(queue, PublishObject(2, 1, "f"));
    EXPECT_CALL(queue, CloseStreamForGroup(2));
    EXPECT_CALL(queue, PublishObject(3, 0, "g"));
    EXPECT_CALL(queue, PublishObject(3, 1, "h"));
    EXPECT_CALL(queue, CloseStreamForGroup(3));
    EXPECT_CALL(queue, PublishObject(4, 0, "i"));
    EXPECT_CALL(queue, PublishObject(4, 1, "j"));
  }
  queue.AddObject(MemSliceFromString("a"), true);
  queue.AddObject(MemSliceFromString("b"), false);
  queue.AddObject(MemSliceFromString("c"), true);
  queue.AddObject(MemSliceFromString("d"), false);
  queue.AddObject(MemSliceFromString("e"), true);
  queue.AddObject(MemSliceFromString("f"), false);
  queue.AddObject(MemSliceFromString("g"), true);
  queue.AddObject(MemSliceFromString("h"), false);
  queue.AddObject(MemSliceFromString("i"), true);
  queue.AddObject(MemSliceFromString("j"), false);
}
// With five groups published, a full past-subscribe only replays the last
// three groups (2..4): the cache holds at most three groups, so groups 0 and
// 1 have been evicted.
TEST(MoqtOutgoingQueue, FiveGroupsPastSubscribe) {
  TestMoqtOutgoingQueue queue;
  {
    testing::InSequence seq;
    // Live publications for all five groups...
    EXPECT_CALL(queue, PublishObject(0, 0, "a"));
    EXPECT_CALL(queue, PublishObject(0, 1, "b"));
    EXPECT_CALL(queue, CloseStreamForGroup(0));
    EXPECT_CALL(queue, PublishObject(1, 0, "c"));
    EXPECT_CALL(queue, PublishObject(1, 1, "d"));
    EXPECT_CALL(queue, CloseStreamForGroup(1));
    EXPECT_CALL(queue, PublishObject(2, 0, "e"));
    EXPECT_CALL(queue, PublishObject(2, 1, "f"));
    EXPECT_CALL(queue, CloseStreamForGroup(2));
    EXPECT_CALL(queue, PublishObject(3, 0, "g"));
    EXPECT_CALL(queue, PublishObject(3, 1, "h"));
    EXPECT_CALL(queue, CloseStreamForGroup(3));
    EXPECT_CALL(queue, PublishObject(4, 0, "i"));
    EXPECT_CALL(queue, PublishObject(4, 1, "j"));
    // ...then a replay that starts at group 2 (0 and 1 were evicted).
    EXPECT_CALL(queue, PublishObject(2, 0, "e"));
    EXPECT_CALL(queue, PublishObject(2, 1, "f"));
    EXPECT_CALL(queue, CloseStreamForGroup(2));
    EXPECT_CALL(queue, PublishObject(3, 0, "g"));
    EXPECT_CALL(queue, PublishObject(3, 1, "h"));
    EXPECT_CALL(queue, CloseStreamForGroup(3));
    EXPECT_CALL(queue, PublishObject(4, 0, "i"));
    EXPECT_CALL(queue, PublishObject(4, 1, "j"));
  }
  queue.AddObject(MemSliceFromString("a"), true);
  queue.AddObject(MemSliceFromString("b"), false);
  queue.AddObject(MemSliceFromString("c"), true);
  queue.AddObject(MemSliceFromString("d"), false);
  queue.AddObject(MemSliceFromString("e"), true);
  queue.AddObject(MemSliceFromString("f"), false);
  queue.AddObject(MemSliceFromString("g"), true);
  queue.AddObject(MemSliceFromString("h"), false);
  queue.AddObject(MemSliceFromString("i"), true);
  queue.AddObject(MemSliceFromString("j"), false);
  queue.CallSubscribeForPast(SubscribeWindow(0, 0));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_outgoing_queue.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/moqt/moqt_outgoing_queue_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
d96664ba-3a28-4df3-85b0-d237c15b7457 | cpp | tensorflow/tensorflow | interleave_dataset_op | tensorflow/core/kernels/data/interleave_dataset_op.cc | tensorflow/core/kernels/data/interleave_dataset_op_test.cc | #include "tensorflow/core/kernels/data/interleave_dataset_op.h"
#include <algorithm>
#include <memory>
#include <optional>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/model.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/stringprintf.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
// Out-of-line definitions for the static constexpr members declared in
// InterleaveDatasetOp (needed for pre-C++17 inline-variable semantics).
constexpr const char* const InterleaveDatasetOp::kDatasetType;
constexpr const char* const InterleaveDatasetOp::kInputDataset;
constexpr const char* const InterleaveDatasetOp::kOtherArguments;
constexpr const char* const InterleaveDatasetOp::kCycleLength;
constexpr const char* const InterleaveDatasetOp::kBlockLength;
constexpr const char* const InterleaveDatasetOp::kFunc;
constexpr const char* const InterleaveDatasetOp::kTarguments;
constexpr const char* const InterleaveDatasetOp::kOutputTypes;
constexpr const char* const InterleaveDatasetOp::kOutputShapes;
// Keys used to serialize/restore iterator state in checkpoints.
constexpr char kCycleIndex[] = "cycle_index";
constexpr char kBlockIndex[] = "block_index";
constexpr char kEndOfInput[] = "end_of_input";
constexpr char kNumOpen[] = "num_open";
constexpr char kArgsSize[] = "args_size";
constexpr char kArgsList[] = "args_list_";
constexpr char kCurrentElementsUninitialized[] =
    "current_elements_uninitialized";
constexpr char kNextInputElementIndex[] = "next_input_element_index";
constexpr char kLastCheckpointedInputElementIndex[] =
    "last_checkpointed_input_element_index";
constexpr char kInputElementIndices[] = "input_element_indices";
// Implements `tf.data.Dataset.interleave()`: maps `captured_func_` over the
// elements of `input_` to produce nested datasets and interleaves their
// outputs. At most `cycle_length_` nested iterators are open concurrently,
// and `block_length_` consecutive elements are drawn from each before
// rotating to the next cycle slot.
class InterleaveDatasetOp::Dataset : public DatasetBase {
 public:
  Dataset(OpKernelContext* ctx, const DatasetBase* input,
          std::unique_ptr<CapturedFunction> captured_func, int64_t cycle_length,
          int64_t block_length, const DataTypeVector& output_types,
          const std::vector<PartialTensorShape>& output_shapes)
      : DatasetBase(DatasetContext(ctx)),
        input_(input),
        captured_func_(std::move(captured_func)),
        cycle_length_(cycle_length),
        block_length_(block_length),
        output_types_(output_types),
        output_shapes_(output_shapes),
        traceme_metadata_(
            {{"block_length",
              strings::Printf("%lld", static_cast<long long>(block_length))},
             {"cycle_length",
              strings::Printf("%lld", static_cast<long long>(cycle_length))}}) {
    input_->Ref();
  }

  ~Dataset() override { input_->Unref(); }

  std::unique_ptr<IteratorBase> MakeIteratorInternal(
      const string& prefix) const override {
    return std::make_unique<Iterator>(Iterator::Params{
        this, name_utils::IteratorPrefix(kDatasetType, prefix)});
  }

  const DataTypeVector& output_dtypes() const override { return output_types_; }

  const std::vector<PartialTensorShape>& output_shapes() const override {
    return output_shapes_;
  }

  string DebugString() const override {
    return name_utils::DatasetDebugString(kDatasetType);
  }

  Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
    inputs->push_back(input_);
    return absl::OkStatus();
  }

  Status CheckExternalState() const override {
    TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
    return input_->CheckExternalState();
  }

 protected:
  // Serializes this dataset as a graph node. Input order matches the op
  // signature: {0: input_dataset, 1: other_arguments (list),
  // 2: cycle_length, 3: block_length}.
  Status AsGraphDefInternal(SerializationContext* ctx,
                            DatasetGraphDefBuilder* b,
                            Node** output) const override {
    Node* input_node;
    TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_node));
    Node* cycle_length_node;
    TF_RETURN_IF_ERROR(b->AddScalar(cycle_length_, &cycle_length_node));
    Node* block_length_node;
    TF_RETURN_IF_ERROR(b->AddScalar(block_length_, &block_length_node));
    std::vector<Node*> other_arguments;
    DataTypeVector other_arguments_types;
    TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
                                                  &other_arguments_types));
    AttrValue f;
    b->BuildAttrValue(captured_func_->func(), &f);
    AttrValue other_arguments_types_attr;
    b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
    TF_RETURN_IF_ERROR(b->AddDataset(
        this, {{0, input_node}, {2, cycle_length_node}, {3, block_length_node}},
        {{1, other_arguments}},
        {{kFunc, f}, {kTarguments, other_arguments_types_attr}}, output));
    return absl::OkStatus();
  }

 private:
  class Iterator : public DatasetIterator<Dataset> {
   public:
    explicit Iterator(const Params& params)
        : DatasetIterator<Dataset>(params),
          current_elements_(params.dataset->cycle_length_) {}

    bool SymbolicCheckpointCompatible() const override { return true; }

    Status Initialize(IteratorContext* ctx) override {
      mutex_lock l(mu_);
      input_ckpt_ = std::make_unique<MemoryCheckpoint>(ctx->id_registry());
      TF_RETURN_IF_ERROR(
          dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
      return dataset()->captured_func_->Instantiate(
          ctx, &instantiated_captured_func_);
    }

    // Resets the block offset and rotates to the next slot in the cycle.
    void AdvanceToNextInCycle() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      block_index_ = 0;
      cycle_index_ = (cycle_index_ + 1) % dataset()->cycle_length_;
    }

    // Advances `block_index_` by `num_elements`, rotating the cycle when the
    // block is exhausted. Overshooting the block length is an internal error.
    Status AdvancePosition(int num_elements) TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      block_index_ += num_elements;
      if (block_index_ == dataset()->block_length_) {
        AdvanceToNextInCycle();
        return absl::OkStatus();
      } else if (block_index_ < dataset()->block_length_) {
        return absl::OkStatus();
      }
      return absl::InternalError(
          "Something went wrong as `block_index_` should never be larger than "
          "`dataset()->block_length_`");
    }

    // Single-element variant of the above; cannot overshoot.
    void AdvancePosition() TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      ++block_index_;
      if (block_index_ == dataset()->block_length_) {
        AdvanceToNextInCycle();
      }
    }

    Status GetNextInternal(IteratorContext* ctx,
                           std::vector<Tensor>* out_tensors,
                           bool* end_of_sequence) override {
      mutex_lock l(mu_);
      // Keep pulling from the current cycle slot until it yields an element,
      // finishes (then rotate), or the whole interleave is exhausted.
      while (!end_of_input_ || num_open_ > 0) {
        if (current_elements_[cycle_index_]) {
          // We are currently processing a mapped element, so try to get the
          // next subelement.
          bool end_of_element;
          auto nested_ctx = MakeNestedIteratorContext(ctx);
          CurrentElement& current_element = *current_elements_[cycle_index_];
          TF_RETURN_IF_ERROR(current_element.iterator->GetNext(
              &nested_ctx, out_tensors, &end_of_element));
          ctx->MergeCheckpoint(nested_ctx.checkpoint());
          if (!end_of_element) {
            AdvancePosition();
            *end_of_sequence = false;
            return absl::OkStatus();
          } else {
            // The nested iterator is exhausted; drop its checkpoint state and
            // free the cycle slot.
            ctx->PurgeCheckpoint(current_element.iterator->prefix());
            UpdateSymbolicCheckpointAfterCurrentElementFinished(
                *ctx, *current_elements_[cycle_index_]);
            current_elements_[cycle_index_].reset();
            --num_open_;
            AdvanceToNextInCycle();
          }
        } else {
          TF_RETURN_IF_ERROR(MoveToNextElement(ctx));
        }
      }
      ctx->MergeCheckpoint(input_ckpt_.get());
      *end_of_sequence = true;
      return absl::OkStatus();
    }

    Status SkipInternal(IteratorContext* ctx, int num_to_skip,
                        bool* end_of_sequence, int* num_skipped) override {
      mutex_lock l(mu_);
      *num_skipped = 0;
      while (!end_of_input_ || num_open_ > 0) {
        if (current_elements_[cycle_index_]) {
          CurrentElement& current_element = *current_elements_[cycle_index_];
          // Never skip past the end of the current block in one step.
          int element_num_to_skip = num_to_skip - *num_skipped;
          if (element_num_to_skip > dataset()->block_length_ - block_index_) {
            element_num_to_skip = dataset()->block_length_ - block_index_;
          }
          bool end_of_element = false;
          int element_num_skipped = 0;
          auto nested_ctx = MakeNestedIteratorContext(ctx);
          TF_RETURN_IF_ERROR(current_element.iterator->Skip(
              &nested_ctx, element_num_to_skip, &end_of_element,
              &element_num_skipped));
          *num_skipped += element_num_skipped;
          ctx->MergeCheckpoint(nested_ctx.checkpoint());
          if (!end_of_element) {
            TF_RETURN_IF_ERROR(AdvancePosition(element_num_skipped));
          } else {
            ctx->PurgeCheckpoint(current_element.iterator->prefix());
            UpdateSymbolicCheckpointAfterCurrentElementFinished(
                *ctx, *current_elements_[cycle_index_]);
            current_elements_[cycle_index_].reset();
            --num_open_;
            AdvanceToNextInCycle();
          }
          if (num_to_skip == *num_skipped) {
            *end_of_sequence = false;
            return absl::OkStatus();
          }
        } else {
          TF_RETURN_IF_ERROR(MoveToNextElement(ctx));
        }
      }
      ctx->MergeCheckpoint(input_ckpt_.get());
      *end_of_sequence = true;
      return absl::OkStatus();
    }

   protected:
    std::shared_ptr<model::Node> CreateNode(
        IteratorContext* ctx, model::Node::Args args) const override {
      return model::MakeInterleaveManyNode(
          std::move(args), {model::MakeNonTunableParameter(
                               kCycleLength, dataset()->cycle_length_)});
    }

    Status SaveInternal(SerializationContext* ctx,
                        IteratorStateWriter* writer) override {
      TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
          dataset()->captured_func_->CheckExternalState()));
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
      TF_RETURN_IF_ERROR(
          writer->WriteScalar(prefix(), kCycleIndex, cycle_index_));
      TF_RETURN_IF_ERROR(
          writer->WriteScalar(prefix(), kBlockIndex, block_index_));
      TF_RETURN_IF_ERROR(writer->WriteScalar(
          prefix(), kEndOfInput, static_cast<int64_t>(end_of_input_)));
      TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNumOpen, num_open_));
      TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kNextInputElementIndex,
                                             next_input_element_index_));
      TF_RETURN_IF_ERROR(
          writer->WriteScalar(prefix(), kLastCheckpointedInputElementIndex,
                              last_checkpointed_input_element_index_));
      TF_RETURN_IF_ERROR(SaveCurrentElements(ctx, writer));
      return absl::OkStatus();
    }

    Status RestoreInternal(IteratorContext* ctx,
                           IteratorStateReader* reader) override {
      mutex_lock l(mu_);
      TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
      int64_t cycle_index;
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kCycleIndex, &cycle_index));
      cycle_index_ = size_t(cycle_index);
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kBlockIndex, &block_index_));
      int64_t end_of_input;
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kEndOfInput, &end_of_input));
      end_of_input_ = static_cast<bool>(end_of_input);
      int64_t num_open;
      TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNumOpen, &num_open));
      num_open_ = size_t(num_open);
      TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kNextInputElementIndex,
                                            &next_input_element_index_));
      TF_RETURN_IF_ERROR(
          reader->ReadScalar(prefix(), kLastCheckpointedInputElementIndex,
                             &last_checkpointed_input_element_index_));
      int64_t cycle_length = dataset()->cycle_length_;
      std::vector<InputOffset> input_element_indices(cycle_length, -1);
      std::vector<std::optional<MemoryCheckpoint>> checkpoints(cycle_length);
      std::vector<std::vector<Tensor>> args(cycle_length);
      if (ctx->symbolic_checkpoint()) {
        // Symbolic checkpoints do not store the element args; recover them by
        // replaying the input dataset at the recorded offsets.
        auto status_or = RestoreInputOffsets(*reader);
        if (!status_or.ok()) {
          return status_or.status();
        }
        auto& input_offset_w_cycle_idxs = status_or.value();
        TF_RETURN_IF_ERROR(RestoreArgsListAndInputOffsetCycleIdxMap(
            *ctx, input_element_indices, checkpoints, args,
            input_offset_w_cycle_idxs));
      }
      TF_RETURN_IF_ERROR(
          RestoreCurrentElements(ctx, reader, input_element_indices,
                                 std::move(checkpoints), std::move(args)));
      return absl::OkStatus();
    }

    TraceMeMetadata GetTraceMeMetadata() const override {
      return dataset()->traceme_metadata_;
    }

   private:
    // Offset of an element within the input dataset.
    using InputOffset = int64_t;
    // Index of a slot within the interleave cycle.
    using CycleIdx = int;
    struct CurrentElement;
    struct InputOffsetWithCycleIdx;

    int64_t GetSubIteratorIndexForPrefix(bool symbolic_checkpoint)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      return GetSubIteratorIndexForPrefix(symbolic_checkpoint, cycle_index_,
                                          next_input_element_index_);
    }

    // With symbolic checkpointing, nested-iterator prefixes are keyed by the
    // input element offset (stable across restores); otherwise by cycle slot.
    int64_t GetSubIteratorIndexForPrefix(
        bool symbolic_checkpoint, int64_t cycle_index,
        std::optional<int64_t> input_element_index)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      return (symbolic_checkpoint) ? (input_element_index.value())
                                   : (cycle_index);
    }

    Status SaveCurrentElements(SerializationContext* ctx,
                               IteratorStateWriter* writer)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      for (int idx = 0; idx < current_elements_.size(); idx++) {
        TF_RETURN_IF_ERROR(writer->WriteScalar(
            prefix(),
            strings::StrCat(kCurrentElementsUninitialized, "[", idx, "]"),
            !current_elements_[idx]));
        if (!current_elements_[idx]) {
          continue;
        }
        if (!ctx->symbolic_checkpoint()) {
          // Explicit checkpoint: store the nested iterator state and the args
          // it was constructed from.
          TF_RETURN_IF_ERROR(
              SaveInput(ctx, writer, current_elements_[idx]->iterator));
          const auto& args = current_elements_[idx]->args;
          TF_RETURN_IF_ERROR(writer->WriteScalar(
              prefix(), strings::StrCat(kArgsSize, "[", idx, "]"),
              args.size()));
          for (int i = 0; i < args.size(); i++) {
            TF_RETURN_IF_ERROR(writer->WriteTensor(
                prefix(), strings::StrCat(kArgsList, "[", idx, "][", i, "]"),
                args[i]));
          }
        } else {
          // Symbolic checkpoint: only the input offset is stored; args are
          // regenerated on restore.
          TF_RETURN_IF_ERROR(writer->WriteScalar(
              prefix(), strings::StrCat(kInputElementIndices, "[", idx, "]"),
              current_elements_[idx]->input_element_index));
        }
      }
      return absl::OkStatus();
    }

    // Reads the (input offset, cycle slot) pairs of all open elements from a
    // symbolic checkpoint.
    absl::StatusOr<std::vector<InputOffsetWithCycleIdx>> RestoreInputOffsets(
        IteratorStateReader& reader) {
      std::vector<InputOffsetWithCycleIdx> input_offsets;
      int64_t cycle_length = dataset()->cycle_length_;
      for (int cycle_idx = 0; cycle_idx < cycle_length; cycle_idx++) {
        int64_t current_element_uninitialized;
        TF_RETURN_IF_ERROR(reader.ReadScalar(
            prefix(),
            strings::StrCat(kCurrentElementsUninitialized, "[", cycle_idx, "]"),
            &current_element_uninitialized));
        if (!current_element_uninitialized) {
          int64_t input_element_index;
          TF_RETURN_IF_ERROR(reader.ReadScalar(
              prefix(),
              strings::StrCat(kInputElementIndices, "[", cycle_idx, "]"),
              &input_element_index));
          input_offsets.push_back(
              InputOffsetWithCycleIdx{input_element_index, cycle_idx});
        }
      }
      return std::move(input_offsets);
    }

    // Replays the input dataset (in increasing offset order) to regenerate
    // the args and per-element checkpoints of every open element recorded in
    // a symbolic checkpoint. Requires the input to be deterministic.
    Status RestoreArgsListAndInputOffsetCycleIdxMap(
        IteratorContext& ctx, std::vector<InputOffset>& input_element_indices,
        std::vector<std::optional<MemoryCheckpoint>>& checkpoints,
        std::vector<std::vector<Tensor>>& args,
        std::vector<InputOffsetWithCycleIdx>& input_offset_w_cycle_idxs)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      if (input_element_indices.size() != dataset()->cycle_length_ ||
          checkpoints.size() != dataset()->cycle_length_ ||
          args.size() != dataset()->cycle_length_) {
        return absl::FailedPreconditionError(
            "input_element_indices, checkpoints and args should be of same "
            "length");
      }
      std::sort(input_offset_w_cycle_idxs.begin(),
                input_offset_w_cycle_idxs.end(),
                [](const InputOffsetWithCycleIdx& lhs,
                   const InputOffsetWithCycleIdx& rhs) {
                  return lhs.input_element_index < rhs.input_element_index;
                });
      bool end_of_sequence = false;
      int num_to_skip;
      int num_actually_skip;
      InputOffset prev_input_element_index =
          last_checkpointed_input_element_index_;
      auto input_ctx = std::make_unique<IteratorContext>(ctx);
      for (const auto& input_offset_w_cycle_idx : input_offset_w_cycle_idxs) {
        InputOffset input_element_index =
            input_offset_w_cycle_idx.input_element_index;
        CycleIdx cycle_idx = input_offset_w_cycle_idx.cycle_idx;
        if (input_element_index >= next_input_element_index_) {
          return absl::FailedPreconditionError(
              "input_element_index < next_input_element_index_ must be "
              "met.");
        }
        // Skip over the elements that were fully consumed between the
        // previous restored offset and this one.
        num_to_skip = input_element_index - prev_input_element_index - 1;
        TF_RETURN_IF_ERROR(input_impl_->Skip(input_ctx.get(), num_to_skip,
                                             &end_of_sequence,
                                             &num_actually_skip));
        if (end_of_sequence || num_actually_skip != num_to_skip) {
          return absl::InternalError(
              "Unexpected end of sequence while symbolically restoring "
              "InterleaveDataset. Please verify that the input produces data "
              "deterministically.");
        }
        std::vector<Tensor> current_element_args;
        TF_RETURN_IF_ERROR(input_impl_->GetNext(
            input_ctx.get(), &current_element_args, &end_of_sequence));
        prev_input_element_index = input_element_index;
        checkpoints[cycle_idx].emplace(*input_ctx->checkpoint());
        args[cycle_idx] = std::move(current_element_args);
        input_element_indices[cycle_idx] = input_element_index;
      }
      // Advance the input to just before `next_input_element_index_` so that
      // subsequent GetNext calls resume at the correct element.
      num_to_skip = next_input_element_index_ - prev_input_element_index - 1;
      TF_RETURN_IF_ERROR(input_impl_->Skip(
          input_ctx.get(), num_to_skip, &end_of_sequence, &num_actually_skip));
      if (end_of_sequence || num_actually_skip != num_to_skip) {
        return absl::InternalError(
            "Unexpected end of sequence while symbolically restoring "
            "InterleaveDataset. Please verify that the input produces data "
            "deterministically.");
      }
      input_ckpt_->Merge(input_ctx->checkpoint());
      return absl::OkStatus();
    }

    Status RestoreCurrentElements(
        IteratorContext* ctx, IteratorStateReader* reader,
        std::vector<InputOffset>& input_element_indices,
        std::vector<std::optional<MemoryCheckpoint>>&& checkpoints,
        std::vector<std::vector<Tensor>>&& args)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      for (int idx = 0; idx < current_elements_.size(); idx++) {
        int64_t current_element_uninitialized;
        TF_RETURN_IF_ERROR(reader->ReadScalar(
            prefix(),
            strings::StrCat(kCurrentElementsUninitialized, "[", idx, "]"),
            &current_element_uninitialized));
        if (!current_element_uninitialized) {
          if (!ctx->symbolic_checkpoint()) {
            // Explicit checkpoint: read back the stored args.
            int64_t args_size;
            std::vector<Tensor> current_element_args;
            TF_RETURN_IF_ERROR(reader->ReadScalar(
                prefix(), strings::StrCat(kArgsSize, "[", idx, "]"),
                &args_size));
            current_element_args.resize(args_size);
            for (int i = 0; i < args_size; i++) {
              TF_RETURN_IF_ERROR(reader->ReadTensor(
                  ctx->flr(), prefix(),
                  strings::StrCat(kArgsList, "[", idx, "][", i, "]"),
                  &current_element_args[i]));
            }
            args[idx] = std::move(current_element_args);
          }
          std::unique_ptr<IteratorBase> iterator;
          TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
              ctx, this, args[idx],
              GetSubIteratorIndexForPrefix(ctx->symbolic_checkpoint(), idx,
                                           input_element_indices[idx]),
              *instantiated_captured_func_, prefix(), &iterator,
              /*node=*/nullptr));
          TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, iterator));
          current_elements_[idx].emplace(
              std::move(checkpoints[idx]), std::move(args[idx]),
              input_element_indices[idx], std::move(iterator));
        } else {
          current_elements_[idx].reset();
        }
      }
      return absl::OkStatus();
    }

    // Pulls the next element from the input dataset and opens a nested
    // iterator for it in the current cycle slot. If the input is exhausted,
    // simply rotates to the next slot.
    Status MoveToNextElement(IteratorContext* ctx)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      if (!end_of_input_) {
        IteratorContext input_ctx = MakeNestedIteratorContext(ctx);
        std::vector<Tensor> args;
        TF_RETURN_IF_ERROR(
            input_impl_->GetNext(&input_ctx, &args, &end_of_input_));
        input_ckpt_->Merge(input_ctx.checkpoint());
        if (!end_of_input_) {
          std::unique_ptr<IteratorBase> iterator;
          TF_RETURN_IF_ERROR(MakeIteratorFromInputElement(
              ctx, this, args,
              GetSubIteratorIndexForPrefix(ctx->symbolic_checkpoint()),
              *instantiated_captured_func_, prefix(), &iterator, model_node()));
          ++num_open_;
          std::optional<MemoryCheckpoint> checkpoint;
          if (ctx->symbolic_checkpoint()) {
            checkpoint.emplace(*input_ckpt_);
          }
          current_elements_[cycle_index_].emplace(
              std::move(checkpoint), std::move(args), next_input_element_index_,
              std::move(iterator));
          next_input_element_index_++;
        }
      } else {
        AdvanceToNextInCycle();
      }
      return absl::OkStatus();
    }

    // Returns true iff `input_element_index` is the smallest input offset
    // among all currently open elements. (Fixed to return `bool`; the
    // original declared `InputOffset` although it computes a comparison.)
    bool IsEarliestInputElementIndex(InputOffset input_element_index) {
      InputOffset min_input_element_index = input_element_index;
      for (int i = 0; i < current_elements_.size(); ++i) {
        if (!current_elements_[i]) continue;
        if (current_elements_[i]->input_element_index <
            min_input_element_index) {
          min_input_element_index = current_elements_[i]->input_element_index;
        }
      }
      return (min_input_element_index == input_element_index);
    }

    // When the element with the earliest input offset finishes, its captured
    // input checkpoint becomes safe to commit: no live element precedes it.
    void UpdateSymbolicCheckpointAfterCurrentElementFinished(
        IteratorContext& ctx, CurrentElement& current_element)
        TF_EXCLUSIVE_LOCKS_REQUIRED(mu_) {
      if (!ctx.symbolic_checkpoint()) {
        return;
      }
      InputOffset input_element_index = current_element.input_element_index;
      if (IsEarliestInputElementIndex(input_element_index)) {
        MemoryCheckpoint& checkpoint =
            const_cast<MemoryCheckpoint&>(current_element.checkpoint.value());
        ctx.MergeCheckpoint(&checkpoint);
        last_checkpointed_input_element_index_ = input_element_index;
      }
    }

    mutex mu_;
    std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);

    // State of one open nested iterator in the interleave cycle.
    struct CurrentElement {
      // Input checkpoint taken when this element was opened (symbolic mode
      // only).
      const std::optional<MemoryCheckpoint> checkpoint = std::nullopt;
      const InputOffset input_element_index = -1;
      const std::vector<Tensor> args;
      std::unique_ptr<IteratorBase> iterator = nullptr;

      explicit CurrentElement(std::optional<MemoryCheckpoint>&& checkpoint,
                              std::vector<Tensor>&& args,
                              InputOffset input_element_index,
                              std::unique_ptr<IteratorBase> iterator)
          : checkpoint(std::move(checkpoint)),
            input_element_index(input_element_index),
            args(std::move(args)),
            iterator(std::move(iterator)) {}
      CurrentElement(CurrentElement&& other) = default;
    };

    struct InputOffsetWithCycleIdx {
      InputOffset input_element_index;
      CycleIdx cycle_idx;
    };

    std::vector<std::optional<CurrentElement>> current_elements_;
    // Largest input offset whose checkpoint has been committed downstream.
    InputOffset last_checkpointed_input_element_index_ TF_GUARDED_BY(mu_) = -1;
    // Offset the next element pulled from the input will have.
    InputOffset next_input_element_index_ TF_GUARDED_BY(mu_) = 0;
    std::unique_ptr<MemoryCheckpoint> input_ckpt_ TF_GUARDED_BY(mu_);
    size_t cycle_index_ TF_GUARDED_BY(mu_) = 0;
    int64_t block_index_ TF_GUARDED_BY(mu_) = 0;
    bool end_of_input_ TF_GUARDED_BY(mu_) = false;
    size_t num_open_ TF_GUARDED_BY(mu_) = 0;
    std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
  };

  const DatasetBase* const input_;
  const std::unique_ptr<CapturedFunction> captured_func_;
  const int64_t cycle_length_;
  const int64_t block_length_;
  const DataTypeVector output_types_;
  const std::vector<PartialTensorShape> output_shapes_;
  const TraceMeMetadata traceme_metadata_;
};
// Resolves the interleave map function and the declared output signature
// from the node's attributes at kernel-construction time; construction fails
// if any attribute is missing or malformed.
InterleaveDatasetOp::InterleaveDatasetOp(OpKernelConstruction* ctx)
    : UnaryDatasetOpKernel(ctx), graph_def_version_(ctx->graph_def_version()) {
  OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kFunc, {},
                                               &func_metadata_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputTypes, &output_types_));
  OP_REQUIRES_OK(ctx, ctx->GetAttr(kOutputShapes, &output_shapes_));
}
// Validates the scalar `cycle_length` / `block_length` inputs, captures the
// map function, and constructs the output Dataset. The autotune sentinel for
// `cycle_length` is resolved to the host's maximum parallelism.
void InterleaveDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
                                      DatasetBase** output) {
  // Cycle length: parse, resolve autotune, then require strict positivity.
  int64_t cycle_len = 0;
  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kCycleLength, &cycle_len));
  if (cycle_len == model::kAutotune) cycle_len = port::MaxParallelism();
  OP_REQUIRES(
      ctx, cycle_len > 0,
      errors::InvalidArgument("cycle_length must be greater than zero."));

  // Block length: parse and require strict positivity.
  int64_t block_len = 0;
  OP_REQUIRES_OK(ctx, ParseScalarArgument(ctx, kBlockLength, &block_len));
  OP_REQUIRES(
      ctx, block_len > 0,
      errors::InvalidArgument("block_length must be greater than zero."));

  // Bind the user-supplied map function together with its captured inputs.
  std::unique_ptr<CapturedFunction> captured_fn;
  OP_REQUIRES_OK(ctx,
                 CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
                                          &captured_fn));

  *output = new Dataset(ctx, input, std::move(captured_fn), cycle_len,
                        block_len, output_types_, output_shapes_);
}
namespace {
// Registers the CPU kernel and exempts the op from input colocation, since
// its function-typed inputs need not live on the same device.
REGISTER_KERNEL_BUILDER(Name("InterleaveDataset").Device(DEVICE_CPU),
                        InterleaveDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("InterleaveDataset");
}  // namespace
}
}

#include "tensorflow/core/kernels/data/interleave_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "interleave_dataset";
// Parameter bundle describing one InterleaveDataset configuration under test:
// the upstream input dataset params, the map function and its captured
// arguments, cycle/block lengths, and the expected output signature.
class InterleaveDatasetParams : public DatasetParams {
 public:
  template <typename T>
  InterleaveDatasetParams(T input_dataset_params,
                          std::vector<Tensor> other_arguments,
                          int64_t cycle_length, int64_t block_length,
                          FunctionDefHelper::AttrValueWrapper func,
                          std::vector<FunctionDef> func_lib,
                          DataTypeVector type_arguments,
                          DataTypeVector output_dtypes,
                          std::vector<PartialTensorShape> output_shapes,
                          string node_name)
      : DatasetParams(std::move(output_dtypes), std::move(output_shapes),
                      std::move(node_name)),
        other_arguments_(std::move(other_arguments)),
        cycle_length_(cycle_length),
        block_length_(block_length),
        func_(std::move(func)),
        func_lib_(std::move(func_lib)),
        type_arguments_(std::move(type_arguments)) {
    input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
    iterator_prefix_ =
        name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
                                   input_dataset_params.iterator_prefix());
  }

  // Input tensors in op order: captured arguments, then the scalar
  // cycle_length and block_length.
  std::vector<Tensor> GetInputTensors() const override {
    std::vector<Tensor> input_tensors = other_arguments_;
    input_tensors.emplace_back(
        CreateTensor<int64_t>(TensorShape({}), {cycle_length_}));
    input_tensors.emplace_back(
        CreateTensor<int64_t>(TensorShape({}), {block_length_}));
    return input_tensors;
  }

  // Input names must match the order produced by GetInputTensors().
  Status GetInputNames(std::vector<string>* input_names) const override {
    input_names->clear();
    input_names->reserve(input_dataset_params_.size() +
                         other_arguments_.size() + 2);
    input_names->emplace_back(InterleaveDatasetOp::kInputDataset);
    for (int i = 0; i < other_arguments_.size(); ++i) {
      input_names->emplace_back(
          absl::StrCat(InterleaveDatasetOp::kOtherArguments, "_", i));
    }
    input_names->emplace_back(InterleaveDatasetOp::kCycleLength);
    input_names->emplace_back(InterleaveDatasetOp::kBlockLength);
    return absl::OkStatus();
  }

  Status GetAttributes(AttributeVector* attr_vector) const override {
    *attr_vector = {{"f", func_},
                    {"Targuments", type_arguments_},
                    {"output_shapes", output_shapes_},
                    {"output_types", output_dtypes_},
                    {"metadata", ""}};
    return absl::OkStatus();
  }

  std::vector<FunctionDef> func_lib() const override { return func_lib_; }

  string dataset_type() const override {
    return InterleaveDatasetOp::kDatasetType;
  }

 private:
  std::vector<Tensor> other_arguments_;
  int64_t cycle_length_;
  int64_t block_length_;
  FunctionDefHelper::AttrValueWrapper func_;
  std::vector<FunctionDef> func_lib_;
  DataTypeVector type_arguments_;
};
class InterleaveDatasetOpTest : public DatasetOpsTestBase {};
// Builds a FunctionRef to MakeTensorSliceDataset parameterized with the given
// output signature; used as the interleave map function in all test cases.
FunctionDefHelper::AttrValueWrapper MakeTensorSliceDatasetFunc(
    const DataTypeVector& output_types,
    const std::vector<PartialTensorShape>& output_shapes) {
  return FunctionDefHelper::FunctionRef(
      "MakeTensorSliceDataset",
      {{"Toutput_types", output_types},
       {"output_shapes", output_shapes}});
}
// 3x3 int64 input sliced row-wise; cycle_length = 1, block_length = 1
// (sequential flattening — no actual interleaving).
InterleaveDatasetParams InterleaveDatasetParams1() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      1,
      1,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_INT64}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_INT64},
      {PartialTensorShape({1})},
      kNodeName);
}
// 3x3 int64 input; cycle_length = 2, block_length = 1.
InterleaveDatasetParams InterleaveDatasetParams2() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      2,
      1,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_INT64}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_INT64},
      {PartialTensorShape({1})},
      kNodeName);
}
// 3x3 int64 input; cycle_length = 3, block_length = 1 (full interleave).
InterleaveDatasetParams InterleaveDatasetParams3() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      3,
      1,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_INT64}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_INT64},
      {PartialTensorShape({1})},
      kNodeName);
}
// 3x3 int64 input; cycle_length = 5 exceeds the number of input elements,
// so behavior matches cycle_length = 3.
InterleaveDatasetParams InterleaveDatasetParams4() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      5,
      1,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_INT64}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_INT64},
      {PartialTensorShape({1})},
      kNodeName);
}
// 3x3 string input; cycle_length = 2, block_length = 2.
InterleaveDatasetParams InterleaveDatasetParams5() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<tstring>(TensorShape{3, 3, 1},
                             {"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      2,
      2,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_STRING}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_STRING},
      {PartialTensorShape({1})},
      kNodeName);
}
// 3x3 string input; cycle_length = 2, block_length = 3 (block covers each
// whole nested dataset, so output order is sequential).
InterleaveDatasetParams InterleaveDatasetParams6() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<tstring>(TensorShape{3, 3, 1},
                             {"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      2,
      3,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_STRING}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_STRING},
      {PartialTensorShape({1})},
      kNodeName);
}
// 3x3 string input; cycle_length = 2, block_length = 5 exceeds the nested
// dataset size, so output order is sequential.
InterleaveDatasetParams InterleaveDatasetParams7() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<tstring>(TensorShape{3, 3, 1},
                             {"a", "b", "c", "d", "e", "f", "g", "h", "i"})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      2,
      5,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_STRING}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_STRING},
      {PartialTensorShape({1})},
      kNodeName);
}
// cycle_length = 0 is invalid and must fail dataset construction.
InterleaveDatasetParams InterleaveDatasetParamsWithInvalidCycleLength() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      0,
      5,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_INT64}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_INT64},
      {PartialTensorShape({1})},
      kNodeName);
}
// block_length = -1 is invalid and must fail dataset construction.
InterleaveDatasetParams InterleaveDatasetParamsWithInvalidBlockLength() {
  auto tensor_slice_dataset_params = TensorSliceDatasetParams(
      {CreateTensor<int64_t>(TensorShape{3, 3, 1},
                             {0, 1, 2, 3, 4, 5, 6, 7, 8})},
      "tensor_slice_dataset");
  return InterleaveDatasetParams(
      std::move(tensor_slice_dataset_params),
      {},
      1,
      -1,
      MakeTensorSliceDatasetFunc(
          DataTypeVector({DT_INT64}),
          std::vector<PartialTensorShape>({PartialTensorShape({1})})),
      {test::function::MakeTensorSliceDataset()},
      {},
      {DT_INT64},
      {PartialTensorShape({1})},
      kNodeName);
}
// Expected GetNext output sequences for each params configuration; ordering
// reflects the cycle_length/block_length of each case.
std::vector<GetNextTestCase<InterleaveDatasetParams>> GetNextTestCases() {
  return {
      {InterleaveDatasetParams1(),
       CreateTensors<int64_t>(TensorShape({1}),
                              {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})},
      {InterleaveDatasetParams2(),
       CreateTensors<int64_t>(
           TensorShape({1}), {{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}})},
      {InterleaveDatasetParams3(),
       CreateTensors<int64_t>(
           TensorShape({1}), {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
      {InterleaveDatasetParams4(),
       CreateTensors<int64_t>(
           TensorShape({1}), {{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
      {InterleaveDatasetParams5(),
       CreateTensors<tstring>(
           TensorShape({1}),
           {{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}})},
      {InterleaveDatasetParams6(),
       CreateTensors<tstring>(
           TensorShape({1}),
           {{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})},
      {InterleaveDatasetParams7(),
       CreateTensors<tstring>(
           TensorShape({1}),
           {{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})}};
}

ITERATOR_GET_NEXT_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
                         GetNextTestCases())
// Skip test cases: {params, num_to_skip, expected_num_skipped,
// [get_next_after_skip, expected_outputs]}.
std::vector<SkipTestCase<InterleaveDatasetParams>> SkipTestCases() {
  return {{InterleaveDatasetParams1(),
           /*num_to_skip=*/0, /*expected_num_skipped=*/0, /*get_next=*/true,
           CreateTensors<int64_t>(TensorShape({1}), {{0}})},
          {InterleaveDatasetParams1(),
           /*num_to_skip=*/5, /*expected_num_skipped=*/5, /*get_next=*/true,
           CreateTensors<int64_t>(TensorShape({1}), {{5}})},
          {InterleaveDatasetParams1(),
           /*num_to_skip=*/10, /*expected_num_skipped=*/9},
          {InterleaveDatasetParams2(),
           /*num_to_skip=*/5, /*expected_num_skipped=*/5, /*get_next=*/true,
           CreateTensors<int64_t>(TensorShape({1}), {{5}})},
          {InterleaveDatasetParams2(),
           /*num_to_skip=*/10, /*expected_num_skipped=*/9},
          {InterleaveDatasetParams3(),
           /*num_to_skip=*/5, /*expected_num_skipped=*/5, /*get_next=*/true,
           CreateTensors<int64_t>(TensorShape({1}), {{7}})},
          {InterleaveDatasetParams3(),
           /*num_to_skip=*/10, /*expected_num_skipped=*/9},
          {InterleaveDatasetParams4(),
           /*num_to_skip=*/5, /*expected_num_skipped=*/5, /*get_next=*/true,
           CreateTensors<int64_t>(TensorShape({1}), {{7}})},
          {InterleaveDatasetParams4(),
           /*num_to_skip=*/10, /*expected_num_skipped=*/9},
          {InterleaveDatasetParams5(),
           /*num_to_skip=*/3, /*expected_num_skipped=*/3, /*get_next=*/true,
           CreateTensors<tstring>(TensorShape({1}), {{"e"}})},
          {InterleaveDatasetParams5(),
           /*num_to_skip=*/10, /*expected_num_skipped=*/9},
          {InterleaveDatasetParams6(),
           /*num_to_skip=*/3, /*expected_num_skipped=*/3, /*get_next=*/true,
           CreateTensors<tstring>(TensorShape({1}), {{"d"}})},
          {InterleaveDatasetParams6(),
           /*num_to_skip=*/10, /*expected_num_skipped=*/9},
          {InterleaveDatasetParams7(),
           /*num_to_skip=*/3, /*expected_num_skipped=*/3, /*get_next=*/true,
           CreateTensors<tstring>(TensorShape({1}), {{"d"}})},
          {InterleaveDatasetParams7(),
           /*num_to_skip=*/10, /*expected_num_skipped=*/9}};
}

ITERATOR_SKIP_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
                     SkipTestCases())
// The dataset node keeps the name supplied via the params.
TEST_F(InterleaveDatasetOpTest, DatasetNodeName) {
  auto dataset_params = InterleaveDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
// The dataset type string matches the registered op name.
TEST_F(InterleaveDatasetOpTest, DatasetTypeString) {
  auto dataset_params = InterleaveDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckDatasetTypeString(
      name_utils::OpName(InterleaveDatasetOp::kDatasetType)));
}
// The dataset output dtype follows the map function's output signature.
std::vector<DatasetOutputDtypesTestCase<InterleaveDatasetParams>>
DatasetOutputDtypesTestCases() {
  return {{InterleaveDatasetParams1(),
           {DT_INT64}},
          {InterleaveDatasetParams2(),
           {DT_INT64}},
          {InterleaveDatasetParams3(),
           {DT_INT64}},
          {InterleaveDatasetParams4(),
           {DT_INT64}},
          {InterleaveDatasetParams5(),
           {DT_STRING}},
          {InterleaveDatasetParams6(),
           {DT_STRING}},
          {InterleaveDatasetParams7(),
           {DT_STRING}}};
}

DATASET_OUTPUT_DTYPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
                             DatasetOutputDtypesTestCases())
// All configurations yield elements of shape [1].
std::vector<DatasetOutputShapesTestCase<InterleaveDatasetParams>>
DatasetOutputShapesTestCases() {
  return {{InterleaveDatasetParams1(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams2(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams3(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams4(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams5(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams6(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams7(),
           {PartialTensorShape({1})}}};
}

DATASET_OUTPUT_SHAPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
                             DatasetOutputShapesTestCases())
// Interleave cannot know the nested dataset sizes up front, so cardinality is
// always unknown.
std::vector<CardinalityTestCase<InterleaveDatasetParams>>
CardinalityTestCases() {
  return {{InterleaveDatasetParams1(),
           kUnknownCardinality},
          {InterleaveDatasetParams2(),
           kUnknownCardinality},
          {InterleaveDatasetParams3(),
           kUnknownCardinality},
          {InterleaveDatasetParams4(),
           kUnknownCardinality},
          {InterleaveDatasetParams5(),
           kUnknownCardinality},
          {InterleaveDatasetParams6(),
           kUnknownCardinality},
          {InterleaveDatasetParams7(),
           kUnknownCardinality}};
}

DATASET_CARDINALITY_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
                           CardinalityTestCases())
// The iterator reports the same output dtypes as the dataset.
std::vector<IteratorOutputDtypesTestCase<InterleaveDatasetParams>>
IteratorOutputDtypesTestCases() {
  return {{InterleaveDatasetParams1(),
           {DT_INT64}},
          {InterleaveDatasetParams2(),
           {DT_INT64}},
          {InterleaveDatasetParams3(),
           {DT_INT64}},
          {InterleaveDatasetParams4(),
           {DT_INT64}},
          {InterleaveDatasetParams5(),
           {DT_STRING}},
          {InterleaveDatasetParams6(),
           {DT_STRING}},
          {InterleaveDatasetParams7(),
           {DT_STRING}}};
}

ITERATOR_OUTPUT_DTYPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
                              IteratorOutputDtypesTestCases())
// The iterator reports the same output shapes as the dataset.
std::vector<IteratorOutputShapesTestCase<InterleaveDatasetParams>>
IteratorOutputShapesTestCases() {
  return {{InterleaveDatasetParams1(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams2(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams3(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams4(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams5(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams6(),
           {PartialTensorShape({1})}},
          {InterleaveDatasetParams7(),
           {PartialTensorShape({1})}}};
}

ITERATOR_OUTPUT_SHAPES_TEST_P(InterleaveDatasetOpTest, InterleaveDatasetParams,
                              IteratorOutputShapesTestCases())
// The iterator prefix composes the dataset type with the input's prefix.
TEST_F(InterleaveDatasetOpTest, IteratorPrefix) {
  auto dataset_params = InterleaveDatasetParams1();
  TF_ASSERT_OK(Initialize(dataset_params));
  TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
      InterleaveDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<InterleaveDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {
{InterleaveDatasetParams1(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams2(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {1}, {4}, {2}, {5}, {6}, {7}, {8}})},
{InterleaveDatasetParams3(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams4(),
{0, 4, 11},
CreateTensors<int64_t>(TensorShape({1}),
{{0}, {3}, {6}, {1}, {4}, {7}, {2}, {5}, {8}})},
{InterleaveDatasetParams5(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"d"}, {"e"}, {"c"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams6(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})},
{InterleaveDatasetParams7(),
{0, 4, 11},
CreateTensors<tstring>(
TensorShape({1}),
{{"a"}, {"b"}, {"c"}, {"d"}, {"e"}, {"f"}, {"g"}, {"h"}, {"i"}})}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(InterleaveDatasetOpTest,
InterleaveDatasetParams,
IteratorSaveAndRestoreTestCases())
TEST_F(InterleaveDatasetOpTest, InvalidCycleLength) {
auto dataset_params = InterleaveDatasetParamsWithInvalidCycleLength();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(InterleaveDatasetOpTest, InvalidLength) {
auto dataset_params = InterleaveDatasetParamsWithInvalidBlockLength();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/interleave_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/interleave_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
abfebcd9-00e5-45bc-b724-26719c12e44e | cpp | tensorflow/tensorflow | device_compiler_client | tensorflow/compiler/jit/device_compiler_client.cc | tensorflow/compiler/jit/device_compiler_client_test.cc | #include "tensorflow/compiler/jit/device_compiler_client.h"
#include "tensorflow/compiler/tf2xla/xla_compiler.h"
#include "tensorflow/core/util/determinism.h"
namespace tensorflow {
xla::ExecutableBuildOptions GetExecutableBuildOptions(
const XlaCompiler::Options& options,
const XlaCompiler::CompilationResult& result, int default_device_ordinal) {
xla::ExecutableBuildOptions build_options;
if (result.collective_info) {
build_options.set_num_replicas(result.collective_info->group_size);
}
if (options.device_ordinal != -1) {
build_options.set_device_ordinal(options.device_ordinal);
} else if (default_device_ordinal != -1) {
build_options.set_device_ordinal(default_device_ordinal);
}
build_options.set_result_layout(result.xla_output_shape);
build_options.set_device_allocator(options.device_allocator.get());
build_options.set_alias_passthrough_params(options.alias_passthrough_params);
build_options.mutable_debug_options()->set_xla_detailed_logging(
options.detailed_logging);
if (tensorflow::OpDeterminismRequired()) {
build_options.mutable_debug_options()->set_xla_gpu_deterministic_ops(true);
}
return build_options;
}
} | #include "tensorflow/compiler/jit/device_compiler_client.h"
#include <gtest/gtest.h>
namespace tensorflow {
namespace {
TEST(GetExecutableOptionTest, Basic) {
XlaCompiler::Options options;
options.device_ordinal = 0;
options.alias_passthrough_params = true;
options.detailed_logging = true;
XlaCompiler::CompilationResult result;
xla::Shape xla_output_shape;
result.xla_output_shape = xla_output_shape;
auto build_option =
GetExecutableBuildOptions(options, result, -1);
EXPECT_EQ(build_option.device_ordinal(), 0);
EXPECT_EQ(build_option.result_layout()->ToString(),
xla_output_shape.ToString());
EXPECT_EQ(build_option.alias_passthrough_params(), true);
EXPECT_EQ(build_option.debug_options().xla_detailed_logging(), true);
EXPECT_EQ(build_option.debug_options().xla_enable_dumping(), true);
}
TEST(GetExecutableOptionTest, DefaultDeviceOrdinal) {
XlaCompiler::Options options;
XlaCompiler::CompilationResult result;
auto build_option =
GetExecutableBuildOptions(options, result, 0);
EXPECT_EQ(build_option.device_ordinal(), 0);
}
TEST(GetExecutableOptionTest, DeviceOrdinalNotSet) {
XlaCompiler::Options options;
XlaCompiler::CompilationResult result;
auto build_option =
GetExecutableBuildOptions(options, result, -1);
EXPECT_EQ(build_option.device_ordinal(), -1);
}
TEST(GetExecutableOptionTest, DumpingWithoutDetailedLogging) {
XlaCompiler::Options options;
options.detailed_logging = false;
XlaCompiler::CompilationResult result;
auto build_option =
GetExecutableBuildOptions(options, result, -1);
EXPECT_FALSE(build_option.debug_options().xla_detailed_logging());
EXPECT_TRUE(build_option.debug_options().xla_enable_dumping());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compiler_client.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/device_compiler_client_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2589f64c-eb8c-442a-8faa-a014e79a40f7 | cpp | tensorflow/tensorflow | conv_map_wrapper | tensorflow/core/util/autotune_maps/conv_map_wrapper.cc | tensorflow/core/util/autotune_maps/conv_map_wrapper_test.cc | #include "tensorflow/core/util/autotune_maps/conv_map_wrapper.h"
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "xla/tsl/lib/strings/proto_serialization.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "tensorflow/core/util/autotune_maps/autotune_map.pb.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
namespace tensorflow {
absl::StatusOr<ConvMapWrapper> ConvMapWrapper::FromKeyAndValue(
OpaqueKey key, OpaqueValue value) {
ConvMapProto::Entry key_proto;
if (!key_proto.ParseFromString(key)) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"Could not parse the provided key");
}
ConvMapProto::Entry value_proto;
if (!value_proto.ParseFromString(value)) {
return absl::Status(absl::StatusCode::kInvalidArgument,
"Could not parse the provided value");
}
ConvMapProto::Entry full_entry;
*full_entry.mutable_key() = key_proto.key();
*full_entry.mutable_value() = value_proto.value();
return ConvMapWrapper(full_entry);
}
ConvMapWrapper::OpaqueKey ConvMapWrapper::Key() const {
ConvMapProto::Entry entry;
*entry.mutable_key() = conv_map_entry_.key();
OpaqueKey serialized;
CHECK(tsl::SerializeToStringDeterministic(entry, &serialized));
return serialized;
}
ConvMapWrapper::OpaqueValue ConvMapWrapper::Value() const {
ConvMapProto::Entry entry;
*entry.mutable_value() = conv_map_entry_.value();
OpaqueValue serialized;
CHECK(tsl::SerializeToStringDeterministic(entry, &serialized));
return serialized;
}
std::vector<ConvMapWrapper> ConvMapWrapper::ConvMapToWrappers(
const ConvMapProto& autotune_results) {
std::vector<ConvMapWrapper> wrappers;
wrappers.reserve(autotune_results.kv_pairs_size());
for (const auto& entry : autotune_results.kv_pairs()) {
wrappers.push_back(ConvMapWrapper(entry));
}
return wrappers;
}
absl::StatusOr<ConvMapProto> ConvMapWrapper::ConvMapFromWrappers(
const std::vector<ConvMapWrapper>& wrappers) {
ConvMapProto conv_map_proto;
for (const auto& wrapper : wrappers) {
*conv_map_proto.add_kv_pairs() = wrapper.conv_map_entry_;
}
return conv_map_proto;
}
} | #include "tensorflow/core/util/autotune_maps/conv_map_wrapper.h"
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "xla/test.h"
#include "xla/tsl/protobuf/dnn.pb.h"
#include "tensorflow/core/util/autotune_maps/autotune_map.pb.h"
#include "tensorflow/core/util/autotune_maps/conv_parameters.pb.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
ConvMapProto ThreeConvMapEntries() {
ConvMapProto proto;
auto r1 = proto.add_kv_pairs();
r1->mutable_key()->set_batch(1);
r1->mutable_key()->set_in_depths(2);
r1->mutable_key()->set_out_depths(3);
r1->mutable_value()->mutable_algorithm()->set_algo_id(4);
auto r2 = proto.add_kv_pairs();
r2->mutable_key()->set_batch(5);
r2->mutable_key()->set_in_depths(6);
r2->mutable_key()->set_out_depths(7);
r2->mutable_value()->mutable_algorithm()->set_algo_id(8);
auto r3 = proto.add_kv_pairs();
r3->mutable_key()->set_batch(9);
r3->mutable_key()->set_in_depths(10);
r3->mutable_key()->set_out_depths(11);
r3->mutable_value()->mutable_algorithm()->set_algo_id(12);
return proto;
}
TEST(ConvMapWrapperTest, FullRoundTrip) {
std::vector<ConvMapWrapper> wrappers =
ConvMapWrapper::ConvMapToWrappers(ThreeConvMapEntries());
std::vector<std::pair<ConvMapWrapper::OpaqueKey, ConvMapWrapper::OpaqueValue>>
key_value_pairs;
for (const auto& wrapper : wrappers) {
key_value_pairs.emplace_back(wrapper.Key(), wrapper.Value());
}
std::vector<ConvMapWrapper> new_wrappers;
for (const auto& [key, value] : key_value_pairs) {
TF_ASSERT_OK_AND_ASSIGN(ConvMapWrapper wrapper,
ConvMapWrapper::FromKeyAndValue(key, value));
new_wrappers.push_back(wrapper);
}
TF_ASSERT_OK_AND_ASSIGN(ConvMapProto round_tripped,
ConvMapWrapper::ConvMapFromWrappers(new_wrappers));
EXPECT_EQ(round_tripped.kv_pairs_size(), 3);
EXPECT_EQ(round_tripped.kv_pairs(0).key().batch(), 1);
EXPECT_EQ(round_tripped.kv_pairs(0).key().in_depths(), 2);
EXPECT_EQ(round_tripped.kv_pairs(0).key().out_depths(), 3);
EXPECT_EQ(round_tripped.kv_pairs(0).value().algorithm().algo_id(), 4);
EXPECT_EQ(round_tripped.kv_pairs(1).key().batch(), 5);
EXPECT_EQ(round_tripped.kv_pairs(1).key().in_depths(), 6);
EXPECT_EQ(round_tripped.kv_pairs(1).key().out_depths(), 7);
EXPECT_EQ(round_tripped.kv_pairs(1).value().algorithm().algo_id(), 8);
EXPECT_EQ(round_tripped.kv_pairs(2).key().batch(), 9);
EXPECT_EQ(round_tripped.kv_pairs(2).key().in_depths(), 10);
EXPECT_EQ(round_tripped.kv_pairs(2).key().out_depths(), 11);
EXPECT_EQ(round_tripped.kv_pairs(2).value().algorithm().algo_id(), 12);
}
TEST(ConvMapWrapperTest, DeterministicSerialization) {
std::vector<ConvMapWrapper> wrappers =
ConvMapWrapper::ConvMapToWrappers(ThreeConvMapEntries());
std::vector<ConvMapWrapper::OpaqueKey> keys;
std::vector<ConvMapWrapper::OpaqueValue> values;
for (const auto& wrapper : wrappers) {
keys.push_back(wrapper.Key());
values.push_back(wrapper.Value());
}
const int kNumIterations = 100;
for (int i = 0; i < kNumIterations; ++i) {
std::vector<ConvMapWrapper> test_wrappers =
ConvMapWrapper::ConvMapToWrappers(ThreeConvMapEntries());
std::vector<ConvMapWrapper::OpaqueKey> test_keys;
std::vector<ConvMapWrapper::OpaqueValue> test_values;
for (const auto& test_wrapper : test_wrappers) {
test_keys.push_back(test_wrapper.Key());
test_values.push_back(test_wrapper.Value());
}
EXPECT_EQ(keys, test_keys);
EXPECT_EQ(values, test_values);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/autotune_maps/conv_map_wrapper.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/util/autotune_maps/conv_map_wrapper_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
68355e70-e453-4069-8d76-79f9dacc44d2 | cpp | google/tensorstore | nditerable_data_type_conversion | tensorstore/internal/nditerable_data_type_conversion.cc | tensorstore/internal/nditerable_data_type_conversion_test.cc | #include "tensorstore/internal/nditerable_data_type_conversion.h"
#include <cassert>
#include <memory>
#include <utility>
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/elementwise_function.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_elementwise_input_transform.h"
#include "tensorstore/internal/nditerable_elementwise_output_transform.h"
#include "tensorstore/internal/nditerable_util.h"
#include "tensorstore/internal/unique_with_intrusive_allocator.h"
namespace tensorstore {
namespace internal {
namespace {
template <typename Derived, typename BasePointer = NDIterable::Ptr>
class NDIterableAdapter : public NDIterable::Base<Derived> {
public:
NDIterableAdapter(BasePointer base) : base_(std::move(base)) {}
const BasePointer& base() const { return base_; }
BasePointer& base() { return base_; }
int GetDimensionOrder(DimensionIndex dim_i,
DimensionIndex dim_j) const override {
return base_->GetDimensionOrder(dim_i, dim_j);
}
void UpdateDirectionPrefs(NDIterable::DirectionPref* prefs) const override {
base_->UpdateDirectionPrefs(prefs);
}
bool CanCombineDimensions(DimensionIndex dim_i, int dir_i,
DimensionIndex dim_j, int dir_j,
Index size_j) const override {
return base_->CanCombineDimensions(dim_i, dir_i, dim_j, dir_j, size_j);
}
NDIterable::IterationBufferConstraint GetIterationBufferConstraint(
NDIterable::IterationLayoutView layout) const override {
return base_->GetIterationBufferConstraint(layout);
}
std::ptrdiff_t GetWorkingMemoryBytesPerElement(
NDIterable::IterationLayoutView layout,
IterationBufferKind buffer_kind) const override {
return base_->GetWorkingMemoryBytesPerElement(layout, buffer_kind);
}
DataType dtype() const override { return base_->dtype(); }
ArenaAllocator<> get_allocator() const override {
return base_->get_allocator();
}
NDIterator::Ptr GetIterator(
NDIterable::IterationBufferKindLayoutView layout) const override {
return base_->GetIterator(layout);
}
private:
BasePointer base_;
};
class ReinterpretCastNDIterable
: public NDIterableAdapter<ReinterpretCastNDIterable> {
public:
ReinterpretCastNDIterable(NDIterable::Ptr base, DataType new_dtype,
ArenaAllocator<> allocator)
: NDIterableAdapter<ReinterpretCastNDIterable>(std::move(base)),
dtype_(new_dtype) {}
DataType dtype() const override { return dtype_; }
private:
DataType dtype_;
};
}
NDIterable::Ptr GetConvertedInputNDIterable(
NDIterable::Ptr iterable, DataType target_type,
const DataTypeConversionLookupResult& conversion) {
assert(DataTypeConversionFlags::kSupported ==
(conversion.flags & DataTypeConversionFlags::kSupported));
if (DataTypeConversionFlags::kIdentity ==
(conversion.flags & DataTypeConversionFlags::kIdentity)) {
return iterable;
}
auto allocator = iterable->get_allocator();
if (DataTypeConversionFlags::kCanReinterpretCast ==
(conversion.flags & DataTypeConversionFlags::kCanReinterpretCast)) {
return MakeUniqueWithVirtualIntrusiveAllocator<ReinterpretCastNDIterable>(
allocator, std::move(iterable), target_type);
}
return GetElementwiseInputTransformNDIterable({{std::move(iterable)}},
target_type, conversion.closure,
allocator.arena());
}
NDIterable::Ptr GetConvertedOutputNDIterable(
NDIterable::Ptr iterable, DataType source_type,
const DataTypeConversionLookupResult& conversion) {
assert(!!(conversion.flags & DataTypeConversionFlags::kSupported));
if (!!(conversion.flags & DataTypeConversionFlags::kIdentity)) {
return iterable;
}
auto allocator = iterable->get_allocator();
if (!!(conversion.flags & DataTypeConversionFlags::kCanReinterpretCast)) {
return MakeUniqueWithVirtualIntrusiveAllocator<ReinterpretCastNDIterable>(
allocator, std::move(iterable), source_type);
}
return GetElementwiseOutputTransformNDIterable(
std::move(iterable), source_type, conversion.closure, allocator.arena());
}
}
} | #include "tensorstore/internal/nditerable_data_type_conversion.h"
#include <stdint.h>
#include <memory>
#include <new>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/array.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/data_type_conversion.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/transformed_array.h"
#include "tensorstore/internal/arena.h"
#include "tensorstore/internal/memory.h"
#include "tensorstore/internal/nditerable.h"
#include "tensorstore/internal/nditerable_array.h"
#include "tensorstore/internal/nditerable_copy.h"
#include "tensorstore/internal/nditerable_transformed_array.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::DataType;
using ::tensorstore::dtype_v;
using ::tensorstore::MakeArray;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Shared;
using ::tensorstore::SharedArray;
using ::tensorstore::TransformedArray;
using ::tensorstore::internal::GetDataTypeConverter;
using ::testing::Pair;
using ::tensorstore::dtypes::json_t;
using ::tensorstore::dtypes::string_t;
}
class NDIterableDataTypeConversionTest : public ::testing::TestWithParam<bool> {
protected:
tensorstore::internal::Arena arena;
std::pair<absl::Status, SharedArray<const void>> Convert(
TransformedArray<Shared<const void>> source, DataType target_dtype) {
tensorstore::internal::Arena arena;
auto target =
tensorstore::AllocateArray(source.shape(), tensorstore::c_order,
tensorstore::value_init, target_dtype);
auto source_iterable =
tensorstore::internal::GetTransformedArrayNDIterable(source, &arena)
.value();
auto target_iterable =
tensorstore::internal::GetArrayNDIterable(target, &arena);
if (GetParam()) {
source_iterable = GetConvertedInputNDIterable(
std::move(source_iterable), target_dtype,
GetDataTypeConverter(source.dtype(), target_dtype));
} else {
target_iterable = GetConvertedOutputNDIterable(
std::move(target_iterable), source.dtype(),
GetDataTypeConverter(source.dtype(), target_dtype));
}
tensorstore::internal::NDIterableCopier copier(
*source_iterable, *target_iterable, target.shape(),
tensorstore::c_order, &arena);
absl::Status status = copier.Copy();
return std::make_pair(status, target);
}
};
INSTANTIATE_TEST_SUITE_P(GetConvertedInputNDIterable,
NDIterableDataTypeConversionTest,
::testing::Values(true));
INSTANTIATE_TEST_SUITE_P(GetConvertedOutputNDIterable,
NDIterableDataTypeConversionTest,
::testing::Values(false));
TEST_P(NDIterableDataTypeConversionTest, Int32ToInt32) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<int32_t>),
Pair(absl::OkStatus(), MakeArray<int32_t>({1, 2, 3})));
}
TEST_P(NDIterableDataTypeConversionTest, Int32ToUint32) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<uint32_t>),
Pair(absl::OkStatus(), MakeArray<uint32_t>({1, 2, 3})));
}
TEST_P(NDIterableDataTypeConversionTest, Int32ToString) {
EXPECT_THAT(Convert(MakeArray<int32_t>({1, 2, 3}), dtype_v<string_t>),
Pair(absl::OkStatus(), MakeArray<string_t>({"1", "2", "3"})));
}
TEST_P(NDIterableDataTypeConversionTest, JsonToString) {
EXPECT_THAT(
Convert(MakeArray<json_t>({"hello", "world", 3}), dtype_v<string_t>),
Pair(MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected string, but received: 3"),
MakeArray<string_t>({"hello", "world", ""})));
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_data_type_conversion.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/nditerable_data_type_conversion_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
d5247d11-dec1-4e23-8082-84562476de33 | cpp | google/quiche | metadata_decoder | quiche/quic/core/http/metadata_decoder.cc | quiche/quic/core/http/metadata_decoder_test.cc | #include "quiche/quic/core/http/metadata_decoder.h"
#include <cstddef>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/http/quic_header_list.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
namespace quic {
MetadataDecoder::MetadataDecoder(QuicStreamId id, size_t max_header_list_size,
size_t frame_header_len, size_t payload_length)
: qpack_decoder_(0,
0, &delegate_),
accumulator_(id, &qpack_decoder_, &decoder_, max_header_list_size),
frame_len_(frame_header_len + payload_length),
bytes_remaining_(payload_length) {}
bool MetadataDecoder::Decode(absl::string_view payload) {
accumulator_.Decode(payload);
bytes_remaining_ -= payload.length();
return decoder_.error_code() == QUIC_NO_ERROR;
}
bool MetadataDecoder::EndHeaderBlock() {
QUIC_BUG_IF(METADATA bytes remaining, bytes_remaining_ != 0)
<< "More metadata remaining: " << bytes_remaining_;
accumulator_.EndHeaderBlock();
return !decoder_.header_list_size_limit_exceeded();
}
void MetadataDecoder::MetadataHeadersDecoder::OnHeadersDecoded(
QuicHeaderList headers, bool header_list_size_limit_exceeded) {
header_list_size_limit_exceeded_ = header_list_size_limit_exceeded;
headers_ = std::move(headers);
}
void MetadataDecoder::MetadataHeadersDecoder::OnHeaderDecodingError(
QuicErrorCode error_code, absl::string_view error_message) {
error_code_ = error_code;
error_message_ = absl::StrCat("Error decoding metadata: ", error_message);
}
} | #include "quiche/quic/core/http/metadata_decoder.h"
#include <string>
#include "absl/strings/escaping.h"
#include "quiche/quic/core/qpack/qpack_encoder.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
namespace {
class MetadataDecoderTest : public QuicTest {
protected:
std::string EncodeHeaders(quiche::HttpHeaderBlock& headers) {
quic::NoopDecoderStreamErrorDelegate delegate;
quic::QpackEncoder encoder(&delegate, quic::HuffmanEncoding::kDisabled,
quic::CookieCrumbling::kDisabled);
return encoder.EncodeHeaderList(id_, headers,
nullptr);
}
size_t max_header_list_size = 1 << 20;
const QuicStreamId id_ = 1;
};
TEST_F(MetadataDecoderTest, Initialize) {
const size_t frame_header_len = 4;
const size_t payload_len = 123;
MetadataDecoder decoder(id_, max_header_list_size, frame_header_len,
payload_len);
EXPECT_EQ(frame_header_len + payload_len, decoder.frame_len());
EXPECT_EQ("", decoder.error_message());
EXPECT_TRUE(decoder.headers().empty());
}
TEST_F(MetadataDecoderTest, Decode) {
quiche::HttpHeaderBlock headers;
headers["key1"] = "val1";
headers["key2"] = "val2";
headers["key3"] = "val3";
std::string data = EncodeHeaders(headers);
const size_t frame_header_len = 4;
MetadataDecoder decoder(id_, max_header_list_size, frame_header_len,
data.length());
EXPECT_TRUE(decoder.Decode(data));
EXPECT_TRUE(decoder.EndHeaderBlock());
EXPECT_EQ(quic::test::AsHeaderList(headers), decoder.headers());
}
TEST_F(MetadataDecoderTest, DecodeInvalidHeaders) {
std::string data = "aaaaaaaaaa";
const size_t frame_header_len = 4;
MetadataDecoder decoder(id_, max_header_list_size, frame_header_len,
data.length());
EXPECT_FALSE(decoder.Decode(data));
EXPECT_EQ("Error decoding metadata: Error decoding Required Insert Count.",
decoder.error_message());
}
TEST_F(MetadataDecoderTest, TooLarge) {
quiche::HttpHeaderBlock headers;
for (int i = 0; i < 1024; ++i) {
headers.AppendValueOrAddHeader(absl::StrCat(i), std::string(1024, 'a'));
}
std::string data = EncodeHeaders(headers);
EXPECT_GT(data.length(), 1 << 20);
const size_t frame_header_len = 4;
MetadataDecoder decoder(id_, max_header_list_size, frame_header_len,
data.length());
EXPECT_TRUE(decoder.Decode(data));
EXPECT_FALSE(decoder.EndHeaderBlock());
EXPECT_TRUE(decoder.error_message().empty());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/metadata_decoder.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/http/metadata_decoder_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
7c94decb-6d82-4473-ad0d-6c905e6bb8ac | cpp | tensorflow/tensorflow | lookup_ops | tensorflow/core/ops/lookup_ops.cc | tensorflow/core/ops/lookup_ops_test.cc | #include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/dataset_stateful_op_allowlist.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def_builder.h"
#include "tensorflow/core/framework/shape_inference.h"
namespace tensorflow {
using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeAndType;
using shape_inference::ShapeHandle;
namespace {
Status TwoElementVectorInputsAndScalarOutputs(InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_handle;
for (int i = 0; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
Status ScalarAndTwoElementVectorInputsAndScalarOutputs(InferenceContext* c) {
ShapeHandle handle;
DimensionHandle unused_handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
for (int i = 1; i < c->num_inputs(); ++i) {
TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &handle));
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_handle));
}
for (int i = 0; i < c->num_outputs(); ++i) {
c->set_output(i, c->Scalar());
}
return absl::OkStatus();
}
Status TwoElementOutput(InferenceContext* c) {
c->set_output(0, c->Vector(2));
return absl::OkStatus();
}
Status ScalarOutput(InferenceContext* c) {
c->set_output(0, c->Scalar());
return absl::OkStatus();
}
}
REGISTER_OP("LookupTableFind")
.Input("table_handle: Ref(string)")
.Input("keys: Tin")
.Input("default_value: Tout")
.Output("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle unused;
TF_RETURN_IF_ERROR(c->WithRankAtMost(c->input(2), 1, &unused));
c->set_output(0, c->UnknownShape());
return absl::OkStatus();
});
Status ValidateTableType(InferenceContext* c,
const ShapeAndType& key_shape_and_type,
const string& key_dtype_attr,
const ShapeAndType& value_shape_and_type,
const string& value_dtype_attr) {
DataType key_dtype;
TF_RETURN_IF_ERROR(c->GetAttr(key_dtype_attr, &key_dtype));
if (key_shape_and_type.dtype != key_dtype) {
return errors::InvalidArgument(
"Trying to read value with wrong dtype. "
"Expected ",
DataTypeString(key_shape_and_type.dtype), " got ",
DataTypeString(key_dtype));
}
DataType value_dtype;
TF_RETURN_IF_ERROR(c->GetAttr(value_dtype_attr, &value_dtype));
if (value_shape_and_type.dtype != value_dtype) {
return errors::InvalidArgument(
"Trying to read value with wrong dtype. "
"Expected ",
DataTypeString(value_shape_and_type.dtype), " got ",
DataTypeString(value_dtype));
}
return absl::OkStatus();
}
Status ValidateTableResourceHandle(InferenceContext* c, ShapeHandle keys,
const string& key_dtype_attr,
const string& value_dtype_attr,
ShapeAndType* output_shape_and_type) {
auto* handle_data = c->input_handle_shapes_and_types(0);
if (handle_data == nullptr || handle_data->size() != 2) {
output_shape_and_type->shape = c->UnknownShape();
output_shape_and_type->dtype = DT_INVALID;
} else {
const ShapeAndType& key_shape_and_type = (*handle_data)[0];
const ShapeAndType& value_shape_and_type = (*handle_data)[1];
TF_RETURN_IF_ERROR(ValidateTableType(c, key_shape_and_type, key_dtype_attr,
value_shape_and_type,
value_dtype_attr));
output_shape_and_type->dtype = value_shape_and_type.dtype;
if (c->RankKnown(key_shape_and_type.shape) && c->RankKnown(keys)) {
int keys_rank = c->Rank(keys);
int key_suffix_rank = c->Rank(key_shape_and_type.shape);
if (keys_rank < key_suffix_rank) {
return errors::InvalidArgument(
"Expected keys to have suffix ",
c->DebugString(key_shape_and_type.shape),
" but saw shape: ", c->DebugString(keys));
}
for (int d = 0; d < key_suffix_rank; d++) {
DimensionHandle dim = c->Dim(key_shape_and_type.shape, d);
TF_RETURN_IF_ERROR(
c->ReplaceDim(keys, keys_rank - key_suffix_rank + d, dim, &keys));
}
std::vector<DimensionHandle> keys_prefix_vec;
keys_prefix_vec.reserve(keys_rank - key_suffix_rank);
for (int d = 0; d < keys_rank - key_suffix_rank; ++d) {
keys_prefix_vec.push_back(c->Dim(keys, d));
}
ShapeHandle keys_prefix = c->MakeShape(keys_prefix_vec);
TF_RETURN_IF_ERROR(c->Concatenate(keys_prefix, value_shape_and_type.shape,
&output_shape_and_type->shape));
} else {
output_shape_and_type->shape = c->UnknownShape();
}
}
return absl::OkStatus();
}
REGISTER_OP("LookupTableFindV2")
.Input("table_handle: resource")
.Input("keys: Tin")
.Input("default_value: Tout")
.Output("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
ShapeAndType value_shape_and_type;
TF_RETURN_IF_ERROR(ValidateTableResourceHandle(
c,
c->input(1),
"Tin",
"Tout", &value_shape_and_type));
c->set_output(0, value_shape_and_type.shape);
return absl::OkStatus();
});
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableFindV2");
REGISTER_OP("LookupTableInsert")
.Input("table_handle: Ref(string)")
.Input("keys: Tin")
.Input("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
return absl::OkStatus();
});
REGISTER_OP("LookupTableInsertV2")
.Input("table_handle: resource")
.Input("keys: Tin")
.Input("values: Tout")
.Attr("Tin: type")
.Attr("Tout: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
return absl::OkStatus();
});
REGISTER_OP("LookupTableRemoveV2")
.Input("table_handle: resource")
.Input("keys: Tin")
.Attr("Tin: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(1), 1, &handle));
return absl::OkStatus();
});
REGISTER_OP("LookupTableSize")
.Input("table_handle: Ref(string)")
.Output("size: int64")
.SetShapeFn(TwoElementVectorInputsAndScalarOutputs);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableSize");
REGISTER_OP("LookupTableSizeV2")
.Input("table_handle: resource")
.Output("size: int64")
.SetShapeFn(ScalarAndTwoElementVectorInputsAndScalarOutputs);
ALLOW_STATEFUL_OP_FOR_DATASET_FUNCTIONS("LookupTableSizeV2");
REGISTER_OP("LookupTableExport")
.Input("table_handle: Ref(string)")
.Output("keys: Tkeys")
.Output("values: Tvalues")
.Attr("Tkeys: type")
.Attr("Tvalues: type")
.SetShapeFn([](InferenceContext* c) {
ShapeHandle handle;
TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
DimensionHandle unused_dim;
TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
ShapeHandle values = c->UnknownShape();
TF_RETURN_IF_ERROR(c->WithRankAtLeast(values, 1, &values));
ShapeHandle keys = c->Vector(c->Dim(values, 0));
c->set_output(0, keys);
c->set_output(1, values);
return absl::OkStatus();
});
// Exports all keys and values from a resource-backed table.
REGISTER_OP("LookupTableExportV2")
    .Input("table_handle: resource")
    .Output("keys: Tkeys")
    .Output("values: Tvalues")
    .Attr("Tkeys: type")
    .Attr("Tvalues: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle handle;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
      // If the resource carries (key, value) shape/dtype metadata, verify the
      // requested dtypes match what the table actually stores.
      auto* handle_data = c->input_handle_shapes_and_types(0);
      if (handle_data != nullptr && handle_data->size() == 2) {
        const ShapeAndType& key_shape_and_type = (*handle_data)[0];
        const ShapeAndType& value_shape_and_type = (*handle_data)[1];
        TF_RETURN_IF_ERROR(ValidateTableType(c, key_shape_and_type,
                                             "Tkeys",
                                             value_shape_and_type,
                                             "Tvalues"));
      }
      // Output shapes depend on runtime table size, so they stay unknown.
      c->set_output(0, c->UnknownShape());
      c->set_output(1, c->UnknownShape());
      return absl::OkStatus();
    });
// Replaces the contents of a legacy reference-typed table with keys/values.
REGISTER_OP("LookupTableImport")
    .Input("table_handle: Ref(string)")
    .Input("keys: Tin")
    .Input("values: Tout")
    .Attr("Tin: type")
    .Attr("Tout: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle handle;
      // Legacy handles are a 2-element string vector (container, name).
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
      DimensionHandle unused_dim;
      TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
      return absl::OkStatus();
    });

// Replaces the contents of a resource-backed table with keys/values.
REGISTER_OP("LookupTableImportV2")
    .Input("table_handle: resource")
    .Input("keys: Tin")
    .Input("values: Tout")
    .Attr("Tin: type")
    .Attr("Tout: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle handle;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
      ShapeHandle keys;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys));
      ShapeHandle values;
      TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(2), 1, &values));
      // Keys and values must agree on the leading (batch) dimension.
      DimensionHandle unused;
      TF_RETURN_IF_ERROR(c->Merge(c->Dim(keys, 0), c->Dim(values, 0), &unused));
      return absl::OkStatus();
    });
// Shared shape function for mutable hash-table constructor ops.
// The table handle output is a scalar resource, and the handle is annotated
// with (key shape/dtype, value shape/dtype) metadata so that downstream
// lookup ops can infer their output shapes.
Status MutableHashTableShape(InferenceContext* c, const ShapeHandle& key,
                             const ShapeHandle& value) {
  c->set_output(0, c->Scalar());
  ShapeHandle key_shape;
  TF_RETURN_IF_ERROR(c->WithRankAtMost(key, 1, &key_shape));
  DataType key_dtype;
  TF_RETURN_IF_ERROR(c->GetAttr("key_dtype", &key_dtype));
  DataType value_dtype;
  TF_RETURN_IF_ERROR(c->GetAttr("value_dtype", &value_dtype));
  std::vector<ShapeAndType> handle_data;
  handle_data.reserve(2);
  handle_data.push_back({key_shape, key_dtype});
  handle_data.push_back({value, value_dtype});
  c->set_output_handle_shapes_and_types(0, handle_data);
  return absl::OkStatus();
}
// Shape function for tables with scalar keys and scalar values.
Status MutableHashTableShapeFn(InferenceContext* c) {
  const ShapeHandle scalar = c->Scalar();
  return MutableHashTableShape(c, scalar, scalar);
}
// Shape function for tables mapping scalar keys to tensor values; the value
// shape comes from the "value_shape" attribute.
Status MutableHashTableOfTensorsShapeFn(InferenceContext* c) {
  PartialTensorShape value_shape_attr;
  TF_RETURN_IF_ERROR(c->GetAttr("value_shape", &value_shape_attr));
  ShapeHandle value_shape;
  TF_RETURN_IF_ERROR(
      c->MakeShapeFromPartialTensorShape(value_shape_attr, &value_shape));
  return MutableHashTableShape(c, c->Scalar(), value_shape);
}
// Shape function for dense hash tables; the key shape is taken from the
// empty_key input (input 0), and the value shape from the "value_shape" attr.
Status MutableDenseHashTableShapeFn(InferenceContext* c) {
  PartialTensorShape value_shape_attr;
  TF_RETURN_IF_ERROR(c->GetAttr("value_shape", &value_shape_attr));
  ShapeHandle value_shape;
  TF_RETURN_IF_ERROR(
      c->MakeShapeFromPartialTensorShape(value_shape_attr, &value_shape));
  return MutableHashTableShape(c, c->input(0), value_shape);
}
// Immutable hash table, legacy reference-typed handle.
REGISTER_OP("HashTable")
    .Output("table_handle: Ref(string)")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Attr("use_node_name_sharing: bool = false")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .SetIsStateful()
    .SetShapeFn(TwoElementOutput);

// Immutable hash table, resource handle.
REGISTER_OP("HashTableV2")
    .Output("table_handle: resource")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Attr("use_node_name_sharing: bool = false")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .SetIsStateful()
    .SetShapeFn(ScalarOutput);

// Anonymous (unnamed, not shareable via container/shared_name) hash table.
REGISTER_OP("AnonymousHashTable")
    .Output("table_handle: resource")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .SetIsStateful()
    .SetShapeFn(ScalarOutput);
// Mutable hash table (scalar values), legacy reference-typed handle.
REGISTER_OP("MutableHashTable")
    .Output("table_handle: Ref(string)")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Attr("use_node_name_sharing: bool = false")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .SetIsStateful()
    .SetShapeFn(TwoElementOutput);

// Mutable hash table (scalar values), resource handle.
REGISTER_OP("MutableHashTableV2")
    .Output("table_handle: resource")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Attr("use_node_name_sharing: bool = false")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .SetIsStateful()
    .SetShapeFn(MutableHashTableShapeFn);

// Anonymous mutable hash table (scalar values).
REGISTER_OP("AnonymousMutableHashTable")
    .Output("table_handle: resource")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .SetIsStateful()
    .SetShapeFn(MutableHashTableShapeFn);

// Mutable hash table with tensor-valued entries, legacy handle.
REGISTER_OP("MutableHashTableOfTensors")
    .Output("table_handle: Ref(string)")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Attr("use_node_name_sharing: bool = false")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .Attr("value_shape: shape = {}")
    .SetIsStateful()
    .SetShapeFn(TwoElementOutput);

// Mutable hash table with tensor-valued entries, resource handle.
REGISTER_OP("MutableHashTableOfTensorsV2")
    .Output("table_handle: resource")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Attr("use_node_name_sharing: bool = false")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .Attr("value_shape: shape = {}")
    .SetIsStateful()
    .SetShapeFn(MutableHashTableOfTensorsShapeFn);

// Anonymous mutable hash table with tensor-valued entries.
REGISTER_OP("AnonymousMutableHashTableOfTensors")
    .Output("table_handle: resource")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .Attr("value_shape: shape = {}")
    .SetIsStateful()
    .SetShapeFn(MutableHashTableOfTensorsShapeFn);
// Open-addressing dense hash table, legacy handle. `empty_key` marks unused
// buckets. Note: the V1 op has no deleted_key input.
REGISTER_OP("MutableDenseHashTable")
    .Input("empty_key: key_dtype")
    .Output("table_handle: Ref(string)")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Attr("use_node_name_sharing: bool = false")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .Attr("value_shape: shape = {}")
    .Attr("initial_num_buckets: int = 131072")
    .Attr("max_load_factor: float = 0.8")
    .SetIsStateful()
    .SetShapeFn(TwoElementOutput);

// Dense hash table, resource handle; adds `deleted_key` for tombstones.
REGISTER_OP("MutableDenseHashTableV2")
    .Input("empty_key: key_dtype")
    .Input("deleted_key: key_dtype")
    .Output("table_handle: resource")
    .Attr("container: string = ''")
    .Attr("shared_name: string = ''")
    .Attr("use_node_name_sharing: bool = false")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .Attr("value_shape: shape = {}")
    .Attr("initial_num_buckets: int = 131072")
    .Attr("max_load_factor: float = 0.8")
    .SetIsStateful()
    .SetShapeFn(MutableDenseHashTableShapeFn);

// Anonymous dense hash table (no container/shared_name attrs).
REGISTER_OP("AnonymousMutableDenseHashTable")
    .Input("empty_key: key_dtype")
    .Input("deleted_key: key_dtype")
    .Output("table_handle: resource")
    .Attr("key_dtype: type")
    .Attr("value_dtype: type")
    .Attr("value_shape: shape = {}")
    .Attr("initial_num_buckets: int = 131072")
    .Attr("max_load_factor: float = 0.8")
    .SetIsStateful()
    .SetShapeFn(MutableDenseHashTableShapeFn);
// Initializes a legacy reference-typed table from key/value tensors.
REGISTER_OP("InitializeTable")
    .Input("table_handle: Ref(string)")
    .Input("keys: Tkey")
    .Input("values: Tval")
    .Attr("Tkey: type")
    .Attr("Tval: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle handle;
      // Legacy handles are a 2-element string vector (container, name).
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
      DimensionHandle unused_dim;
      TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
      // Keys are a vector whose shape must match the values.
      ShapeHandle keys;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys));
      TF_RETURN_IF_ERROR(c->Merge(keys, c->input(2), &keys));
      return absl::OkStatus();
    });

// Initializes a resource-backed table from key/value tensors.
REGISTER_OP("InitializeTableV2")
    .Input("table_handle: resource")
    .Input("keys: Tkey")
    .Input("values: Tval")
    .Attr("Tkey: type")
    .Attr("Tval: type")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle handle;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
      ShapeHandle keys;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &keys));
      TF_RETURN_IF_ERROR(c->Merge(keys, c->input(2), &keys));
      return absl::OkStatus();
    });
// Initializes a legacy table from a text vocabulary file. key_index and
// value_index select the column (special values -2/-1 mean whole-line/
// line-number per the kernel's convention — verify against the kernel).
REGISTER_OP("InitializeTableFromTextFile")
    .Input("table_handle: Ref(string)")
    .Input("filename: string")
    .Attr("key_index: int >= -2")
    .Attr("value_index: int >= -2")
    .Attr("vocab_size: int >= -1 = -1")
    .Attr("delimiter: string = '\t'")
    .Attr("offset: int = 0")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle handle;
      // Legacy handle: 2-element string vector; filename: scalar.
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &handle));
      DimensionHandle unused_dim;
      TF_RETURN_IF_ERROR(c->WithValue(c->Dim(handle, 0), 2, &unused_dim));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &handle));
      return absl::OkStatus();
    });

// Resource-handle variant of InitializeTableFromTextFile.
REGISTER_OP("InitializeTableFromTextFileV2")
    .Input("table_handle: resource")
    .Input("filename: string")
    .Attr("key_index: int >= -2")
    .Attr("value_index: int >= -2")
    .Attr("vocab_size: int >= -1 = -1")
    .Attr("delimiter: string = '\t'")
    .Attr("offset: int = 0")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle handle;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 0, &handle));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &handle));
      return absl::OkStatus();
    });
} | #include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Verifies shape inference for LookupTableFindV2, both with and without
// (key, value) shape/dtype metadata attached to the resource handle.
TEST(LookupOpsTest, LookupTableFindV2_ShapeFn) {
  ShapeInferenceTestOp op("LookupTableFindV2");
  // Table handle must be a scalar resource.
  INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[?];?;?");
  TF_ASSERT_OK(NodeDefBuilder("test", "LookupTableFindV2")
                   .Input({"table_handle", 0, DT_RESOURCE})
                   .Input({"keys", 0, DT_INT64})
                   .Input({"default_value", 0, DT_FLOAT})
                   .Attr("Tin", DT_INT64)
                   .Attr("Tout", DT_FLOAT)
                   .Finalize(&op.node_def));
  // Helper to attach (key, value) handle metadata of the given dtypes.
  // `types` keeps each table alive for the duration of the test.
  std::vector<std::vector<ShapeInferenceTestOp::ShapeAndType>> types;
  auto set_types = [&op, &types](DataType key_type, DataType value_type) {
    types.emplace_back();
    auto& table = types.back();
    table.emplace_back("[3]", key_type);
    table.emplace_back("[4]", value_type);
    op.input_resource_handle_shapes_and_types = {&table, nullptr, nullptr};
  };
  // Without handle metadata the output shape is unknown.
  INFER_OK(op, "[];[?,3];[4]", "?");
  // Mismatched key or value dtypes must be rejected.
  set_types(DT_INT32, DT_FLOAT);
  INFER_ERROR("read value with wrong dtype", op, "[];[?,3];[4]");
  set_types(DT_INT64, DT_INT64);
  INFER_ERROR("read value with wrong dtype", op, "[];[?,3];[4]");
  // Matching dtypes: output is [keys_batch_dim, value_dim].
  set_types(DT_INT64, DT_FLOAT);
  INFER_OK(op, "[];[?,3];[4]", "[d1_0,4]");
  INFER_OK(op, "[];[1,3];[4]", "[d1_0,4]");
  INFER_OK(op, "[];[1,?];[4]", "[d1_0,4]");
}

// Verifies shape inference for LookupTableExportV2, checking dtype
// validation against handle metadata.
TEST(LookupOpsTest, LookupTableExportV2_ShapeFn) {
  ShapeInferenceTestOp op("LookupTableExportV2");
  TF_ASSERT_OK(NodeDefBuilder("test", "LookupTableExportV2")
                   .Input({"table_handle", 0, DT_RESOURCE})
                   .Attr("Tkeys", DT_INT64)
                   .Attr("Tvalues", DT_FLOAT)
                   .Finalize(&op.node_def));
  std::vector<std::vector<ShapeInferenceTestOp::ShapeAndType>> types;
  auto set_types = [&op, &types](DataType key_type, DataType value_type) {
    types.emplace_back();
    auto& table = types.back();
    table.emplace_back("[3]", key_type);
    table.emplace_back("[4]", value_type);
    op.input_resource_handle_shapes_and_types = {&table};
  };
  // Wrong key or value dtypes in the handle metadata must be rejected.
  set_types(DT_INT32, DT_FLOAT);
  INFER_ERROR("read value with wrong dtype", op, "[]");
  set_types(DT_INT64, DT_INT64);
  INFER_ERROR("read value with wrong dtype", op, "[]");
  // Matching dtypes: both outputs have unknown shape.
  set_types(DT_INT64, DT_FLOAT);
  INFER_OK(op, "[]", "?;?");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/lookup_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ops/lookup_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3f724e4c-5886-46ce-b51a-9bca38c3cd16 | cpp | google/arolla | type_meta_eval_strategies | arolla/expr/operators/type_meta_eval_strategies.cc | arolla/expr/operators/type_meta_eval_strategies_test.cc | #include "arolla/expr/operators/type_meta_eval_strategies.h"
#include <algorithm>
#include <cstddef>
#include <functional>
#include <initializer_list>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/backend_wrapping_operator.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/operators/casting_registry.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/qtype/standard_type_properties/properties.h"
#include "arolla/qtype/weak_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr_operators {
using ::arolla::expr::BackendWrappingOperator;
using ::arolla::expr::ExprNodePtr;
using ::arolla::expr::ExprOperatorPtr;
using ::arolla::expr_operators::CastingRegistry;
// True if the scalar type underlying `qtype` is an integral type.
bool IsIntegral(const QType* qtype) {
  return IsIntegralScalarQType(GetScalarQTypeOrNull(qtype));
}

// True if the scalar type underlying `qtype` is a floating-point type.
bool IsFloatingPoint(QTypePtr qtype) {
  return IsFloatingPointScalarQType(GetScalarQTypeOrNull(qtype));
}

// True if the scalar type underlying `qtype` is numeric (integral or float).
bool IsNumeric(const QType* qtype) {
  return IsNumericScalarQType(GetScalarQTypeOrNull(qtype));
}

// True if the scalar type underlying `qtype` is boolean.
bool IsBoolean(QTypePtr qtype) {
  return GetScalarQTypeOrNull(qtype) == GetQType<bool>();
}

// True if the scalar type underlying `qtype` is Bytes or Text.
// Returns false (via the ASSIGN_OR_RETURN default) if there is no scalar type.
bool IsString(QTypePtr qtype) {
  ASSIGN_OR_RETURN(qtype, GetScalarQType(qtype), false);
  return qtype == GetQType<Bytes>() || qtype == GetQType<Text>();
}

// True if the scalar type underlying `qtype` is Text.
bool IsText(QTypePtr qtype) {
  return GetScalarQTypeOrNull(qtype) == GetQType<Text>();
}
namespace {

// Builds the standard "expected all arguments to <msg>" error for the
// argument at `index`; prints "null" when the qtype pointer is unset.
absl::Status InvalidArgTypeError(absl::Span<const QTypePtr> qtypes, int index,
                                 absl::string_view msg) {
  absl::string_view name =
      qtypes[index] == nullptr ? "null" : qtypes[index]->name();
  return absl::InvalidArgumentError(absl::StrFormat(
      "expected all arguments to %s, but got %s for %i-th argument", msg, name,
      index));
}

}  // namespace
namespace type_meta {
// Returns a strategy that requires exactly `n` arguments and forwards the
// argument types unchanged.
Strategy ArgCount(int n) {
  return [n](absl::Span<const QTypePtr> qtypes) -> absl::StatusOr<QTypes> {
    if (qtypes.size() == n) {
      return QTypes(qtypes.begin(), qtypes.end());
    }
    return absl::InvalidArgumentError(absl::StrFormat(
        "expected to have %d arguments, got %d", n, qtypes.size()));
  };
}
// Applies `strategy` to `qtypes` and requires exactly one resulting qtype.
// Returns nullptr ("type not yet known") if any input qtype is unset, so
// type inference can be retried later.
absl::StatusOr<QTypePtr> ApplyStrategy(const Strategy& strategy,
                                       absl::Span<const QTypePtr> qtypes) {
  const bool has_unknown_type = std::any_of(
      qtypes.begin(), qtypes.end(), [](QTypePtr t) { return t == nullptr; });
  if (has_unknown_type) {
    return nullptr;
  }
  ASSIGN_OR_RETURN(auto result, strategy(qtypes));
  if (result.size() != 1) {
    return absl::FailedPreconditionError(absl::StrFormat(
        "unexpected number of resulting qtypes from MetaEval strategy: "
        "expected 1, got %d; probably the strategy is incorrect",
        result.size()));
  }
  return result[0];
}
// Adapts a type_meta::Strategy to the TypeMetaEvalStrategy callable expected
// by BackendWrappingOperator.
BackendWrappingOperator::TypeMetaEvalStrategy CallableStrategy(
    type_meta::Strategy strategy) {
  return [strategy = std::move(strategy)](absl::Span<const QTypePtr> qtypes) {
    return type_meta::ApplyStrategy(strategy, qtypes);
  };
}
// Composes strategies sequentially: the qtypes produced by one strategy are
// fed as inputs to the next.
template <>
Strategy Chain(absl::Span<const Strategy> strategies) {
  std::vector<Strategy> chain(strategies.begin(), strategies.end());
  return [chain = std::move(chain)](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    QTypes current(types.begin(), types.end());
    for (const auto& strategy : chain) {
      ASSIGN_OR_RETURN(current, strategy(current));
    }
    return current;
  };
}
// Tries each strategy in order and returns the first successful result. If
// none succeed, reports all accumulated error messages.
template <>
Strategy Or(absl::Span<const Strategy> strategies) {
  std::vector<Strategy> options(strategies.begin(), strategies.end());
  return [options = std::move(options)](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    std::vector<std::string> errors;
    errors.reserve(options.size());
    for (const auto& strategy : options) {
      auto result = strategy(types);
      if (result.ok()) {
        return result;
      }
      errors.push_back(result.status().ToString());
    }
    return absl::InvalidArgumentError(
        absl::StrFormat("none of meta eval strategies matches types %s: %s",
                        FormatTypeVector(types), absl::StrJoin(errors, "; ")));
  };
}
namespace {

// Checks that `predicate` holds for every type in `types`; forwards the
// types unchanged on success, otherwise reports the first offending argument
// using `predicate_str` in the error message.
absl::StatusOr<QTypes> AllTypesAre(
    absl::Span<const QTypePtr> types,
    std::function<bool(QTypePtr qtype)> predicate,
    absl::string_view predicate_str) {
  for (size_t i = 0; i < types.size(); ++i) {
    if (!predicate(types[i])) {
      return InvalidArgTypeError(types, i,
                                 absl::StrFormat("be %s", predicate_str));
    }
  }
  return QTypes(types.begin(), types.end());
}

}  // namespace
// Requires that all argument types are identical; forwards them unchanged.
// Accepts an empty argument list.
absl::StatusOr<QTypes> AllSame(absl::Span<const QTypePtr> types) {
  if (types.empty()) return QTypes{};
  for (size_t i = 1; i < types.size(); ++i) {
    if (types[i] != types[0]) {
      // Use the absl error helper for consistency with the rest of the file
      // (identical status code and message to the raw constructor).
      return absl::InvalidArgumentError(
          absl::StrFormat("expected all types to be equal, got %s and %s",
                          types[0]->name(), types[i]->name()));
    }
  }
  return QTypes{types.begin(), types.end()};
}
// Requires that all argument types share the same underlying scalar type
// (e.g. INT32, OPTIONAL_INT32 and ARRAY_INT32 all qualify together);
// forwards the original types unchanged. Accepts an empty argument list.
absl::StatusOr<QTypes> AllSameScalarType(absl::Span<const QTypePtr> types) {
  if (types.empty()) return QTypes{};
  ASSIGN_OR_RETURN(auto qtype_0, GetScalarQType(types[0]));
  for (size_t i = 1; i < types.size(); ++i) {
    ASSIGN_OR_RETURN(auto qtype, GetScalarQType(types[i]));
    if (qtype != qtype_0) {
      // Use the absl error helper for consistency with the rest of the file
      // (identical status code and message to the raw constructor).
      return absl::InvalidArgumentError(
          absl::StrFormat("expected all scalar types to be equal, got %s and %s",
                          types[0]->name(), types[i]->name()));
    }
  }
  return QTypes{types.begin(), types.end()};
}
// Each checker below validates a property of every argument type and
// forwards the types unchanged on success.

// Requires all arguments to be array-like types.
absl::StatusOr<QTypes> Array(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsArrayLikeQType, "array");
}

// Requires all arguments to have numeric scalar types.
absl::StatusOr<QTypes> Numeric(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsNumeric, "numeric");
}

// Requires all arguments to have integral scalar types.
absl::StatusOr<QTypes> Integral(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsIntegral, "integral");
}

// Requires all arguments to have floating-point scalar types.
absl::StatusOr<QTypes> Floating(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsFloatingPoint, "floating point");
}

// Requires all arguments to have boolean scalar types.
absl::StatusOr<QTypes> Boolean(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsBoolean, "boolean");
}

// Requires all arguments to have Text or Bytes scalar types.
absl::StatusOr<QTypes> String(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsString, "Text or Bytes");
}

// Requires all arguments to have Text scalar types.
absl::StatusOr<QTypes> Text(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsText, "Text");
}
// Requires all arguments to be optional scalar types.
absl::StatusOr<QTypes> Optional(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsOptionalQType, "optional");
}

// Requires all arguments to be optional-like (optional scalars or arrays
// with optional semantics).
absl::StatusOr<QTypes> OptionalLike(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsOptionalLikeQType, "optional");
}

// Requires all arguments to be plain (non-optional, non-array) scalars.
absl::StatusOr<QTypes> Scalar(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsScalarQType, "scalar");
}

// Requires all arguments to be scalars or optional scalars (no arrays).
absl::StatusOr<QTypes> ScalarOrOptional(absl::Span<const QTypePtr> types) {
  return AllTypesAre(
      types, [](QTypePtr t) { return IsScalarQType(t) || IsOptionalQType(t); },
      "scalar or optional scalar");
}

// Requires all arguments to be integral plain scalars.
absl::StatusOr<QTypes> IntegralScalar(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsIntegralScalarQType, "integral");
}

// Requires all arguments to be floating-point plain scalars.
absl::StatusOr<QTypes> FloatingScalar(absl::Span<const QTypePtr> types) {
  return AllTypesAre(types, IsFloatingPointScalarQType, "floating point");
}
// Requires exactly one argument; forwards the types unchanged.
absl::StatusOr<QTypes> Unary(absl::Span<const QTypePtr> types) {
  if (types.size() != 1) {
    return absl::InvalidArgumentError(
        absl::StrCat("expected to have one argument, got ", types.size()));
  }
  return QTypes(types.begin(), types.end());
}

// Requires exactly two arguments; forwards the types unchanged.
absl::StatusOr<QTypes> Binary(absl::Span<const QTypePtr> types) {
  if (types.size() != 2) {
    return absl::InvalidArgumentError(
        absl::StrCat("expected to have two arguments, got ", types.size()));
  }
  return QTypes(types.begin(), types.end());
}

// Requires exactly three arguments; forwards the types unchanged.
absl::StatusOr<QTypes> Ternary(absl::Span<const QTypePtr> types) {
  if (types.size() != 3) {
    return absl::InvalidArgumentError(
        absl::StrCat("expected to have three arguments, got ", types.size()));
  }
  return QTypes(types.begin(), types.end());
}
// Reduces all argument types to their single common type via the casting
// registry. The `true` flag presumably enables implicit broadcasting between
// scalar and array types — confirm against CastingRegistry::CommonType.
absl::StatusOr<QTypes> CommonType(absl::Span<const QTypePtr> types) {
  const CastingRegistry* registry = CastingRegistry::GetInstance();
  ASSIGN_OR_RETURN(auto common_type,
                   registry->CommonType(types, true));
  return QTypes{common_type};
}
// Computes the common type of `types` together with the weak float qtype,
// i.e. the result is always at least weak-float.
absl::StatusOr<QTypes> CommonFloatType(absl::Span<const QTypePtr> types) {
  std::vector<QTypePtr> with_weak_float;
  with_weak_float.reserve(types.size() + 1);
  with_weak_float.insert(with_weak_float.end(), types.begin(), types.end());
  with_weak_float.push_back(GetWeakFloatQType());
  return CommonType(with_weak_float);
}
namespace {

// Selects the argument types at the positions given by `index_list`.
// Fails if an index is negative or beyond the number of provided types
// (the error reports the largest requested index).
absl::StatusOr<QTypes> TakeArguments(absl::Span<const int> index_list,
                                     absl::Span<const QTypePtr> types) {
  if (index_list.empty()) {
    return QTypes{};
  }
  QTypes arg_types;
  arg_types.reserve(index_list.size());
  for (int arg : index_list) {
    if (arg < 0) {
      return absl::InvalidArgumentError(
          absl::StrFormat("invalid argument index: %d", arg));
    }
    if (arg >= types.size()) {
      size_t max_i = *std::max_element(index_list.begin(), index_list.end());
      return absl::Status(
          absl::StatusCode::kInvalidArgument,
          absl::StrFormat("expected to have at least %d argument(s), got %d",
                          max_i + 1, types.size()));
    }
    arg_types.push_back(types[arg]);
  }
  return arg_types;
}

}  // namespace
// Returns a strategy that keeps only the argument types at the given
// positions (in the given order).
Strategy Nth(std::initializer_list<int> index_list) {
  absl::InlinedVector<int, 8> selected(index_list);
  return [selected = std::move(selected)](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    return TakeArguments(selected, types);
  };
}
// Returns a strategy that validates the argument types at `index_list`
// against `strategy`, then forwards ALL original types unchanged. Only the
// status of `strategy` is used; its output types are discarded.
Strategy NthMatch(std::initializer_list<int> index_list, Strategy strategy) {
  absl::InlinedVector<int, 8> indexes(index_list);
  return [indexes, strategy](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    ASSIGN_OR_RETURN(auto arg_types, TakeArguments(indexes, types));
    RETURN_IF_ERROR(strategy(arg_types).status())
        << "for arguments (" << absl::StrJoin(indexes, ", ") << ")";
    return QTypes{types.begin(), types.end()};
  };
}

// Single-index convenience overload.
Strategy NthMatch(int n, Strategy strategy) { return NthMatch({n}, strategy); }
// Returns a strategy that applies `strategy` to the argument types at
// `index_list` and substitutes the results back into those positions,
// leaving the remaining types unchanged. Assumes `strategy` returns exactly
// one output per selected input.
Strategy NthApply(std::initializer_list<int> index_list, Strategy strategy) {
  absl::InlinedVector<int, 8> indexes(index_list);
  return [indexes, strategy](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    ASSIGN_OR_RETURN(auto arg_types, TakeArguments(indexes, types));
    ASSIGN_OR_RETURN(
        auto applied_args, strategy(arg_types),
        _ << "for arguments (" << absl::StrJoin(indexes, ", ") << ")");
    QTypes res(types.begin(), types.end());
    for (int i = 0; i < indexes.size(); i++) {
      res[indexes[i]] = applied_args[i];
    }
    return res;
  };
}

// Single-index convenience overload.
Strategy NthApply(int n, Strategy strategy) { return NthApply({n}, strategy); }
// Returns a strategy that yields the first argument type satisfying
// `predicate_fn`, or delegates to `default_fn` when no argument matches.
Strategy FirstMatchingTypeStrategy(std::function<bool(QTypePtr)> predicate_fn,
                                   Strategy default_fn) {
  return [predicate_fn = std::move(predicate_fn),
          default_fn = std::move(default_fn)](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    for (QTypePtr type : types) {
      if (predicate_fn(type)) {
        return QTypes{type};
      }
    }
    return default_fn(types);
  };
}
// Converts every argument type to its optional-like counterpart.
absl::StatusOr<QTypes> ToOptional(absl::Span<const QTypePtr> types) {
  QTypes result(types.size(), nullptr);
  for (size_t i = 0; i < types.size(); ++i) {
    ASSIGN_OR_RETURN(result[i], ToOptionalLikeQType(types[i]),
                     _ << "in argument " << i);
  }
  return result;
}

// Converts every argument type to its presence ("test result") type, i.e.
// the presence qtype of its optional-like counterpart.
absl::StatusOr<QTypes> ToTestResult(absl::Span<const QTypePtr> types) {
  QTypes result(types.size(), nullptr);
  for (size_t i = 0; i < types.size(); ++i) {
    ASSIGN_OR_RETURN(auto opt_type, ToOptionalLikeQType(types[i]),
                     _ << "in argument " << i);
    ASSIGN_OR_RETURN(result[i], GetPresenceQType(opt_type),
                     _ << "in argument " << i);
  }
  return result;
}

// Converts every argument type to its shape qtype.
absl::StatusOr<QTypes> ToShape(absl::Span<const QTypePtr> types) {
  QTypes result(types.size(), nullptr);
  for (size_t i = 0; i < types.size(); ++i) {
    ASSIGN_OR_RETURN(result[i], GetShapeQType(types[i]),
                     _ << "in argument " << i);
  }
  return result;
}
// Requires every argument to be a shape qtype; forwards types unchanged.
absl::StatusOr<QTypes> IsShape(absl::Span<const QTypePtr> qtypes) {
  for (auto qtype : qtypes) {
    if (!IsShapeQType(qtype)) {
      return absl::InvalidArgumentError(absl::StrFormat(
          "expected all arguments to be shapes, got %s", qtype->name()));
    }
  }
  return QTypes{qtypes.begin(), qtypes.end()};
}

// Requires every argument to be an array-like shape qtype.
absl::StatusOr<QTypes> IsArrayShape(absl::Span<const QTypePtr> qtypes) {
  for (auto qtype : qtypes) {
    if (!IsArrayLikeShapeQType(qtype)) {
      return absl::InvalidArgumentError(absl::StrFormat(
          "expected all arguments to be array shapes, got %s", qtype->name()));
    }
  }
  return QTypes{qtypes.begin(), qtypes.end()};
}

// Requires every argument to be an edge qtype (detected via dynamic_cast).
absl::StatusOr<QTypes> IsEdge(absl::Span<const QTypePtr> qtypes) {
  for (auto qtype : qtypes) {
    if (dynamic_cast<const EdgeQType*>(qtype) == nullptr) {
      return absl::InvalidArgumentError(absl::StrFormat(
          "expected all arguments to be edges, got %s", qtype->name()));
    }
  }
  return QTypes{qtypes.begin(), qtypes.end()};
}

// Requires every argument to be an Array qtype (not DenseArray).
absl::StatusOr<QTypes> IsArray(absl::Span<const QTypePtr> qtypes) {
  for (auto qtype : qtypes) {
    if (!IsArrayQType(qtype)) {
      return absl::InvalidArgumentError(absl::StrFormat(
          "expected all arguments to be Arrays, got %s", qtype->name()));
    }
  }
  return QTypes{qtypes.begin(), qtypes.end()};
}

// Requires every argument to be a DenseArray qtype.
absl::StatusOr<QTypes> IsDenseArray(absl::Span<const QTypePtr> qtypes) {
  for (auto qtype : qtypes) {
    if (!IsDenseArrayQType(qtype)) {
      return absl::InvalidArgumentError(absl::StrFormat(
          "expected all arguments to be DenseArrays, got %s", qtype->name()));
    }
  }
  return QTypes{qtypes.begin(), qtypes.end()};
}
// Returns a strategy producing `scalar_type` lifted to the "widest"
// container kind among the arguments: array if any argument is array-like,
// otherwise optional if any is optional-like, otherwise the bare scalar.
Strategy LiftResultType(QTypePtr scalar_type) {
  return [scalar_type](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    // Array-like arguments take precedence over optional ones.
    for (auto type : types) {
      if (IsArrayLikeQType(type)) {
        ASSIGN_OR_RETURN(auto result_type, WithScalarQType(type, scalar_type));
        return QTypes{result_type};
      }
    }
    for (auto type : types) {
      if (IsOptionalLikeQType(type)) {
        ASSIGN_OR_RETURN(auto result_type, WithScalarQType(type, scalar_type));
        return QTypes{result_type};
      }
    }
    return QTypes{scalar_type};
  };
}
// Returns a strategy producing the scalar type of the n-th argument lifted
// to the widest container kind among all arguments (see LiftResultType).
Strategy LiftNthType(int n) {
  return [n](absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    if (n >= types.size()) {
      return absl::Status(
          absl::StatusCode::kInvalidArgument,
          absl::StrFormat("expected at least %d arguments, got %d", n + 1,
                          types.size()));
    }
    ASSIGN_OR_RETURN(auto scalar_type, GetScalarQType(types[n]));
    return LiftResultType(scalar_type)(types);
  };
}
// Broadcasts all argument types to a common shape kind: scalar-like
// arguments are lifted to the (single) array kind present among the
// arguments; two distinct array kinds cannot be combined. If every argument
// is scalar-like, the types are returned unchanged.
absl::StatusOr<QTypes> Broadcast(absl::Span<const QTypePtr> qtypes) {
  // Scalar and optional-scalar shapes broadcast into anything.
  const auto is_scalar_like_shape_qtype = [](const ShapeQType* qtype) {
    return qtype == GetQType<ScalarShape>() ||
           qtype == GetQType<OptionalScalarShape>();
  };
  // Combines two shape kinds; fails when both are (different) array kinds.
  const auto combine_shape_qtypes =
      [&](const ShapeQType* lhs,
          const ShapeQType* rhs) -> absl::StatusOr<const ShapeQType*> {
    if (lhs == rhs) {
      return lhs;
    }
    if (is_scalar_like_shape_qtype(lhs)) {
      return rhs;
    } else if (is_scalar_like_shape_qtype(rhs)) {
      return lhs;
    }
    return absl::InvalidArgumentError("unable to broadcast arguments");
  };
  // Fold the shape kinds of all arguments into one common kind.
  const ShapeQType* common_shape_qtype =
      static_cast<const ShapeQType*>(GetQType<ScalarShape>());
  for (auto qtype : qtypes) {
    ASSIGN_OR_RETURN(const ShapeQType* shape_qtype, GetShapeQType(qtype));
    ASSIGN_OR_RETURN(common_shape_qtype,
                     combine_shape_qtypes(common_shape_qtype, shape_qtype),
                     _ << JoinTypeNames(qtypes));
  }
  // All-scalar case: nothing to broadcast.
  if (is_scalar_like_shape_qtype(common_shape_qtype)) {
    return QTypes{qtypes.begin(), qtypes.end()};
  }
  // Rebuild each type as (common shape kind) x (its own scalar type).
  QTypes result;
  result.reserve(qtypes.size());
  for (QTypePtr qtype : qtypes) {
    ASSIGN_OR_RETURN(qtype, GetScalarQType(qtype));
    ASSIGN_OR_RETURN(qtype, common_shape_qtype->WithValueQType(qtype));
    result.push_back(qtype);
  }
  return result;
}
// Returns a strategy requiring every argument to be exactly `desired_type`;
// forwards the types unchanged. The error omits the argument index when
// there is only a single argument.
Strategy Is(QTypePtr desired_type) {
  return [desired_type](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    for (size_t i = 0; i < types.size(); ++i) {
      if (types[i] != desired_type) {
        std::string arg_msg =
            types.size() == 1 ? "" : absl::StrFormat(" of argument %d", i);
        return absl::Status(
            absl::StatusCode::kInvalidArgument,
            absl::StrFormat("expected type%s to be %s, got %s", arg_msg,
                            desired_type->name(), types[i]->name()));
      }
    }
    return QTypes{types.begin(), types.end()};
  };
}

// Returns a strategy requiring that no argument is `undesired_type`;
// forwards the types unchanged.
Strategy IsNot(QTypePtr undesired_type) {
  return [undesired_type](
             absl::Span<const QTypePtr> types) -> absl::StatusOr<QTypes> {
    for (size_t i = 0; i < types.size(); ++i) {
      if (types[i] == undesired_type) {
        std::string arg_msg =
            types.size() == 1 ? "" : absl::StrFormat(" of argument %d", i);
        return absl::Status(absl::StatusCode::kInvalidArgument,
                            absl::StrFormat("expected type%s to be not %s",
                                            arg_msg, undesired_type->name()));
      }
    }
    return QTypes{types.begin(), types.end()};
  };
}
// Maps every edge argument to the shape qtype of its parent side; fails if
// any argument is not an edge.
absl::StatusOr<QTypes> EdgeParentShapeQType(absl::Span<const QTypePtr> types) {
  QTypes result(types.size(), nullptr);
  for (size_t i = 0; i < types.size(); ++i) {
    if (auto edge_type = dynamic_cast<const EdgeQType*>(types[i]);
        edge_type != nullptr) {
      result[i] = edge_type->parent_shape_qtype();
    } else {
      return absl::InvalidArgumentError(
          absl::StrFormat("invalid argument %d: expected an edge, got %s", i,
                          types[i]->name()));
    }
  }
  return result;
}
// Result type of `a | b` (presence-or): the common scalar type of all
// arguments, carried by the first non-optional-like container kind if one
// exists (the result is guaranteed present in that case), otherwise by the
// first argument's kind.
absl::StatusOr<QTypes> PresenceOrType(absl::Span<const QTypePtr> types) {
  QTypes scalar_types;
  for (const auto& type : types) {
    ASSIGN_OR_RETURN(auto scalar_type, GetScalarQType(type));
    scalar_types.push_back(scalar_type);
  }
  ASSIGN_OR_RETURN(auto common_scalar_type, CommonType(scalar_types));
  // Prefer a non-optional carrier: presence-or with any full argument
  // yields a full result.
  auto* shape_type = &types[0];
  for (size_t i = 1; i < types.size(); ++i) {
    if (!IsOptionalLikeQType(types[i])) {
      shape_type = &types[i];
      break;
    }
  }
  ASSIGN_OR_RETURN(auto result,
                   WithScalarQType(*shape_type, common_scalar_type[0]));
  return QTypes{result};
}
}
// Registers a backend operator whose output type is derived by applying
// `strategy` to the argument qtypes (see ApplyStrategy).
absl::StatusOr<expr::ExprOperatorPtr> RegisterBackendOperator(
    absl::string_view op_name, type_meta::Strategy strategy,
    absl::string_view doc) {
  return expr::RegisterBackendOperator(
      op_name,
      [strategy = std::move(strategy)](absl::Span<const QTypePtr> ts) {
        return type_meta::ApplyStrategy(strategy, ts);
      },
      doc);
}

// Overload that additionally supplies an explicit operator signature.
absl::StatusOr<expr::ExprOperatorPtr> RegisterBackendOperator(
    absl::string_view op_name, const expr::ExprOperatorSignature& signature,
    type_meta::Strategy strategy, absl::string_view doc) {
  return expr::RegisterBackendOperator(
      op_name, signature,
      [strategy = std::move(strategy)](absl::Span<const QTypePtr> ts) {
        return type_meta::ApplyStrategy(strategy, ts);
      },
      doc);
}
} | #include "arolla/expr/operators/type_meta_eval_strategies.h"
#include <cstdint>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/array/array.h"
#include "arolla/array/edge.h"
#include "arolla/array/qtype/types.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/edge.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/optional_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/shape_qtype.h"
#include "arolla/util/bytes.h"
#include "arolla/util/text.h"
namespace arolla::expr_operators {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::HasSubstr;
using ::arolla::expr_operators::type_meta::AllSame;
using ::arolla::expr_operators::type_meta::AllSameScalarType;
using ::arolla::expr_operators::type_meta::ArgCount;
using ::arolla::expr_operators::type_meta::Broadcast;
using ::arolla::expr_operators::type_meta::CallableStrategy;
using ::arolla::expr_operators::type_meta::FirstMatchingTypeStrategy;
using ::arolla::expr_operators::type_meta::Is;
using ::arolla::expr_operators::type_meta::IsArrayShape;
using ::arolla::expr_operators::type_meta::IsDenseArray;
using ::arolla::expr_operators::type_meta::IsEdge;
using ::arolla::expr_operators::type_meta::IsNot;
using ::arolla::expr_operators::type_meta::IsShape;
using ::arolla::expr_operators::type_meta::LiftResultType;
using ::arolla::expr_operators::type_meta::Nth;
using ::arolla::expr_operators::type_meta::NthApply;
using ::arolla::expr_operators::type_meta::NthMatch;
using ::arolla::expr_operators::type_meta::Optional;
using ::arolla::expr_operators::type_meta::OptionalLike;
using ::arolla::expr_operators::type_meta::Scalar;
using ::arolla::expr_operators::type_meta::ScalarOrOptional;
using ::arolla::expr_operators::type_meta::ScalarTypeIs;
using ::arolla::expr_operators::type_meta::ToOptional;
using ::arolla::expr_operators::type_meta::ToShape;
using ::arolla::expr_operators::type_meta::Unary;
// ArgCount: passes through on exact arity, errors otherwise.
TEST(TypeMetaEvalStrategiesTest, ArgCount) {
  std::vector<QTypePtr> i32_types = {GetQType<int32_t>(), GetQType<int32_t>(),
                                     GetQType<int32_t>()};
  std::vector<QTypePtr> empty = {};
  EXPECT_THAT(ArgCount(3)(i32_types),
              IsOkAndHolds(ElementsAreArray(i32_types)));
  EXPECT_THAT(ArgCount(1)(empty),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("expected to have 1 arguments, got 0")));
  EXPECT_THAT(ArgCount(0)(i32_types),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("expected to have 0 arguments, got 3")));
}

// Nth with a single index: selects that argument, errors when out of range.
TEST(TypeMetaEvalStrategiesTest, NthSingleArg) {
  auto second_type = CallableStrategy(Nth(1));
  EXPECT_THAT(second_type({GetQType<int32_t>(), GetQType<int64_t>()}),
              IsOkAndHolds(GetQType<int64_t>()));
  EXPECT_THAT(
      second_type({}),
      StatusIs(absl::StatusCode::kInvalidArgument,
               HasSubstr("expected to have at least 2 argument(s), got 0")));
}

// Nth with multiple indices: selects the requested subset in order.
TEST(TypeMetaEvalStrategiesTest, NthMultipleArgs) {
  auto i32 = GetQType<int32_t>();
  auto oi32 = GetQType<OptionalValue<int32_t>>();
  auto f32 = GetQType<float>();
  auto of32 = GetQType<OptionalValue<float>>();
  std::vector<QTypePtr> types = {i32, oi32, f32, of32};
  EXPECT_THAT((Nth({0, 2})(types)), IsOkAndHolds(ElementsAre(i32, f32)));
  EXPECT_THAT((Nth({1, 3})(types)), IsOkAndHolds(ElementsAre(oi32, of32)));
  EXPECT_THAT((Nth({0, 2, 4})(types)),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       "expected to have at least 5 argument(s), got 4"));
}

// Scalar: accepts plain scalars, rejects optionals and arrays.
TEST(TypeMetaEvalStrategiesTest, Scalar) {
  EXPECT_THAT(
      Scalar({GetQType<int32_t>(), GetQType<float>()}),
      IsOkAndHolds(ElementsAre(GetQType<int32_t>(), GetQType<float>())));
  EXPECT_THAT(
      Scalar({GetQType<int32_t>(), GetOptionalQType<int32_t>()}),
      StatusIs(
          absl::StatusCode::kInvalidArgument,
          HasSubstr(
              "expected all arguments to be scalar, but got OPTIONAL_INT32")));
  EXPECT_THAT(Scalar({GetQType<int32_t>(), GetDenseArrayQType<int32_t>()}),
              StatusIs(absl::StatusCode::kInvalidArgument,
                       HasSubstr("expected all arguments to be scalar, but got "
                                 "DENSE_ARRAY_INT32")));
}

// Optional: accepts optional scalars, rejects plain scalars and arrays.
TEST(TypeMetaEvalStrategiesTest, Optional) {
  EXPECT_THAT(
      Optional({GetOptionalQType<int32_t>(), GetOptionalQType<float>()}),
      IsOkAndHolds(
          ElementsAre(GetOptionalQType<int32_t>(), GetOptionalQType<float>())));
  EXPECT_THAT(
      Optional({GetOptionalQType<int32_t>(), GetQType<int32_t>()}),
      StatusIs(
          absl::StatusCode::kInvalidArgument,
          HasSubstr("expected all arguments to be optional, but got INT32")));
  EXPECT_THAT(
      Optional({GetOptionalQType<int32_t>(), GetDenseArrayQType<int32_t>()}),
      StatusIs(absl::StatusCode::kInvalidArgument,
               HasSubstr("expected all arguments to be optional, but got "
                         "DENSE_ARRAY_INT32")));
}
TEST(TypeMetaEvalStrategiesTest, ScalarOrOptional) {
EXPECT_THAT(
ScalarOrOptional({GetOptionalQType<int32_t>(), GetQType<float>()}),
IsOkAndHolds(
ElementsAre(GetOptionalQType<int32_t>(), GetQType<float>())));
EXPECT_THAT(
ScalarOrOptional(
{GetOptionalQType<int32_t>(), GetDenseArrayQType<int32_t>()}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"expected all arguments to be scalar or optional scalar, but got "
"DENSE_ARRAY_INT32")));
}
TEST(TypeMetaEvalStrategiesTest, OptionalLike) {
EXPECT_THAT(OptionalLike(
{GetOptionalQType<int32_t>(), GetDenseArrayQType<int32_t>()}),
IsOkAndHolds(ElementsAre(GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>())));
EXPECT_THAT(
OptionalLike({GetOptionalQType<int32_t>(), GetQType<int32_t>()}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("expected all arguments to be optional, but got INT32")));
EXPECT_THAT(
OptionalLike({GetOptionalQType<int32_t>(), GetQType<DenseArrayEdge>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected all arguments to be optional, but got "
"DENSE_ARRAY_EDGE")));
}
TEST(TypeMetaEvalStrategiesTest, FirstMatchingTypeStrategy) {
auto first_numeric =
CallableStrategy(FirstMatchingTypeStrategy(IsNumeric, Nth(0)));
EXPECT_THAT(first_numeric({GetQType<int32_t>(), GetQType<int64_t>()}),
IsOkAndHolds(GetQType<int32_t>()));
EXPECT_THAT(first_numeric({GetQType<Text>(), GetQType<int64_t>()}),
IsOkAndHolds(GetQType<int64_t>()));
EXPECT_THAT(first_numeric({GetQType<int32_t>(), GetQType<Text>()}),
IsOkAndHolds(GetQType<int32_t>()));
EXPECT_THAT(first_numeric({GetQType<Text>(), GetQType<Text>()}),
IsOkAndHolds(GetQType<Text>()));
EXPECT_THAT(
first_numeric({}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected to have at least 1 argument(s), got 0")));
}
TEST(TypeMetaEvalStrategiesTest, IsNumeric) {
EXPECT_TRUE(IsNumeric(GetQType<int32_t>()));
EXPECT_TRUE(IsNumeric(GetQType<float>()));
EXPECT_TRUE(IsNumeric(GetOptionalQType<float>()));
EXPECT_TRUE(IsNumeric(GetArrayQType<float>()));
EXPECT_TRUE(IsNumeric(GetDenseArrayQType<float>()));
EXPECT_FALSE(IsNumeric(GetQType<bool>()));
EXPECT_FALSE(IsNumeric(GetQType<Bytes>()));
EXPECT_FALSE(IsNumeric(GetArrayQType<bool>()));
EXPECT_FALSE(IsNumeric(GetDenseArrayQType<bool>()));
EXPECT_FALSE(IsNumeric(GetOptionalQType<bool>()));
}
TEST(TypeMetaEvalStrategiesTest, NthMatch) {
std::vector<QTypePtr> i32_types = {GetQType<int32_t>(), GetQType<int32_t>(),
GetQType<int32_t>()};
std::vector<QTypePtr> i32_type = {GetQType<int32_t>()};
std::vector<QTypePtr> i32_types_len_2 = {GetQType<int32_t>(),
GetQType<int32_t>()};
EXPECT_THAT((NthMatch(1, Is<int32_t>)(i32_types)),
IsOkAndHolds(ElementsAreArray(i32_types)));
EXPECT_THAT(
(NthMatch(1, Is<int64_t>)(i32_types)),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected type to be INT64, got INT32; for arguments (1)"));
EXPECT_THAT((NthMatch(1, Is<int64_t>)(i32_type)),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected to have at least 2 argument(s), got 1"));
EXPECT_THAT((NthMatch(2, Is<int32_t>)(i32_types)),
IsOkAndHolds(ElementsAreArray(i32_types)));
EXPECT_THAT(
(NthMatch(2, Is<int64_t>)(i32_types)),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected type to be INT64, got INT32; for arguments (2)"));
EXPECT_THAT((NthMatch(2, Is<int64_t>)(i32_types_len_2)),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected to have at least 3 argument(s), got 2"));
std::vector<QTypePtr> types1 = {GetQType<int32_t>(), GetQType<int32_t>(),
GetQType<OptionalValue<int32_t>>(),
GetQType<OptionalValue<int32_t>>(),
GetQType<float>()};
EXPECT_THAT((NthMatch({0, 1}, AllSame)(types1)),
IsOkAndHolds(ElementsAreArray(types1)));
EXPECT_THAT((NthMatch({2, 3}, AllSame)(types1)),
IsOkAndHolds(ElementsAreArray(types1)));
EXPECT_THAT((NthMatch({0, 1, 2, 3}, AllSameScalarType)(types1)),
IsOkAndHolds(ElementsAreArray(types1)));
EXPECT_THAT((NthMatch({0, 2}, AllSame)(types1)),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected all types to be equal, got INT32 and "
"OPTIONAL_INT32; for arguments (0, 2)"));
EXPECT_THAT((NthMatch({0, 2}, AllSame)({GetQType<int32_t>()})),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected to have at least 3 argument(s), got 1"));
}
TEST(TypeMetaEvalStrategiesTest, NthApply) {
std::vector<QTypePtr> types = {GetQType<int32_t>(),
GetDenseArrayQType<int32_t>(),
GetArrayQType<int32_t>()};
{
std::vector<QTypePtr> res_types = {GetDenseArrayQType<int32_t>(),
GetDenseArrayQType<int32_t>(),
GetArrayQType<int32_t>()};
EXPECT_THAT(NthApply({0, 1}, Broadcast)(types),
IsOkAndHolds(ElementsAreArray(res_types)));
}
{
std::vector<QTypePtr> res_types = {GetArrayQType<int32_t>(),
GetDenseArrayQType<int32_t>(),
GetArrayQType<int32_t>()};
EXPECT_THAT(NthApply({0, 2}, Broadcast)(types),
IsOkAndHolds(ElementsAreArray(res_types)));
}
{
std::vector<QTypePtr> res_types = {GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>(),
GetArrayQType<int32_t>()};
EXPECT_THAT(NthApply(0, ToOptional)(types),
IsOkAndHolds(ElementsAreArray(res_types)));
}
EXPECT_THAT(NthApply({1, 2}, Broadcast)(types),
StatusIs(absl::StatusCode::kInvalidArgument,
"unable to broadcast arguments; "
"DENSE_ARRAY_INT32,ARRAY_INT32; for arguments (1, 2)"));
EXPECT_THAT(NthApply({2, 3}, Broadcast)(types),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected to have at least 4 argument(s), got 3"));
}
TEST(TypeMetaEvalStrategiesTest, LiftResultType) {
auto i32 = GetQType<int32_t>();
auto f32 = GetQType<float>();
auto oi32 = GetOptionalQType<int32_t>();
auto of32 = GetOptionalQType<float>();
auto ai32 = GetArrayQType<int32_t>();
auto af32 = GetArrayQType<float>();
auto lift_f32 = CallableStrategy(LiftResultType(f32));
EXPECT_THAT(lift_f32({}), IsOkAndHolds(f32));
EXPECT_THAT(lift_f32({i32}), IsOkAndHolds(f32));
EXPECT_THAT(lift_f32({i32, f32}), IsOkAndHolds(f32));
EXPECT_THAT(lift_f32({oi32}), IsOkAndHolds(of32));
EXPECT_THAT(lift_f32({i32, oi32}), IsOkAndHolds(of32));
EXPECT_THAT(lift_f32({ai32}), IsOkAndHolds(af32));
EXPECT_THAT(lift_f32({oi32, ai32}), IsOkAndHolds(af32));
EXPECT_THAT(lift_f32({i32, oi32, ai32}), IsOkAndHolds(af32));
}
TEST(TypeMetaEvalStrategiesTest, Broadcast) {
auto i32 = GetQType<int32_t>();
auto f32 = GetQType<float>();
auto oi32 = GetOptionalQType<int32_t>();
auto ai32 = GetArrayQType<int32_t>();
auto af32 = GetArrayQType<float>();
auto di32 = GetDenseArrayQType<int32_t>();
auto df32 = GetDenseArrayQType<float>();
EXPECT_THAT(Broadcast({}), IsOkAndHolds(ElementsAre()));
EXPECT_THAT(Broadcast({i32}), IsOkAndHolds(ElementsAre(i32)));
EXPECT_THAT(Broadcast({i32, f32}), IsOkAndHolds(ElementsAre(i32, f32)));
EXPECT_THAT(Broadcast({i32, oi32}), IsOkAndHolds(ElementsAre(i32, oi32)));
EXPECT_THAT(Broadcast({ai32}), IsOkAndHolds(ElementsAre(ai32)));
EXPECT_THAT(Broadcast({ai32, f32}), IsOkAndHolds(ElementsAre(ai32, af32)));
EXPECT_THAT(Broadcast({i32, oi32, af32}),
IsOkAndHolds(ElementsAre(ai32, ai32, af32)));
EXPECT_THAT(Broadcast({i32, oi32, af32, ai32}),
IsOkAndHolds(ElementsAre(ai32, ai32, af32, ai32)));
EXPECT_THAT(
Broadcast({df32, GetQType<int32_t>(), GetOptionalQType<int32_t>()}),
IsOkAndHolds(ElementsAre(df32, di32, di32)));
EXPECT_THAT(Broadcast({af32, df32}),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(TypeMetaEvalStrategiesTest, Is) {
std::vector<QTypePtr> i32_types = {GetQType<int32_t>(), GetQType<int32_t>()};
EXPECT_THAT(Is<int32_t>(i32_types),
IsOkAndHolds(ElementsAreArray(i32_types)));
EXPECT_THAT(Is(GetQType<int32_t>())(i32_types),
IsOkAndHolds(ElementsAreArray(i32_types)));
EXPECT_THAT(Is<int64_t>(i32_types),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected type of argument 0 to be INT64, got INT32"));
EXPECT_THAT(Is(GetQType<int64_t>())(i32_types),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected type of argument 0 to be INT64, got INT32"));
}
TEST(TypeMetaEvalStrategiesTest, IsNot) {
std::vector<QTypePtr> i32_types = {GetQType<int32_t>(), GetQType<int32_t>()};
EXPECT_THAT(IsNot<int64_t>(i32_types),
IsOkAndHolds(ElementsAreArray(i32_types)));
EXPECT_THAT(IsNot(GetQType<int64_t>())(i32_types),
IsOkAndHolds(ElementsAreArray(i32_types)));
EXPECT_THAT(IsNot<int32_t>(i32_types),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected type of argument 0 to be not INT32"));
EXPECT_THAT(IsNot(GetQType<int32_t>())(i32_types),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected type of argument 0 to be not INT32"));
}
TEST(TypeMetaEvalStrategiesTest, ScalarTypeIs) {
std::vector<QTypePtr> i32_types = {
GetQType<int32_t>(), GetOptionalQType<int32_t>(),
GetDenseArrayQType<int32_t>(), GetDenseArrayQType<int32_t>(),
GetArrayQType<int32_t>()};
EXPECT_THAT(ScalarTypeIs<int32_t>(i32_types),
IsOkAndHolds(ElementsAreArray(i32_types)));
EXPECT_THAT(
ScalarTypeIs<int64_t>(i32_types),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected scalar type of argument 0 to be INT64, got INT32"));
}
TEST(TypeMetaEvalStrategiesTest, Unary) {
auto single_arg_type = CallableStrategy(Unary);
EXPECT_THAT(single_arg_type({GetQType<int32_t>()}),
IsOkAndHolds(GetQType<int32_t>()));
EXPECT_THAT(single_arg_type({GetQType<int32_t>(), GetQType<int32_t>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected to have one argument")));
}
TEST(TypeMetaEvalStrategiesTest, ToShape) {
auto shape_type = CallableStrategy(ToShape);
EXPECT_THAT(shape_type({GetQType<int32_t>()}),
IsOkAndHolds(GetQType<ScalarShape>()));
EXPECT_THAT(shape_type({GetArrayQType<bool>()}),
IsOkAndHolds(GetQType<ArrayShape>()));
EXPECT_THAT(shape_type({GetDenseArrayQType<bool>()}),
IsOkAndHolds(GetQType<DenseArrayShape>()));
EXPECT_THAT(shape_type({GetQType<OptionalValue<bool>>()}),
IsOkAndHolds(GetQType<OptionalScalarShape>()));
EXPECT_THAT(shape_type({GetQType<OptionalScalarShape>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("no shape type for")));
}
TEST(TypeMetaEvalStrategiesTest, ToOptional) {
auto to_optional = CallableStrategy(ToOptional);
EXPECT_THAT(to_optional({GetArrayQType<int32_t>()}),
IsOkAndHolds(GetArrayQType<int32_t>()));
EXPECT_THAT(
to_optional({GetQType<ArrayEdge>()}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("no optional-like qtype for ARRAY_EDGE; in argument 0")));
}
TEST(TypeMetaEvalStrategiesTest, AllSame) {
EXPECT_THAT(AllSame({GetArrayQType<int32_t>(), GetArrayQType<int32_t>()}),
IsOkAndHolds(ElementsAreArray(
{GetArrayQType<int32_t>(), GetArrayQType<int32_t>()})));
EXPECT_THAT(AllSame({}), IsOkAndHolds(ElementsAre()));
EXPECT_THAT(AllSame({GetArrayQType<int32_t>(), GetArrayQType<int64_t>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected all types to be equal, got "
"ARRAY_INT32 and ARRAY_INT64")));
}
TEST(TypeMetaEvalStrategiesTest, AllSameScalarType) {
EXPECT_THAT(AllSameScalarType(
{GetQType<int32_t>(), GetQType<OptionalValue<int32_t>>()}),
IsOkAndHolds(ElementsAre(GetQType<int32_t>(),
GetQType<OptionalValue<int32_t>>())));
EXPECT_THAT(AllSameScalarType({}), IsOkAndHolds(ElementsAre()));
EXPECT_THAT(AllSameScalarType({GetQType<int32_t>(), GetQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected all scalar types to be equal, got INT32 and "
"FLOAT32"));
}
TEST(TypeMetaEvalStrategiesTest, IsShape) {
auto shape_qtypes = {GetQType<ScalarShape>(), GetQType<ArrayShape>()};
auto non_shape_qtypes = {GetQType<OptionalScalarShape>(),
GetQType<int32_t>()};
EXPECT_THAT(IsShape(shape_qtypes),
IsOkAndHolds(ElementsAreArray(shape_qtypes)));
EXPECT_THAT(
IsShape(non_shape_qtypes),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected all arguments to be shapes, got INT32")));
}
TEST(TypeMetaEvalStrategiesTest, IsArrayShape) {
auto shape_qtypes = {GetQType<ArrayShape>(), GetQType<DenseArrayShape>()};
auto non_shape_qtypes = {GetQType<ArrayShape>(), GetQType<ScalarShape>()};
EXPECT_THAT(IsArrayShape(shape_qtypes),
IsOkAndHolds(ElementsAreArray(shape_qtypes)));
EXPECT_THAT(
IsArrayShape(non_shape_qtypes),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"expected all arguments to be array shapes, got SCALAR_SHAPE")));
}
TEST(TypeMetaEvalStrategiesTest, IsEdge) {
auto edge_qtypes = {GetQType<ArrayEdge>(), GetQType<DenseArrayEdge>()};
EXPECT_THAT(IsEdge(edge_qtypes), IsOkAndHolds(ElementsAreArray(edge_qtypes)));
EXPECT_THAT(
IsEdge({GetQType<ArrayEdge>(), GetQType<int32_t>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("expected all arguments to be edges, got INT32")));
}
TEST(TypeMetaEvalStrategiesTest, IsDenseArray) {
auto da_qtypes = {GetDenseArrayQType<int64_t>(), GetDenseArrayQType<float>()};
EXPECT_THAT(IsDenseArray(da_qtypes),
IsOkAndHolds(ElementsAreArray(da_qtypes)));
EXPECT_THAT(
IsDenseArray({GetArrayQType<int64_t>(), GetDenseArrayQType<int64_t>()}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr(
"expected all arguments to be DenseArrays, got ARRAY_INT64")));
}
TEST(TypeMetaEvalStrategiesTest, EdgeParentShapeQType) {
auto edge_qtypes = {GetQType<ArrayEdge>(), GetQType<DenseArrayEdge>(),
GetQType<ArrayGroupScalarEdge>(),
GetQType<DenseArrayGroupScalarEdge>()};
auto shape_qtypes = {GetQType<ArrayShape>(), GetQType<DenseArrayShape>(),
GetQType<OptionalScalarShape>(),
GetQType<OptionalScalarShape>()};
EXPECT_THAT(type_meta::EdgeParentShapeQType(edge_qtypes),
IsOkAndHolds(ElementsAreArray(shape_qtypes)));
EXPECT_THAT(
type_meta::EdgeParentShapeQType({GetArrayQType<int64_t>()}),
StatusIs(
absl::StatusCode::kInvalidArgument,
HasSubstr("invalid argument 0: expected an edge, got ARRAY_INT64")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/type_meta_eval_strategies.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operators/type_meta_eval_strategies_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
6a8fcf3d-0f76-41b8-a604-cc75abf82d40 | cpp | google/arolla | compile_while_operator | arolla/expr/eval/compile_while_operator.cc | arolla/expr/eval/compile_while_operator_test.cc | #include "arolla/expr/eval/compile_while_operator.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/evaluator_operators.h"
#include "arolla/expr/eval/executable_builder.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/operators/while_loop/while_loop.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/optional_value.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qexpr/evaluation_engine.h"
#include "arolla/qexpr/operators.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
namespace {
struct BoundLoopOperators {
std::shared_ptr<const BoundExpr> condition;
std::shared_ptr<const BoundExpr> body;
};
class WhileLoopBoundOperator : public BoundOperator {
public:
WhileLoopBoundOperator(BoundLoopOperators operators_on_out,
BoundLoopOperators operators_on_tmp,
FrameLayout::Slot<OptionalUnit> condition_slot,
TypedSlot initial_state_slot, TypedSlot tmp_state_slot,
TypedSlot output_state_slot)
: operators_on_out_(std::move(operators_on_out)),
operators_on_tmp_(std::move(operators_on_tmp)),
condition_slot_(condition_slot),
initial_state_slot_(initial_state_slot),
tmp_state_slot_(tmp_state_slot),
output_state_slot_(output_state_slot) {}
void Run(EvaluationContext* ctx, FramePtr frame) const override {
initial_state_slot_.CopyTo(frame, output_state_slot_, frame);
for (;;) {
operators_on_out_.condition->Execute(ctx, frame);
if (!ctx->status().ok() || !frame.Get(condition_slot_)) {
break;
}
operators_on_out_.body->Execute(ctx, frame);
if (!ctx->status().ok()) {
break;
}
operators_on_tmp_.condition->Execute(ctx, frame);
if (!ctx->status().ok() || !frame.Get(condition_slot_)) {
tmp_state_slot_.CopyTo(frame, output_state_slot_, frame);
break;
}
operators_on_tmp_.body->Execute(ctx, frame);
if (!ctx->status().ok()) {
break;
}
}
}
private:
BoundLoopOperators operators_on_out_;
BoundLoopOperators operators_on_tmp_;
FrameLayout::Slot<OptionalUnit> condition_slot_;
TypedSlot initial_state_slot_;
TypedSlot tmp_state_slot_;
TypedSlot output_state_slot_;
};
absl::StatusOr<std::shared_ptr<BoundExpr>> CompileAndBindExprOperator(
const DynamicEvaluationEngineOptions& options, const ExprOperatorPtr& op,
absl::Span<const TypedSlot> input_slots,
std::optional<TypedSlot> output_slot,
ExecutableBuilder& executable_builder) {
ASSIGN_OR_RETURN(
auto evaluator,
CompileAndBindExprOperator(options, executable_builder.layout_builder(),
op, input_slots, output_slot),
_ << "in loop condition");
executable_builder.AddInitOp(
std::make_unique<InitializeAstLiteralsBoundOperator>(evaluator),
"internal.while_loop:initialize_literals()");
return evaluator;
}
absl::StatusOr<BoundLoopOperators> BindLoopOperators(
const DynamicEvaluationEngineOptions& options,
const expr_operators::WhileLoopOperator& while_op,
absl::Span<const TypedSlot> constant_slots, TypedSlot current_state_slot,
TypedSlot next_state_slot, FrameLayout::Slot<OptionalUnit> condition_slot,
ExecutableBuilder& executable_builder) {
std::vector<TypedSlot> input_slots;
input_slots.reserve(1 + constant_slots.size());
input_slots.push_back(current_state_slot);
input_slots.insert(input_slots.end(), constant_slots.begin(),
constant_slots.end());
ASSIGN_OR_RETURN(auto condition_on_out_op,
CompileAndBindExprOperator(
options, while_op.condition(), input_slots,
TypedSlot::FromSlot(condition_slot), executable_builder),
_ << "in loop condition");
ASSIGN_OR_RETURN(
auto body_out_to_tmp_op,
CompileAndBindExprOperator(options, while_op.body(), input_slots,
next_state_slot, executable_builder),
_ << "in loop body");
return BoundLoopOperators{std::move(condition_on_out_op),
std::move(body_out_to_tmp_op)};
}
}
absl::Status CompileWhileOperator(
const DynamicEvaluationEngineOptions& options,
const expr_operators::WhileLoopOperator& while_op,
absl::Span<const TypedSlot> input_slots, TypedSlot output_slot,
ExecutableBuilder& executable_builder) {
if (input_slots.empty()) {
return absl::InvalidArgumentError(
"unexpected number of input slots: expected at least 1 slot, got 0");
}
TypedSlot initial_state_slot = input_slots[0];
if (output_slot.GetType() != initial_state_slot.GetType()) {
return absl::InvalidArgumentError(absl::StrFormat(
"unexpected type of output slot: expected %s slot, got %s",
initial_state_slot.GetType()->name(), output_slot.GetType()->name()));
}
FrameLayout::Slot<OptionalUnit> condition_slot =
executable_builder.layout_builder()->AddSlot<OptionalUnit>();
TypedSlot tmp_state_slot =
AddSlot(output_slot.GetType(), executable_builder.layout_builder());
DynamicEvaluationEngineOptions subexpression_options(options);
subexpression_options.enabled_preparation_stages =
DynamicEvaluationEngineOptions::PreparationStage::kAll;
ASSIGN_OR_RETURN(auto operators_on_out,
BindLoopOperators(subexpression_options, while_op,
input_slots.subspan(1),
output_slot,
tmp_state_slot,
condition_slot, executable_builder));
ASSIGN_OR_RETURN(auto operators_on_tmp,
BindLoopOperators(subexpression_options, while_op,
input_slots.subspan(1),
tmp_state_slot,
output_slot,
condition_slot, executable_builder));
std::vector<TypedSlot> used_slots(input_slots.begin(), input_slots.end());
used_slots.push_back(tmp_state_slot);
used_slots.push_back(TypedSlot::FromSlot(condition_slot));
executable_builder.AddEvalOp(
std::make_unique<WhileLoopBoundOperator>(
std::move(operators_on_out), std::move(operators_on_tmp),
condition_slot, initial_state_slot, tmp_state_slot, output_slot),
eval_internal::FormatOperatorCall("internal.while_loop", input_slots,
{output_slot}),
"internal.while_loop");
return absl::OkStatus();
}
} | #include <cstddef>
#include <cstdint>
#include <utility>
#include "benchmark/benchmark.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/expr/eval/eval.h"
#include "arolla/expr/eval/invoke.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/operators/while_loop/while_loop.h"
#include "arolla/expr/visitors/substitution.h"
#include "arolla/memory/frame.h"
#include "arolla/qexpr/eval_context.h"
#include "arolla/qtype/testing/qtype.h"
#include "arolla/qtype/typed_slot.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/init_arolla.h"
#include "arolla/util/text.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla::expr::eval_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::arolla::testing::TypedValueWith;
using ::testing::ElementsAre;
using ::testing::Eq;
class WhileOperatorTest
: public ::testing::TestWithParam<DynamicEvaluationEngineOptions> {
protected:
DynamicEvaluationEngineOptions GetOptions() const { return GetParam(); }
};
INSTANTIATE_TEST_SUITE_P(
GarbageCollection, WhileOperatorTest,
::testing::Values(
DynamicEvaluationEngineOptions{.allow_overriding_input_slots = false},
DynamicEvaluationEngineOptions{.allow_overriding_input_slots = true}));
TEST_P(WhileOperatorTest, SimpleWhile) {
auto init_x = Leaf("x");
auto init_y = Leaf("y");
ASSERT_OK_AND_ASSIGN(
auto loop_condition,
CallOp("core.not_equal", {Placeholder("y"), Literal<int64_t>(0)}));
auto new_x = Placeholder("y");
ASSERT_OK_AND_ASSIGN(
auto new_y, CallOp("math.mod", {Placeholder("x"), Placeholder("y")}));
ASSERT_OK_AND_ASSIGN(ExprNodePtr while_loop,
expr_operators::MakeWhileLoop(
{{"x", init_x}, {"y", init_y}}, loop_condition,
{{"x", new_x}, {"y", new_y}}));
ASSERT_OK_AND_ASSIGN(
ExprNodePtr gcd,
CallOp("namedtuple.get_field", {while_loop, Literal(Text("x"))}));
EXPECT_THAT(Invoke(gcd,
{{"x", TypedValue::FromValue<int64_t>(57)},
{"y", TypedValue::FromValue<int64_t>(58)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int64_t>(Eq(1))));
EXPECT_THAT(Invoke(gcd,
{{"x", TypedValue::FromValue<int64_t>(171)},
{"y", TypedValue::FromValue<int64_t>(285)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int64_t>(Eq(57))));
}
absl::StatusOr<ExprNodePtr> SumOfXs(int64_t number_of_xs) {
auto init_n = Literal<int64_t>(1);
auto init_x = Leaf("x");
auto init_accumulator = Leaf("x");
ASSIGN_OR_RETURN(auto loop_condition,
CallOp("core.not_equal",
{Placeholder("n"), Literal<int64_t>(number_of_xs)}));
ASSIGN_OR_RETURN(auto new_n,
CallOp("math.add", {Placeholder("n"), Literal<int64_t>(1)}));
ASSIGN_OR_RETURN(
auto new_accumulator,
CallOp("math.add", {Placeholder("accumulator"), Placeholder("x")}));
return CallOp(
"namedtuple.get_field",
{expr_operators::MakeWhileLoop(
{{"n", init_n}, {"x", init_x}, {"accumulator", init_accumulator}},
loop_condition, {{"n", new_n}, {"accumulator", new_accumulator}}),
Literal(Text("accumulator"))});
}
TEST_P(WhileOperatorTest, LoopWithDifferentNumberOfIterations) {
for (int64_t iterations = 0; iterations < 10; ++iterations) {
ASSERT_OK_AND_ASSIGN(ExprNodePtr sum, SumOfXs(iterations + 1));
EXPECT_THAT(
Invoke(sum, {{"x", TypedValue::FromValue<int64_t>(57)}}, GetOptions()),
IsOkAndHolds(TypedValueWith<int64_t>(57 * (iterations + 1))));
}
}
TEST_P(WhileOperatorTest, LoopWithDenseArray) {
ASSERT_OK_AND_ASSIGN(ExprNodePtr sum_of_1000, SumOfXs(1000));
EXPECT_THAT(Invoke(sum_of_1000, {{"x", TypedValue::FromValue<int64_t>(1)}},
GetOptions()),
IsOkAndHolds(TypedValueWith<int64_t>(1000)));
EXPECT_THAT(
Invoke(
sum_of_1000,
{{"x", TypedValue::FromValue(CreateDenseArray<int64_t>({0, 1, 2}))}},
GetOptions()),
IsOkAndHolds(
TypedValueWith<DenseArray<int64_t>>(ElementsAre(0, 1000, 2000))));
auto init_x = Leaf("x");
ASSERT_OK_AND_ASSIGN(
ExprNodePtr sum_of_1000_1000,
SubstituteByFingerprint(sum_of_1000,
{{init_x->fingerprint(), sum_of_1000}}));
EXPECT_THAT(
Invoke(
sum_of_1000_1000,
{{"x", TypedValue::FromValue(CreateDenseArray<int64_t>({0, 1, 2}))}},
GetOptions()),
IsOkAndHolds(TypedValueWith<DenseArray<int64_t>>(
ElementsAre(0, 1000000, 2000000))));
}
template <typename T>
void BM_WhileOperator(benchmark::State& state, T initial_value) {
InitArolla();
auto sum_of_1000_x = SumOfXs(1000).value();
FrameLayout::Builder builder;
auto x_slot = builder.AddSlot<T>();
auto sum_of_1000_x_expr =
CompileAndBindForDynamicEvaluation(DynamicEvaluationEngineOptions(),
&builder, sum_of_1000_x,
{{"x", TypedSlot::FromSlot(x_slot)}})
.value();
FrameLayout layout = std::move(builder).Build();
RootEvaluationContext ctx(&layout);
CHECK_OK(sum_of_1000_x_expr->InitializeLiterals(&ctx));
for (auto _ : state) {
CHECK_OK(sum_of_1000_x_expr->Execute(&ctx));
}
}
void BM_WhileOperator_Scalar(benchmark::State& state) {
BM_WhileOperator(state, int64_t{57});
}
BENCHMARK(BM_WhileOperator_Scalar);
void BM_WhileOperator_DenseArray(benchmark::State& state) {
constexpr size_t kArraySize = 100;
BM_WhileOperator(state, CreateConstDenseArray<int64_t>(kArraySize, 57));
}
BENCHMARK(BM_WhileOperator_DenseArray);
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/compile_while_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/eval/compile_while_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
e8706a30-8422-4dae-a5da-72cc6621a425 | cpp | tensorflow/tensorflow | trt_engine_resource_ops | tensorflow/compiler/tf2tensorrt/ops/trt_engine_resource_ops.cc | tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor_shape.h"
namespace tensorflow {
REGISTER_OP("CreateTRTResourceHandle")
.Attr("resource_name: string")
.Output("resource_handle: resource")
.SetIsStateful()
.SetShapeFn(shape_inference::ScalarShape);
REGISTER_OP("InitializeTRTResource")
.Attr("max_cached_engines_count: int = 1")
.Input("resource_handle: resource")
.Input("filename: string")
.SetIsStateful()
.SetShapeFn(shape_inference::NoOutputs);
REGISTER_OP("SerializeTRTResource")
.Attr("delete_resource: bool = false")
.Attr("save_gpu_specific_engines: bool = True")
.Input("resource_name: string")
.Input("filename: string")
.SetIsStateful()
.SetShapeFn(shape_inference::NoOutputs);
}
#endif | #include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/memory/memory.h"
#include "absl/strings/str_join.h"
#include "tensorflow/compiler/tf2tensorrt/common/datavec.h"
#include "tensorflow/compiler/tf2tensorrt/common/utils.h"
#include "tensorflow/compiler/tf2tensorrt/convert/utils.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_engine_instance.pb.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_logger.h"
#include "tensorflow/compiler/tf2tensorrt/utils/trt_lru_cache.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/resource_mgr.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/tensor_types.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/record_reader.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/file_system.h"
#include "tensorflow/core/platform/path.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/tstring.h"
#include "tensorflow/core/platform/types.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
struct TestParam {
nvinfer1::Dims dims;
bool dynamic_shape;
int n_inputs;
};
// Parameterized fixture for the TRT engine resource ops.  Each TestParam
// supplies the network input dims, whether the engine is built with dynamic
// shapes, and the number of network inputs (a second input, when present, is
// an INT32 shape tensor feeding a slice).
class TRTEngineResourceOpsTest
    : public OpsTestBase,
      public ::testing::WithParamInterface<TestParam> {
 public:
  TRTEngineResourceOpsTest() : param_(GetParam()) {}

 protected:
  // Releases tensors owned by the fixture so another op kernel can be set up
  // and run within the same test body.
  void Reset() {
    for (auto& temp : tensors_) {
      delete temp;
    }
    for (auto& temp : managed_outputs_) {
      delete temp;
    }
    tensors_.clear();
    managed_outputs_.clear();
    inputs_.clear();
  }

  // Builds a single-input network computing exp(input).
  ITensorProxyPtr NetworkWith1Input(nvinfer1::INetworkDefinition* network,
                                    ITensorProxyPtr input) {
    nvinfer1::IUnaryLayer* layer =
        network->addUnary(*input->trt_tensor(), nvinfer1::UnaryOperation::kEXP);
    EXPECT_NE(nullptr, layer);
    return layer->getOutput(0);
  }

  // Builds a two-input network: the second input is an INT32 shape tensor
  // that sizes a slice of the first input; the output is slice + slice.
  ITensorProxyPtr NetworkWith2Inputs(nvinfer1::INetworkDefinition* network,
                                     ITensorProxyPtr input) {
    nvinfer1::Dims dims2{1, {2}};
    ITensorProxyPtr input2 =
        network->addInput(absl::StrCat(IONamePrefixes::kInputPHName, 1).c_str(),
                          nvinfer1::DataType::kINT32, dims2);
    EXPECT_NE(nullptr, input2->trt_tensor());
    nvinfer1::Dims start{2, {0, 0}};
    nvinfer1::Dims stride{2, {1, 1}};
    auto slice_layer =
        network->addSlice(*input->trt_tensor(), start, stride, stride);
    EXPECT_NE(nullptr, slice_layer);
    // Input index 2 of ISliceLayer supplies the (dynamic) slice size.
    slice_layer->setInput(2, *input2->trt_tensor());
    ITensorProxyPtr sliced_input = slice_layer->getOutput(0);
    EXPECT_NE(nullptr, sliced_input->trt_tensor());
    auto layer = network->addElementWise(*sliced_input->trt_tensor(),
                                         *sliced_input->trt_tensor(),
                                         nvinfer1::ElementWiseOperation::kSUM);
    EXPECT_NE(nullptr, layer);
    return layer->getOutput(0);
  }

  // Builds a TRT engine matching `param_`, including three optimization
  // profiles when dynamic shapes are requested.
  TrtUniquePtrType<nvinfer1::ICudaEngine> CreateTRTEngine() {
    TrtUniquePtrType<nvinfer1::IBuilder> builder(
        nvinfer1::createInferBuilder(logger_));
    // BUGFIX: both branches of the former IS_TRT_VERSION_GE(8, 0, 0, 0)
    // #if/#else were byte-identical; the dead conditional was removed and an
    // explicit-batch network is created unconditionally.
    TrtUniquePtrType<nvinfer1::INetworkDefinition> network(
        builder->createNetworkV2(
            1U << static_cast<int>(
                nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH)));
    nvinfer1::Dims dims = this->param_.dims;
    if (this->param_.dynamic_shape) {
      // Dynamic shapes: all dimensions are unknown (-1) at build time.
      std::fill(dims.d, dims.d + dims.nbDims, -1);
    }
    const std::string in_name = StrCat(IONamePrefixes::kInputPHName, 0);
    ITensorProxyPtr input =
        network->addInput(in_name.c_str(), nvinfer1::DataType::kFLOAT, dims);
    EXPECT_NE(nullptr, input->trt_tensor());
    ITensorProxyPtr output =
        this->param_.n_inputs == 1
            ? this->NetworkWith1Input(network.get(), input)
            : this->NetworkWith2Inputs(network.get(), input);
    output->setName("output");
    network->markOutput(*output->trt_tensor());
    TrtUniquePtrType<nvinfer1::IBuilderConfig> builder_config(
        builder->createBuilderConfig());
    builder_config->setMaxWorkspaceSize(1 << 10);
    builder->setMaxBatchSize(1);
    if (this->param_.dynamic_shape) {
      // Define three optimization profiles (i = 1..3) with all dims 3*i; in
      // the two-input case also record the shape-tensor values.
      TrtShapeOptimizationProfile profile;
      profile.SetShapeTensorMask(network.get());
      const int n_input = param_.n_inputs;
      std::vector<bool> input_mask(n_input, true);
      profile.SetInputMask(input_mask);
      for (int i = 1; i <= 3; i++) {
        std::vector<TensorShape> shape_vec(n_input);
        std::vector<int> dimvec(this->param_.dims.nbDims, 3 * i);
        TensorShape shape;
        TF_CHECK_OK(
            TensorShapeUtils::MakeShape(dimvec.data(), dimvec.size(), &shape));
        const ITensorProxyPtr input = network->getInput(0);
        const char* name = input->getName();
        VLOG(2) << "Defining profile for input " << name;
        shape_vec[0] = shape;
        if (this->param_.n_inputs == 2) {
          // Second input: a rank-1 shape tensor holding the slice size.
          TF_CHECK_OK(TensorShapeUtils::MakeShape(
              std::vector<int32>{param_.dims.nbDims}, &shape));
          shape_vec[1] = shape;
          Tensor shape_tensor(DT_INT32, shape);
          std::vector<int32> vals{1, i};
          std::copy_n(vals.data(), vals.size(),
                      shape_tensor.flat<int32_t>().data());
          DataVec shape_values{{"one", {}}, {"two", shape_tensor}};
          TF_CHECK_OK(profile.CollectShapeValues(shape_values));
        } else {
          TF_CHECK_OK(profile.CollectShapeValues({{"one", {}}}));
        }
        profile.AddShape(shape_vec);
      }
      std::vector<PartialTensorShape> input_partial_shapes;
      TF_CHECK_OK(GetNetworkInputShapes(network.get(), &input_partial_shapes));
      profile.InitProfiles(input_partial_shapes, ProfileStrategy::kOptimal);
      TF_CHECK_OK(profile.ConfigureBuilder(builder.get(), builder_config.get(),
                                           network.get()));
    }
    VLOG(2) << "ConfigureBuilder Finished";
    TrtUniquePtrType<nvinfer1::ICudaEngine> engine(
        builder->buildEngineWithConfig(*network, *builder_config));
    VLOG(2) << "Engine constructed";
    EXPECT_NE(nullptr, engine);
    return engine;
  }

  Logger& logger_ = *Logger::GetLogger();
  TestParam param_;
};
// Test configurations.  NOTE(review): the two-input dynamic-shape case is
// only registered for TRT >= 7.1.3 — presumably older TRT lacks the required
// dynamic slice/shape-tensor support; confirm against the TRT release notes.
#if IS_TRT_VERSION_GE(7, 1, 3, 0)
constexpr std::array<TestParam, 3> TestParameters = {
    TestParam{nvinfer1::Dims{1, {1}}, false, 1},
    TestParam{nvinfer1::Dims{1, {1}}, true, 1},
    TestParam{nvinfer1::Dims{2, {3, 3}}, true, 2}};
#else
constexpr std::array<TestParam, 2> TestParameters = {
    TestParam{nvinfer1::Dims{1, {1}}, false, 1},
    TestParam{nvinfer1::Dims{1, {1}}, true, 1}};
#endif
// Runs every TEST_P in the fixture once per TestParam above.
INSTANTIATE_TEST_CASE_P(EngineResourceOpsTestInstantiation,
                        TRTEngineResourceOpsTest,
                        ::testing::ValuesIn(TestParameters));
// End-to-end lifecycle test for the TRT engine cache resource ops:
// create handle -> initialize from an empty file -> populate the cache ->
// serialize + delete -> destroy -> re-initialize from the serialized file ->
// destroy again.
TEST_P(TRTEngineResourceOpsTest, Basic) {
  std::unique_ptr<Device> device(
      DeviceFactory::NewDevice("GPU", {}, "/job:worker/replica:0/task:0"));
  ResourceMgr* rm = device->resource_manager();
  SetDevice(DEVICE_GPU, std::move(device));

  // Create the resource handle; the resource itself must not exist yet.
  const string container(kTfTrtContainerName);
  const string resource_name = "myresource";
  Reset();
  TF_ASSERT_OK(NodeDefBuilder("op", "CreateTRTResourceHandle")
                   .Attr("resource_name", resource_name)
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  TF_ASSERT_OK(RunOpKernel());
  ResourceHandle handle =
      context_->mutable_output(0)->scalar<ResourceHandle>()();
  TRTEngineCacheResource* resource = nullptr;
  EXPECT_TRUE(
      errors::IsNotFound(rm->Lookup(container, resource_name, &resource)));

  // Initialize the resource from an empty file: the cache starts out empty.
  Reset();
  Env* env = Env::Default();
  const string filename = io::JoinPath(testing::TmpDir(), "trt_engine_file");
  {
    std::unique_ptr<WritableFile> file;
    TF_ASSERT_OK(env->NewWritableFile(filename, &file));
  }
  TF_ASSERT_OK(NodeDefBuilder("op", "InitializeTRTResource")
                   .Input(FakeInput(DT_RESOURCE))
                   .Input(FakeInput(DT_STRING))
                   .Attr("max_cached_engines_count", 1)
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
  // BUGFIX: this input was garbled to `(unknown)`; the op's DT_STRING input
  // is the path of the serialized-engine file created above.
  AddInputFromArray<tstring>(TensorShape({}), {filename});
  TF_ASSERT_OK(RunOpKernel());
  EXPECT_TRUE(rm->Lookup(container, resource_name, &resource).ok());
  EXPECT_EQ(0, resource->cache_.size());

  // Build an engine and insert it into the cache directly.
  TrtUniquePtrType<nvinfer1::ICudaEngine> engine = CreateTRTEngine();
  ExecutionContext context = ExecutionContext::Create(engine.get());
  std::vector<TensorShape> engine_input_shape(1);
  TF_ASSERT_OK(DimsAdapter(param_.dims).TensorShape(&(engine_input_shape[0])));
  if (param_.n_inputs > 1) {
    engine_input_shape.push_back(TensorShape({1, 1}));
  }
  resource->cache_.emplace(
      engine_input_shape,
      std::make_unique<EngineContext>(std::move(engine), std::move(context)));
  EXPECT_FALSE(resource->RefCountIsOne());

  // Serialize the resource to `filename` and drop the ResourceMgr's
  // reference (delete_resource=true); only the local ref remains.
  Reset();
  TF_ASSERT_OK(NodeDefBuilder("op", "SerializeTRTResource")
                   .Attr("delete_resource", true)
                   .Input(FakeInput(DT_STRING))
                   .Input(FakeInput(DT_STRING))
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<tstring>(TensorShape({}), {resource_name});
  // BUGFIX: garbled `(unknown)` restored; the second input is the file the
  // engine cache is serialized into.
  AddInputFromArray<tstring>(TensorShape({}), {filename});
  TF_ASSERT_OK(RunOpKernel());
  EXPECT_TRUE(resource->RefCountIsOne());
  resource->Unref();

  // Destroying the already-deleted resource must report NotFound.
  Reset();
  TF_ASSERT_OK(NodeDefBuilder("op", "DestroyResourceOp")
                   .Attr("ignore_lookup_error", false)
                   .Input(FakeInput(DT_RESOURCE))
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
  EXPECT_TRUE(errors::IsNotFound(RunOpKernel()));

  // The serialized file must contain exactly one engine record whose input
  // shapes match the test parameters.
  std::unique_ptr<RandomAccessFile> file;
  TF_ASSERT_OK(env->NewRandomAccessFile(filename, &file));
  auto reader = std::make_unique<io::RecordReader>(file.get());
  uint64 offset = 0;
  tstring record;
  TF_ASSERT_OK(reader->ReadRecord(&offset, &record));
  TRTEngineInstance engine_instance;
  engine_instance.ParseFromString(record);
  EXPECT_EQ(param_.n_inputs, engine_instance.input_shapes_size());
  EXPECT_EQ(param_.dims.nbDims, engine_instance.input_shapes(0).dim_size());
  for (int i = 0; i < param_.dims.nbDims; i++) {
    EXPECT_EQ(param_.dims.d[i], engine_instance.input_shapes(0).dim(i).size());
  }
  EXPECT_TRUE(errors::IsOutOfRange(reader->ReadRecord(&offset, &record)));

  // Re-initialize the resource from the serialized file and verify the
  // cached engine (and, for dynamic shapes, its profiles) round-tripped.
  Reset();
  TF_ASSERT_OK(NodeDefBuilder("op", "InitializeTRTResource")
                   .Input(FakeInput(DT_RESOURCE))
                   .Input(FakeInput(DT_STRING))
                   .Attr("max_cached_engines_count", 1)
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
  // BUGFIX: garbled `(unknown)` restored; read back from the same file.
  AddInputFromArray<tstring>(TensorShape({}), {filename});
  TF_ASSERT_OK(RunOpKernel());
  EXPECT_TRUE(rm->Lookup(container, resource_name, &resource).ok());
  EXPECT_EQ(1, resource->cache_.size());
  if (this->param_.dynamic_shape) {
    EXPECT_EQ(3, resource->profiles_.GetNumProfiles());
    EXPECT_EQ(3, resource->cache_.begin()->second->GetNumContexts());
    if (this->param_.n_inputs == 1) {
      std::vector<TensorShape> shapes(1);
      TF_CHECK_OK(
          TensorShapeUtils::MakeShape(std::vector<int32>{6}, &shapes[0]));
      EXPECT_EQ(1, resource->profiles_.GetProfileNumber(shapes));
    } else {
      std::vector<TensorShape> shapes(2);
      TF_CHECK_OK(
          TensorShapeUtils::MakeShape(std::vector<int32>{9, 9}, &shapes[0]));
      TF_CHECK_OK(
          TensorShapeUtils::MakeShape(std::vector<int32>{2}, &shapes[1]));
      Tensor shape_tensor(DT_INT32, shapes[1]);
      std::vector<int32> vals{1, 3};
      std::copy_n(vals.data(), vals.size(),
                  shape_tensor.flat<int32_t>().data());
      DataVec shape_values{{"one", {}}, {"two", shape_tensor}};
      TF_CHECK_OK(resource->profiles_.CollectShapeValues(shape_values));
      EXPECT_EQ(2, resource->profiles_.GetProfileNumber(shapes));
    }
  }
  EXPECT_FALSE(resource->RefCountIsOne());

  // Destroy succeeds once, then reports NotFound; the local ref is the last.
  Reset();
  TF_ASSERT_OK(NodeDefBuilder("op", "DestroyResourceOp")
                   .Attr("ignore_lookup_error", false)
                   .Input(FakeInput(DT_RESOURCE))
                   .Finalize(node_def()));
  TF_ASSERT_OK(InitOp());
  AddInputFromArray<ResourceHandle>(TensorShape({}), {handle});
  TF_ASSERT_OK(RunOpKernel());
  EXPECT_TRUE(errors::IsNotFound(RunOpKernel()));
  EXPECT_TRUE(resource->RefCountIsOne());
  resource->Unref();
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/ops/trt_engine_resource_ops.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/kernels/trt_engine_resource_ops_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
589b93b3-2eb7-49ee-946c-2062dc9b29b0 | cpp | google/quiche | legacy_quic_stream_id_manager | quiche/quic/core/legacy_quic_stream_id_manager.cc | quiche/quic/core/legacy_quic_stream_id_manager_test.cc | #include "quiche/quic/core/legacy_quic_stream_id_manager.h"
#include "quiche/quic/core/quic_session.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
namespace quic {
// Constructs a manager for pre-IETF ("Google QUIC") stream-id accounting.
// `max_open_*_streams` bound the number of simultaneously open streams in
// each direction.
LegacyQuicStreamIdManager::LegacyQuicStreamIdManager(
    Perspective perspective, QuicTransportVersion transport_version,
    size_t max_open_outgoing_streams, size_t max_open_incoming_streams)
    : perspective_(perspective),
      transport_version_(transport_version),
      max_open_outgoing_streams_(max_open_outgoing_streams),
      max_open_incoming_streams_(max_open_incoming_streams),
      // First id this endpoint will hand out for a self-initiated stream.
      next_outgoing_stream_id_(QuicUtils::GetFirstBidirectionalStreamId(
          transport_version_, perspective_)),
      // On the server, versions that still carry the handshake on a dedicated
      // crypto stream treat that stream as already created by the peer;
      // otherwise no peer-created stream exists yet (invalid id sentinel).
      largest_peer_created_stream_id_(
          perspective_ == Perspective::IS_SERVER
              ? (QuicVersionUsesCryptoFrames(transport_version_)
                     ? QuicUtils::GetInvalidStreamId(transport_version_)
                     : QuicUtils::GetCryptoStreamId(transport_version_))
              : QuicUtils::GetInvalidStreamId(transport_version_)),
      num_open_incoming_streams_(0),
      num_open_outgoing_streams_(0) {}
// Defaulted: the manager owns no resources beyond its value members.
LegacyQuicStreamIdManager::~LegacyQuicStreamIdManager() = default;
// Returns true when this endpoint may open one more self-initiated stream
// without exceeding `max_open_outgoing_streams_`.  Logs (debug builds only)
// when the limit has been reached.
bool LegacyQuicStreamIdManager::CanOpenNextOutgoingStream() const {
  QUICHE_DCHECK_LE(num_open_outgoing_streams_, max_open_outgoing_streams_);
  const bool at_limit =
      num_open_outgoing_streams_ == max_open_outgoing_streams_;
  QUIC_DLOG_IF(INFO, at_limit) << "Failed to create a new outgoing stream. "
                               << "Already " << num_open_outgoing_streams_
                               << " open.";
  return num_open_outgoing_streams_ < max_open_outgoing_streams_;
}
// True while the count of open peer-initiated streams is still below the
// configured incoming-stream cap.
bool LegacyQuicStreamIdManager::CanOpenIncomingStream() const {
  return max_open_incoming_streams_ > num_open_incoming_streams_;
}
// Records that the peer has used `stream_id`.  Every peer-side id between the
// previous largest and `stream_id` becomes "available" (usable later, out of
// order).  Returns false when doing so would push the number of available
// streams past MaxAvailableStreams(), i.e. the peer skipped too far ahead.
bool LegacyQuicStreamIdManager::MaybeIncreaseLargestPeerStreamId(
    const QuicStreamId stream_id) {
  // The id is now in use, so it is no longer merely "available".
  available_streams_.erase(stream_id);
  if (largest_peer_created_stream_id_ !=
          QuicUtils::GetInvalidStreamId(transport_version_) &&
      stream_id <= largest_peer_created_stream_id_) {
    // Already covered by an earlier call.
    return true;
  }
  // Count of ids strictly between the old largest and `stream_id` that belong
  // to the peer (ids of one direction step by 2).
  size_t additional_available_streams =
      (stream_id - largest_peer_created_stream_id_) / 2 - 1;
  if (largest_peer_created_stream_id_ ==
      QuicUtils::GetInvalidStreamId(transport_version_)) {
    // First peer-created stream seen: the value computed above is meaningless
    // (it subtracted the invalid-id sentinel) and is overwritten here.
    additional_available_streams = (stream_id + 1) / 2 - 1;
  }
  size_t new_num_available_streams =
      GetNumAvailableStreams() + additional_available_streams;
  if (new_num_available_streams > MaxAvailableStreams()) {
    QUIC_DLOG(INFO) << perspective_
                    << "Failed to create a new incoming stream with id:"
                    << stream_id << ". There are already "
                    << GetNumAvailableStreams()
                    << " streams available, which would become "
                    << new_num_available_streams << ", which exceeds the limit "
                    << MaxAvailableStreams() << ".";
    return false;
  }
  // Mark every skipped peer-side id below `stream_id` as available.
  QuicStreamId first_available_stream = largest_peer_created_stream_id_ + 2;
  if (largest_peer_created_stream_id_ ==
      QuicUtils::GetInvalidStreamId(transport_version_)) {
    first_available_stream = QuicUtils::GetFirstBidirectionalStreamId(
        transport_version_, QuicUtils::InvertPerspective(perspective_));
  }
  for (QuicStreamId id = first_available_stream; id < stream_id; id += 2) {
    available_streams_.insert(id);
  }
  largest_peer_created_stream_id_ = stream_id;
  return true;
}
// Hands out the next self-initiated stream id and advances the counter by 2
// (ids of one direction share parity, so consecutive ids differ by 2).
QuicStreamId LegacyQuicStreamIdManager::GetNextOutgoingStreamId() {
  const QuicStreamId assigned_id = next_outgoing_stream_id_;
  next_outgoing_stream_id_ += 2;
  return assigned_id;
}
// Bumps the open-stream counter for the given direction.
void LegacyQuicStreamIdManager::ActivateStream(bool is_incoming) {
  if (is_incoming) {
    ++num_open_incoming_streams_;
  } else {
    ++num_open_outgoing_streams_;
  }
}
// Decrements the open-stream counter for the given direction; an underflow
// indicates a bookkeeping bug and is reported via QUIC_BUG_IF.
void LegacyQuicStreamIdManager::OnStreamClosed(bool is_incoming) {
  if (is_incoming) {
    QUIC_BUG_IF(quic_bug_12720_1, num_open_incoming_streams_ == 0);
    --num_open_incoming_streams_;
  } else {
    QUIC_BUG_IF(quic_bug_12720_2, num_open_outgoing_streams_ == 0);
    --num_open_outgoing_streams_;
  }
}
// Returns whether `id` may still be opened: for self-initiated streams, any
// id not yet handed out; for peer-initiated streams, any id beyond the
// largest one seen, any id skipped over (tracked in available_streams_), or
// everything while no peer stream has been seen at all.
bool LegacyQuicStreamIdManager::IsAvailableStream(QuicStreamId id) const {
  if (IsIncomingStream(id)) {
    if (largest_peer_created_stream_id_ ==
        QuicUtils::GetInvalidStreamId(transport_version_)) {
      return true;
    }
    return id > largest_peer_created_stream_id_ ||
           available_streams_.contains(id);
  }
  // Self-initiated: available iff not yet handed out.
  return id >= next_outgoing_stream_id_;
}
// Streams initiated by this endpoint share parity with
// next_outgoing_stream_id_; the peer owns the opposite parity.
bool LegacyQuicStreamIdManager::IsIncomingStream(QuicStreamId id) const {
  const QuicStreamId outgoing_parity = next_outgoing_stream_id_ % 2;
  return id % 2 != outgoing_parity;
}
// Number of peer-side ids that were skipped over and may still be used.
size_t LegacyQuicStreamIdManager::GetNumAvailableStreams() const {
  return available_streams_.size();
}
// Cap on tracked-but-unopened peer streams: a fixed multiple of the incoming
// open-stream limit.
size_t LegacyQuicStreamIdManager::MaxAvailableStreams() const {
  return max_open_incoming_streams_ * kMaxAvailableStreamsMultiplier;
}
} | #include "quiche/quic/core/legacy_quic_stream_id_manager.h"
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_session_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
namespace {
using testing::_;
using testing::StrictMock;
// A single test configuration: the QUIC version under test plus the endpoint
// perspective (client or server).
struct TestParams {
  TestParams(ParsedQuicVersion version, Perspective perspective)
      : version(version), perspective(perspective) {}
  ParsedQuicVersion version;
  Perspective perspective;
};
// Human-readable test name, e.g. "Q046Client"; picked up by
// ::testing::PrintToStringParamName().
std::string PrintToString(const TestParams& p) {
  const char* role =
      p.perspective == Perspective::IS_CLIENT ? "Client" : "Server";
  return absl::StrCat(ParsedQuicVersionToString(p.version), role);
}
// Builds the cross product of all supported non-IETF QUIC versions with both
// perspectives.  IETF versions are excluded: they do not use the legacy
// stream-id manager.
std::vector<TestParams> GetTestParams() {
  std::vector<TestParams> params;
  for (ParsedQuicVersion version : AllSupportedVersions()) {
    if (VersionHasIetfQuicFrames(version.transport_version)) {
      continue;
    }
    for (auto perspective : {Perspective::IS_CLIENT, Perspective::IS_SERVER}) {
      params.emplace_back(version, perspective);
    }
  }
  return params;
}
// Fixture instantiating a manager with the default per-connection stream
// limits for the (version, perspective) under test.
class LegacyQuicStreamIdManagerTest : public QuicTestWithParam<TestParams> {
 public:
  LegacyQuicStreamIdManagerTest()
      : manager_(GetParam().perspective, GetParam().version.transport_version,
                 kDefaultMaxStreamsPerConnection,
                 kDefaultMaxStreamsPerConnection) {}

 protected:
  // Returns the id of the n-th (0-based) stream the peer may initiate.
  QuicStreamId GetNthPeerInitiatedId(int n) {
    const bool is_server = GetParam().perspective == Perspective::IS_SERVER;
    if (is_server) {
      return QuicUtils::GetFirstBidirectionalStreamId(
                 GetParam().version.transport_version, Perspective::IS_CLIENT) +
             2 * n;
    }
    return 2 + 2 * n;
  }

  LegacyQuicStreamIdManager manager_;
};
// Instantiates every test in the suite once per (version, perspective) pair.
INSTANTIATE_TEST_SUITE_P(Tests, LegacyQuicStreamIdManagerTest,
                         ::testing::ValuesIn(GetTestParams()),
                         ::testing::PrintToStringParamName());
// Opening streams up to the outgoing limit is allowed; one more is not.
TEST_P(LegacyQuicStreamIdManagerTest, CanOpenNextOutgoingStream) {
  for (size_t i = 0; i < manager_.max_open_outgoing_streams() - 1; ++i) {
    manager_.ActivateStream(false);
  }
  EXPECT_TRUE(manager_.CanOpenNextOutgoingStream());
  manager_.ActivateStream(false);
  EXPECT_FALSE(manager_.CanOpenNextOutgoingStream());
}
// Same as above but for the peer-initiated (incoming) direction.
TEST_P(LegacyQuicStreamIdManagerTest, CanOpenIncomingStream) {
  for (size_t i = 0; i < manager_.max_open_incoming_streams() - 1; ++i) {
    manager_.ActivateStream(true);
  }
  EXPECT_TRUE(manager_.CanOpenIncomingStream());
  manager_.ActivateStream(true);
  EXPECT_FALSE(manager_.CanOpenIncomingStream());
}
// Using peer stream #3 makes the skipped #1 and #2 available; using them
// afterwards (out of order) succeeds.
TEST_P(LegacyQuicStreamIdManagerTest, AvailableStreams) {
  ASSERT_TRUE(
      manager_.MaybeIncreaseLargestPeerStreamId(GetNthPeerInitiatedId(3)));
  EXPECT_TRUE(manager_.IsAvailableStream(GetNthPeerInitiatedId(1)));
  EXPECT_TRUE(manager_.IsAvailableStream(GetNthPeerInitiatedId(2)));
  ASSERT_TRUE(
      manager_.MaybeIncreaseLargestPeerStreamId(GetNthPeerInitiatedId(2)));
  ASSERT_TRUE(
      manager_.MaybeIncreaseLargestPeerStreamId(GetNthPeerInitiatedId(1)));
}
// Jumping exactly to the available-stream limit succeeds; jumping past it
// fails.
TEST_P(LegacyQuicStreamIdManagerTest, MaxAvailableStreams) {
  const size_t kMaxStreamsForTest = 10;
  const size_t kAvailableStreamLimit = manager_.MaxAvailableStreams();
  EXPECT_EQ(
      manager_.max_open_incoming_streams() * kMaxAvailableStreamsMultiplier,
      manager_.MaxAvailableStreams());
  EXPECT_LE(10 * kMaxStreamsForTest, kAvailableStreamLimit);
  EXPECT_TRUE(
      manager_.MaybeIncreaseLargestPeerStreamId(GetNthPeerInitiatedId(0)));
  const int kLimitingStreamId =
      GetNthPeerInitiatedId(kAvailableStreamLimit + 1);
  EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(kLimitingStreamId));
  EXPECT_FALSE(
      manager_.MaybeIncreaseLargestPeerStreamId(kLimitingStreamId + 2 * 2));
}
// Sequentially using ids up to the incoming-stream cap is accepted.
TEST_P(LegacyQuicStreamIdManagerTest, MaximumAvailableOpenedStreams) {
  QuicStreamId stream_id = GetNthPeerInitiatedId(0);
  EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(stream_id));
  EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(
      stream_id + 2 * (manager_.max_open_incoming_streams() - 1)));
}
// Skipping far beyond the available-stream budget is rejected.
TEST_P(LegacyQuicStreamIdManagerTest, TooManyAvailableStreams) {
  QuicStreamId stream_id = GetNthPeerInitiatedId(0);
  EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(stream_id));
  QuicStreamId stream_id2 =
      GetNthPeerInitiatedId(2 * manager_.MaxAvailableStreams() + 4);
  EXPECT_FALSE(manager_.MaybeIncreaseLargestPeerStreamId(stream_id2));
}
// With a raised incoming limit, a jump of 199 streams is within budget.
TEST_P(LegacyQuicStreamIdManagerTest, ManyAvailableStreams) {
  manager_.set_max_open_incoming_streams(200);
  QuicStreamId stream_id = GetNthPeerInitiatedId(0);
  EXPECT_TRUE(manager_.MaybeIncreaseLargestPeerStreamId(stream_id));
  EXPECT_TRUE(
      manager_.MaybeIncreaseLargestPeerStreamId(GetNthPeerInitiatedId(199)));
}
// The constructor wires both limits through to the accessors unchanged.
TEST_P(LegacyQuicStreamIdManagerTest,
       TestMaxIncomingAndOutgoingStreamsAllowed) {
  EXPECT_EQ(manager_.max_open_incoming_streams(),
            kDefaultMaxStreamsPerConnection);
  EXPECT_EQ(manager_.max_open_outgoing_streams(),
            kDefaultMaxStreamsPerConnection);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/legacy_quic_stream_id_manager.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/legacy_quic_stream_id_manager_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
2707fc17-810b-4e82-b824-5dc398f336ce | cpp | google/tensorstore | multi_vector_view | tensorstore/internal/multi_vector_view.h | tensorstore/internal/multi_vector_view_test.cc | #ifndef TENSORSTORE_INTERNAL_MULTI_VECTOR_VIEW_H_
#define TENSORSTORE_INTERNAL_MULTI_VECTOR_VIEW_H_
#include <cassert>
#include <cstddef>
#include "tensorstore/index.h"
#include "tensorstore/internal/gdb_scripting.h"
#include "tensorstore/internal/meta.h"
#include "tensorstore/internal/type_traits.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
TENSORSTORE_GDB_AUTO_SCRIPT("multi_vector_gdb.py")
namespace tensorstore {
namespace internal {
// MultiVectorViewStorage<Extent, Ts...> is a non-owning view over
// `sizeof...(Ts)` parallel arrays that share a common extent.  The extent may
// be a compile-time constant or `dynamic_rank`.  All access goes through
// MultiVectorAccess, the sole friend of the storage classes.
template <DimensionIndex Extent, typename... Ts>
class MultiVectorViewStorage;

template <typename StorageT>
class MultiVectorAccess;

// Primary template: static non-zero extent.  Stores one type-erased pointer
// per vector; the extent is encoded in the type and occupies no storage.
template <ptrdiff_t Extent, typename... Ts>
class MultiVectorViewStorage {
 private:
  friend class MultiVectorAccess<MultiVectorViewStorage>;
  // Extent is known statically, so getting/setting it is a no-op.
  constexpr static StaticRank<Extent> InternalGetExtent() { return {}; }
  void InternalSetExtent(StaticRank<Extent>) {}
  // Pointers are stored type-erased as `const void*`; MultiVectorAccess
  // re-applies the correct element type on access.
  void* InternalGetDataPointer(size_t i) const {
    return const_cast<void*>(data_[i]);
  }
  void InternalSetDataPointer(size_t i, const void* ptr) { data_[i] = ptr; }
  const void* data_[sizeof...(Ts)]{};
};
// Specialization for a static extent of zero: there is nothing to view, so no
// pointers are stored and all accessors are no-ops.
template <typename... Ts>
class MultiVectorViewStorage<0, Ts...> {
 private:
  friend class MultiVectorAccess<MultiVectorViewStorage>;
  constexpr static StaticRank<0> InternalGetExtent() { return {}; }
  void InternalSetExtent(StaticRank<0>) {}
  // With zero elements every vector's data pointer is null.
  void* InternalGetDataPointer(size_t i) const { return nullptr; }
  void InternalSetDataPointer(size_t i, const void* ptr) {}
};
// Specialization for a runtime extent: stores the pointers plus the extent
// itself (defaulting to 0 until Assign is called).
template <typename... Ts>
class MultiVectorViewStorage<dynamic_rank, Ts...> {
 private:
  friend class MultiVectorAccess<MultiVectorViewStorage>;
  ptrdiff_t InternalGetExtent() const { return extent_; }
  void InternalSetExtent(ptrdiff_t extent) { extent_ = extent; }
  // Pointers are stored type-erased as `const void*`; MultiVectorAccess
  // re-applies the correct element type on access.
  void* InternalGetDataPointer(size_t i) const {
    return const_cast<void*>(data_[i]);
  }
  void InternalSetDataPointer(size_t i, const void* ptr) { data_[i] = ptr; }
  const void* data_[sizeof...(Ts)]{};
  ptrdiff_t extent_ = 0;
};
// Accessor/traits class through which all reads and writes of a
// MultiVectorViewStorage go.  Exposes the (static or dynamic) extent, the
// per-vector element types, and span-based access to each vector.
template <DimensionIndex Extent, typename... Ts>
class MultiVectorAccess<MultiVectorViewStorage<Extent, Ts...>> {
 public:
  using StorageType = MultiVectorViewStorage<Extent, Ts...>;
  using ExtentType = StaticOrDynamicRank<Extent>;
  constexpr static ptrdiff_t static_extent = Extent;
  constexpr static size_t num_vectors = sizeof...(Ts);
  // Element type of the I-th vector.  The view is non-owning, so the "const"
  // variant is the same type.
  template <size_t I>
  using ElementType = TypePackElement<I, Ts...>;
  template <size_t I>
  using ConstElementType = TypePackElement<I, Ts...>;
  // Returns the extent shared by all vectors.
  static ExtentType GetExtent(const StorageType& storage) {
    return storage.InternalGetExtent();
  }
  // Returns the I-th vector as a span over the viewed data.
  template <size_t I>
  static tensorstore::span<ElementType<I>, Extent> get(
      const StorageType* array) noexcept {
    return {static_cast<ElementType<I>*>(array->InternalGetDataPointer(I)),
            array->InternalGetExtent()};
  }
  // Rebinds the view to `extent` elements starting at each of `pointers`.
  static void Assign(StorageType* array, ExtentType extent, Ts*... pointers) {
    array->InternalSetExtent(extent);
    size_t i = 0;
    // Fold expression: store each pointer at successive indices.
    (array->InternalSetDataPointer(i++, pointers), ...);
  }
  // Rebinds the view to the given spans, which must all have equal size
  // (checked by assert in debug builds).
  static void Assign(StorageType* array,
                     tensorstore::span<Ts, Extent>... spans) {
    const ExtentType extent =
        GetFirstArgument(GetStaticOrDynamicExtent(spans)...);
    assert(((spans.size() == extent) && ...));
    Assign(array, extent, spans.data()...);
  }
};
}
}
#endif | #include "tensorstore/internal/multi_vector_view.h"
#include <cstddef>
#include <type_traits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/rank.h"
#include "tensorstore/util/span.h"
namespace {
using ::tensorstore::dynamic_rank;
using ::tensorstore::internal::MultiVectorAccess;
using ::tensorstore::internal::MultiVectorViewStorage;
using ::testing::ElementsAre;
// Compile-time sanity checks on the access traits for a static-extent view.
static_assert(
    MultiVectorAccess<MultiVectorViewStorage<3, int, float>>::static_extent ==
    3);
static_assert(
    MultiVectorAccess<MultiVectorViewStorage<3, int, float>>::num_vectors == 2);
// Static extent 2: the extent is a compile-time constant, pointers default to
// null, and Assign rebinds the view via raw pointers or spans.
TEST(MultiVectorViewStorageTest, StaticExtent2) {
  using Container = MultiVectorViewStorage<2, float, int>;
  using Access = MultiVectorAccess<Container>;
  static_assert(
      std::is_same_v<float, typename Access::template ElementType<0>>);
  static_assert(std::is_same_v<int, typename Access::template ElementType<1>>);
  static_assert(
      std::is_same_v<float, typename Access::template ConstElementType<0>>);
  static_assert(
      std::is_same_v<int, typename Access::template ConstElementType<1>>);
  Container vec;
  // Extent is the integral constant 2, not a runtime value.
  static_assert(std::is_same_v<std::integral_constant<ptrdiff_t, 2>,
                               decltype(Access::GetExtent(vec))>);
  EXPECT_EQ(2, Access::GetExtent(vec));
  EXPECT_EQ(nullptr, Access::template get<0>(&vec).data());
  EXPECT_EQ(nullptr, Access::template get<1>(&vec).data());
  float float_arr[] = {1, 2};
  int int_arr[] = {3, 4};
  // Rebind via raw pointers.
  Access::Assign(&vec, std::integral_constant<ptrdiff_t, 2>(), float_arr,
                 int_arr);
  EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(1, 2));
  EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(3, 4));
  EXPECT_EQ(&float_arr[0], Access::template get<0>(&vec).data());
  EXPECT_EQ(&int_arr[0], Access::template get<1>(&vec).data());
  float float_arr2[] = {5, 6};
  int int_arr2[] = {7, 8};
  // Rebind via spans.
  Access::Assign(&vec, tensorstore::span(float_arr2),
                 tensorstore::span(int_arr2));
  EXPECT_EQ(&float_arr2[0], Access::template get<0>(&vec).data());
  EXPECT_EQ(&int_arr2[0], Access::template get<1>(&vec).data());
}
// Static extent 0: the storage holds nothing and all pointers stay null.
TEST(MultiVectorViewStorageTest, StaticExtent0) {
  using Container = MultiVectorViewStorage<0, float, int>;
  using Access = MultiVectorAccess<Container>;
  static_assert(
      std::is_same_v<float, typename Access::template ElementType<0>>);
  static_assert(std::is_same_v<int, typename Access::template ElementType<1>>);
  static_assert(
      std::is_same_v<float, typename Access::template ConstElementType<0>>);
  static_assert(
      std::is_same_v<int, typename Access::template ConstElementType<1>>);
  Container vec;
  static_assert(std::is_same_v<std::integral_constant<ptrdiff_t, 0>,
                               decltype(Access::GetExtent(vec))>);
  EXPECT_EQ(0, Access::GetExtent(vec));
  EXPECT_EQ(nullptr, Access::template get<0>(&vec).data());
  EXPECT_EQ(nullptr, Access::template get<1>(&vec).data());
  Access::Assign(&vec, std::integral_constant<ptrdiff_t, 0>(), nullptr,
                 nullptr);
  EXPECT_EQ(nullptr, Access::template get<0>(&vec).data());
  EXPECT_EQ(nullptr, Access::template get<1>(&vec).data());
  Access::Assign(&vec, tensorstore::span<float, 0>{},
                 tensorstore::span<int, 0>{});
}
// Dynamic extent: the extent is a runtime ptrdiff_t, initially 0, and
// Assign may change it (here 2, then 3).
TEST(MultiVectorViewStorageTest, DynamicExtent) {
  using Container = MultiVectorViewStorage<dynamic_rank, float, int>;
  using Access = MultiVectorAccess<Container>;
  static_assert(
      std::is_same_v<float, typename Access::template ElementType<0>>);
  static_assert(std::is_same_v<int, typename Access::template ElementType<1>>);
  static_assert(
      std::is_same_v<float, typename Access::template ConstElementType<0>>);
  static_assert(
      std::is_same_v<int, typename Access::template ConstElementType<1>>);
  Container vec;
  static_assert(std::is_same_v<ptrdiff_t, decltype(Access::GetExtent(vec))>);
  EXPECT_EQ(0, Access::GetExtent(vec));
  EXPECT_EQ(nullptr, Access::template get<0>(&vec).data());
  EXPECT_EQ(nullptr, Access::template get<1>(&vec).data());
  float float_arr[] = {1, 2};
  int int_arr[] = {3, 4};
  Access::Assign(&vec, std::integral_constant<ptrdiff_t, 2>(), float_arr,
                 int_arr);
  EXPECT_EQ(2, Access::GetExtent(vec));
  EXPECT_THAT(Access::template get<0>(&vec), ElementsAre(1, 2));
  EXPECT_THAT(Access::template get<1>(&vec), ElementsAre(3, 4));
  EXPECT_EQ(&float_arr[0], Access::template get<0>(&vec).data());
  EXPECT_EQ(&int_arr[0], Access::template get<1>(&vec).data());
  float float_arr2[] = {5, 6, 7};
  int int_arr2[] = {7, 8, 9};
  Access::Assign(&vec, tensorstore::span<float>(float_arr2),
                 tensorstore::span<int>(int_arr2));
  EXPECT_EQ(3, Access::GetExtent(vec));
  EXPECT_EQ(&float_arr2[0], Access::template get<0>(&vec).data());
  EXPECT_EQ(&int_arr2[0], Access::template get<1>(&vec).data());
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/multi_vector_view.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/multi_vector_view_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4e5bcc50-72d6-4339-883c-ccbd053d6c39 | cpp | google/arolla | binary_search | arolla/util/binary_search.cc | arolla/qexpr/operators/math/binary_search_test.cc | #include "arolla/util/binary_search.h"
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include "absl/types/span.h"
#include "arolla/util/bits.h"
#include "arolla/util/switch_index.h"
namespace arolla::binary_search_details {
namespace {
// Branchless binary search over an array whose size is exactly
// kArraySize = 2^k - 1 (enforced by the static_assert).  Returns the
// smallest offset in [0, kArraySize] for which `predicate(array[offset])`
// holds, assuming `predicate` is monotone (false... then true...).
template <size_t kArraySize, typename T, class Predicate>
size_t FastBinarySearchT(const T* const array, Predicate predicate) {
  // kArraySize must have the form 0b0111...1, i.e. 2^k - 1, so halving k
  // below always lands on the midpoint of the remaining window.
  static_assert((kArraySize & (kArraySize + 1)) == 0);
  size_t offset = 0;
  for (size_t k = kArraySize; k > 0;) {
    k >>= 1;
    // Conditional-move-friendly update: skip past the probed element when
    // the predicate is still false there.
    offset = (!predicate(array[offset + k]) ? offset + k + 1 : offset);
  }
  return offset;
}
// Returns the partition point of `array` w.r.t. `predicate`: the index of the
// first element satisfying it, or array.size() if none does.  Dispatches on
// floor(log2(size)) so the inner search runs on a window of compile-time size
// 2^log2_size - 1, allowing the loop in FastBinarySearchT to fully unroll.
template <typename T, typename Predicate>
size_t BinarySearchT(absl::Span<const T> array, Predicate predicate) {
  assert(!array.empty());
  const int log2_size = BitScanReverse(array.size());
  return switch_index<8 * sizeof(size_t)>(
      log2_size, [array, predicate](auto constexpr_log2_size) {
        constexpr size_t size =
            (1ULL << static_cast<int>(constexpr_log2_size)) - 1;
        size_t offset = 0;
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Warray-bounds"
#endif
        // One pre-step selects either the leading or the trailing window of
        // `size` elements, so the fixed-size search below covers the whole
        // array.  (GCC's -Warray-bounds false-positives on this indexing.)
        offset = (!predicate(array[size]) ? array.size() - size : offset);
#if !defined(__clang__) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
        return offset +
               FastBinarySearchT<size>(array.begin() + offset, predicate);
      });
}
}
// LowerBound*: index of the first element not less than `value`.  The
// float/double overloads deliberately use !(arg < value) rather than
// arg >= value: under IEEE semantics a NaN element compares false with <,
// so it is treated as "not less" instead of being skipped past.
size_t LowerBoundImpl(float value, absl::Span<const float> array) {
  return BinarySearchT(array, [value](auto arg) { return !(arg < value); });
}
size_t LowerBoundImpl(double value, absl::Span<const double> array) {
  return BinarySearchT(array, [value](auto arg) { return !(arg < value); });
}
size_t LowerBoundImpl(int32_t value, absl::Span<const int32_t> array) {
  return BinarySearchT(array, [value](auto arg) { return arg >= value; });
}
size_t LowerBoundImpl(int64_t value, absl::Span<const int64_t> array) {
  return BinarySearchT(array, [value](auto arg) { return arg >= value; });
}
// UpperBound*: index of the first element greater than `value`.
size_t UpperBoundImpl(float value, absl::Span<const float> array) {
  // A NaN needle compares false with everything, so "first greater element"
  // does not exist; by convention return array.size().
  if (std::isnan(value)) {
    return array.size();
  }
  return BinarySearchT(array, [value](auto arg) { return !(arg <= value); });
}
size_t UpperBoundImpl(double value, absl::Span<const double> array) {
  if (std::isnan(value)) {
    return array.size();
  }
  return BinarySearchT(array, [value](auto arg) { return !(arg <= value); });
}
size_t UpperBoundImpl(int32_t value, absl::Span<const int32_t> array) {
  return BinarySearchT(array, [value](auto arg) { return arg > value; });
}
size_t UpperBoundImpl(int64_t value, absl::Span<const int64_t> array) {
  return BinarySearchT(array, [value](auto arg) { return arg > value; });
}
} | #include "arolla/qexpr/operators/math/binary_search.h"
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/dense_array/bitmap.h"
#include "arolla/dense_array/dense_array.h"
#include "arolla/memory/buffer.h"
namespace arolla {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
// A haystack whose bitmap marks all three elements present is accepted.
TEST(BinarySearch, VerifyHaystackFullBitmap) {
  Buffer<float> values(CreateBuffer(std::vector<float>{1., 2., 3.}));
  Buffer<bitmap::Word> bitmask(CreateBuffer(std::vector<bitmap::Word>{7}));
  DenseArray<float> array{std::move(values), std::move(bitmask)};
  EXPECT_THAT(SearchSortedOp::VerifyHaystack(array), IsOk());
}
// An empty bitmap buffer means "no missing values" and is also accepted.
TEST(BinarySearch, VerifyHaystackEmptyBitmap) {
  Buffer<float> values(CreateBuffer(std::vector<float>{1., 2., 3.}));
  Buffer<bitmap::Word> bitmask(CreateBuffer(std::vector<bitmap::Word>{}));
  DenseArray<float> array{std::move(values), std::move(bitmask)};
  EXPECT_THAT(SearchSortedOp::VerifyHaystack(array), IsOk());
}
// Bitmap 0b101 leaves element 1 missing, so verification must fail with
// kUnimplemented and the documented message.
TEST(BinarySearch, VerifyHaystackRaisesNotFullBitmap) {
  Buffer<float> values(CreateBuffer(std::vector<float>{1., 2., 3.}));
  Buffer<bitmap::Word> bitmask(CreateBuffer(std::vector<bitmap::Word>{5}));
  DenseArray<float> array{std::move(values), std::move(bitmask)};
  EXPECT_THAT(
      SearchSortedOp::VerifyHaystack(array),
      StatusIs(absl::StatusCode::kUnimplemented,
               "math.searchsorted operator supports only full haystacks"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/binary_search.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qexpr/operators/math/binary_search_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
01d2348a-58d6-42fa-82b6-d0616d993f89 | cpp | tensorflow/tensorflow | conditional_thunk | third_party/xla/xla/service/gpu/runtime/conditional_thunk.cc | third_party/xla/xla/backends/cpu/runtime/conditional_thunk_test.cc | #include "xla/service/gpu/runtime/conditional_thunk.h"
#include <cstdint>
#include <memory>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
// Constructs a thunk that, at run time, reads a branch index (bool or int32,
// per `config.branch_index_is_bool`) from `branch_index_buffer_index` and
// executes the thunk sequence of the selected branch.
ConditionalThunk::ConditionalThunk(
    ThunkInfo thunk_info, ConditionalThunkConfig config,
    const BufferAllocation::Slice& branch_index_buffer_index)
    : Thunk(Kind::kConditional, thunk_info),
      config_(std::move(config)),
      branch_index_buffer_index_(branch_index_buffer_index) {}
// Validates the branch configuration and recursively prepares every branch.
//
// A boolean predicate implies exactly two branches (true/false); an integer
// branch index only requires a non-empty branch list.
absl::Status ConditionalThunk::Prepare(const PrepareParams& params,
                                       ResourceRequests& resource_requests) {
  if (config_.branch_index_is_bool) {
    TF_RET_CHECK(config_.branch_thunks.size() == 2);
  } else {
    TF_RET_CHECK(!config_.branch_thunks.empty());
  }
  for (auto& branch_thunk : config_.branch_thunks) {
    TF_RETURN_IF_ERROR(branch_thunk->Prepare(params, resource_requests));
  }
  return absl::OkStatus();
}
// Initializes all branch thunks, then lazily allocates one host buffer per
// StreamExecutor that ExecuteOnStream will use to read back the branch
// selector from device memory.
absl::Status ConditionalThunk::Initialize(const InitializeParams& params) {
  // Same branch-count invariants as in Prepare().
  if (config_.branch_index_is_bool) {
    TF_RET_CHECK(config_.branch_thunks.size() == 2);
  } else {
    TF_RET_CHECK(!config_.branch_thunks.empty());
  }
  for (auto& branch_thunk : config_.branch_thunks) {
    TF_RETURN_IF_ERROR(branch_thunk->Initialize(params));
  }
  // Guard the per-executor map; allocation is sized for the selector type
  // (bool predicate vs. int32 branch index).
  absl::MutexLock lock(&mutex_);
  if (auto it = predicates_.find(params.executor); it == predicates_.end()) {
    TF_ASSIGN_OR_RETURN(
        std::unique_ptr<se::MemoryAllocation> allocation,
        params.executor->HostMemoryAllocate(
            config_.branch_index_is_bool ? sizeof(bool) : sizeof(int32_t)));
    predicates_.emplace(params.executor, std::move(allocation));
  }
  return absl::OkStatus();
}
// Reads the branch selector from device memory into the per-executor host
// buffer, maps it to a branch index, and executes that branch on the stream.
absl::Status ConditionalThunk::ExecuteOnStream(const ExecuteParams& params) {
  auto& stream = *params.stream;

  // Fetch the typed host staging pointer for this executor (allocated in
  // Initialize); the variant encodes bool-predicate vs. int32-index mode.
  auto branch_index_or_pred = [&]() -> std::variant<int32_t*, bool*> {
    absl::MutexLock lock(&mutex_);
    se::StreamExecutor* executor = stream.parent();
    if (config_.branch_index_is_bool) {
      return reinterpret_cast<bool*>(predicates_.at(executor)->opaque());
    } else {
      return reinterpret_cast<int32_t*>(predicates_.at(executor)->opaque());
    }
  }();

  // Copy the selector value from the device buffer into host memory.
  se::DeviceMemoryBase branch_index_address =
      params.buffer_allocations->GetDeviceAddress(branch_index_buffer_index_);
  if (config_.branch_index_is_bool) {
    TF_RETURN_IF_ERROR(stream.Memcpy(std::get<bool*>(branch_index_or_pred),
                                     branch_index_address, sizeof(bool)));
  } else {
    TF_RETURN_IF_ERROR(stream.Memcpy(std::get<int32_t*>(branch_index_or_pred),
                                     branch_index_address, sizeof(int32_t)));
  }

  // Ensure the copy has completed before dereferencing the host pointer.
  if (absl::Status blocked = stream.BlockHostUntilDone(); !blocked.ok()) {
    return Internal("Failed to retrieve branch_index value on stream %p: %s.",
                    &stream, blocked.message());
  }

  // A true predicate selects branch 0, false selects branch 1.
  int32_t branch_index = std::visit(
      VariantVisitor{[](int32_t* branch_index) { return *branch_index; },
                     [](bool* pred) { return *pred ? 0 : 1; }},
      branch_index_or_pred);

  std::string_view branch_kind =
      std::visit(VariantVisitor{[](int32_t*) { return "index"; },
                                [](bool*) { return "pred"; }},
                 branch_index_or_pred);

  VLOG(3) << "ConditionalThunk: branch_index=" << branch_index
          << " (kind: " << branch_kind << ")";

  // Out-of-range indices are clamped to the last branch.
  if (branch_index < 0 || branch_index >= config_.branch_count) {
    branch_index = config_.branch_count - 1;
  }

  TF_RETURN_IF_ERROR(
      config_.branch_thunks[branch_index]->ExecuteOnStream(params));
  return absl::OkStatus();
}
}
} | #include "xla/backends/cpu/runtime/conditional_thunk.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "xla/backends/cpu/runtime/resource_use.h"
#include "xla/backends/cpu/runtime/thunk.h"
#include "xla/backends/cpu/runtime/thunk_testlib.h"
#include "xla/runtime/buffer_use.h"
#include "xla/service/buffer_assignment.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::cpu {
namespace {
TEST(ConditionalThunkTest, BufferUses) {
  BufferAllocation allocation(0, 1024, 0);
  BufferAllocation::Slice index_slice(&allocation, 0, sizeof(int32_t));
  BufferAllocation::Slice branch_read_slice(&allocation, 10, 10);

  // One branch that reads from `branch_read_slice`.
  std::vector<ThunkSequence> branches(1);
  branches[0].push_back(
      std::make_unique<BufferUseThunk>(BufferUse::Read(branch_read_slice)));

  TF_ASSERT_OK_AND_ASSIGN(
      auto conditional,
      ConditionalThunk::Create({"conditional"}, index_slice,
                               std::move(branches)));

  // The conditional reads the branch index plus everything its branches read.
  EXPECT_EQ(conditional->buffer_uses().size(), 2);
  EXPECT_EQ(conditional->buffer_uses()[0], BufferUse::Read(index_slice));
  EXPECT_EQ(conditional->buffer_uses()[1], BufferUse::Read(branch_read_slice));
}
TEST(ConditionalThunkTest, ResourceUses) {
  BufferAllocation allocation(0, 1024, 0);
  BufferAllocation::Slice index_slice(&allocation, 0, sizeof(int32_t));

  // One branch that reads a token resource.
  auto token_resource = Resource::Create(Resource::kToken);
  std::vector<ThunkSequence> branches(1);
  branches[0].push_back(
      std::make_unique<ResourceUseThunk>(ResourceUse::Read(token_resource)));

  TF_ASSERT_OK_AND_ASSIGN(
      auto conditional,
      ConditionalThunk::Create({"conditional"}, index_slice,
                               std::move(branches)));

  // The branch's resource read is surfaced on the conditional itself.
  EXPECT_EQ(conditional->resource_uses().size(), 1);
  EXPECT_EQ(conditional->resource_uses()[0], ResourceUse::Read(token_resource));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/conditional_thunk.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/backends/cpu/runtime/conditional_thunk_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21725ba1-c718-4024-a3ae-27f4d851549c | cpp | tensorflow/tensorflow | tf_executor_to_graph | tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc | tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph_test.cc | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/DebugStringHelper.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#include "tensorflow/compiler/mlir/op_or_arg_name_mapper.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/export_tf_dialect_op.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/export_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/translate_utils.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/verify_suitable_for_graph_export.h"
#include "tensorflow/compiler/mlir/utils/name_utils.h"
#include "xla/status_macros.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/graph_to_functiondef.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/regularization/util.h"
#include "tensorflow/core/graph/tensor_id.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
using mlir::BlockArgument;
using mlir::Dialect;
using mlir::Operation;
using mlir::SymbolTable;
using mlir::Value;
using mlir::func::FuncOp;
using tsl::StatusOr;
namespace {
constexpr char kDeviceAttr[] = "tf.device";
constexpr char kResourceArgUniqueIdAttr[] = "tf._resource_arg_unique_id";
constexpr char kEntryFuncAttr[] = "tf.entry_function";
constexpr char kAliasingAttr[] = "tf.aliasing_output";
// Name mapper that derives node names from op/value locations and then
// legalizes them in place so they are valid TensorFlow node names.
class LegalizedOpOrValLocNameMapper : public OpOrArgLocNameMapper {
 private:
  // Returns the location-derived name after legalization; the base mapper
  // guarantees a non-empty result.
  std::string GetName(OpOrVal op_or_val) override {
    std::string name = OpOrArgLocNameMapper::GetName(op_or_val);
    assert(!name.empty() && "expected non-empty name");
    mlir::LegalizeNodeName(name);
    return name;
  }
};
// Returns the single op wrapped by a tf_executor island, or `op` itself when
// it is not an island.
Operation* GetIslandInnerOpOrSelf(mlir::Operation* op) {
  if (auto island = llvm::dyn_cast<mlir::tf_executor::IslandOp>(op)) {
    return &island.GetBody().front();
  }
  return op;
}
// Stateful helper that converts MLIR functions in the tf_executor dialect
// into tensorflow::Graph instances, exporting referenced library functions
// into a FunctionLibraryDefinition along the way.
class Exporter {
 public:
  // Converts the whole module: the entry function ("main") becomes `graph`,
  // all other functions are exported into `flib_def`. Control return nodes of
  // the entry function are reported via `control_ret_nodes`.
  static Status Convert(mlir::ModuleOp module, const GraphExportConfig& configs,
                        std::unique_ptr<Graph>* graph,
                        FunctionLibraryDefinition* flib_def,
                        absl::flat_hash_set<Node*>* control_ret_nodes);

  // Converts one (library) function into a FunctionDef in `flib_def`,
  // recursively converting functions it references. `visited_functions`
  // prevents duplicate conversion.
  static Status ConvertLibFunction(
      const GraphExportConfig& configs, const Dialect* tf_dialect,
      const SymbolTable& symbol_table, FuncOp function,
      FunctionLibraryDefinition* flib_def,
      llvm::SmallDenseSet<FuncOp>& visited_functions);

  // Converts a single function body into a Graph (_Arg/_Retval nodes for
  // arguments/results, one node per op, edges from operands).
  static absl::StatusOr<std::unique_ptr<Graph>> Convert(
      const GraphExportConfig& configs, const Dialect* tf_dialect,
      const SymbolTable& symbol_table, FuncOp function,
      FunctionLibraryDefinition* flib_def,
      llvm::SmallDenseSet<FuncOp>& visited_functions,
      absl::flat_hash_set<Node*>* control_ret_nodes);

 private:
  // Snapshots the initial graph into graphdef_ so node naming can be seeded
  // from its hash (see AddInstructionNode).
  explicit Exporter(const GraphExportConfig* configs, Graph* graph,
                    const Dialect* tf_dialect, const SymbolTable* symbol_table)
      : configs_(*configs),
        graph_(graph),
        tf_dialect_(tf_dialect),
        symbol_table_(*symbol_table) {
    graph_->ToGraphDef(&graphdef_);
  }

  // Adds an _Arg node for block argument `arg` at position `index`.
  Status AddArgumentNode(BlockArgument arg, unsigned index,
                         llvm::StringRef name);
  // Adds _Retval nodes for the data operands of the fetch, named by `names`
  // when provided.
  Status AddFetchNode(FuncOp function, mlir::tf_executor::FetchOp fetch,
                      llvm::ArrayRef<llvm::StringRef> names);
  // Converts `inst` to a NodeDef and adds it to the graph.
  Status AddInstructionNode(Operation* inst);
  // Rewrites function references in `node_def` to original TF names when the
  // export config requests it.
  void UseOriginalFunctionNames(NodeDef& node_def);

  // Adds all incoming edges for `inst` (data and control).
  Status AddEdge(Operation* inst);

  // Builds the NodeDef for an _Arg node.
  absl::StatusOr<std::unique_ptr<NodeDef>> GetArgumentNode(
      BlockArgument arg, unsigned index, llvm::StringRef name);
  // Builds the NodeDef for a _Retval node.
  absl::StatusOr<std::unique_ptr<NodeDef>> GetReturnNode(FuncOp function,
                                                         Value operand,
                                                         unsigned index,
                                                         llvm::StringRef name);
  // Collects the graph nodes corresponding to the fetch's control operands.
  Status GetControlRetNodes(mlir::tf_executor::FetchOp fetch,
                            absl::flat_hash_set<Node*>* control_ret_nodes);
  // Connects `src` to input `dst_index` of `dst_node` (data or control edge).
  Status AddEdgeBetweenNodes(Value src, Node* dst_node, unsigned dst_index);

  const GraphExportConfig& configs_;
  Graph* graph_;
  // Snapshot of the initial graph, used only for hash-based name seeding.
  GraphDef graphdef_;
  LegalizedOpOrValLocNameMapper op_to_name_;
  // Maps converted MLIR ops to their graph nodes.
  absl::flat_hash_map<Operation*, Node*> nodes_;
  // Maps block arguments to their _Arg nodes.
  llvm::DenseMap<BlockArgument, Node*> args_;
  // One _Retval node per data operand of the fetch op.
  typedef absl::InlinedVector<Node*, 4> NodeVector;
  absl::flat_hash_map<Operation*, NodeVector> returns_;

  const mlir::Dialect* tf_dialect_;
  const SymbolTable& symbol_table_;
};
// Returns the exported name for `func`: the original TF function name when
// the config asks for it and the attribute is present, otherwise the MLIR
// symbol name.
std::string FindFunctionName(const GraphExportConfig& configs, FuncOp func) {
  auto original_func_name =
      func->getAttrOfType<mlir::StringAttr>("tf._original_func_name");
  if (configs.export_original_tf_func_name && original_func_name) {
    return original_func_name.str();
  }
  return func.getName().str();
}
// Builds an _Arg NodeDef for block argument `arg` at position `index`.
//
// The node name is taken from the node part of `name` when provided,
// otherwise uniquified from the function name. Resource subtype info, output
// shape, dtype, index, device and remaining arg attributes are copied onto
// the NodeDef.
absl::StatusOr<std::unique_ptr<NodeDef>> Exporter::GetArgumentNode(
    BlockArgument arg, unsigned index, llvm::StringRef name) {
  auto func = arg.getParentRegion()->getParentOfType<FuncOp>();

  auto node_def = std::make_unique<NodeDef>();
  if (!name.empty())
    node_def->set_name(std::string(ParseTensorName(name.str()).node()));
  else
    node_def->set_name(
        std::string(op_to_name_.GetUniqueName(func.getName().str())));

  node_def->set_op(FunctionLibraryDefinition::kArgOp);

  mlir::TensorType arg_type = mlir::cast<mlir::TensorType>(arg.getType());
  // For resource arguments, record the subtypes' dtypes and shapes as the
  // _handle_dtypes/_handle_shapes attrs.
  if (auto resource_type =
          mlir::dyn_cast<mlir::TF::ResourceType>(arg_type.getElementType())) {
    llvm::ArrayRef<mlir::TensorType> subtypes = resource_type.getSubtypes();
    if (!subtypes.empty()) {
      AttrValue handle_dtypes_attr;
      AttrValue handle_shapes_attr;
      for (mlir::TensorType subtype : subtypes) {
        DataType dtype;
        TF_RETURN_IF_ERROR(ConvertToDataType(subtype.getElementType(), &dtype));
        handle_dtypes_attr.mutable_list()->add_type(dtype);

        SetTensorShapeProto(subtype,
                            handle_shapes_attr.mutable_list()->add_shape());
      }

      (*node_def->mutable_attr())["_handle_dtypes"] = handle_dtypes_attr;
      (*node_def->mutable_attr())["_handle_shapes"] = handle_shapes_attr;
    }
  }

  TF_RETURN_IF_ERROR(
      SetShapeAttribute("_output_shapes", arg_type, node_def->mutable_attr()));

  DataType dtype;
  TF_RETURN_IF_ERROR(ConvertToDataType(arg_type.getElementType(), &dtype));
  AttrValue type_attr;
  type_attr.set_type(dtype);
  (*node_def->mutable_attr())["T"] = type_attr;

  AttrValue index_attr;
  index_attr.set_i(index);
  (*node_def->mutable_attr())["index"] = index_attr;

  if (auto device_attr =
          func.getArgAttrOfType<mlir::StringAttr>(index, kDeviceAttr))
    *node_def->mutable_device() = device_attr.getValue().str();

  // Copy the remaining arg attributes, skipping the device attr (handled
  // above) and the aliasing attr (dropped).
  llvm::ArrayRef<mlir::NamedAttribute> func_arg_i_attrs =
      mlir::function_interface_impl::getArgAttrs(func, index);
  absl::flat_hash_set<absl::string_view> attrs_to_ignore = {kDeviceAttr,
                                                            kAliasingAttr};
  TF_RETURN_IF_ERROR(ConvertAttributes(func_arg_i_attrs, attrs_to_ignore,
                                       false,
                                       node_def->mutable_attr()));

  return node_def;
}
// Builds a _Retval NodeDef for result `index` of `function`, fed by `operand`.
//
// Naming mirrors GetArgumentNode: node part of `name` when provided,
// otherwise uniquified from the function name. Dtype, index, device and the
// remaining result attributes are copied onto the NodeDef.
absl::StatusOr<std::unique_ptr<NodeDef>> Exporter::GetReturnNode(
    FuncOp function, Value operand, unsigned index, llvm::StringRef name) {
  auto node_def = std::make_unique<NodeDef>();
  if (!name.empty())
    node_def->set_name(std::string(ParseTensorName(name.str()).node()));
  else
    node_def->set_name(
        std::string(op_to_name_.GetUniqueName(function.getName().str())));

  node_def->set_op(FunctionLibraryDefinition::kRetOp);
  DataType dtype;
  TF_RETURN_IF_ERROR(ConvertToDataType(
      mlir::cast<mlir::TensorType>(operand.getType()).getElementType(),
      &dtype));
  AttrValue type_attr;
  type_attr.set_type(dtype);
  (*node_def->mutable_attr())["T"] = type_attr;
  AttrValue index_attr;
  index_attr.set_i(index);
  (*node_def->mutable_attr())["index"] = index_attr;

  if (auto device_attr =
          function.getResultAttrOfType<mlir::StringAttr>(index, kDeviceAttr))
    *node_def->mutable_device() = device_attr.getValue().str();

  // Copy the remaining result attributes, skipping the device attr (handled
  // above).
  llvm::ArrayRef<mlir::NamedAttribute> func_res_i_attrs =
      function.getResultAttrs(index);
  absl::flat_hash_set<absl::string_view> attrs_to_ignore = {kDeviceAttr};
  TF_RETURN_IF_ERROR(ConvertAttributes(func_res_i_attrs, attrs_to_ignore,
                                       false,
                                       node_def->mutable_attr()));

  return node_def;
}
// Connects `src` (either an op result or a block argument) to input
// `dst_index` of `dst_node`. Control-typed op results become control edges.
Status Exporter::AddEdgeBetweenNodes(Value src, Node* dst_node,
                                     unsigned dst_index) {
  if (auto input_result = mlir::dyn_cast<mlir::OpResult>(src)) {
    auto* input_inst = GetIslandInnerOpOrSelf(input_result.getOwner());
    // A NextIteration source is never materialized as a node (see AddEdge);
    // redirect to its paired sink, which is.
    if (auto next_iter_source =
            llvm::dyn_cast<mlir::tf_executor::NextIterationSourceOp>(
                input_inst))
      input_inst = next_iter_source.GetSink();

    auto node_it = nodes_.find(input_inst);
    TF_RET_CHECK(node_it != nodes_.end())
        << "Use of OpResult encountered before def!";
    if (mlir::isa<mlir::tf_executor::ControlType>(input_result.getType())) {
      graph_->AddControlEdge(node_it->second, dst_node,
                             true);
    } else {
      graph_->AddEdge(node_it->second, input_result.getResultNumber(), dst_node,
                      dst_index);
    }
    return absl::OkStatus();
  }

  // Block arguments map to _Arg nodes, which produce their value on port 0.
  auto input_arg = mlir::cast<BlockArgument>(src);
  auto input_node_it = args_.find(input_arg);
  TF_RET_CHECK(input_node_it != args_.end())
      << "Use of BlockArgument encounted before def!";
  graph_->AddEdge(input_node_it->second, 0, dst_node, dst_index);
  return absl::OkStatus();
}
// Adds all incoming edges for `inst`, which may be a fetch, a NextIteration
// source/sink, an island, or a plain op.
Status Exporter::AddEdge(Operation* inst) {
  // For a fetch, connect each data operand to its _Retval node. The loop
  // stops at the first control operand; control returns are collected
  // separately in GetControlRetNodes.
  if (auto fetch = llvm::dyn_cast<mlir::tf_executor::FetchOp>(inst)) {
    for (auto operand_and_idx : llvm::enumerate(fetch.getOperands())) {
      Value operand = operand_and_idx.value();
      if (mlir::isa<mlir::tf_executor::ControlType>(operand.getType())) break;

      auto* dst_node = returns_[fetch][operand_and_idx.index()];
      TF_RETURN_IF_ERROR(AddEdgeBetweenNodes(operand, dst_node, 0));
    }
    return absl::OkStatus();
  }

  // A NextIteration sink takes the looped value on input 0, followed by its
  // control inputs.
  if (auto next_iter_sink =
          llvm::dyn_cast<mlir::tf_executor::NextIterationSinkOp>(inst)) {
    auto* dst_node = nodes_[inst];
    TF_RETURN_IF_ERROR(
        AddEdgeBetweenNodes(next_iter_sink.getInput(), dst_node, 0));
    for (auto control_and_idx :
         llvm::enumerate(next_iter_sink.getControlInputs()))
      TF_RETURN_IF_ERROR(AddEdgeBetweenNodes(control_and_idx.value(), dst_node,
                                             control_and_idx.index() + 1));

    return absl::OkStatus();
  }

  // NextIteration sources have no operands; their consumers are wired to the
  // paired sink's node (see AddEdgeBetweenNodes).
  if (llvm::isa<mlir::tf_executor::NextIterationSourceOp>(inst)) {
    assert(inst->getNumOperands() == 0);
    return absl::OkStatus();
  }

  // For islands, wire the inner op's operands first, then append the
  // island's own operands after them.
  Operation* op = GetIslandInnerOpOrSelf(inst);
  auto* dst_node = nodes_[op];
  int operand_offset = 0;
  if (auto island = llvm::dyn_cast<mlir::tf_executor::IslandOp>(inst)) {
    for (auto operand_and_idx : llvm::enumerate(op->getOperands()))
      TF_RETURN_IF_ERROR(AddEdgeBetweenNodes(operand_and_idx.value(), dst_node,
                                             operand_and_idx.index()));

    operand_offset = op->getNumOperands();
  }

  for (auto operand_and_idx : llvm::enumerate(inst->getOperands()))
    TF_RETURN_IF_ERROR(
        AddEdgeBetweenNodes(operand_and_idx.value(), dst_node,
                            operand_and_idx.index() + operand_offset));

  return absl::OkStatus();
}
// Rewrites the op name and any function-valued attributes of `node_def` to
// the original TF function names (tf._original_func_name) when the export
// config requests it. No-op otherwise.
void Exporter::UseOriginalFunctionNames(NodeDef& node_def) {
  if (!configs_.export_original_tf_func_name) return;

  auto& attrs = *node_def.mutable_attr();

  // Replaces `name` in place when it resolves to a function carrying the
  // original-name attribute.
  auto try_use_original_func_name = [this](std::string* name) {
    if (auto func = symbol_table_.lookup<FuncOp>(*name)) {
      if (auto original_func_name =
              func->getAttrOfType<mlir::StringAttr>("tf._original_func_name")) {
        *name = original_func_name.str();
      }
    }
  };

  // The op name itself may reference a function.
  try_use_original_func_name(node_def.mutable_op());

  // Rewrite single func attrs and lists of func attrs.
  for (auto& iter : attrs) {
    auto& attr = iter.second;
    if (attr.has_func()) {
      try_use_original_func_name(attr.mutable_func()->mutable_name());
    } else if (attr.has_list()) {
      for (auto& func_attr : *attr.mutable_list()->mutable_func()) {
        try_use_original_func_name(func_attr.mutable_name());
      }
    }
  }
}
// Converts `inst` to a NodeDef, adds it to the graph, and records the
// op -> node mapping for later edge construction.
Status Exporter::AddInstructionNode(Operation* inst) {
  std::unique_ptr<NodeDef> node_def;
  // The unique-name generator is seeded with a hash of the initial graph so
  // generated names are stable for a given starting graph.
  int graph_hash_value = graph_regularization::ComputeHash(graphdef_);
  auto name = op_to_name_.GetUniqueName(inst, graph_hash_value);
  TF_ASSIGN_OR_RETURN(node_def,
                      ConvertTFDialectOpToNodeDef(
                          inst, name, false));
  // Optionally restore original TF function names in op/attrs.
  UseOriginalFunctionNames(*node_def);

  TF_ASSIGN_OR_RETURN(Node * node, graph_->AddNode(std::move(*node_def)));
  DCHECK(node != nullptr);
  nodes_[inst] = node;
  return absl::OkStatus();
}
// Returns true iff `arg` belongs to the entry function (named "main").
bool IsEntryFunctionArg(BlockArgument arg) {
  auto parent_func = arg.getParentRegion()->getParentOfType<FuncOp>();
  return parent_func.getName() == "main";
}
// Materializes an _Arg node for `arg` and records the mapping so edges can
// be attached to it later.
Status Exporter::AddArgumentNode(BlockArgument arg, unsigned index,
                                 llvm::StringRef name) {
  TF_ASSIGN_OR_RETURN(auto arg_node_def, GetArgumentNode(arg, index, name));
  TF_ASSIGN_OR_RETURN(Node * arg_node,
                      graph_->AddNode(std::move(*arg_node_def)));
  args_[arg] = arg_node;
  return absl::OkStatus();
}
// Adds one _Retval node per data operand of `fetch`, named from `names` when
// non-empty. Stops at the first control operand (control returns are handled
// separately).
Status Exporter::AddFetchNode(FuncOp function, mlir::tf_executor::FetchOp fetch,
                              llvm::ArrayRef<llvm::StringRef> names) {
  auto& return_nodes = returns_[fetch];
  for (auto operand_and_idx : llvm::enumerate(fetch.getOperands())) {
    if (mlir::isa<mlir::tf_executor::ControlType>(
            operand_and_idx.value().getType()))
      break;

    TF_ASSIGN_OR_RETURN(
        auto node_def,
        GetReturnNode(function, operand_and_idx.value(),
                      operand_and_idx.index(),
                      names.empty() ? "" : names[operand_and_idx.index()]));
    TF_ASSIGN_OR_RETURN(Node * node, graph_->AddNode(std::move(*node_def)));
    return_nodes.push_back(node);
  }
  return absl::OkStatus();
}
// Collects the graph nodes behind the control-typed operands of `fetch` into
// `control_ret_nodes`.
Status Exporter::GetControlRetNodes(
    mlir::tf_executor::FetchOp fetch,
    absl::flat_hash_set<Node*>* control_ret_nodes) {
  for (Value operand : fetch.getOperands()) {
    if (!mlir::isa<mlir::tf_executor::ControlType>(operand.getType())) {
      continue;
    }
    Operation* defining_op = GetIslandInnerOpOrSelf(operand.getDefiningOp());
    auto node_it = nodes_.find(defining_op);
    TF_RET_CHECK(node_it != nodes_.end());
    control_ret_nodes->insert(node_it->second);
  }
  return absl::OkStatus();
}
// Rebuilds every op node's `input` list from its in-edges so input names
// match the (possibly uniquified) names of the source nodes.
void FixupInputNamesFromEdges(Graph* graph) {
  for (Node* node : graph->nodes()) {
    if (!node->IsOp()) continue;
    NodeDef* def = node->mutable_def();
    def->clear_input();
    for (const Edge* edge : node->in_edges()) {
      Node* source = edge->src();
      if (source->IsOp()) {
        Graph::AddInput(def, source->name(), edge->src_output());
      }
    }
  }
}
// Converts one MLIR function body (a single tf_executor.graph op) into a
// tensorflow::Graph.
//
// Steps: read optional input/output names from the entry-function attribute,
// reserve/uniquify names, add _Arg nodes, add one graph node per op (and
// export any functions those ops reference into `flib_def`), then wire all
// edges and collect the control return nodes.
absl::StatusOr<std::unique_ptr<Graph>> Exporter::Convert(
    const GraphExportConfig& configs, const Dialect* tf_dialect,
    const SymbolTable& symbol_table, FuncOp function,
    FunctionLibraryDefinition* flib_def,
    llvm::SmallDenseSet<FuncOp>& visited_functions,
    absl::flat_hash_set<Node*>* control_ret_nodes) {
  mlir::Block& block = function.front();
  llvm::SmallVector<llvm::StringRef, 2> input_names;
  llvm::SmallVector<llvm::StringRef, 2> output_names;
  llvm::SmallVector<llvm::StringRef, 2> unique_output_names;
  // The entry-function attribute carries comma-separated input/output names.
  auto dict_attr =
      function->getAttrOfType<mlir::DictionaryAttr>(kEntryFuncAttr);
  if (dict_attr) {
    TF_RET_CHECK(mlir::isa<mlir::StringAttr>(dict_attr.get("inputs")))
        << "inputs missing in entry function attribute";
    TF_RET_CHECK(mlir::isa<mlir::StringAttr>(dict_attr.get("outputs")))
        << "outputs missing in entry function attribute";
    mlir::cast<mlir::StringAttr>(dict_attr.get("inputs"))
        .getValue()
        .split(input_names, ',', -1, false);
    mlir::cast<mlir::StringAttr>(dict_attr.get("outputs"))
        .getValue()
        .split(output_names, ',', -1, false);
  }

  auto graph = std::make_unique<Graph>(OpRegistry::Global());

  // Propagate TF version info from the module onto the graph when available.
  VersionDef versions;
  auto module = function->getParentOfType<mlir::ModuleOp>();
  if (mlir::succeeded(ExtractTfVersions(module, &versions))) {
    graph->set_versions(versions);
  }

  Exporter exporter(&configs, graph.get(), tf_dialect, &symbol_table);

  auto graph_op = llvm::cast<mlir::tf_executor::GraphOp>(block.front());

  // Legalize and uniquify the requested output names up front so later node
  // naming cannot collide with them.
  if (!output_names.empty()) {
    const int num_data_results = graph_op.getNumResults();
    const int64_t output_names_size = output_names.size();
    TF_RET_CHECK(output_names_size == num_data_results)
        << "output names (" << output_names.size()
        << ") != terminator operands (" << num_data_results << ")";
    llvm::DenseMap<Operation*, llvm::StringRef> output_op_to_name;
    llvm::StringMap<Operation*> name_to_op;
    for (const auto& it : llvm::enumerate(graph_op.GetFetch().getOperands())) {
      const int64_t index = it.index();
      if (index >= num_data_results) break;
      std::string name(output_names[index]);
      auto tensor_id = ParseTensorName(name);
      std::string tensor_id_node(tensor_id.node());
      assert(!tensor_id_node.empty() && "expected non-empty name");
      mlir::LegalizeNodeName(tensor_id_node);
      unique_output_names.push_back(
          exporter.op_to_name_.GetUniqueName(tensor_id_node));
    }
  }

  // Reserve the requested input names; input port designations (":N") are
  // not supported.
  if (!input_names.empty()) {
    TF_RET_CHECK(input_names.size() == block.getNumArguments());
    for (const auto& it : llvm::enumerate(function.getArguments())) {
      std::string name(input_names[it.index()]);
      assert(!name.empty() && "expected non-empty name");
      mlir::LegalizeNodeName(name);
      auto tensor_id = ParseTensorName(name);
      TF_RET_CHECK(tensor_id.index() == 0)
          << "input port designation not supported";
      (void)exporter.op_to_name_.GetUniqueName(name);
    }
  }

  // Add an _Arg node per (tensor-typed) block argument.
  for (auto it : llvm::enumerate(block.getArguments())) {
    int index = it.index();
    auto arg = it.value();
    mlir::Type type = arg.getType();
    if (!mlir::isa<mlir::TensorType>(type)) {
      return errors::InvalidArgument(
          "FuncOps arguments must have tensor types. Found ",
          mlir::debugString(type), " in function ", function.getName().str());
    }

    TF_RETURN_IF_ERROR(exporter.AddArgumentNode(
        arg, index, !input_names.empty() ? input_names[index] : ""));
  }

  // Exports a function referenced by name (if it exists in the module) and
  // merges the accumulated library into the graph's flib.
  auto convert_called_function = [&](llvm::StringRef name) {
    auto func = symbol_table.lookup<FuncOp>(name);
    if (func != nullptr) {
      TF_RETURN_IF_ERROR(ConvertLibFunction(configs, tf_dialect, symbol_table,
                                            func, flib_def, visited_functions));
      TF_RETURN_IF_ERROR(graph->mutable_flib_def()->AddLibrary(*flib_def));
    }
    return absl::OkStatus();
  };

  // First pass: add one graph node per op.
  for (Operation& inst : graph_op.GetBody()) {
    for (auto type : inst.getResultTypes())
      if (!mlir::isa<mlir::TensorType, mlir::tf_executor::ControlType,
                     mlir::tf_executor::TokenType>(type))
        return errors::InvalidArgument(
            "Values must be of tensor type, TensorFlow control type, or "
            "TensorFlow token type. Found ",
            mlir::debugString(type));

    if (llvm::isa<mlir::tf_executor::NextIterationSourceOp>(inst)) {
      // Skipped: only the paired NextIteration sink becomes a node.
      continue;
    } else if (auto fetch = llvm::dyn_cast<mlir::tf_executor::FetchOp>(inst)) {
      TF_RETURN_IF_ERROR(
          exporter.AddFetchNode(function, fetch, unique_output_names));
    } else if (auto island =
                   llvm::dyn_cast<mlir::tf_executor::IslandOp>(inst)) {
      Operation& inner_op = island.GetBody().front();
      auto op_name = GetTensorFlowOpName(inner_op.getName().getStringRef());
      if (llvm::isa<FuncOp>(inner_op) && op_name.ok()) {
        // If the inner op's name refers to a function, export it first.
        TF_RETURN_IF_ERROR(convert_called_function(op_name.value().str()));
      }

      if (IsLegacyCallInstruction(&inner_op)) {
        // Legacy calls reference their callee via the "f" symbol attr.
        TF_RETURN_IF_ERROR(convert_called_function(
            inner_op.getAttrOfType<mlir::SymbolRefAttr>("f")
                .getLeafReference()
                .getValue()));
      }

      TF_RETURN_IF_ERROR(exporter.AddInstructionNode(&inner_op));
    } else {
      TF_RETURN_IF_ERROR(exporter.AddInstructionNode(&inst));
    }
  }
  // Second pass: wire all edges now that every node exists.
  for (Operation& inst : graph_op.GetBody()) {
    TF_RETURN_IF_ERROR(exporter.AddEdge(&inst));
  }

  FixupSourceAndSinkEdges(graph.get());

  FixupInputNamesFromEdges(graph.get());

  TF_RETURN_IF_ERROR(
      exporter.GetControlRetNodes(graph_op.GetFetch(), control_ret_nodes));

  return graph;
}
// Converts `function` into a FunctionDef and adds it to `flib_def`.
//
// Recursively exports the gradient function (when the gradient attr is set),
// records control return nodes, copies statefulness and remaining dialect
// attrs, and preserves resource-arg unique IDs. Functions already in
// `visited_functions` are skipped.
Status Exporter::ConvertLibFunction(
    const GraphExportConfig& configs, const Dialect* tf_dialect,
    const SymbolTable& symbol_table, FuncOp function,
    FunctionLibraryDefinition* flib_def,
    llvm::SmallDenseSet<FuncOp>& visited_functions) {
  // Each function is converted at most once.
  bool is_new_function = visited_functions.insert(function).second;
  if (!is_new_function) return absl::OkStatus();

  auto function_name = FindFunctionName(configs, function);

  // Convert the body into a Graph, then the Graph into a FunctionDef with
  // control returns named after their nodes.
  absl::flat_hash_set<Node*> control_ret_nodes;
  TF_ASSIGN_OR_RETURN(
      auto sub_graph,
      Exporter::Convert(configs, tf_dialect, symbol_table, function, flib_def,
                        visited_functions, &control_ret_nodes));
  const auto control_ret = [&](const Node* n) -> std::optional<string> {
    return control_ret_nodes.contains(n) ? std::make_optional<string>(n->name())
                                         : std::nullopt;
  };
  FunctionDef func_def;
  TF_RETURN_IF_ERROR(
      GraphToFunctionDef(*sub_graph, function_name, control_ret, &func_def));

  // Export the gradient function (if any) and register the gradient mapping.
  auto grad_string = mlir::TF::TensorFlowDialect::GetGradientAttrName();
  if (auto attr =
          function->getAttrOfType<mlir::FlatSymbolRefAttr>(grad_string)) {
    auto grad_func = symbol_table.lookup<FuncOp>(attr.getValue());
    TF_RETURN_IF_ERROR(ConvertLibFunction(configs, tf_dialect, symbol_table,
                                          grad_func, flib_def,
                                          visited_functions));
    GradientDef grad;
    grad.set_function_name(function_name);
    grad.set_gradient_func(grad_func.getName().str());
    TF_RETURN_IF_ERROR(flib_def->AddGradientDef(grad));
  }

  // The stateful unit attr maps to the signature's is_stateful flag.
  auto stateful_string = mlir::TF::TensorFlowDialect::GetStatefulAttrName();
  if (auto attr = function->getAttrOfType<mlir::UnitAttr>(stateful_string)) {
    func_def.mutable_signature()->set_is_stateful(true);
  }

  // Copy remaining dialect attrs, excluding the ones handled above.
  absl::flat_hash_set<absl::string_view> attrs_to_ignore = {
      grad_string.data(), stateful_string.data(), kEntryFuncAttr};
  llvm::SmallVector<mlir::NamedAttribute, 8> funcAttrs(
      function->getDialectAttrs());
  TF_RETURN_IF_ERROR(ConvertAttributes(funcAttrs, attrs_to_ignore,
                                       false,
                                       func_def.mutable_attr()));

  // Preserve per-argument resource unique IDs.
  for (int i = 0, e = function.getNumArguments(); i < e; ++i) {
    if (auto resource_arg_unique_id_attr =
            function.getArgAttrOfType<mlir::IntegerAttr>(
                i, kResourceArgUniqueIdAttr)) {
      (*func_def.mutable_resource_arg_unique_id())[i] =
          resource_arg_unique_id_attr.getInt();
    }
  }

  return flib_def->AddFunctionDef(std::move(func_def));
}
// Converts the whole module. Non-entry functions are exported into a
// temporary library (and into `flib_def` when non-null). Unless
// export_entry_func_to_flib is set, "main" must exist and becomes `graph`.
Status Exporter::Convert(mlir::ModuleOp module,
                         const GraphExportConfig& configs,
                         std::unique_ptr<Graph>* graph,
                         FunctionLibraryDefinition* flib_def,
                         absl::flat_hash_set<Node*>* control_ret_nodes) {
  mlir::StringAttr entry_func_id =
      mlir::StringAttr::get(module.getContext(), "main");
  std::optional<FuncOp> entry_func;
  FunctionLibraryDefinition temp_flib_def(OpRegistry::Global(),
                                          FunctionDefLibrary());
  llvm::SmallDenseSet<FuncOp> visited_functions;
  auto tf_dialect = module.getContext()->getLoadedDialect("tf");
  // Construct SymbolTable to enable cheap function lookups during export.
  SymbolTable symbol_table(module);
  for (auto function : module.getOps<FuncOp>()) {
    if (function.isExternal())
      return errors::FailedPrecondition("External functions not supported");

    if (function.getName() == entry_func_id &&
        !configs.export_entry_func_to_flib) {
      // "main" is converted last, into the graph itself.
      entry_func.emplace(function);
    } else {
      TF_RETURN_IF_ERROR(ConvertLibFunction(configs, tf_dialect, symbol_table,
                                            function, &temp_flib_def,
                                            visited_functions));
    }
  }

  if (flib_def != nullptr) {
    TF_RETURN_IF_ERROR(flib_def->AddLibrary(temp_flib_def));
  }

  if (!configs.export_entry_func_to_flib) {
    if (!entry_func.has_value())
      return errors::FailedPrecondition(
          "entry function `main` must be present");

    TF_ASSIGN_OR_RETURN(
        *graph, Exporter::Convert(configs, tf_dialect, symbol_table,
                                  entry_func.value(), &temp_flib_def,
                                  visited_functions, control_ret_nodes));
    TF_RETURN_IF_ERROR(
        graph->get()->mutable_flib_def()->AddLibrary(temp_flib_def));
  } else if (graph != nullptr) {
    // NOTE(review): this branch dereferences `flib_def` unconditionally even
    // though it was null-checked above — confirm callers always pass a
    // non-null flib_def when export_entry_func_to_flib is set.
    TF_RETURN_IF_ERROR(
        graph->get()->mutable_flib_def()->AddLibrary(std::move(*flib_def)));
  }
  return absl::OkStatus();
}
}
// Verifies the module is exportable and converts it to a Graph, folding any
// MLIR diagnostics emitted during export into the returned status.
Status ConvertTfExecutorToGraph(mlir::ModuleOp module,
                                const GraphExportConfig& configs,
                                std::unique_ptr<Graph>* graph,
                                FunctionLibraryDefinition* flib_def,
                                absl::flat_hash_set<Node*>* control_ret_nodes) {
  mlir::StatusScopedDiagnosticHandler diag_handler(module.getContext());
  if (failed(VerifyExportSuitable(module))) {
    return diag_handler.ConsumeStatus();
  }
  Status convert_status =
      Exporter::Convert(module, configs, graph, flib_def, control_ret_nodes);
  return diag_handler.Combine(convert_status);
}
// Converts a single MLIR function into a FunctionDef by exporting it (and
// its transitive callees) into a scratch library and extracting it by name.
absl::Status ConvertMlirFunctionToFunctionLibraryDef(
    FuncOp func, const GraphExportConfig& configs, FunctionDef* function_def) {
  Dialect* tf_dialect = func.getContext()->getLoadedDialect("tf");
  FunctionLibraryDefinition flib_def(OpRegistry::Global(),
                                     FunctionDefLibrary());
  llvm::SmallDenseSet<FuncOp> visited_functions;
  SymbolTable symbol_table(func->getParentOfType<mlir::ModuleOp>());
  TF_RETURN_IF_ERROR(Exporter::ConvertLibFunction(
      configs, tf_dialect, symbol_table, func, &flib_def, visited_functions));

  const std::string name = FindFunctionName(configs, func);
  if (const FunctionDef* converted = flib_def.Find(name)) {
    *function_def = *converted;
    return absl::OkStatus();
  }
  return absl::InvalidArgumentError(
      absl::StrCat("Function '", name,
                   "' couldn't be found in the FunctionDefLibrary after "
                   "converting from MLIR"));
}
}
}
} | #include "tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.h"
#include <stdlib.h>
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Parser/Parser.h"
#include "riegeli/bytes/fd_reader.h"
#include "riegeli/bytes/read_all.h"
#include "tensorflow/compiler/mlir/register_common_dialects.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tsl/platform/protobuf.h"
namespace tensorflow {
namespace tf2xla {
namespace v2 {
namespace {
using mlir::DialectRegistry;
using mlir::MLIRContext;
using mlir::ModuleOp;
using mlir::OwningOpRef;
// Returns the absolute path of the directory holding the MLIR/GraphDef test
// fixtures.
std::string TestDataPath() {
  constexpr char kTestDataRelativePath[] =
      "tensorflow/compiler/mlir/tf2xla/api/v2/testdata/";
  return tensorflow::GetDataDependencyFilepath(kTestDataRelativePath);
}
// Test fixture that loads MLIR modules and text-format GraphDefs from the
// testdata directory.
class TfExecutorToGraphTest : public ::testing::Test {
 public:
  TfExecutorToGraphTest() {
    // Register and load all dialects the test modules may reference.
    mlir::RegisterCommonToolingDialects(registry_);
    context_.appendDialectRegistry(registry_);
    context_.loadAllAvailableDialects();
  }

  // Parses the named testdata file into an MLIR module owned by this
  // fixture's context.
  absl::StatusOr<OwningOpRef<mlir::ModuleOp>> CreateMlirModule(
      std::string mlir_module_filename) {
    std::string mlir_module_path = TestDataPath() + mlir_module_filename;
    return mlir::parseSourceFile<mlir::ModuleOp>(mlir_module_path, &context_);
  }

  // Reads and parses a text-format GraphDef from testdata. Returns an empty
  // GraphDef when the file cannot be read.
  GraphDef CreateGraphDef(std::string graphdef_filename) {
    std::string file_path = TestDataPath() + graphdef_filename;
    std::string contents;
    GraphDef graph_def;
    auto status = riegeli::ReadAll(riegeli::FdReader(file_path), contents);
    if (!status.ok()) {
      return graph_def;
    }
    tsl::protobuf::TextFormat::ParseFromString(contents, &graph_def);
    return graph_def;
  }

  DialectRegistry registry_;
  MLIRContext context_;
  OwningOpRef<mlir::ModuleOp> mlir_module_;
};
// Round-trip check: converting valid_executor.mlir must produce exactly the
// GraphDef stored in valid_graph.txt.
TEST_F(TfExecutorToGraphTest, ConvertMlirToGraphSucceeds) {
  auto valid_executor_module = CreateMlirModule("valid_executor.mlir");
  GraphExportConfig confs;
  absl::flat_hash_set<Node*> control_ret_nodes;
  FunctionLibraryDefinition flib_def(OpRegistry::Global(),
                                     FunctionDefLibrary());
  auto result_graph = std::make_unique<Graph>(flib_def);

  TF_ASSERT_OK(ConvertTfExecutorToGraph(valid_executor_module.value().get(),
                                        confs, &result_graph, &flib_def,
                                        &control_ret_nodes));

  GraphDef result_graphdef;
  result_graph->ToGraphDef(&result_graphdef);
  GraphDef expected_graphdef = CreateGraphDef("valid_graph.txt");
  EXPECT_EQ(result_graphdef.DebugString(), expected_graphdef.DebugString());
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/tf2xla/api/v2/tf_executor_to_graph_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
7f0424d3-3804-43b2-8e39-74cec9ac5e61 | cpp | tensorflow/tensorflow | conv_canonicalization | third_party/xla/xla/service/cpu/conv_canonicalization.cc | third_party/xla/xla/service/cpu/conv_canonicalization_test.cc | #include "xla/service/cpu/conv_canonicalization.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/cpu/cpu_runtime.h"
#include "xla/service/cpu/ir_emission_utils.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace cpu {
absl::StatusOr<bool> ConvCanonicalization::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloInstruction* hlo :
module->entry_computation()->MakeInstructionPostOrder()) {
if (hlo->opcode() == HloOpcode::kConvolution &&
!PotentiallyImplementedAsEigenConvolution(*hlo,
target_machine_features_)) {
const ConvolutionDimensionNumbers& dnums =
hlo->convolution_dimension_numbers();
auto input_batch_dim = dnums.input_batch_dimension();
auto input_feature_dim = dnums.input_feature_dimension();
auto kernel_input_feature_dim = dnums.kernel_input_feature_dimension();
auto kernel_output_feature_dim = dnums.kernel_output_feature_dimension();
const int64_t num_spatial_dims = dnums.output_spatial_dimensions_size();
const int64_t num_dims = num_spatial_dims + 2;
HloInstruction* input = hlo->mutable_operand(0);
std::vector<int64_t> new_input_dim_order(num_dims);
std::vector<int64_t> new_input_dims(num_dims);
new_input_dim_order[0] = input_batch_dim;
new_input_dims[0] = input->shape().dimensions(input_batch_dim);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_input_dim_order[i + 1] = dnums.input_spatial_dimensions(i);
new_input_dims[i + 1] =
input->shape().dimensions(dnums.input_spatial_dimensions(i));
}
new_input_dim_order[num_dims - 1] = input_feature_dim;
new_input_dims[num_dims - 1] =
input->shape().dimensions(input_feature_dim);
Shape new_input_shape =
ShapeUtil::MakeShape(input->shape().element_type(), new_input_dims);
HloInstruction* new_input = module->entry_computation()->AddInstruction(
HloInstruction::CreateTranspose(new_input_shape, input,
new_input_dim_order));
HloInstruction* kernel = hlo->mutable_operand(1);
std::vector<int64_t> new_kernel_dim_order(num_dims);
std::vector<int64_t> new_kernel_dims(num_dims);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_kernel_dim_order[i] = dnums.kernel_spatial_dimensions(i);
new_kernel_dims[i] =
kernel->shape().dimensions(dnums.kernel_spatial_dimensions(i));
}
new_kernel_dim_order[num_dims - 2] = kernel_input_feature_dim;
new_kernel_dims[num_dims - 2] =
kernel->shape().dimensions(kernel_input_feature_dim);
new_kernel_dim_order[num_dims - 1] = kernel_output_feature_dim;
new_kernel_dims[num_dims - 1] =
kernel->shape().dimensions(kernel_output_feature_dim);
Shape new_kernel_shape =
ShapeUtil::MakeShape(kernel->shape().element_type(), new_kernel_dims);
HloInstruction* new_kernel = module->entry_computation()->AddInstruction(
HloInstruction::CreateTranspose(new_kernel_shape, kernel,
new_kernel_dim_order));
std::vector<int64_t> new_output_dim_order(num_dims);
std::vector<int64_t> new_conv_dims(num_dims);
auto output_batch_dim = dnums.output_batch_dimension();
auto output_feature_dim = dnums.output_feature_dimension();
new_output_dim_order[0] = output_batch_dim;
new_conv_dims[0] = hlo->shape().dimensions(output_batch_dim);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_output_dim_order[i + 1] = dnums.output_spatial_dimensions(i);
new_conv_dims[i + 1] =
hlo->shape().dimensions(dnums.output_spatial_dimensions(i));
}
new_output_dim_order[num_dims - 1] = output_feature_dim;
new_conv_dims[num_dims - 1] = hlo->shape().dimensions(output_feature_dim);
Shape new_conv_shape =
ShapeUtil::MakeShape(hlo->shape().element_type(), new_conv_dims);
ConvolutionDimensionNumbers new_dnums;
new_dnums.set_input_batch_dimension(0);
new_dnums.set_output_batch_dimension(0);
for (int64_t i = 0; i < num_spatial_dims; ++i) {
new_dnums.add_input_spatial_dimensions(i + 1);
new_dnums.add_kernel_spatial_dimensions(i);
new_dnums.add_output_spatial_dimensions(i + 1);
}
new_dnums.set_input_feature_dimension(num_dims - 1);
new_dnums.set_output_feature_dimension(num_dims - 1);
new_dnums.set_kernel_input_feature_dimension(num_dims - 2);
new_dnums.set_kernel_output_feature_dimension(num_dims - 1);
HloInstruction* new_conv = module->entry_computation()->AddInstruction(
HloInstruction::CreateConvolve(
new_conv_shape, new_input, new_kernel, hlo->feature_group_count(),
hlo->batch_group_count(), hlo->window(), new_dnums,
hlo->precision_config()));
TF_RETURN_IF_ERROR(module->entry_computation()->ReplaceWithNewInstruction(
hlo, HloInstruction::CreateTranspose(
hlo->shape(), new_conv,
InversePermutation(new_output_dim_order))));
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/cpu/conv_canonicalization.h"
#include <vector>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/cpu/target_machine_features_fake.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
namespace xla {
namespace cpu {
using ::testing::ElementsAre;
class ConvCanonicalizationTest : public HloTestBase {
public:
ConvCanonicalizationTest() {
for (int i = 0; i < 2; ++i) {
auto dim = conv_window_.add_dimensions();
dim->set_size(kWindowSize);
dim->set_stride(1);
dim->set_padding_low(0);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
}
}
protected:
Window conv_window_;
static constexpr int kBatchSize = 50;
static constexpr int kInputSize = 28;
static constexpr int kWindowSize = 5;
static constexpr int kInputFeatureCount = 32;
static constexpr int kOutputFeatureCount = 64;
};
TEST_F(ConvCanonicalizationTest, NonCanonicalToCanonical) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kInputFeatureCount, kBatchSize, kInputSize, kInputSize))));
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kOutputFeatureCount, kInputFeatureCount, kWindowSize, kWindowSize))));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(1);
dnums.set_output_batch_dimension(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.add_input_spatial_dimensions(3);
dnums.add_output_spatial_dimensions(3);
dnums.set_input_feature_dimension(0);
dnums.set_output_feature_dimension(0);
dnums.add_kernel_spatial_dimensions(2);
dnums.add_kernel_spatial_dimensions(3);
dnums.set_kernel_input_feature_dimension(1);
dnums.set_kernel_output_feature_dimension(0);
auto output_size = kInputSize - kWindowSize + 1;
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(
F32, {kOutputFeatureCount, kBatchSize, output_size, output_size}),
input, kernel, 1, 1,
conv_window_, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
ConvCanonicalization conv_canonicalization(&target_machine_features);
EXPECT_TRUE(conv_canonicalization.Run(module.get()).value());
const HloInstruction* output_reshape = entry_computation->root_instruction();
EXPECT_EQ(HloOpcode::kTranspose, output_reshape->opcode());
const HloInstruction* canonical_conv = output_reshape->operand(0);
EXPECT_EQ(HloOpcode::kConvolution, canonical_conv->opcode());
const HloInstruction* input_reshape = canonical_conv->operand(0);
EXPECT_EQ(HloOpcode::kTranspose, input_reshape->opcode());
const HloInstruction* kernel_reshape = canonical_conv->operand(1);
EXPECT_EQ(HloOpcode::kTranspose, kernel_reshape->opcode());
EXPECT_THAT(input_reshape->dimensions(), ElementsAre(1, 2, 3, 0));
EXPECT_THAT(kernel_reshape->dimensions(), ElementsAre(2, 3, 1, 0));
EXPECT_THAT(output_reshape->dimensions(), ElementsAre(3, 0, 1, 2));
}
TEST_F(ConvCanonicalizationTest, CanonicalStaysTheSame) {
auto builder = HloComputation::Builder(TestName());
auto input = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kBatchSize, kInputSize, kInputSize, kInputFeatureCount))));
auto kernel = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR4FromArray4D(Array4D<float>(
kWindowSize, kWindowSize, kInputFeatureCount, kOutputFeatureCount))));
ConvolutionDimensionNumbers dnums;
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(0);
dnums.add_input_spatial_dimensions(1);
dnums.add_output_spatial_dimensions(1);
dnums.add_input_spatial_dimensions(2);
dnums.add_output_spatial_dimensions(2);
dnums.set_input_feature_dimension(3);
dnums.set_output_feature_dimension(3);
dnums.add_kernel_spatial_dimensions(0);
dnums.add_kernel_spatial_dimensions(1);
dnums.set_kernel_input_feature_dimension(2);
dnums.set_kernel_output_feature_dimension(3);
auto output_size = kInputSize - kWindowSize + 1;
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(
F32, {kBatchSize, output_size, output_size, kOutputFeatureCount}),
input, kernel, 1, 1,
conv_window_, dnums, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
cpu::TargetMachineFeaturesWithFakeAlignmentLogic target_machine_features(
[](int64_t shape_size) {
return cpu::TargetMachineFeatures::kEigenExpectedTensorAlignment;
});
ConvCanonicalization conv_canonicalization(&target_machine_features);
EXPECT_FALSE(conv_canonicalization.Run(module.get()).value());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/conv_canonicalization.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/conv_canonicalization_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bea3fc56-baf4-4f52-a00f-73076a7b90ad | cpp | tensorflow/tensorflow | fusion_process_dump | third_party/xla/xla/service/gpu/fusion_process_dump.cc | third_party/xla/xla/service/gpu/fusion_process_dump_test.cc | #include "xla/service/gpu/fusion_process_dump.h"
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tools/hlo_module_loader.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
HloInstruction* AddFusionInstruction(HloInstruction* producer,
HloInstruction* consumer,
HloComputation* computation,
std::string_view fusion_name) {
if (consumer->opcode() == HloOpcode::kFusion) {
return consumer;
}
auto kind = HloInstruction::FusionKind::kLoop;
auto fusion_instruction = computation->AddInstruction(
HloInstruction::CreateFusion(consumer->shape(), kind, consumer),
fusion_name);
TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));
return fusion_instruction;
}
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
HloComputation* computation,
std::string_view fusion_name) {
HloInstruction* fusion_instruction =
AddFusionInstruction(producer, consumer, computation, fusion_name);
if (producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(producer);
} else {
fusion_instruction->FuseInstruction(producer);
}
if (producer->user_count() == 0) {
TF_CHECK_OK(computation->RemoveInstruction(producer));
}
return fusion_instruction;
}
absl::string_view GetProducerName(const FusionStep& step) {
if (step.has_fusion()) {
return step.fusion().producer_name();
}
if (step.has_update_priority()) {
return step.update_priority().producer_name();
}
if (step.has_producer_ineligible()) {
return step.producer_ineligible().producer_name();
}
LOG(FATAL) << "Producer name not found in the current step.";
}
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromFile(
const std::string& path) {
std::string format = std::string(tsl::io::Extension(path));
std::string data;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), path, &data));
return FusionProcessDump::LoadFromData(data, format);
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromData(
const std::string& data, absl::string_view format) {
FusionProcessDumpProto fusion_process_dump_proto;
if (format == "txt" || format == "pbtxt") {
if (!tsl::protobuf::TextFormat::ParseFromString(
data, &fusion_process_dump_proto)) {
return InvalidArgument("Failed to parse input as HLO protobuf text");
}
} else if (format == "pb") {
if (!fusion_process_dump_proto.ParseFromString(data)) {
return InvalidArgument("Failed to parse input as HLO protobuf binary");
}
} else {
return InvalidArgument(
"Invalid format from file extension: '%s'. Expected: txt, pb, or pbtxt",
format);
}
return FusionProcessDump::LoadFromProto(fusion_process_dump_proto);
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromProto(
const FusionProcessDumpProto& fusion_process_dump_proto) {
TF_ASSIGN_OR_RETURN(
auto module,
LoadModuleFromData(fusion_process_dump_proto.hlo_module_before_fusion(),
"txt"));
se::DeviceDescription gpu_device_info(
fusion_process_dump_proto.gpu_device_info());
absl::flat_hash_map<std::string, HloComputation*>
instruction_name_to_computation_map;
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instr : computation->instructions()) {
instruction_name_to_computation_map[instr->name()] = computation;
}
}
return FusionProcessDump(std::move(fusion_process_dump_proto),
std::move(module), std::move(gpu_device_info),
std::move(instruction_name_to_computation_map));
}
HloComputation* FusionProcessDump::GetCurrentComputation() {
return instruction_name_to_computation_map_.at(
GetProducerName(CurrentStep()));
}
HloInstruction* FusionProcessDump::GetInstructionWithName(
absl::string_view name) {
return instruction_name_to_computation_map_[name]->GetInstructionWithName(
name);
}
HloInstruction* FusionProcessDump::GetProducer() {
return GetInstructionWithName(GetProducerName(CurrentStep()));
}
absl::InlinedVector<HloInstruction*, 2> FusionProcessDump::GetConsumers() {
auto& step = CurrentStep();
if (step.has_fusion()) {
return {GetInstructionWithName(step.fusion().consumer_name())};
}
if (step.has_update_priority()) {
absl::InlinedVector<HloInstruction*, 2> consumers;
for (const auto& consumer_name : step.update_priority().consumer_names()) {
consumers.push_back(GetInstructionWithName(consumer_name));
}
return consumers;
}
return {};
}
const FusionStep& FusionProcessDump::CurrentStep() {
CHECK(HasNext());
return fusion_process_dump_proto_.fusion_steps(current_step_idx_);
}
bool FusionProcessDump::HasNext() {
return current_step_idx_ < fusion_process_dump_proto_.fusion_steps_size();
}
void FusionProcessDump::Advance() {
auto step = CurrentStep();
if (step.has_fusion()) {
const auto& fusion_step = step.fusion();
auto* computation = GetCurrentComputation();
HloInstruction* producer =
computation->GetInstructionWithName(fusion_step.producer_name());
HloInstruction* consumer =
computation->GetInstructionWithName(fusion_step.consumer_name());
HloInstruction* fusion =
Fuse(producer, consumer, computation, fusion_step.fusion_name());
instruction_name_to_computation_map_[fusion->name()] = computation;
last_fusion_ = fusion;
}
++current_step_idx_;
}
}
} | #include "xla/service/gpu/fusion_process_dump.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
using FusionProcessDumpTest = HloTestBase;
void AddFusion(FusionProcessDumpProto& dump_proto,
const std::string& fusion_name, const std::string& producer_name,
const std::string& consumer_name) {
auto step = dump_proto.add_fusion_steps();
auto fusion_step = step->mutable_fusion();
fusion_step->set_fusion_name(fusion_name);
fusion_step->set_producer_name(producer_name);
fusion_step->set_consumer_name(consumer_name);
}
TEST_F(FusionProcessDumpTest, MultipleFusionSteps) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
add = f32[] add(p0, p1)
subtract = f32[] subtract(p0, p1)
abs = f32[] abs(subtract)
ROOT multiply = f32[] multiply(add, abs)
})"));
FusionProcessDumpProto dump_proto;
*dump_proto.mutable_gpu_device_info() =
TestGpuDeviceInfo::RTXA6000DeviceInfo().ToGpuProto();
dump_proto.set_hlo_module_before_fusion(
module->ToString(HloPrintOptions::ShortParsable()));
AddFusion(dump_proto, "fusion.1", "subtract", "abs");
AddFusion(dump_proto, "fusion.2", "fusion.1", "multiply");
AddFusion(dump_proto, "fusion.2", "add", "fusion.2");
TF_ASSERT_OK_AND_ASSIGN(auto fusion_process_dump,
FusionProcessDump::LoadFromProto(dump_proto));
fusion_process_dump.Advance();
fusion_process_dump.Advance();
fusion_process_dump.Advance();
EXPECT_FALSE(fusion_process_dump.HasNext());
auto root =
fusion_process_dump.module()->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "fusion.2");
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Multiply(
m::Add(m::Parameter(), m::Parameter()),
m::Abs(m::Subtract(m::Parameter(), m::Parameter())))));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_process_dump.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusion_process_dump_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
dce4203b-7007-428f-b60f-a239e8cc6927 | cpp | google/cel-cpp | timestamp | extensions/protobuf/internal/timestamp.cc | extensions/protobuf/internal/timestamp_test.cc | #include "extensions/protobuf/internal/timestamp.h"
#include <cstdint>
#include "google/protobuf/timestamp.pb.h"
#include "absl/base/optimization.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "extensions/protobuf/internal/is_generated_message.h"
#include "extensions/protobuf/internal/is_message_lite.h"
#include "extensions/protobuf/internal/timestamp_lite.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/message.h"
namespace cel::extensions::protobuf_internal {
absl::StatusOr<absl::Time> UnwrapDynamicTimestampProto(
const google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Timestamp");
const auto* desc = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(desc == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing descriptor"));
}
if constexpr (NotMessageLite<google::protobuf::Timestamp>) {
if (IsGeneratedMessage(message)) {
return UnwrapGeneratedTimestampProto(
google::protobuf::DownCastMessage<google::protobuf::Timestamp>(message));
}
}
const auto* reflect = message.GetReflection();
if (ABSL_PREDICT_FALSE(reflect == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing reflection"));
}
const auto* seconds_field =
desc->FindFieldByNumber(google::protobuf::Timestamp::kSecondsFieldNumber);
if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(), " missing seconds field descriptor"));
}
if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_INT64)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(), " has unexpected seconds field type: ",
seconds_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(seconds_field->is_map() ||
seconds_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " has unexpected ",
seconds_field->name(), " field cardinality: REPEATED"));
}
const auto* nanos_field =
desc->FindFieldByNumber(google::protobuf::Timestamp::kNanosFieldNumber);
if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing nanos field descriptor"));
}
if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_INT32)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(),
" has unexpected nanos field type: ", nanos_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " has unexpected ",
nanos_field->name(), " field cardinality: REPEATED"));
}
return absl::UnixEpoch() +
absl::Seconds(reflect->GetInt64(message, seconds_field)) +
absl::Nanoseconds(reflect->GetInt32(message, nanos_field));
}
absl::Status WrapDynamicTimestampProto(absl::Time value,
google::protobuf::Message& message) {
ABSL_DCHECK_EQ(message.GetTypeName(), "google.protobuf.Timestamp");
const auto* desc = message.GetDescriptor();
if (ABSL_PREDICT_FALSE(desc == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing descriptor"));
}
if constexpr (NotMessageLite<google::protobuf::Timestamp>) {
if (IsGeneratedMessage(message)) {
return WrapGeneratedTimestampProto(
value, google::protobuf::DownCastMessage<google::protobuf::Timestamp>(message));
}
}
const auto* reflect = message.GetReflection();
if (ABSL_PREDICT_FALSE(reflect == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing reflection"));
}
const auto* seconds_field =
desc->FindFieldByNumber(google::protobuf::Timestamp::kSecondsFieldNumber);
if (ABSL_PREDICT_FALSE(seconds_field == nullptr)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(), " missing seconds field descriptor"));
}
if (ABSL_PREDICT_FALSE(seconds_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_INT64)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(), " has unexpected seconds field type: ",
seconds_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(seconds_field->is_map() ||
seconds_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " has unexpected ",
seconds_field->name(), " field cardinality: REPEATED"));
}
const auto* nanos_field =
desc->FindFieldByNumber(google::protobuf::Timestamp::kNanosFieldNumber);
if (ABSL_PREDICT_FALSE(nanos_field == nullptr)) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " missing nanos field descriptor"));
}
if (ABSL_PREDICT_FALSE(nanos_field->cpp_type() !=
google::protobuf::FieldDescriptor::CPPTYPE_INT32)) {
return absl::InternalError(absl::StrCat(
message.GetTypeName(),
" has unexpected nanos field type: ", nanos_field->cpp_type_name()));
}
if (ABSL_PREDICT_FALSE(nanos_field->is_map() || nanos_field->is_repeated())) {
return absl::InternalError(
absl::StrCat(message.GetTypeName(), " has unexpected ",
nanos_field->name(), " field cardinality: REPEATED"));
}
auto duration = value - absl::UnixEpoch();
reflect->SetInt64(&message, seconds_field,
absl::IDivDuration(duration, absl::Seconds(1), &duration));
reflect->SetInt32(&message, nanos_field,
static_cast<int32_t>(absl::IDivDuration(
duration, absl::Nanoseconds(1), &duration)));
return absl::OkStatus();
}
} | #include "extensions/protobuf/internal/timestamp.h"
#include <memory>
#include "google/protobuf/timestamp.pb.h"
#include "google/protobuf/descriptor.pb.h"
#include "absl/memory/memory.h"
#include "absl/time/time.h"
#include "extensions/protobuf/internal/timestamp_lite.h"
#include "internal/testing.h"
#include "google/protobuf/descriptor.h"
#include "google/protobuf/descriptor_database.h"
#include "google/protobuf/dynamic_message.h"
namespace cel::extensions::protobuf_internal {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::Eq;
TEST(Timestamp, GeneratedFromProto) {
EXPECT_THAT(UnwrapGeneratedTimestampProto(google::protobuf::Timestamp()),
IsOkAndHolds(Eq(absl::UnixEpoch())));
}
TEST(Timestamp, CustomFromProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Timestamp::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
EXPECT_THAT(UnwrapDynamicTimestampProto(*factory.GetPrototype(
pool.FindMessageTypeByName("google.protobuf.Timestamp"))),
IsOkAndHolds(Eq(absl::UnixEpoch())));
}
TEST(Timestamp, GeneratedToProto) {
google::protobuf::Timestamp proto;
ASSERT_OK(WrapGeneratedTimestampProto(
absl::UnixEpoch() + absl::Seconds(1) + absl::Nanoseconds(2), proto));
EXPECT_EQ(proto.seconds(), 1);
EXPECT_EQ(proto.nanos(), 2);
}
TEST(Timestamp, CustomToProto) {
google::protobuf::SimpleDescriptorDatabase database;
{
google::protobuf::FileDescriptorProto fd;
google::protobuf::Timestamp::descriptor()->file()->CopyTo(&fd);
ASSERT_TRUE(database.Add(fd));
}
google::protobuf::DescriptorPool pool(&database);
pool.AllowUnknownDependencies();
google::protobuf::DynamicMessageFactory factory(&pool);
factory.SetDelegateToGeneratedFactory(false);
std::unique_ptr<google::protobuf::Message> proto = absl::WrapUnique(
factory
.GetPrototype(pool.FindMessageTypeByName("google.protobuf.Timestamp"))
->New());
const auto* descriptor = proto->GetDescriptor();
const auto* reflection = proto->GetReflection();
const auto* seconds_field = descriptor->FindFieldByName("seconds");
ASSERT_NE(seconds_field, nullptr);
const auto* nanos_field = descriptor->FindFieldByName("nanos");
ASSERT_NE(nanos_field, nullptr);
ASSERT_OK(WrapDynamicTimestampProto(
absl::UnixEpoch() + absl::Seconds(1) + absl::Nanoseconds(2), *proto));
EXPECT_EQ(reflection->GetInt64(*proto, seconds_field), 1);
EXPECT_EQ(reflection->GetInt32(*proto, nanos_field), 2);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/timestamp.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/extensions/protobuf/internal/timestamp_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
c0bbb1d9-7af7-45c5-8810-2ff63fa804c5 | cpp | google/arolla | tuple_input_loader | arolla/io/tuple_input_loader.h | arolla/io/tuple_input_loader_test.cc | #ifndef AROLLA_IO_TUPLE_INPUT_LOADER_H_
#define AROLLA_IO_TUPLE_INPUT_LOADER_H_
#include <cstddef>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "arolla/io/input_loader.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
namespace arolla {
template <typename Input>
class TupleInputLoader;
template <typename... Ts>
class TupleInputLoader<std::tuple<Ts...>> final
: public StaticInputLoader<std::tuple<Ts...>> {
public:
using Input = std::tuple<Ts...>;
static absl::StatusOr<InputLoaderPtr<Input>> Create(
std::vector<std::string> arg_names) {
if (arg_names.size() != sizeof...(Ts)) {
return absl::InvalidArgumentError(
absl::StrFormat("tuple size doesn't match arg_names size: %d vs %d",
sizeof...(Ts), arg_names.size()));
}
return InputLoaderPtr<Input>(
static_cast<InputLoader<Input>*>(new TupleInputLoader<Input>(
std::move(arg_names), std::index_sequence_for<Ts...>{})));
}
private:
template <size_t... Is>
explicit TupleInputLoader(std::vector<std::string> arg_names,
std::index_sequence<Is...>)
: StaticInputLoader<std::tuple<Ts...>>(
{{arg_names[Is], ::arolla::GetQType<Ts>()}...}) {}
absl::StatusOr<BoundInputLoader<Input>> BindImpl(
const absl::flat_hash_map<std::string, TypedSlot>& output_slots)
const override {
std::vector<TypedSlot> slots_in_order;
slots_in_order.reserve(this->types_in_order().size());
for (const auto& [name, _] : this->types_in_order()) {
auto it = output_slots.find(name);
if (it == output_slots.end()) {
return absl::FailedPreconditionError(absl::StrCat(
"TupleInputLoader doesn't support unused arguments; no slot for: ",
name));
}
slots_in_order.push_back(it->second);
}
return BoundInputLoader<Input>(
[slots_in_order](const Input& input, FramePtr frame,
RawBufferFactory*) -> absl::Status {
LoaderImpl(input, frame, slots_in_order,
std::index_sequence_for<Ts...>{});
return absl::OkStatus();
});
}
template <size_t... Is>
static void LoaderImpl(const Input& input, FramePtr frame,
const std::vector<TypedSlot>& slots,
std::index_sequence<Is...>) {
(frame.Set(slots[Is].UnsafeToSlot<Ts>(), std::get<Is>(input)), ...);
}
};
}
#endif | #include "arolla/io/tuple_input_loader.h"
#include <tuple>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "arolla/io/input_loader.h"
#include "arolla/io/testing/matchers.h"
#include "arolla/memory/frame.h"
#include "arolla/memory/memory_allocation.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/typed_slot.h"
namespace arolla {
namespace {
using ::absl_testing::IsOk;
using ::absl_testing::StatusIs;
using ::arolla::testing::InputLoaderSupports;
using ::testing::Eq;
using ::testing::HasSubstr;
// End-to-end check of TupleInputLoader over std::tuple<float, int>:
// creation, reported QTypes, binding, loading values into a frame, and the
// error path for a partial (unused-argument) binding.
TEST(TupleInputLoaderTest, Scalars) {
  using Input = std::tuple<float, int>;
  ASSERT_OK_AND_ASSIGN(std::unique_ptr<InputLoader<Input>> input_loader,
                       (TupleInputLoader<Input>::Create({"a", "b"})));
  // The loader must advertise one QType per declared argument name.
  EXPECT_THAT(input_loader, InputLoaderSupports({{"a", GetQType<float>()},
                                                 {"b", GetQType<int>()}}));
  FrameLayout::Builder layout_builder;
  auto a_slot = layout_builder.AddSlot<float>();
  auto b_slot = layout_builder.AddSlot<int>();
  ASSERT_OK_AND_ASSIGN(BoundInputLoader<Input> bound_input_loader,
                       input_loader->Bind({
                           {"a", TypedSlot::FromSlot(a_slot)},
                           {"b", TypedSlot::FromSlot(b_slot)},
                       }));
  FrameLayout memory_layout = std::move(layout_builder).Build();
  MemoryAllocation alloc(&memory_layout);
  // Loading the tuple (5, 7) must populate the two slots positionally.
  ASSERT_THAT(bound_input_loader({5, 7}, alloc.frame()), IsOk());
  EXPECT_THAT(alloc.frame().Get(a_slot), Eq(5));
  EXPECT_THAT(alloc.frame().Get(b_slot), Eq(7));
  // Binding with only a subset of arguments must fail: TupleInputLoader
  // rejects unused arguments.
  EXPECT_THAT(input_loader->Bind({
                  {"b", TypedSlot::FromSlot(b_slot)},
              }),
              StatusIs(absl::StatusCode::kFailedPrecondition,
                       HasSubstr("TupleInputLoader doesn't support unused "
                                 "arguments; no slot for: a")));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/tuple_input_loader.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/io/tuple_input_loader_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
9e90afce-2432-421a-af39-184f82dff8c1 | cpp | tensorflow/tensorflow | disable_intra_op_parallelism | tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.cc | tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism_test.cc | #include "tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/grappler/clusters/cluster.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/mutable_graph_view.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/custom_graph_optimizer_registry.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/platform/protobuf.h"
namespace tensorflow {
namespace grappler {
namespace {
// Op name of the dataset node this optimizer inserts.
constexpr char kMaxIntraOpParallelismDataset[] = "MaxIntraOpParallelismDataset";
// ModelDataset is kept as the outermost dataset, so insertion happens below it.
constexpr char kModelDataset[] = "ModelDataset";
// Current and legacy ("Experimental") op names; if either is already present
// in the graph, the optimization is skipped.
constexpr std::array<const char*, 2> kMaxIntraOpParallelismDatasetOps = {
    "MaxIntraOpParallelismDataset",
    "ExperimentalMaxIntraOpParallelismDataset",
};
}
// Inserts a MaxIntraOpParallelismDataset(max=1) node in front of the fetch
// node (below ModelDataset if one is present), effectively disabling
// intra-op parallelism for the tf.data pipeline. No-ops when the item is a
// function body, when such a node already exists, or when the terminal
// node's shape/type attrs cannot be copied.
Status DisableIntraOpParallelism::OptimizeAndCollectStats(
    Cluster* cluster, const GrapplerItem& item, GraphDef* output,
    OptimizationStats* stats) {
  *output = item.graph;
  MutableGraphView graph(output);
  // Rewriting inside function bodies is not supported; leave them untouched.
  if (graph_utils::IsItemDerivedFromFunctionDef(item, graph))
    return absl::OkStatus();
  // The rewrite anchors on a single fetch node; anything else is an error.
  if (item.fetch.size() != 1) {
    return errors::InvalidArgument(
        "Expected only one fetch node but there were ", item.fetch.size(), ": ",
        absl::StrJoin(item.fetch, ", "));
  }
  // If the user already set intra-op parallelism (current or legacy op name),
  // respect their setting and do nothing.
  for (const NodeDef& node : item.graph.node()) {
    for (const auto& target_dataset_op : kMaxIntraOpParallelismDatasetOps) {
      if (node.op() == target_dataset_op) {
        return absl::OkStatus();
      }
    }
  }
  NodeDef* sink_node = graph.GetNode(item.fetch.at(0));
  NodeDef* last_node = graph_utils::GetInputNode(*sink_node, graph);
  // Keep ModelDataset outermost: insert below it rather than above.
  if (last_node->op() == kModelDataset) {
    last_node = graph_utils::GetInputNode(*last_node, graph);
  }
  // max_intra_op_parallelism = 1 disables intra-op parallelism.
  NodeDef* max_parallelism_value =
      graph_utils::AddScalarConstNode(int64_t{1}, &graph);
  NodeDef insert_node;
  graph_utils::SetUniqueGraphNodeName("intra_op_parallelism", graph.graph(),
                                      &insert_node);
  insert_node.set_op(kMaxIntraOpParallelismDataset);
  *insert_node.mutable_input()->Add() = last_node->name();
  *insert_node.mutable_input()->Add() = max_parallelism_value->name();
  // If the upstream node carries no shape/type attrs (e.g. it is not a
  // dataset, such as a _Retval fetch), silently skip the rewrite.
  if (!graph_utils::CopyShapesAndTypesAttrs(*last_node, &insert_node))
    return absl::OkStatus();
  auto* added_node = graph.AddNode(std::move(insert_node));
  // Reroute all consumers of the old terminal node to the inserted node.
  TF_RETURN_IF_ERROR(
      graph.UpdateFanouts(last_node->name(), added_node->name()));
  stats->num_changes++;
  return absl::OkStatus();
}
// Registers this rewrite in Grappler's custom-optimizer registry under the
// name "disable_intra_op_parallelism".
REGISTER_GRAPH_OPTIMIZER_AS(DisableIntraOpParallelism,
                            "disable_intra_op_parallelism");
}
} | #include "tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.h"
#include "tensorflow/core/framework/attr_value_util.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/grappler_item.h"
#include "tensorflow/core/grappler/optimizers/data/graph_test_utils.h"
#include "tensorflow/core/grappler/optimizers/data/graph_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace grappler {
namespace {
using test::function::NDef;
// Parameterized over (op name, user-chosen parallelism value): when the graph
// already contains a max-intra-op-parallelism node, the optimizer must leave
// the graph — and the user's value — untouched.
class IntraOpAlreadySetTest
    : public ::testing::TestWithParam<std::tuple<string, int64_t>> {};
TEST_P(IntraOpAlreadySetTest, IntraOpParallelism) {
  const string op = std::get<0>(GetParam());
  const int64_t value = std::get<1>(GetParam());
  GrapplerItem item;
  MutableGraphView graph(&item.graph);
  // Build range(0, 10, 1) -> <op>(value) -> Sink by hand.
  NodeDef *start_val = graph_utils::AddScalarConstNode<int64_t>(0, &graph);
  NodeDef *stop_val = graph_utils::AddScalarConstNode<int64_t>(10, &graph);
  NodeDef *step_val = graph_utils::AddScalarConstNode<int64_t>(1, &graph);
  std::vector<string> range_inputs(3);
  range_inputs[0] = start_val->name();
  range_inputs[1] = stop_val->name();
  range_inputs[2] = step_val->name();
  std::vector<std::pair<string, AttrValue>> range_attrs;
  NodeDef *range_node = graph_utils::AddNode("range", "RangeDataset",
                                             range_inputs, range_attrs, &graph);
  NodeDef *parallelism_val =
      graph_utils::AddScalarConstNode<int64_t>(value, &graph);
  std::vector<string> parallelism_inputs(2);
  parallelism_inputs[0] = range_node->name();
  parallelism_inputs[1] = parallelism_val->name();
  std::vector<std::pair<string, AttrValue>> parallelism_attrs;
  NodeDef *parallelism_node = graph_utils::AddNode(
      "max_parallelism", op, parallelism_inputs, parallelism_attrs, &graph);
  std::vector<string> sink_inputs(1);
  sink_inputs[0] = parallelism_node->name();
  std::vector<std::pair<string, AttrValue>> sink_attrs;
  NodeDef *sink_node =
      graph_utils::AddNode("Sink", "Identity", sink_inputs, sink_attrs, &graph);
  item.fetch.push_back(sink_node->name());
  EXPECT_TRUE(graph_utils::ContainsNodeWithOp(op, item.graph));
  EXPECT_EQ(item.graph.node_size(), 7);
  EXPECT_EQ(parallelism_val->attr().at("value").tensor().int64_val(0), value);
  DisableIntraOpParallelism optimizer;
  GraphDef output;
  TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
  // Same node count and the original user-supplied value must survive.
  EXPECT_EQ(output.node_size(), 7);
  EXPECT_TRUE(graph_utils::ContainsNodeWithOp(op, output));
  NodeDef new_parallelism_node =
      output.node(graph_utils::FindGraphNodeWithOp(op, output));
  NodeDef new_parallelism_val = output.node(graph_utils::FindGraphNodeWithName(
      new_parallelism_node.input(1), output));
  EXPECT_EQ(new_parallelism_val.attr().at("value").tensor().int64_val(0),
            value);
}
// Exercise both the current and the legacy ("Experimental") op names.
INSTANTIATE_TEST_SUITE_P(
    Test, IntraOpAlreadySetTest,
    ::testing::Combine(
        ::testing::Values("MaxIntraOpParallelismDataset",
                          "ExperimentalMaxIntraOpParallelismDataset"),
        ::testing::Values(1, 5)));
class IntraOpNotSetTest : public ::testing::TestWithParam<string> {};
TEST_P(IntraOpNotSetTest, IntraOpParallelism) {
const string op = GetParam();
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("Sink", op, {"range"}, {})});
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
item.graph));
EXPECT_EQ(item.graph.node_size(), 5);
item.fetch.push_back("Sink_fake");
DisableIntraOpParallelism optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_FALSE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
EXPECT_EQ(output.node_size(), 5);
item.fetch[0] = "Sink";
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
if (op == "_Retval") {
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
output));
EXPECT_EQ(output.node_size(), 5);
return;
}
EXPECT_EQ(output.node_size(), 7);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef parallelism_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(parallelism_node.op(), "MaxIntraOpParallelismDataset");
EXPECT_EQ(parallelism_node.input_size(), 2);
NodeDef range_node = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef parallelism_val = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(1), output));
EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), 1);
}
INSTANTIATE_TEST_SUITE_P(Test, IntraOpNotSetTest,
::testing::Values("Identity", "_Retval"));
TEST(AutotuneWithModelTest, IntraOpParallelism) {
GrapplerItem item;
item.graph = test::function::GDef(
{NDef("start", "Const", {}, {{"value", 0}, {"dtype", DT_INT32}}),
NDef("stop", "Const", {}, {{"value", 10}, {"dtype", DT_INT32}}),
NDef("step", "Const", {}, {{"value", 1}, {"dtype", DT_INT32}}),
NDef("range", "RangeDataset", {"start", "stop", "step"},
{{"output_shapes", absl::Span<const TensorShape>{}},
{"output_types", absl::Span<const DataType>{}}}),
NDef("model", "ModelDataset", {"range"}, {}),
NDef("Sink", "Identity", {"model"}, {})});
EXPECT_FALSE(graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset",
item.graph));
EXPECT_EQ(item.graph.node_size(), 6);
item.fetch.push_back("Sink");
DisableIntraOpParallelism optimizer;
GraphDef output;
TF_ASSERT_OK(optimizer.Optimize(nullptr, item, &output));
EXPECT_EQ(output.node_size(), 8);
EXPECT_TRUE(
graph_utils::ContainsNodeWithOp("MaxIntraOpParallelismDataset", output));
NodeDef sink_node =
output.node(graph_utils::FindGraphNodeWithName("Sink", output));
EXPECT_EQ(sink_node.input_size(), 1);
NodeDef model_node = output.node(
graph_utils::FindGraphNodeWithName(sink_node.input(0), output));
EXPECT_EQ(model_node.op(), "ModelDataset");
EXPECT_EQ(model_node.input_size(), 1);
NodeDef parallelism_node = output.node(
graph_utils::FindGraphNodeWithName(model_node.input(0), output));
EXPECT_EQ(parallelism_node.op(), "MaxIntraOpParallelismDataset");
EXPECT_EQ(parallelism_node.input_size(), 2);
NodeDef range_node = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(0), output));
EXPECT_EQ(range_node.name(), "range");
NodeDef parallelism_val = output.node(
graph_utils::FindGraphNodeWithName(parallelism_node.input(1), output));
EXPECT_EQ(parallelism_val.attr().at("value").tensor().int64_val(0), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/optimizers/data/disable_intra_op_parallelism_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
3e141fb0-20e1-44bb-9811-0160bc6ad953 | cpp | google/tsl | criticality | tsl/platform/default/criticality.h | tsl/platform/criticality_test.cc | #ifndef TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
#define TENSORFLOW_TSL_PLATFORM_DEFAULT_CRITICALITY_H_
namespace tsl {
namespace criticality {
inline Criticality GetCriticality() {
return Criticality::kCritical;
}
}
}
#endif | #include "tsl/platform/criticality.h"
#include "tsl/platform/test.h"
namespace tsl {
namespace criticality {
TEST(CriticalityTest, Basic) {
EXPECT_EQ(GetCriticality(), Criticality::kCritical);
}
}
} | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/default/criticality.h | https://github.com/google/tsl/blob/6d708fdcdd4f40537b7fa273371215a6fa3d4423/tsl/platform/criticality_test.cc | 6d708fdcdd4f40537b7fa273371215a6fa3d4423 |
f28ff54a-3cd8-49b4-8105-283d85478981 | cpp | google/libaddressinput | localization | cpp/src/localization.cc | cpp/test/localization_test.cc | #include <libaddressinput/localization.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_problem.h>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>
#include "messages.h"
#include "region_data_constants.h"
#include "rule.h"
#include "util/string_split.h"
#include "util/string_util.h"
namespace {
void PushBackUrl(const std::string& url, std::vector<std::string>* parameters) {
assert(parameters != nullptr);
parameters->push_back("<a href=\"" + url + "\">");
parameters->emplace_back("</a>");
}
}
namespace i18n {
namespace addressinput {
namespace {
#include "en_messages.cc"
std::string GetEnglishString(int message_id) {
const char* str = GetString(message_id);
return str != nullptr ? std::string(str) : std::string();
}
}
Localization::Localization() : get_string_(&GetEnglishString) {}
std::string Localization::GetString(int message_id) const {
return get_string_(message_id);
}
std::string Localization::GetErrorMessage(const AddressData& address,
AddressField field,
AddressProblem problem,
bool enable_examples,
bool enable_links) const {
if (field == POSTAL_CODE) {
Rule rule;
rule.CopyFrom(Rule::GetDefault());
std::string postal_code_example, post_service_url;
if (rule.ParseSerializedRule(
RegionDataConstants::GetRegionData(address.region_code))) {
if (enable_examples) {
std::vector<std::string> examples_list;
SplitString(rule.GetPostalCodeExample(), ',', &examples_list);
if (!examples_list.empty()) {
postal_code_example = examples_list.front();
}
}
if (enable_links) {
post_service_url = rule.GetPostServiceUrl();
}
} else {
assert(false);
}
bool uses_postal_code_as_label =
rule.GetPostalCodeNameMessageId() ==
IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL;
return GetErrorMessageForPostalCode(problem, uses_postal_code_as_label,
postal_code_example, post_service_url);
} else {
if (problem == MISSING_REQUIRED_FIELD) {
return get_string_(IDS_LIBADDRESSINPUT_MISSING_REQUIRED_FIELD);
} else if (problem == UNKNOWN_VALUE) {
std::vector<std::string> parameters;
if (AddressData::IsRepeatedFieldValue(field)) {
const auto& values = address.GetRepeatedFieldValue(field);
assert(!values.empty());
parameters.push_back(values.front());
} else {
parameters.push_back(address.GetFieldValue(field));
}
return DoReplaceStringPlaceholders(
get_string_(IDS_LIBADDRESSINPUT_UNKNOWN_VALUE), parameters);
} else if (problem == USES_P_O_BOX) {
return get_string_(IDS_LIBADDRESSINPUT_PO_BOX_FORBIDDEN_VALUE);
} else {
assert(false);
return "";
}
}
}
void Localization::SetGetter(std::string (*getter)(int)) {
assert(getter != nullptr);
get_string_ = getter;
}
std::string Localization::GetErrorMessageForPostalCode(
AddressProblem problem,
bool uses_postal_code_as_label,
const std::string& postal_code_example,
const std::string& post_service_url) const {
int message_id;
std::vector<std::string> parameters;
if (problem == MISSING_REQUIRED_FIELD) {
if (!postal_code_example.empty() && !post_service_url.empty()) {
message_id = uses_postal_code_as_label ?
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_POSTAL_CODE_EXAMPLE_AND_URL :
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_ZIP_CODE_EXAMPLE_AND_URL;
parameters.push_back(postal_code_example);
PushBackUrl(post_service_url, ¶meters);
} else if (!postal_code_example.empty()) {
message_id = uses_postal_code_as_label ?
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_POSTAL_CODE_EXAMPLE :
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_ZIP_CODE_EXAMPLE;
parameters.push_back(postal_code_example);
} else {
message_id = IDS_LIBADDRESSINPUT_MISSING_REQUIRED_FIELD;
}
return DoReplaceStringPlaceholders(get_string_(message_id), parameters);
} else if (problem == INVALID_FORMAT) {
if (!postal_code_example.empty() && !post_service_url.empty()) {
message_id = uses_postal_code_as_label ?
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE_EXAMPLE_AND_URL :
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP_CODE_EXAMPLE_AND_URL;
parameters.push_back(postal_code_example);
PushBackUrl(post_service_url, ¶meters);
} else if (!postal_code_example.empty()) {
message_id = uses_postal_code_as_label ?
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE_EXAMPLE :
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP_CODE_EXAMPLE;
parameters.push_back(postal_code_example);
} else {
message_id = uses_postal_code_as_label ?
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE :
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP;
}
return DoReplaceStringPlaceholders(get_string_(message_id), parameters);
} else if (problem == MISMATCHING_VALUE) {
if (!post_service_url.empty()) {
message_id = uses_postal_code_as_label ?
IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_POSTAL_CODE_URL :
IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_ZIP_URL;
PushBackUrl(post_service_url, ¶meters);
} else {
message_id = uses_postal_code_as_label ?
IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_POSTAL_CODE :
IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_ZIP;
}
return DoReplaceStringPlaceholders(get_string_(message_id), parameters);
} else {
assert(false);
return "";
}
}
}
} | #include <libaddressinput/localization.h>
#include <libaddressinput/address_data.h>
#include <libaddressinput/address_field.h>
#include <libaddressinput/address_problem.h>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "grit.h"
#include "messages.h"
namespace {
using i18n::addressinput::AddressData;
using i18n::addressinput::AddressField;
using i18n::addressinput::INVALID_MESSAGE_ID;
using i18n::addressinput::Localization;
using i18n::addressinput::COUNTRY;
using i18n::addressinput::ADMIN_AREA;
using i18n::addressinput::LOCALITY;
using i18n::addressinput::DEPENDENT_LOCALITY;
using i18n::addressinput::SORTING_CODE;
using i18n::addressinput::POSTAL_CODE;
using i18n::addressinput::STREET_ADDRESS;
using i18n::addressinput::ORGANIZATION;
using i18n::addressinput::RECIPIENT;
using i18n::addressinput::MISSING_REQUIRED_FIELD;
using i18n::addressinput::UNKNOWN_VALUE;
using i18n::addressinput::INVALID_FORMAT;
using i18n::addressinput::MISMATCHING_VALUE;
using i18n::addressinput::USES_P_O_BOX;
class LocalizationTest : public testing::TestWithParam<int> {
public:
LocalizationTest(const LocalizationTest&) = delete;
LocalizationTest& operator=(const LocalizationTest&) = delete;
protected:
LocalizationTest() = default;
Localization localization_;
};
const char kValidMessage[] = "Data";
std::string GetValidMessage(int message_id) { return kValidMessage; }
TEST_P(LocalizationTest, ValidStringGetterCanBeUsed) {
localization_.SetGetter(&GetValidMessage);
EXPECT_EQ(kValidMessage, localization_.GetString(GetParam()));
}
TEST_P(LocalizationTest, DefaultStringIsNotEmpty) {
EXPECT_FALSE(localization_.GetString(GetParam()).empty());
}
TEST_P(LocalizationTest, NoNewline) {
EXPECT_EQ(std::string::npos, localization_.GetString(GetParam()).find('\n'));
}
TEST_P(LocalizationTest, NoDoubleSpace) {
EXPECT_EQ(std::string::npos,
localization_.GetString(GetParam()).find(std::string(2U, ' ')));
}
INSTANTIATE_TEST_SUITE_P(
AllMessages, LocalizationTest,
testing::Values(
IDS_LIBADDRESSINPUT_COUNTRY_OR_REGION_LABEL,
IDS_LIBADDRESSINPUT_LOCALITY_LABEL,
IDS_LIBADDRESSINPUT_ADDRESS_LINE_1_LABEL,
IDS_LIBADDRESSINPUT_PIN_CODE_LABEL,
IDS_LIBADDRESSINPUT_POSTAL_CODE_LABEL,
IDS_LIBADDRESSINPUT_ZIP_CODE_LABEL, IDS_LIBADDRESSINPUT_AREA,
IDS_LIBADDRESSINPUT_COUNTY, IDS_LIBADDRESSINPUT_DEPARTMENT,
IDS_LIBADDRESSINPUT_DISTRICT, IDS_LIBADDRESSINPUT_DO_SI,
IDS_LIBADDRESSINPUT_EMIRATE, IDS_LIBADDRESSINPUT_ISLAND,
IDS_LIBADDRESSINPUT_PARISH, IDS_LIBADDRESSINPUT_PREFECTURE,
IDS_LIBADDRESSINPUT_PROVINCE, IDS_LIBADDRESSINPUT_STATE,
IDS_LIBADDRESSINPUT_ORGANIZATION_LABEL,
IDS_LIBADDRESSINPUT_RECIPIENT_LABEL,
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_FIELD,
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_POSTAL_CODE_EXAMPLE_AND_URL,
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_POSTAL_CODE_EXAMPLE,
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_ZIP_CODE_EXAMPLE_AND_URL,
IDS_LIBADDRESSINPUT_MISSING_REQUIRED_ZIP_CODE_EXAMPLE,
IDS_LIBADDRESSINPUT_UNKNOWN_VALUE,
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE_EXAMPLE_AND_URL,
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE_EXAMPLE,
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_POSTAL_CODE,
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP_CODE_EXAMPLE_AND_URL,
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP_CODE_EXAMPLE,
IDS_LIBADDRESSINPUT_UNRECOGNIZED_FORMAT_ZIP,
IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_POSTAL_CODE_URL,
IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_POSTAL_CODE,
IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_ZIP_URL,
IDS_LIBADDRESSINPUT_MISMATCHING_VALUE_ZIP,
IDS_LIBADDRESSINPUT_PO_BOX_FORBIDDEN_VALUE));
TEST_F(LocalizationTest, InvalidMessageIsEmptyString) {
EXPECT_TRUE(localization_.GetString(INVALID_MESSAGE_ID).empty());
}
TEST(LocalizationGetErrorMessageTest, MissingRequiredPostalCode) {
Localization localization;
const AddressData address{.region_code = "CH"};
EXPECT_EQ("You must provide a postal code, for example 2544."
" Don't know your postal code? Find it out"
" <a href=\"http:
"here</a>.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISSING_REQUIRED_FIELD, true, true));
EXPECT_EQ("You must provide a postal code, for example 2544.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISSING_REQUIRED_FIELD, true, false));
EXPECT_EQ("You can't leave this empty.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISSING_REQUIRED_FIELD, false, false));
EXPECT_EQ("You can't leave this empty.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISSING_REQUIRED_FIELD, false, true));
}
TEST(LocalizationGetErrorMessageTest, MissingRequiredZipCode) {
Localization localization;
const AddressData address{.region_code = "US"};
EXPECT_EQ("You must provide a ZIP code, for example 95014."
" Don't know your ZIP code? Find it out"
" <a href=\"https:
"input.action\">here</a>.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISSING_REQUIRED_FIELD, true, true));
EXPECT_EQ("You must provide a ZIP code, for example 95014.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISSING_REQUIRED_FIELD, true, false));
EXPECT_EQ("You can't leave this empty.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISSING_REQUIRED_FIELD, false, false));
EXPECT_EQ("You can't leave this empty.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISSING_REQUIRED_FIELD, false, true));
}
TEST(LocalizationGetErrorMessageTest, MissingRequiredOtherFields) {
Localization localization;
const AddressData address{.region_code = "US"};
const std::vector<AddressField> other_fields{
COUNTRY,
ADMIN_AREA,
LOCALITY,
DEPENDENT_LOCALITY,
SORTING_CODE,
STREET_ADDRESS,
ORGANIZATION,
RECIPIENT,
};
for (AddressField field : other_fields) {
EXPECT_EQ("You can't leave this empty.",
localization.GetErrorMessage(
address, field, MISSING_REQUIRED_FIELD, true, true));
EXPECT_EQ("You can't leave this empty.",
localization.GetErrorMessage(
address, field, MISSING_REQUIRED_FIELD, true, false));
EXPECT_EQ("You can't leave this empty.",
localization.GetErrorMessage(
address, field, MISSING_REQUIRED_FIELD, false, false));
EXPECT_EQ("You can't leave this empty.",
localization.GetErrorMessage(
address, field, MISSING_REQUIRED_FIELD, false, true));
}
}
TEST(LocalizationGetErrorMessageTest, UnknownValueOtherFields) {
Localization localization;
const AddressData address{
.region_code = "US",
.address_line{
"bad address line 1",
"bad address line 2",
},
.administrative_area = "bad admin area",
.locality = "bad locality",
.dependent_locality = "bad dependent locality",
.sorting_code = "bad sorting code",
.organization = "bad organization",
.recipient = "bad recipient",
};
EXPECT_EQ("US "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, COUNTRY, UNKNOWN_VALUE, true, true));
EXPECT_EQ("US "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, COUNTRY, UNKNOWN_VALUE, true, false));
EXPECT_EQ("US "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, COUNTRY, UNKNOWN_VALUE, false, false));
EXPECT_EQ("US "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, COUNTRY, UNKNOWN_VALUE, false, true));
EXPECT_EQ("bad admin area "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, ADMIN_AREA, UNKNOWN_VALUE, true, true));
EXPECT_EQ("bad admin area "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, ADMIN_AREA, UNKNOWN_VALUE, true, false));
EXPECT_EQ("bad admin area "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, ADMIN_AREA, UNKNOWN_VALUE, false, false));
EXPECT_EQ("bad admin area "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, ADMIN_AREA, UNKNOWN_VALUE, false, true));
EXPECT_EQ("bad locality "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, LOCALITY, UNKNOWN_VALUE, true, true));
EXPECT_EQ("bad locality "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, LOCALITY, UNKNOWN_VALUE, true, false));
EXPECT_EQ("bad locality "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, LOCALITY, UNKNOWN_VALUE, false, false));
EXPECT_EQ("bad locality "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, LOCALITY, UNKNOWN_VALUE, false, true));
EXPECT_EQ("bad dependent locality "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, DEPENDENT_LOCALITY, UNKNOWN_VALUE, true, true));
EXPECT_EQ("bad dependent locality "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, DEPENDENT_LOCALITY, UNKNOWN_VALUE, true, false));
EXPECT_EQ("bad dependent locality "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, DEPENDENT_LOCALITY, UNKNOWN_VALUE, false, false));
EXPECT_EQ("bad dependent locality "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, DEPENDENT_LOCALITY, UNKNOWN_VALUE, false, true));
EXPECT_EQ("bad sorting code "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, SORTING_CODE, UNKNOWN_VALUE, true, true));
EXPECT_EQ("bad sorting code "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, SORTING_CODE, UNKNOWN_VALUE, true, false));
EXPECT_EQ("bad sorting code "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, SORTING_CODE, UNKNOWN_VALUE, false, false));
EXPECT_EQ("bad sorting code "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, SORTING_CODE, UNKNOWN_VALUE, false, true));
EXPECT_EQ("bad address line 1 "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, STREET_ADDRESS, UNKNOWN_VALUE, true, true));
EXPECT_EQ("bad address line 1 "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, STREET_ADDRESS, UNKNOWN_VALUE, true, false));
EXPECT_EQ("bad address line 1 "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, STREET_ADDRESS, UNKNOWN_VALUE, false, false));
EXPECT_EQ("bad address line 1 "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, STREET_ADDRESS, UNKNOWN_VALUE, false, true));
EXPECT_EQ("bad organization "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, ORGANIZATION, UNKNOWN_VALUE, true, true));
EXPECT_EQ("bad organization "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, ORGANIZATION, UNKNOWN_VALUE, true, false));
EXPECT_EQ("bad organization "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, ORGANIZATION, UNKNOWN_VALUE, false, false));
EXPECT_EQ("bad organization "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, ORGANIZATION, UNKNOWN_VALUE, false, true));
EXPECT_EQ("bad recipient "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, RECIPIENT, UNKNOWN_VALUE, true, true));
EXPECT_EQ("bad recipient "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, RECIPIENT, UNKNOWN_VALUE, true, false));
EXPECT_EQ("bad recipient "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, RECIPIENT, UNKNOWN_VALUE, false, false));
EXPECT_EQ("bad recipient "
"is not recognized as a known value for this field.",
localization.GetErrorMessage(
address, RECIPIENT, UNKNOWN_VALUE, false, true));
}
TEST(LocalizationGetErrorMessageTest, InvalidFormatPostalCode) {
Localization localization;
const AddressData address{.region_code = "CH"};
EXPECT_EQ("This postal code format is not recognized. Example "
"of a valid postal code: 2544."
" Don't know your postal code? Find it out"
" <a href=\"http:
"here</a>.",
localization.GetErrorMessage(address, POSTAL_CODE,
INVALID_FORMAT, true, true));
EXPECT_EQ("This postal code format is not recognized. Example "
"of a valid postal code: 2544.",
localization.GetErrorMessage(address, POSTAL_CODE,
INVALID_FORMAT, true, false));
EXPECT_EQ("This postal code format is not recognized.",
localization.GetErrorMessage(address, POSTAL_CODE,
INVALID_FORMAT, false, false));
EXPECT_EQ("This postal code format is not recognized.",
localization.GetErrorMessage(address, POSTAL_CODE,
INVALID_FORMAT, false, true));
}
TEST(LocalizationGetErrorMessageTest, InvalidFormatZipCode) {
Localization localization;
const AddressData address{.region_code = "US"};
EXPECT_EQ("This ZIP code format is not recognized. Example of "
"a valid ZIP code: 95014."
" Don't know your ZIP code? Find it out"
" <a href=\"https:
"input.action\">here</a>.",
localization.GetErrorMessage(address, POSTAL_CODE,
INVALID_FORMAT, true, true));
EXPECT_EQ("This ZIP code format is not recognized. Example of "
"a valid ZIP code: 95014.",
localization.GetErrorMessage(address, POSTAL_CODE,
INVALID_FORMAT, true, false));
EXPECT_EQ("This ZIP code format is not recognized.",
localization.GetErrorMessage(address, POSTAL_CODE,
INVALID_FORMAT, false, false));
EXPECT_EQ("This ZIP code format is not recognized.",
localization.GetErrorMessage(address, POSTAL_CODE,
INVALID_FORMAT, false, true));
}
TEST(LocalizationGetErrorMessageTest, MismatchingValuePostalCode) {
Localization localization;
const AddressData address{.region_code = "CH"};
EXPECT_EQ("This postal code does not appear to match the rest "
"of this address."
" Don't know your postal code? Find it out"
" <a href=\"http:
"here</a>.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISMATCHING_VALUE, true, true));
EXPECT_EQ("This postal code does not appear to match the rest "
"of this address.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISMATCHING_VALUE, true, false));
EXPECT_EQ("This postal code does not appear to match the rest "
"of this address.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISMATCHING_VALUE, false, false));
EXPECT_EQ("This postal code does not appear to match the rest "
"of this address."
" Don't know your postal code? Find it out"
" <a href=\"http:
"here</a>.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISMATCHING_VALUE, false, true));
}
TEST(LocalizationGetErrorMessageTest, MismatchingValueZipCode) {
Localization localization;
const AddressData address{.region_code = "US"};
EXPECT_EQ("This ZIP code does not appear to match the rest of "
"this address."
" Don't know your ZIP code? Find it out"
" <a href=\"https:
"input.action\">here</a>.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISMATCHING_VALUE, true, true));
EXPECT_EQ("This ZIP code does not appear to match the rest of "
"this address.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISMATCHING_VALUE, true, false));
EXPECT_EQ("This ZIP code does not appear to match the rest of "
"this address.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISMATCHING_VALUE, false, false));
EXPECT_EQ("This ZIP code does not appear to match the rest of "
"this address."
" Don't know your ZIP code? Find it out"
" <a href=\"https:
"input.action\">here</a>.",
localization.GetErrorMessage(address, POSTAL_CODE,
MISMATCHING_VALUE, false, true));
}
TEST(LocalizationGetErrorMessageTest, UsesPOBoxOtherFields) {
Localization localization;
const AddressData address{.region_code = "US"};
const std::vector<AddressField> other_fields{
COUNTRY,
ADMIN_AREA,
LOCALITY,
DEPENDENT_LOCALITY,
SORTING_CODE,
STREET_ADDRESS,
ORGANIZATION,
RECIPIENT,
};
for (AddressField field : other_fields) {
EXPECT_EQ("This address line appears to contain a post "
"office box. Please use a street"
" or building address.",
localization.GetErrorMessage(
address, field, USES_P_O_BOX, true, true));
EXPECT_EQ("This address line appears to contain a post "
"office box. Please use a street"
" or building address.",
localization.GetErrorMessage(
address, field, USES_P_O_BOX, true, false));
EXPECT_EQ("This address line appears to contain a post "
"office box. Please use a street"
" or building address.",
localization.GetErrorMessage(
address, field, USES_P_O_BOX, false, false));
EXPECT_EQ("This address line appears to contain a post "
"office box. Please use a street"
" or building address.",
localization.GetErrorMessage(
address, field, USES_P_O_BOX, false, true));
}
}
} | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/src/localization.cc | https://github.com/google/libaddressinput/blob/2610f7b1043d6784ada41392fc9392d1ea09ea07/cpp/test/localization_test.cc | 2610f7b1043d6784ada41392fc9392d1ea09ea07 |
9cd56c00-f971-453e-acfd-2b230713e4ec | cpp | tensorflow/tensorflow | status | third_party/xla/third_party/tsl/tsl/platform/status.cc | tensorflow/core/lib/core/status_test.cc | #include "tsl/platform/status.h"
#include <stdio.h>
#include <deque>
#include <functional>
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/base/call_once.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/stack_frame.h"
#include "tsl/platform/stacktrace.h"
#include "tsl/platform/str_util.h"
#include "tsl/platform/strcat.h"
#include "tsl/platform/stringprintf.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace tsl {
namespace {
class StatusLogSink : public TFLogSink {
public:
static StatusLogSink* GetInstance() {
static StatusLogSink* sink = new StatusLogSink();
return sink;
}
void enable() {
absl::call_once(flag_, [this] {
num_messages_ = 5;
if (const char* num_msgs_str =
getenv("TF_WORKER_NUM_FORWARDED_LOG_MESSAGES")) {
if (!absl::SimpleAtoi(num_msgs_str, &num_messages_)) {
LOG(WARNING) << "Failed to parse env variable "
"TF_WORKER_NUM_WARNING_ERROR_LOG_IN_STATUS="
<< num_msgs_str << " as int. Using the default value "
<< num_messages_ << ".";
}
}
if (num_messages_ > 0) {
TFAddLogSink(this);
}
});
}
void GetMessages(std::vector<std::string>* logs) TF_LOCKS_EXCLUDED(mu_) {
mutex_lock lock(mu_);
for (auto& msg : messages_) {
logs->push_back(msg);
}
}
void Send(const TFLogEntry& entry) override TF_LOCKS_EXCLUDED(mu_) {
if (entry.log_severity() < absl::LogSeverity::kWarning) return;
mutex_lock lock(mu_);
messages_.emplace_back(entry.ToString());
if (messages_.size() > static_cast<size_t>(num_messages_)) {
messages_.pop_front();
}
}
private:
mutex mu_;
absl::once_flag flag_;
int num_messages_ = 0;
std::deque<std::string> messages_ TF_GUARDED_BY(mu_);
};
}
namespace errors {
static constexpr const char kStackTraceProtoUrl[] =
"type.googleapis.com/tensorflow.StackTracePayload";
void SetStackTrace(absl::Status& status, std::vector<StackFrame> stack_trace) {
std::vector<std::string> items;
items.reserve(stack_trace.size());
for (StackFrame& frame : stack_trace) {
items.push_back(
absl::StrCat(absl::StrReplaceAll(frame.file_name, {{"\n", ""}}), "\n",
frame.line_number, "\n",
absl::StrReplaceAll(frame.function_name, {{"\n", ""}})));
}
status.SetPayload(kStackTraceProtoUrl,
absl::Cord(absl::StrJoin(items, "\n")));
}
std::vector<StackFrame> GetStackTrace(const absl::Status& status) {
std::vector<StackFrame> stack_trace;
absl::optional<absl::Cord> maybe_serialized_payload =
status.GetPayload(kStackTraceProtoUrl);
if (maybe_serialized_payload.has_value()) {
std::vector<std::string> split =
absl::StrSplit(maybe_serialized_payload.value().Flatten(), '\n');
assert(split.size() % 3 == 0);
for (int i = 0; i < split.size() / 3; ++i) {
const int idx = 3 * i;
int line_number = -1;
CHECK(absl::SimpleAtoi(split[idx + 1], &line_number));
stack_trace.emplace_back(std::move(split[idx]), line_number,
std::move(split[idx + 2]));
}
}
return stack_trace;
}
}
#ifdef _WIN32
const char* NullTerminatedMessage(const absl::Status& status) {
return absl::StatusMessageAsCStr(status);
}
#endif
std::string* TfCheckOpHelperOutOfLine(const absl::Status& v, const char* msg) {
std::stringstream ss;
ss << "Non-OK-status: " << msg << "\nStatus: " << v;
return new std::string(ss.str());
}
StatusGroup::StatusGroup() {}
StatusGroup::StatusGroup(std::initializer_list<absl::Status> statuses) {
for (const absl::Status& s : statuses) {
Update(s);
}
}
static constexpr const char kDerivedStatusProtoUrl[] =
"type.googleapis.com/tensorflow.DerivedStatus";
absl::Status StatusGroup::MakeDerived(const absl::Status& s) {
if (IsDerived(s)) {
return s;
} else {
absl::Status derived(s);
derived.SetPayload(kDerivedStatusProtoUrl, absl::Cord(""));
return derived;
}
}
bool StatusGroup::IsDerived(const absl::Status& s) {
return s.GetPayload(kDerivedStatusProtoUrl).has_value();
}
void StatusGroup::ConfigureLogHistory() {
StatusLogSink::GetInstance()->enable();
}
void StatusGroup::Update(const absl::Status& s) {
if (s.ok()) {
++num_ok_;
} else {
ok_ = false;
if (IsDerived(s)) {
derived_.insert(s);
} else {
non_derived_.insert(s);
}
}
}
static constexpr int kMaxAggregatedStatusMessageSize = 8 * 1024;
static constexpr int kMaxAttachedLogMessageSize = 512;
std::unordered_map<std::string, absl::Cord> StatusGroup::GetPayloads() const {
std::unordered_map<std::string, absl::Cord> payloads;
auto capture_payload = [&payloads](absl::string_view key,
const absl::Cord& value) {
payloads[std::string(key)] = value;
};
for (const auto& status : derived_) {
status.ForEachPayload(capture_payload);
}
for (const auto& status : non_derived_) {
status.ForEachPayload(capture_payload);
}
payloads.erase(kDerivedStatusProtoUrl);
return payloads;
}
absl::Status MakeStatus(
absl::StatusCode code, absl::string_view message,
const std::unordered_map<std::string, absl::Cord>& payloads) {
absl::Status status(code, message);
for (const auto& payload : payloads) {
status.SetPayload(payload.first, payload.second);
}
return status;
}
std::string MakeString(const absl::Status& status) {
return absl::StrCat(absl::StatusCodeToString(status.code()), ": ",
status.message());
}
absl::Status StatusGroup::as_summary_status() const {
if (ok_) {
return absl::OkStatus();
}
auto get_recent_logs = [this]() -> std::string {
if (!recent_logs_.empty()) {
std::vector<std::string> fmt;
fmt.push_back("\nRecent warning and error logs:");
for (auto& log : recent_logs_) {
fmt.push_back(" " + log.substr(0, kMaxAttachedLogMessageSize));
}
return absl::StrJoin(fmt, "\n");
} else {
return "";
}
};
if (non_derived_.size() == 1) {
return MakeStatus(
non_derived_.begin()->code(),
strings::StrCat(non_derived_.begin()->message(), get_recent_logs()),
GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<std::string> fmt;
fmt.push_back(
strings::Printf("%zu root error(s) found.", non_derived_.size()));
int index = 0;
auto code = absl::StatusCode::kCancelled;
for (const auto& s : non_derived_) {
if (code == absl::StatusCode::kCancelled &&
s.code() != absl::StatusCode::kCancelled) {
code = s.code();
}
fmt.emplace_back(strings::StrCat(" (", index, ") ", MakeString(s)));
++index;
}
fmt.push_back(strings::Printf("%zu successful operations.", num_ok_));
fmt.push_back(
strings::Printf("%zu derived errors ignored.", derived_.size()));
std::string error_msg =
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize);
return MakeStatus(code, strings::StrCat(error_msg, get_recent_logs()),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
absl::Status StatusGroup::as_concatenated_status() const {
if (ok_) {
return absl::OkStatus();
}
if (non_derived_.size() == 1) {
return MakeStatus(non_derived_.begin()->code(),
non_derived_.begin()->message(), GetPayloads());
}
if (!non_derived_.empty()) {
std::vector<string> fmt;
fmt.emplace_back("\n=====================");
for (const auto& s : non_derived_) {
fmt.emplace_back(MakeString(s));
}
fmt.emplace_back("=====================\n");
return MakeStatus(
non_derived_.begin()->code(),
absl::StrJoin(fmt, "\n").substr(0, kMaxAggregatedStatusMessageSize),
GetPayloads());
} else {
return MakeDerived(MakeStatus(derived_.begin()->code(),
derived_.begin()->message(), GetPayloads()));
}
}
void StatusGroup::AttachLogMessages() {
recent_logs_.clear();
StatusLogSink::GetInstance()->GetMessages(&recent_logs_);
}
} | #include "tensorflow/core/lib/core/status.h"
#include "absl/strings/match.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
TEST(Status, OK) {
EXPECT_EQ(absl::OkStatus().code(), error::OK);
EXPECT_EQ(absl::OkStatus().message(), "");
TF_EXPECT_OK(absl::OkStatus());
TF_ASSERT_OK(absl::OkStatus());
EXPECT_EQ(absl::OkStatus(), Status());
Status s;
EXPECT_TRUE(s.ok());
}
TEST(DeathStatus, CheckOK) {
Status status(errors::InvalidArgument("Invalid"));
ASSERT_DEATH(TF_CHECK_OK(status), "Invalid");
}
TEST(Status, Set) {
Status status;
status = Status(absl::StatusCode::kCancelled, "Error message");
EXPECT_EQ(status.code(), absl::StatusCode::kCancelled);
EXPECT_EQ(status.message(), "Error message");
}
TEST(Status, Copy) {
Status a(errors::InvalidArgument("Invalid"));
Status b(a);
ASSERT_EQ(a.ToString(), b.ToString());
}
TEST(Status, Assign) {
Status a(errors::InvalidArgument("Invalid"));
Status b;
b = a;
ASSERT_EQ(a.ToString(), b.ToString());
}
TEST(Status, Move) {
Status a(errors::InvalidArgument("Invalid"));
Status b(std::move(a));
ASSERT_EQ("INVALID_ARGUMENT: Invalid", b.ToString());
}
TEST(Status, MoveAssign) {
Status a(errors::InvalidArgument("Invalid"));
Status b;
b = std::move(a);
ASSERT_EQ("INVALID_ARGUMENT: Invalid", b.ToString());
}
TEST(Status, Update) {
Status s;
s.Update(absl::OkStatus());
ASSERT_TRUE(s.ok());
Status a(errors::InvalidArgument("Invalid"));
s.Update(a);
ASSERT_EQ(s.ToString(), a.ToString());
Status b(errors::Internal("Internal"));
s.Update(b);
ASSERT_EQ(s.ToString(), a.ToString());
s.Update(absl::OkStatus());
ASSERT_EQ(s.ToString(), a.ToString());
ASSERT_FALSE(s.ok());
}
TEST(Status, EqualsOK) { ASSERT_EQ(absl::OkStatus(), Status()); }
TEST(Status, EqualsSame) {
Status a(errors::InvalidArgument("Invalid"));
Status b(errors::InvalidArgument("Invalid"));
ASSERT_EQ(a, b);
}
TEST(Status, EqualsCopy) {
const Status a(errors::InvalidArgument("Invalid"));
const Status b = a;
ASSERT_EQ(a, b);
}
TEST(Status, EqualsDifferentCode) {
const Status a(errors::InvalidArgument("message"));
const Status b(errors::Internal("message"));
ASSERT_NE(a, b);
}
TEST(Status, EqualsDifferentMessage) {
const Status a(errors::InvalidArgument("message"));
const Status b(errors::InvalidArgument("another"));
ASSERT_NE(a, b);
}
TEST(StatusGroup, OKStatusGroup) {
StatusGroup c;
c.Update(absl::OkStatus());
c.Update(absl::OkStatus());
ASSERT_EQ(c.as_summary_status(), absl::OkStatus());
ASSERT_EQ(c.as_concatenated_status(), absl::OkStatus());
}
TEST(StatusGroup, AggregateWithSingleErrorStatus) {
StatusGroup c;
const Status internal(errors::Internal("Original error."));
c.Update(internal);
ASSERT_EQ(c.as_summary_status(), internal);
Status concat_status = c.as_concatenated_status();
ASSERT_EQ(concat_status.code(), internal.code());
ASSERT_TRUE(absl::StrContains(concat_status.message(), internal.message()));
const Status derived =
StatusGroup::MakeDerived(errors::Internal("Derived error."));
c.Update(derived);
ASSERT_EQ(c.as_summary_status(), internal);
concat_status = c.as_concatenated_status();
ASSERT_EQ(concat_status.code(), internal.code());
ASSERT_TRUE(absl::StrContains(concat_status.message(), internal.message()));
}
TEST(StatusGroup, AggregateWithMultipleErrorStatus) {
StatusGroup c;
const Status internal(errors::Internal("Original error."));
const Status cancelled(errors::Cancelled("Cancelled after 10 steps."));
const Status aborted(errors::Aborted("Aborted after 10 steps."));
c.Update(internal);
c.Update(cancelled);
c.Update(aborted);
Status summary = c.as_summary_status();
ASSERT_EQ(summary.code(), internal.code());
ASSERT_TRUE(absl::StrContains(summary.message(), internal.message()));
ASSERT_TRUE(absl::StrContains(summary.message(), cancelled.message()));
ASSERT_TRUE(absl::StrContains(summary.message(), aborted.message()));
Status concat_status = c.as_concatenated_status();
ASSERT_EQ(concat_status.code(), internal.code());
ASSERT_TRUE(absl::StrContains(concat_status.message(), internal.message()));
ASSERT_TRUE(absl::StrContains(concat_status.message(), cancelled.message()));
ASSERT_TRUE(absl::StrContains(concat_status.message(), aborted.message()));
}
TEST(Status, InvalidPayloadGetsIgnored) {
Status s = Status();
s.SetPayload("Invalid", absl::Cord("Invalid Val"));
ASSERT_FALSE(s.GetPayload("Invalid").has_value());
bool is_err_erased = s.ErasePayload("Invalid");
ASSERT_EQ(is_err_erased, false);
}
TEST(Status, SetPayloadSetsOrUpdatesIt) {
Status s(absl::StatusCode::kInternal, "Error message");
s.SetPayload("Error key", absl::Cord("Original"));
ASSERT_EQ(s.GetPayload("Error key"), absl::Cord("Original"));
s.SetPayload("Error key", absl::Cord("Updated"));
ASSERT_EQ(s.GetPayload("Error key"), absl::Cord("Updated"));
}
TEST(Status, ErasePayloadRemovesIt) {
Status s(absl::StatusCode::kInternal, "Error message");
s.SetPayload("Error key", absl::Cord("Original"));
bool is_err_erased = s.ErasePayload("Error key");
ASSERT_EQ(is_err_erased, true);
is_err_erased = s.ErasePayload("Error key");
ASSERT_EQ(is_err_erased, false);
ASSERT_FALSE(s.GetPayload("Error key").has_value());
}
static void BM_TF_CHECK_OK(::testing::benchmark::State& state) {
tensorflow::Status s = (state.max_iterations < 0)
? errors::InvalidArgument("Invalid")
: absl::OkStatus();
for (auto i : state) {
TF_CHECK_OK(s);
}
}
BENCHMARK(BM_TF_CHECK_OK);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/third_party/tsl/tsl/platform/status.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/lib/core/status_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
53ac2892-65fc-4c7b-870d-4c1419791a50 | cpp | google/tensorstore | driver_impl | tensorstore/driver/zarr/driver_impl.h | tensorstore/driver/zarr/driver_impl_test.cc | #ifndef TENSORSTORE_DRIVER_ZARR_DRIVER_IMPL_H_
#define TENSORSTORE_DRIVER_ZARR_DRIVER_IMPL_H_
#include <stddef.h>
#include <string>
#include <string_view>
#include "tensorstore/driver/kvs_backed_chunk_driver.h"
#include "tensorstore/driver/zarr/metadata.h"
#include "tensorstore/driver/zarr/spec.h"
#include "tensorstore/index.h"
#include "tensorstore/internal/cache/chunk_cache.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/util/garbage_collection/fwd.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_zarr {
std::string EncodeChunkIndices(span<const Index> indices,
DimensionSeparator dimension_separator);
class MetadataCache : public internal_kvs_backed_chunk_driver::MetadataCache {
using Base = internal_kvs_backed_chunk_driver::MetadataCache;
public:
using Base::Base;
std::string GetMetadataStorageKey(std::string_view entry_key) override;
Result<MetadataPtr> DecodeMetadata(std::string_view entry_key,
absl::Cord encoded_metadata) override;
Result<absl::Cord> EncodeMetadata(std::string_view entry_key,
const void* metadata) override;
};
class ZarrDriverSpec
: public internal::RegisteredDriverSpec<
ZarrDriverSpec,
internal_kvs_backed_chunk_driver::KvsDriverSpec> {
public:
using Base = internal::RegisteredDriverSpec<
ZarrDriverSpec,
internal_kvs_backed_chunk_driver::KvsDriverSpec>;
constexpr static char id[] = "zarr";
ZarrPartialMetadata partial_metadata;
SelectedField selected_field;
std::string metadata_key;
constexpr static auto ApplyMembers = [](auto& x, auto f) {
return f(internal::BaseCast<KvsDriverSpec>(x), x.partial_metadata,
x.selected_field, x.metadata_key);
};
absl::Status ApplyOptions(SpecOptions&& options) override;
Result<SpecRankAndFieldInfo> GetSpecInfo() const;
TENSORSTORE_DECLARE_JSON_DEFAULT_BINDER(ZarrDriverSpec,
JsonSerializationOptions,
JsonSerializationOptions,
::nlohmann::json::object_t)
Result<IndexDomain<>> GetDomain() const override;
Result<CodecSpec> GetCodec() const override;
Result<ChunkLayout> GetChunkLayout() const override;
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) const override;
Future<internal::Driver::Handle> Open(
DriverOpenRequest request) const override;
};
class DataCache : public internal_kvs_backed_chunk_driver::DataCache {
using Base = internal_kvs_backed_chunk_driver::DataCache;
public:
explicit DataCache(Initializer&& initializer, std::string key_prefix,
DimensionSeparator dimension_separator,
std::string metadata_key);
const ZarrMetadata& metadata() {
return *static_cast<const ZarrMetadata*>(initial_metadata().get());
}
absl::Status ValidateMetadataCompatibility(
const void* existing_metadata_ptr, const void* new_metadata_ptr) override;
void GetChunkGridBounds(const void* metadata_ptr, MutableBoxView<> bounds,
DimensionSet& implicit_lower_bounds,
DimensionSet& implicit_upper_bounds) override;
Result<std::shared_ptr<const void>> GetResizedMetadata(
const void* existing_metadata, span<const Index> new_inclusive_min,
span<const Index> new_exclusive_max) override;
static internal::ChunkGridSpecification GetChunkGridSpecification(
const ZarrMetadata& metadata);
Result<absl::InlinedVector<SharedArray<const void>, 1>> DecodeChunk(
span<const Index> chunk_indices, absl::Cord data) override;
Result<absl::Cord> EncodeChunk(
span<const Index> chunk_indices,
span<const SharedArray<const void>> component_arrays) override;
std::string GetChunkStorageKey(span<const Index> cell_indices) override;
absl::Status GetBoundSpecData(
internal_kvs_backed_chunk_driver::KvsDriverSpec& spec_base,
const void* metadata_ptr, size_t component_index) override;
Result<ChunkLayout> GetChunkLayoutFromMetadata(
const void* metadata_ptr, size_t component_index) override;
std::string GetBaseKvstorePath() override;
std::string key_prefix_;
DimensionSeparator dimension_separator_;
std::string metadata_key_;
};
class ZarrDriver;
using ZarrDriverBase = internal_kvs_backed_chunk_driver::RegisteredKvsDriver<
ZarrDriver, ZarrDriverSpec, DataCache,
internal::ChunkCacheReadWriteDriverMixin<
ZarrDriver, internal_kvs_backed_chunk_driver::KvsChunkedDriverBase>>;
class ZarrDriver : public ZarrDriverBase {
using Base = ZarrDriverBase;
public:
using Base::Base;
class OpenState;
const ZarrMetadata& metadata() const {
return *static_cast<const ZarrMetadata*>(
this->cache()->initial_metadata().get());
}
Result<CodecSpec> GetCodec() override;
Result<SharedArray<const void>> GetFillValue(
IndexTransformView<> transform) override;
Future<ArrayStorageStatistics> GetStorageStatistics(
GetStorageStatisticsRequest request) override;
};
}
}
TENSORSTORE_DECLARE_GARBAGE_COLLECTION_SPECIALIZATION(
tensorstore::internal_zarr::ZarrDriver)
#endif | #include "tensorstore/driver/zarr/driver_impl.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "tensorstore/driver/kvs_backed_chunk_driver_impl.h"
#include "tensorstore/driver/zarr/metadata.h"
#include "tensorstore/driver/zarr/spec.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/open.h"
#include "tensorstore/resize_options.h"
#include "tensorstore/tensorstore.h"
#include "tensorstore/transaction.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::Index;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::kImplicit;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::span;
using ::tensorstore::TransactionMode;
using ::tensorstore::internal_kvs_backed_chunk_driver::ResizeParameters;
using ::tensorstore::internal_zarr::DimensionSeparator;
using ::tensorstore::internal_zarr::ZarrDriver;
using ::tensorstore::internal_zarr::ZarrMetadata;
template <typename... Option>
Result<tensorstore::IndexTransform<>> ResolveBoundsFromMetadata(
const ZarrMetadata& metadata, std::string field,
tensorstore::IndexTransform<> transform,
tensorstore::ResolveBoundsOptions options) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto store,
tensorstore::Open({
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", ::nlohmann::json(metadata)},
{"field", field},
{"create", true},
})
.result());
return tensorstore::internal::TensorStoreAccess::handle(store)
.driver->ResolveBounds({{}, transform, options})
.result();
}
Result<ResizeParameters> GetResizeParameters(
const ZarrMetadata& metadata, std::string field,
tensorstore::IndexTransformView<> transform,
span<const Index> inclusive_min, span<const Index> exclusive_max,
tensorstore::ResizeOptions options,
TransactionMode transaction_mode = TransactionMode::no_transaction_mode) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto store,
tensorstore::Open({
{"driver", "zarr"},
{"kvstore", {{"driver", "memory"}}},
{"metadata", ::nlohmann::json(metadata)},
{"field", field},
{"create", true},
})
.result());
auto driver = tensorstore::internal::dynamic_pointer_cast<ZarrDriver>(
tensorstore::internal::TensorStoreAccess::handle(store).driver);
return tensorstore::internal_kvs_backed_chunk_driver::GetResizeParameters(
driver->cache(), &metadata, driver->component_index(), transform,
inclusive_min, exclusive_max, options, transaction_mode);
}
TEST(EncodeChunkIndicesTest, DotSeparated) {
EXPECT_EQ("1.2.3", EncodeChunkIndices(span<const Index>({1, 2, 3}),
DimensionSeparator::kDotSeparated));
}
TEST(EncodeChunkIndicesTest, SlashSeparated) {
EXPECT_EQ("1/2/3", EncodeChunkIndices(span<const Index>({1, 2, 3}),
DimensionSeparator::kSlashSeparated));
}
TEST(ResolveBoundsFromMetadataTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype", "<i2"},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
EXPECT_THAT(ResolveBoundsFromMetadata(
metadata, "",
tensorstore::IdentityTransform(2),
{}),
(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({100, 100})
.implicit_upper_bounds({1, 1})
.output_identity_transform()
.Finalize()
.value()));
}
TEST(ResolveBoundsFromMetadataTest, FixResizableBoundsSuccess) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype", "<i2"},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
tensorstore::ResolveBoundsOptions options;
options.Set(tensorstore::fix_resizable_bounds).IgnoreError();
EXPECT_THAT(ResolveBoundsFromMetadata(
metadata, "",
tensorstore::IdentityTransform(2),
options),
(tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({100, 100})
.output_identity_transform()
.Finalize()
.value()));
}
TEST(ResolveBoundsFromMetadataTest, FixResizableBoundsFailure) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype", "<i2"},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
tensorstore::ResolveBoundsOptions options;
options.Set(tensorstore::fix_resizable_bounds).IgnoreError();
EXPECT_THAT(ResolveBoundsFromMetadata(
metadata, "",
tensorstore::IdentityTransform(span<const Index>({200, 100})),
options),
MatchesStatus(absl::StatusCode::kOutOfRange));
}
TEST(ResolveBoundsFromMetadataTest, MultipleFieldsWithFieldShape) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype",
{
{"x", "<i2", {2, 3}},
{"y", "<i4", {4}},
}},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
EXPECT_THAT(
ResolveBoundsFromMetadata(
metadata, "x",
tensorstore::IdentityTransform(4), {}),
(tensorstore::IndexTransformBuilder<>(4, 4)
.input_origin({0, 0, 0, 0})
.input_shape({100, 100, 2, 3})
.implicit_upper_bounds({1, 1, 0, 0})
.output_identity_transform()
.Finalize()
.value()));
EXPECT_THAT(
ResolveBoundsFromMetadata(
metadata, "y",
tensorstore::IdentityTransform(3), {}),
(tensorstore::IndexTransformBuilder<>(3, 3)
.input_origin({0, 0, 0})
.input_shape({100, 100, 4})
.implicit_upper_bounds({1, 1, 0})
.output_identity_transform()
.Finalize()
.value()));
}
TEST(GetResizeParametersTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype", "<i2"},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
const auto transform = tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({100, 100})
.implicit_upper_bounds({1, 1})
.output_identity_transform()
.Finalize()
.value();
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p, GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}), {}));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_FALSE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
{
tensorstore::ResizeOptions options;
options.Set(tensorstore::expand_only).IgnoreError();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p,
GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}), options));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_TRUE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
{
tensorstore::ResizeOptions options;
options.Set(tensorstore::shrink_only).IgnoreError();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p,
GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}), options));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_FALSE(p.expand_only);
EXPECT_TRUE(p.shrink_only);
}
{
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p, GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}), {},
TransactionMode::atomic_isolated));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint, ::testing::ElementsAre(100, 100));
EXPECT_THAT(p.inclusive_min_constraint, ::testing::ElementsAre(0, 0));
EXPECT_FALSE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
{
tensorstore::ResizeOptions options;
options.Set(tensorstore::resize_metadata_only).IgnoreError();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p, GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, 150}),
options, TransactionMode::atomic_isolated));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_FALSE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
EXPECT_THAT(
GetResizeParameters(metadata,
"", transform,
span<const Index>({kImplicit, kImplicit}),
span<const Index>({kImplicit, kImplicit}), {}),
MatchesStatus(absl::StatusCode::kAborted));
EXPECT_THAT(
GetResizeParameters(metadata,
"",
tensorstore::IndexTransformBuilder<>(2, 2)
.input_origin({0, 0})
.input_shape({100, 100})
.implicit_lower_bounds({1, 1})
.implicit_upper_bounds({1, 1})
.output_identity_transform()
.Finalize()
.value(),
span<const Index>({2, kImplicit}),
span<const Index>({kImplicit, kImplicit}), {}),
MatchesStatus(absl::StatusCode::kFailedPrecondition));
}
TEST(GetResizeParametersTest, MultipleFields) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto metadata, ZarrMetadata::FromJson({
{"zarr_format", 2},
{"order", "C"},
{"filters", nullptr},
{"fill_value", nullptr},
{"compressor", nullptr},
{"dtype",
{
{"x", "<i2", {2, 3}},
{"y", "<i4", {4}},
}},
{"shape", {100, 100}},
{"chunks", {3, 2}},
}));
const auto transform = tensorstore::IndexTransformBuilder<>(4, 4)
.input_origin({0, 0, 0, 0})
.input_shape({100, 100, 2, 3})
.implicit_lower_bounds({1, 1, 1, 1})
.implicit_upper_bounds({1, 1, 1, 1})
.output_identity_transform()
.Finalize()
.value();
EXPECT_THAT(
GetResizeParameters(
metadata,
"x", transform,
span<const Index>({kImplicit, kImplicit, kImplicit, kImplicit}),
span<const Index>({kImplicit, 150, kImplicit, kImplicit}), {}),
MatchesStatus(absl::StatusCode::kFailedPrecondition,
"Resize operation would affect other fields but "
"`resize_tied_bounds` was not specified"));
tensorstore::ResizeOptions options;
options.Set(tensorstore::ResizeMode::resize_tied_bounds).IgnoreError();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto p,
GetResizeParameters(
metadata,
"x", transform,
span<const Index>({kImplicit, kImplicit, kImplicit, kImplicit}),
span<const Index>({kImplicit, 150, kImplicit, kImplicit}), options));
EXPECT_THAT(p.new_exclusive_max, ::testing::ElementsAre(kImplicit, 150));
EXPECT_THAT(p.exclusive_max_constraint,
::testing::ElementsAre(kImplicit, kImplicit));
EXPECT_FALSE(p.expand_only);
EXPECT_FALSE(p.shrink_only);
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/driver_impl.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/zarr/driver_impl_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
44ee2e2d-d074-4a87-b125-2bf59679e221 | cpp | tensorflow/tensorflow | tracking_allocator | third_party/xla/xla/tsl/framework/tracking_allocator.cc | tensorflow/core/framework/tracking_allocator_test.cc | #include "xla/tsl/framework/tracking_allocator.h"
#include "tsl/platform/env.h"
#include "tsl/platform/logging.h"
namespace tsl {
// Wraps `allocator` to record allocation statistics. When `track_sizes` is
// true and the wrapped allocator does not itself track allocation sizes,
// this wrapper keeps a local map of live allocations (track_sizes_locally_).
// ref_ starts at 1 for the creator; the object deletes itself once the
// count drops to zero (see UnRef / DeallocateRaw / GetRecordsAndUnRef).
TrackingAllocator::TrackingAllocator(Allocator* allocator, bool track_sizes)
    : allocator_(allocator),
      ref_(1),
      allocated_(0),
      high_watermark_(0),
      total_bytes_(0),
      track_sizes_locally_(track_sizes && !allocator_->TracksAllocationSizes()),
      next_allocation_id_(0) {}
// Allocates through the wrapped allocator and records statistics for the
// allocation. Every successful allocation takes an extra reference on
// `this`, released when the pointer is deallocated.
void* TrackingAllocator::AllocateRaw(
    size_t alignment, size_t num_bytes,
    const AllocationAttributes& allocation_attr) {
  void* ptr = allocator_->AllocateRaw(alignment, num_bytes, allocation_attr);
  if (nullptr == ptr) {
    // Allocation failed; nothing to record.
    return ptr;
  }
  if (allocator_->TracksAllocationSizes()) {
    // The wrapped allocator can report the true allocated size directly.
    size_t allocated_bytes = allocator_->AllocatedSize(ptr);
    {
      mutex_lock lock(mu_);
      allocated_ += allocated_bytes;
      high_watermark_ = std::max(high_watermark_, allocated_);
      total_bytes_ += allocated_bytes;
      allocations_.emplace_back(allocated_bytes, Env::Default()->NowMicros());
      ++ref_;  // Balanced by UnRef() on deallocation.
    }
  } else if (track_sizes_locally_) {
    // Query the (possibly slow) size outside the lock, then remember the
    // chunk locally so later queries do not need the wrapped allocator.
    size_t allocated_bytes = allocator_->AllocatedSizeSlow(ptr);
    allocated_bytes = std::max(num_bytes, allocated_bytes);
    mutex_lock lock(mu_);
    next_allocation_id_ += 1;
    Chunk chunk = {num_bytes, allocated_bytes, next_allocation_id_};
    in_use_.emplace(std::make_pair(ptr, chunk));
    allocated_ += allocated_bytes;
    high_watermark_ = std::max(high_watermark_, allocated_);
    total_bytes_ += allocated_bytes;
    allocations_.emplace_back(allocated_bytes, Env::Default()->NowMicros());
    ++ref_;
  } else {
    // No size information available: only the requested byte count is
    // accumulated; allocated_/high_watermark_ stay at zero.
    mutex_lock lock(mu_);
    total_bytes_ += num_bytes;
    allocations_.emplace_back(num_bytes, Env::Default()->NowMicros());
    ++ref_;
  }
  return ptr;
}
// Deallocates `ptr` through the wrapped allocator, updates statistics, and
// drops the reference taken in AllocateRaw. May `delete this` if that was
// the last reference.
void TrackingAllocator::DeallocateRaw(void* ptr) {
  // Deallocating a null pointer is a no-op.
  if (nullptr == ptr) {
    return;
  }
  bool should_delete;
  // Query the (possibly slow) size before taking the lock.
  bool tracks_allocation_sizes = allocator_->TracksAllocationSizes();
  size_t allocated_bytes = 0;
  if (tracks_allocation_sizes) {
    allocated_bytes = allocator_->AllocatedSize(ptr);
  } else if (track_sizes_locally_) {
    // Fall back to the locally tracked chunk, removing it from the map.
    mutex_lock lock(mu_);
    auto itr = in_use_.find(ptr);
    if (itr != in_use_.end()) {
      tracks_allocation_sizes = true;
      allocated_bytes = (*itr).second.allocated_size;
      in_use_.erase(itr);
    }
  }
  // Copy the allocator pointer: `this` may be deleted below, and the real
  // deallocation must happen outside the lock.
  Allocator* allocator = allocator_;
  {
    mutex_lock lock(mu_);
    if (tracks_allocation_sizes) {
      CHECK_GE(allocated_, allocated_bytes);
      allocated_ -= allocated_bytes;
      // Record a negative delta for the deallocation.
      allocations_.emplace_back(-allocated_bytes, Env::Default()->NowMicros());
    }
    should_delete = UnRef();
  }
  allocator->DeallocateRaw(ptr);
  if (should_delete) {
    delete this;
  }
}
// Sizes are available if either this wrapper records them locally or the
// wrapped allocator tracks them natively.
bool TrackingAllocator::TracksAllocationSizes() const {
  if (track_sizes_locally_) {
    return true;
  }
  return allocator_->TracksAllocationSizes();
}
// Returns the size originally requested for `ptr`, or 0 when the pointer
// is not known to the local tracking map. Delegates to the wrapped
// allocator when sizes are not tracked locally.
size_t TrackingAllocator::RequestedSize(const void* ptr) const {
  if (!track_sizes_locally_) {
    return allocator_->RequestedSize(ptr);
  }
  mutex_lock lock(mu_);
  const auto found = in_use_.find(ptr);
  return found == in_use_.end() ? 0 : found->second.requested_size;
}
// Returns the actual allocated size for `ptr`, or 0 when the pointer is
// not known to the local tracking map. Delegates to the wrapped allocator
// when sizes are not tracked locally.
size_t TrackingAllocator::AllocatedSize(const void* ptr) const {
  if (!track_sizes_locally_) {
    return allocator_->AllocatedSize(ptr);
  }
  mutex_lock lock(mu_);
  const auto found = in_use_.find(ptr);
  return found == in_use_.end() ? 0 : found->second.allocated_size;
}
// Returns the locally assigned allocation id for `ptr`, or 0 when the
// pointer is unknown. Delegates to the wrapped allocator when ids are not
// tracked locally.
int64_t TrackingAllocator::AllocationId(const void* ptr) const {
  if (!track_sizes_locally_) {
    return allocator_->AllocationId(ptr);
  }
  mutex_lock lock(mu_);
  const auto found = in_use_.find(ptr);
  return found == in_use_.end() ? 0 : found->second.allocation_id;
}
// Statistics are not aggregated by the wrapper; pass through to the
// wrapped allocator.
absl::optional<AllocatorStats> TrackingAllocator::GetStats() {
  return allocator_->GetStats();
}
// Pass-through: clears the wrapped allocator's stats, not the local
// tracking state kept by this wrapper.
bool TrackingAllocator::ClearStats() { return allocator_->ClearStats(); }
std::tuple<size_t, size_t, size_t> TrackingAllocator::GetSizes() {
size_t high_watermark;
size_t total_bytes;
size_t still_live_bytes;
{
mutex_lock lock(mu_);
high_watermark = high_watermark_;
total_bytes = total_bytes_;
still_live_bytes = allocated_;
}
return std::make_tuple(total_bytes, high_watermark, still_live_bytes);
}
// Moves out the accumulated allocation records and releases the creator's
// reference. May `delete this` if every tracked allocation has already
// been freed; callers must not use the object afterwards.
absl::InlinedVector<AllocRecord, 4UL> TrackingAllocator::GetRecordsAndUnRef() {
  bool should_delete;
  absl::InlinedVector<AllocRecord, 4UL> allocations;
  {
    mutex_lock lock(mu_);
    // Swap rather than copy: records are handed off, not duplicated.
    allocations.swap(allocations_);
    should_delete = UnRef();
  }
  if (should_delete) {
    delete this;
  }
  return allocations;
}
// Returns a snapshot copy of the allocation records accumulated so far,
// without releasing any reference.
absl::InlinedVector<AllocRecord, 4UL> TrackingAllocator::GetCurrentRecords() {
  mutex_lock lock(mu_);
  return absl::InlinedVector<AllocRecord, 4UL>(allocations_.begin(),
                                               allocations_.end());
}
// Drops one reference; returns true when the caller must delete the
// object. Callers already hold mu_.
bool TrackingAllocator::UnRef() {
  CHECK_GE(ref_, 1);
  ref_ -= 1;
  return 0 == ref_;
}
} | #include "tensorflow/core/framework/tracking_allocator.h"
#include <unordered_map>
#include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
// Test allocator that records requested sizes in a map so tests can
// exercise TrackingAllocator against a wrapped allocator that reports
// sizes (TracksAllocationSizes() == true).
class TestableSizeTrackingAllocator : public Allocator {
 public:
  string Name() override { return "test"; }
  void* AllocateRaw(size_t /*alignment*/, size_t num_bytes) override {
    void* ptr = port::Malloc(num_bytes);
    size_map_[ptr] = num_bytes;
    return ptr;
  }
  void DeallocateRaw(void* ptr) override {
    // Every freed pointer must have been allocated here.
    const auto& iter = size_map_.find(ptr);
    EXPECT_NE(size_map_.end(), iter);
    size_map_.erase(iter);
    port::Free(ptr);
  }
  bool TracksAllocationSizes() const override { return true; }
  size_t RequestedSize(const void* ptr) const override {
    const auto& iter = size_map_.find(ptr);
    EXPECT_NE(size_map_.end(), iter);
    return iter->second;
  }
  absl::optional<AllocatorStats> GetStats() override { return absl::nullopt; }

 private:
  // Maps live pointers to their requested sizes.
  std::unordered_map<const void*, size_t> size_map_;
};
// Test allocator whose allocations always fail, used to verify that
// TrackingAllocator records nothing for failed allocations.
class NoMemoryAllocator : public Allocator {
 public:
  string Name() override { return "test"; }
  void* AllocateRaw(size_t /*alignment*/, size_t num_bytes) override {
    return nullptr;  // Simulate out-of-memory.
  }
  void DeallocateRaw(void* ptr) override {}
  bool TracksAllocationSizes() const override { return true; }
  absl::optional<AllocatorStats> GetStats() override { return absl::nullopt; }
};
// With the system CPU allocator (which does not track sizes), statistics
// depend on the `track_sizes` flag: without it only total requested bytes
// are recorded; with it sizes, ids, and live/high-watermark counts appear.
TEST(TrackingAllocatorTest, SimpleNoTracking) {
  Allocator* a = cpu_allocator();
  EXPECT_FALSE(a->TracksAllocationSizes());

  // track_sizes == false: only total bytes and per-allocation records.
  TrackingAllocator* ta = new TrackingAllocator(a, false);
  void* p1 = ta->AllocateRaw(4, 4);
  ta->DeallocateRaw(p1);
  void* p2 = ta->AllocateRaw(4, 12);
  std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
  EXPECT_EQ(16, std::get<0>(sizes));
  EXPECT_EQ(0, std::get<1>(sizes));   // no high watermark tracked
  EXPECT_EQ(0, std::get<2>(sizes));   // no live-bytes tracked
  ta->DeallocateRaw(p2);
  auto records = ta->GetRecordsAndUnRef();  // releases ta
  EXPECT_EQ(4, records[0].alloc_bytes);
  EXPECT_EQ(12, records[1].alloc_bytes);

  // track_sizes == true: local size tracking kicks in.
  ta = new TrackingAllocator(a, true);
  p1 = ta->AllocateRaw(4, 4);
  EXPECT_EQ(4, ta->RequestedSize(p1));
  EXPECT_LE(4, ta->AllocatedSize(p1));
  EXPECT_EQ(1, ta->AllocationId(p1));
  ta->DeallocateRaw(p1);
  p2 = ta->AllocateRaw(4, 12);
  EXPECT_EQ(12, ta->RequestedSize(p2));
  EXPECT_LE(12, ta->AllocatedSize(p2));
  EXPECT_EQ(2, ta->AllocationId(p2));
  sizes = ta->GetSizes();
  EXPECT_LE(16, std::get<0>(sizes));
  EXPECT_LE(12, std::get<1>(sizes));
  EXPECT_LE(12, std::get<2>(sizes));
  ta->DeallocateRaw(p2);
  records = ta->GetRecordsAndUnRef();
  // Records alternate alloc (positive) / dealloc (negative) deltas.
  EXPECT_LE(4, records[0].alloc_bytes);
  EXPECT_GE(-4, records[1].alloc_bytes);
  EXPECT_LE(12, records[2].alloc_bytes);
  EXPECT_GE(-12, records[3].alloc_bytes);
}
// With a wrapped allocator that tracks sizes natively, exact totals, high
// watermark, live bytes, and signed per-allocation records are produced.
TEST(TrackingAllocatorTest, SimpleTracking) {
  TestableSizeTrackingAllocator a = TestableSizeTrackingAllocator();
  EXPECT_TRUE(a.TracksAllocationSizes());
  TrackingAllocator* ta = new TrackingAllocator(&a, false);
  void* p1 = ta->AllocateRaw(4, 12);
  ta->DeallocateRaw(p1);
  void* p2 = ta->AllocateRaw(4, 4);
  std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
  EXPECT_EQ(16, std::get<0>(sizes));  // total allocated
  EXPECT_EQ(12, std::get<1>(sizes));  // high watermark
  EXPECT_EQ(4, std::get<2>(sizes));   // still live (p2)
  ta->DeallocateRaw(p2);
  auto records = ta->GetRecordsAndUnRef();  // releases ta
  EXPECT_EQ(12, records[0].alloc_bytes);
  EXPECT_EQ(-12, records[1].alloc_bytes);
  EXPECT_EQ(4, records[2].alloc_bytes);
  EXPECT_EQ(-4, records[3].alloc_bytes);
}
// Failed allocations must leave all statistics and records untouched.
TEST(TrackingAllocatorTest, OutOfMemory) {
  NoMemoryAllocator a;
  EXPECT_TRUE(a.TracksAllocationSizes());
  TrackingAllocator* ta = new TrackingAllocator(&a, false);
  void* p1 = ta->AllocateRaw(4, 12);
  EXPECT_EQ(nullptr, p1);
  std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
  EXPECT_EQ(0, std::get<0>(sizes));
  EXPECT_EQ(0, std::get<1>(sizes));
  EXPECT_EQ(0, std::get<2>(sizes));
  EXPECT_EQ(0, ta->GetRecordsAndUnRef().size());
}
// Deallocating a null pointer is a no-op and records nothing.
TEST(TrackingAllocatorTest, FreeNullPtr) {
  NoMemoryAllocator a;
  EXPECT_TRUE(a.TracksAllocationSizes());
  TrackingAllocator* ta = new TrackingAllocator(&a, false);
  ta->DeallocateRaw(nullptr);
  std::tuple<size_t, size_t, size_t> sizes = ta->GetSizes();
  EXPECT_EQ(0, std::get<0>(sizes));
  EXPECT_EQ(0, std::get<1>(sizes));
  EXPECT_EQ(0, std::get<2>(sizes));
  EXPECT_EQ(0, ta->GetRecordsAndUnRef().size());
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tsl/framework/tracking_allocator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/framework/tracking_allocator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
5036342e-0544-4dc5-a2c3-c2d51efedf19 | cpp | google/arolla | qtype_traits | arolla/qtype/qtype_traits.h | arolla/qtype/qtype_traits_test.cc | #ifndef AROLLA_QTYPE_QTYPE_TRAITS_H_
#define AROLLA_QTYPE_QTYPE_TRAITS_H_
#include <type_traits>
#include "absl/base/attributes.h"
#include "absl/log/check.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/util/demangle.h"
namespace arolla {
template <typename T>
struct QTypeTraits;
// Detection trait: true iff QTypeTraits<T>::type() is well-formed, i.e. a
// QTypeTraits specialization for T has been declared and is visible here.
template <typename T, typename = void>
struct has_qtype_traits : std::false_type {};
template <typename T>
struct has_qtype_traits<T, std::void_t<decltype(QTypeTraits<T>::type())>>
    : std::true_type {};
// Convenience variable template for has_qtype_traits<T>::value.
template <typename T>
constexpr bool has_qtype_traits_v = has_qtype_traits<T>::value;
// Returns the singleton QTypePtr corresponding to C++ type T.
// Fails at compile time with an actionable message when no QTypeTraits<T>
// specialization is visible; debug builds additionally validate that the
// registered QType really describes T (matching type_info and a frame
// layout large enough to hold a T).
template <typename T>
ABSL_ATTRIBUTE_ALWAYS_INLINE inline QTypePtr GetQType() {
  // Fixed typo in the diagnostic: "expliclty" -> "explicitly".
  static_assert(
      has_qtype_traits_v<T>,
      "QTypeTraits<T> specialization is not included. #include file with "
      "QTypeTraits<T> explicitly to fix this problem. "
      "E.g., #include \"arolla/qtype/base_types.h\" for standard "
      "Arolla scalar and OptionalValue types.");
  // Sanity check: the registered QType must describe exactly T.
  DCHECK(typeid(T) == QTypeTraits<T>::type()->type_info())
      << "There is an error in the QType implementation for "
      << QTypeTraits<T>::type()->name();
  // Sanity check: the QType's frame layout must be able to hold a T.
  DCHECK(sizeof(T) <= QTypeTraits<T>::type()->type_layout().AllocSize())
      << "QType " << QTypeTraits<T>::type()->name()
      << " has too small frame layout to carry a value of C++ type "
      << TypeName<T>();
  return QTypeTraits<T>::type();
}
// Declares (but does not define) a QTypeTraits specialization for the
// given C++ type; the matching type() definition must be provided
// elsewhere (typically in a .cc file).
#define AROLLA_DECLARE_QTYPE(...)     \
  template <>                         \
  struct QTypeTraits<__VA_ARGS__> {  \
    static QTypePtr type();           \
  }
// QTypePtr itself has a QType (the "qtype of qtypes"); it is defined
// inline because GetQTypeQType() is always available from qtype.h.
template <>
struct QTypeTraits<QTypePtr> {
  static QTypePtr type() { return GetQTypeQType(); }
};
}
#endif | #include "arolla/qtype/qtype_traits.h"
namespace arolla {
// Compile-time tests: has_qtype_traits_v must be true for a type with a
// declared QTypeTraits specialization and false for one without.
struct WithQTypeTraits {};
AROLLA_DECLARE_QTYPE(WithQTypeTraits);
struct WithoutQTypeTraits {};
static_assert(has_qtype_traits_v<WithQTypeTraits>);
static_assert(!has_qtype_traits_v<WithoutQTypeTraits>);
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/qtype_traits.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/qtype/qtype_traits_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
7e858b4c-c1de-484e-a11b-a26d73efd5ce | cpp | abseil/abseil-cpp | bit_gen_ref | absl/random/bit_gen_ref.h | absl/random/bit_gen_ref_test.cc | #ifndef ABSL_RANDOM_BIT_GEN_REF_H_
#define ABSL_RANDOM_BIT_GEN_REF_H_
#include <limits>
#include <type_traits>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/base/internal/fast_type_id.h"
#include "absl/base/macros.h"
#include "absl/meta/type_traits.h"
#include "absl/random/internal/distribution_caller.h"
#include "absl/random/internal/fast_uniform_bits.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace random_internal {
// Trait detecting whether URBG satisfies the uniform-random-bit-generator
// interface: result_type, static min(), static max(), and operator() must
// all exist, and the latter three must yield exactly URBG::result_type.
template <typename URBG, typename = void, typename = void, typename = void>
struct is_urbg : std::false_type {};
template <typename URBG>
struct is_urbg<
    URBG,
    // (min)() returns result_type. The parentheses defeat any min() macro.
    absl::enable_if_t<std::is_same<
        typename URBG::result_type,
        typename std::decay<decltype((URBG::min)())>::type>::value>,
    // (max)() returns result_type.
    absl::enable_if_t<std::is_same<
        typename URBG::result_type,
        typename std::decay<decltype((URBG::max)())>::type>::value>,
    // operator() returns result_type.
    absl::enable_if_t<std::is_same<
        typename URBG::result_type,
        typename std::decay<decltype(std::declval<URBG>()())>::type>::value>>
    : std::true_type {};
template <typename>
struct DistributionCaller;
class MockHelpers;
}
// BitGenRef is a type-erasing, non-owning reference to any URBG (uniform
// random bit generator). It lets APIs accept a generator without being
// templated on its concrete type, while still forwarding mock dispatch
// when the referenced generator exposes an InvokeMock member (e.g. a
// mocking generator). The referenced generator must outlive the BitGenRef.
class BitGenRef {
  // Classic detection idiom: detector<Trait, void, Args...> is true_type
  // exactly when Trait<Args...> is well-formed.
  template <template <class...> class Trait, class AlwaysVoid, class... Args>
  struct detector : std::false_type {};
  template <template <class...> class Trait, class... Args>
  struct detector<Trait, absl::void_t<Trait<Args...>>, Args...>
      : std::true_type {};

  // Expression type of T::InvokeMock(FastTypeIdType, void*, void*).
  template <class T>
  using invoke_mock_t = decltype(std::declval<T*>()->InvokeMock(
      std::declval<base_internal::FastTypeIdType>(), std::declval<void*>(),
      std::declval<void*>()));

  // True when T provides an InvokeMock member.
  template <typename T>
  using HasInvokeMock = typename detector<invoke_mock_t, void, T>::type;

 public:
  BitGenRef(const BitGenRef&) = default;
  BitGenRef(BitGenRef&&) = default;
  BitGenRef& operator=(const BitGenRef&) = default;
  BitGenRef& operator=(BitGenRef&&) = default;

  // Constructor for URBGs without InvokeMock: mock dispatch is a no-op.
  template <
      typename URBGRef, typename URBG = absl::remove_cvref_t<URBGRef>,
      typename absl::enable_if_t<(!std::is_same<URBG, BitGenRef>::value &&
                                  random_internal::is_urbg<URBG>::value &&
                                  !HasInvokeMock<URBG>::value)>* = nullptr>
  BitGenRef(URBGRef&& gen ABSL_ATTRIBUTE_LIFETIME_BOUND)
      : t_erased_gen_ptr_(reinterpret_cast<uintptr_t>(&gen)),
        mock_call_(NotAMock),
        generate_impl_fn_(ImplFn<URBG>) {}

  // Constructor for URBGs with InvokeMock: mock calls are forwarded.
  template <typename URBGRef, typename URBG = absl::remove_cvref_t<URBGRef>,
            typename absl::enable_if_t<(!std::is_same<URBG, BitGenRef>::value &&
                                        random_internal::is_urbg<URBG>::value &&
                                        HasInvokeMock<URBG>::value)>* = nullptr>
  BitGenRef(URBGRef&& gen ABSL_ATTRIBUTE_LIFETIME_BOUND)
      : t_erased_gen_ptr_(reinterpret_cast<uintptr_t>(&gen)),
        mock_call_(&MockCall<URBG>),
        generate_impl_fn_(ImplFn<URBG>) {}

  // BitGenRef itself satisfies the URBG interface with 64-bit output.
  using result_type = uint64_t;

  static constexpr result_type(min)() {
    return (std::numeric_limits<result_type>::min)();
  }

  static constexpr result_type(max)() {
    return (std::numeric_limits<result_type>::max)();
  }

  result_type operator()() { return generate_impl_fn_(t_erased_gen_ptr_); }

 private:
  using impl_fn = result_type (*)(uintptr_t);
  using mock_call_fn = bool (*)(uintptr_t, base_internal::FastTypeIdType, void*,
                                void*);

  // Produces 64 uniform bits from the erased generator, widening/narrowing
  // via FastUniformBits as needed.
  template <typename URBG>
  static result_type ImplFn(uintptr_t ptr) {
    absl::random_internal::FastUniformBits<result_type> fast_uniform_bits;
    return fast_uniform_bits(*reinterpret_cast<URBG*>(ptr));
  }

  // Forwards a mock invocation to the erased generator's InvokeMock.
  template <typename URBG>
  static bool MockCall(uintptr_t gen_ptr, base_internal::FastTypeIdType type,
                       void* result, void* arg_tuple) {
    return reinterpret_cast<URBG*>(gen_ptr)->InvokeMock(type, result,
                                                        arg_tuple);
  }
  // Sentinel used when the referenced generator has no InvokeMock.
  static bool NotAMock(uintptr_t, base_internal::FastTypeIdType, void*, void*) {
    return false;
  }

  inline bool InvokeMock(base_internal::FastTypeIdType type, void* args_tuple,
                         void* result) {
    if (mock_call_ == NotAMock) return false;  // avoids an indirect call
    return mock_call_(t_erased_gen_ptr_, type, args_tuple, result);
  }

  uintptr_t t_erased_gen_ptr_;   // address of the referenced generator
  mock_call_fn mock_call_;       // mock dispatch (or NotAMock)
  impl_fn generate_impl_fn_;     // bit generation for the erased type

  template <typename>
  friend struct ::absl::random_internal::DistributionCaller;  // for InvokeMock
  friend class ::absl::random_internal::MockHelpers;          // for InvokeMock
};
ABSL_NAMESPACE_END
}
#endif | #include "absl/random/bit_gen_ref.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/internal/fast_type_id.h"
#include "absl/random/internal/sequence_urbg.h"
#include "absl/random/random.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
// Minimal URBG whose InvokeMock always intercepts distribution calls and
// writes 42 into the result slot; used to verify that BitGenRef forwards
// mock dispatch.
class ConstBitGen {
 public:
  using result_type = absl::BitGen::result_type;
  static constexpr result_type(min)() { return (absl::BitGen::min)(); }
  static constexpr result_type(max)() { return (absl::BitGen::max)(); }
  result_type operator()() { return 1; }

  // Always claims the call was mocked; the int result becomes 42.
  bool InvokeMock(base_internal::FastTypeIdType index, void*, void* result) {
    *static_cast<int*>(result) = 42;
    return true;
  }
};
namespace {
// Helper that consumes a BitGenRef; draws a uniform int in [1, 7).
int FnTest(absl::BitGenRef gen_ref) { return absl::Uniform(gen_ref, 1, 7); }
// Typed test fixture instantiated for several absl and std generators, to
// show BitGenRef can reference any conforming URBG.
template <typename T>
class BitGenRefTest : public testing::Test {};
using BitGenTypes =
    ::testing::Types<absl::BitGen, absl::InsecureBitGen, std::mt19937,
                     std::mt19937_64, std::minstd_rand>;
TYPED_TEST_SUITE(BitGenRefTest, BitGenTypes);
// A generator passed by value to a BitGenRef parameter yields a value in
// the distribution's [1, 7) range (checked as 4 +/- 3).
TYPED_TEST(BitGenRefTest, BasicTest) {
  TypeParam gen;
  auto x = FnTest(gen);
  EXPECT_NEAR(x, 4, 3);
}
// A BitGenRef can itself be passed by value (copied) into the helper.
TYPED_TEST(BitGenRefTest, Copyable) {
  TypeParam gen;
  absl::BitGenRef gen_ref(gen);
  FnTest(gen_ref);  // Copy
}
// Drawing through a BitGenRef must produce exactly the same 64-bit values
// as the underlying deterministic sequence generator.
TEST(BitGenRefTest, PassThroughEquivalence) {
  absl::random_internal::sequence_urbg urbg(
      {0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
       0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
       0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull,
       0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull});

  std::vector<uint64_t> output(12);
  {
    // Draw through a view of the generator.
    absl::BitGenRef view(urbg);
    for (auto& v : output) {
      v = view();
    }
  }

  // Expected equals the seeded sequence, unchanged by the indirection.
  std::vector<uint64_t> expected(
      {0x0003eb76f6f7f755ull, 0xFFCEA50FDB2F953Bull, 0xC332DDEFBE6C5AA5ull,
       0x6558218568AB9702ull, 0x2AEF7DAD5B6E2F84ull, 0x1521B62829076170ull,
       0xECDD4775619F1510ull, 0x13CCA830EB61BD96ull, 0x0334FE1EAA0363CFull,
       0xB5735C904C70A239ull, 0xD59E9E0BCBAADE14ull, 0xEECC86BC60622CA7ull});

  EXPECT_THAT(output, testing::Eq(expected));
}
// When the referenced generator exposes InvokeMock, BitGenRef must forward
// distribution calls to it — both when constructed implicitly at the call
// site and when an explicit BitGenRef is passed.
TEST(BitGenRefTest, MockingBitGenBaseOverrides) {
  ConstBitGen const_gen;
  EXPECT_EQ(FnTest(const_gen), 42);

  absl::BitGenRef gen_ref(const_gen);
  EXPECT_EQ(FnTest(gen_ref), 42);
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/bit_gen_ref.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/random/bit_gen_ref_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
9c4ce22d-cbf0-4e42-b3eb-255b77c035b5 | cpp | tensorflow/tensorflow | grappler_item_builder | tensorflow/core/grappler/grappler_item_builder.cc | tensorflow/core/grappler/grappler_item_builder_test.cc | #include "tensorflow/core/grappler/grappler_item_builder.h"
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_mgr.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/graph_optimizer.h"
#include "tensorflow/core/framework/attr_value.pb.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/function.pb.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/framework/types.pb.h"
#include "tensorflow/core/framework/variable.pb.h"
#include "tensorflow/core/framework/versions.pb.h"
#include "tensorflow/core/grappler/inputs/utils.h"
#include "tensorflow/core/grappler/op_types.h"
#include "tensorflow/core/grappler/optimizers/model_pruner.h"
#include "tensorflow/core/grappler/utils.h"
#include "tensorflow/core/lib/gtl/map_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/protobuf_internal.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
#include "tensorflow/core/protobuf/saver.pb.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace grappler {
namespace {
// Fills `tensor` with deterministic fake data so that fake feeds are
// reproducible: floats cycle through 0.0..0.6, int64s through 0..6, and
// all other POD types are zeroed. String/resource/variant tensors are
// left untouched.
void InitializeTensor(DataType type, Tensor* tensor) {
  constexpr int kPeriod = 7;
  switch (type) {
    case DT_FLOAT: {
      auto values = tensor->flat<float>();
      for (int i = 0; i < values.size(); ++i) {
        values(i) = static_cast<float>(i % kPeriod) / 10.0f;
      }
      break;
    }
    case DT_INT64: {
      auto values = tensor->flat<int64_t>();
      for (int i = 0; i < values.size(); ++i) {
        values(i) = i % kPeriod;
      }
      break;
    }
    case DT_STRING:
    case DT_RESOURCE:
    case DT_VARIANT:
      // Non-POD / handle types: leave default-constructed contents.
      break;
    default:
      // Any other POD dtype: zero the raw buffer.
      memset(const_cast<char*>(tensor->tensor_data().data()), 0,
             tensor->tensor_data().size());
      break;
  }
}
// Applies ModelPruner to item->graph in place, removing nodes that do not
// contribute to the item's fetch/keep sets.
Status PruneGraph(GrapplerItem* item) {
  ModelPruner pruner;
  GraphDef pruned_graph;
  Cluster* cluster = nullptr;  // ModelPruner does not need cluster info.
  TF_RETURN_IF_ERROR(pruner.Optimize(cluster, *item, &pruned_graph));
  item->graph = std::move(pruned_graph);
  return absl::OkStatus();
}
// Replaces unknown (-1) dimensions in `shape_pb_in`. In the concrete
// TensorShape output, each -1 becomes cfg.placeholder_unknown_output_shape_dim
// when that is set (>= 0), otherwise at least 1; the proto output keeps
// the original size unless the config override applies. Returns an error
// when the resulting dims do not form a valid shape.
Status ReplaceUnknownShapeDim(const ItemConfig& cfg,
                              const TensorShapeProto& shape_pb_in,
                              TensorShapeProto* shape_pb_out,
                              TensorShape* shape_out) {
  std::vector<int32> dims;
  for (const auto& dim_proto : shape_pb_in.dim()) {
    if (cfg.placeholder_unknown_output_shape_dim >= 0 &&
        dim_proto.size() == -1) {
      // Unknown dim with a configured substitute: use it in both outputs.
      dims.push_back(cfg.placeholder_unknown_output_shape_dim);
      shape_pb_out->add_dim()->set_size(
          cfg.placeholder_unknown_output_shape_dim);
    } else {
      // Clamp to >= 1 for the concrete shape, but preserve the original
      // (possibly -1) size in the proto.
      dims.push_back(std::max<int32>(1, dim_proto.size()));
      shape_pb_out->add_dim()->set_size(dim_proto.size());
    }
  }
  return TensorShapeUtils::MakeShape(dims.data(), dims.size(), shape_out);
}
// Derives a concrete shape for a Placeholder node, registers a fake feed
// tensor for it in `new_item`, and updates the node's "shape" attribute.
// Placeholders already fed via signature defs (`signature_feed_nodes`) do
// not get an extra feed entry.
Status UpdatePlaceholderShape(
    const ItemConfig& cfg,
    const std::unordered_set<string>& signature_feed_nodes,
    GrapplerItem* new_item, NodeDef* node) {
  if (node->attr().count("dtype") == 0) {
    return absl::InternalError(absl::StrCat("Unknown type for placeholder ",
                                            node->name(),
                                            ", skipping this input"));
  }
  DataType type = node->attr().at("dtype").type();

  if (node->attr().count("shape") == 0) {
    return absl::InternalError(absl::StrCat("Unknown shape for placeholder ",
                                            node->name(),
                                            ", skipping this input"));
  }

  // Resolve unknown dims before building the fake input tensor.
  TensorShape shape;
  TensorShapeProto shape_proto;
  Status make_shape_status = ReplaceUnknownShapeDim(
      cfg, node->attr().at("shape").shape(), &shape_proto, &shape);
  if (!make_shape_status.ok()) {
    return absl::InternalError(
        absl::StrCat("Invalid shape for placeholder ", node->name(), ": ",
                     make_shape_status.ToString(), ", skipping this input"));
  }

  // Some placeholders carry a scalar "shape" attr but a richer
  // "_output_shapes" attr; prefer the latter when a dim override is set.
  if ((cfg.placeholder_unknown_output_shape_dim >= 0) && (shape.dims() == 0) &&
      (node->attr().count("_output_shapes") == 1)) {
    const auto& output_shapes =
        node->attr().at("_output_shapes").list().shape(0);

    if (output_shapes.dim_size() != 0) {
      shape.Clear();
      shape_proto.clear_dim();

      for (const auto& dim : output_shapes.dim()) {
        auto size = dim.size();
        if (size == -1) size = cfg.placeholder_unknown_output_shape_dim;
        TF_RETURN_IF_ERROR(shape.AddDimWithStatus(size));
        shape_proto.add_dim()->set_size(size);
      }
    }
  }

  // Build a deterministic fake input for the placeholder.
  Tensor fake_input(type, shape);
  InitializeTensor(type, &fake_input);

  if (cfg.feed_nodes.empty()) {
    // No explicit feed set: feed every placeholder not already fed via a
    // signature def.
    if (signature_feed_nodes.count(node->name()) == 0) {
      new_item->feed.emplace_back(node->name(), fake_input);
    }
  } else if (cfg.feed_nodes.count(node->name()) > 0) {
    // Explicit feed set: replace the empty Tensor registered earlier for
    // this node with the fake input.
    auto it = find_if(new_item->feed.begin(), new_item->feed.end(),
                      [&node](std::pair<string, Tensor>& f) {
                        return f.first == node->name();
                      });
    DCHECK(it != new_item->feed.end());
    it->second = fake_input;
  }

  // Write the concrete shape back so downstream shape inference sees it.
  if (!shape_proto.dim().empty())
    *(node->mutable_attr()->at("shape").mutable_shape()) = shape_proto;

  return absl::OkStatus();
}
}
// Runs TensorFlow's runtime GraphOptimizer over `graph_def_arg` on a local
// CPU device, per the ItemConfig: optional graph optimizations (opt level
// L1), optional function inlining, and optional removal of _noinline
// attributes. Writes the result to `output_graph_def`.
Status RuntimeGraphOptimizer(const GraphDef& graph_def_arg,
                             GraphDef* output_graph_def,
                             const ItemConfig& cfg) {
  // Fast path: nothing requested — copy the input through unchanged.
  if (!cfg.apply_optimizations && !cfg.inline_functions &&
      !cfg.erase_noinline_attributes) {
    if (output_graph_def != &graph_def_arg) {
      *output_graph_def = graph_def_arg;
    }
    return absl::OkStatus();
  }

  SessionOptions options;

  GraphDef graph_def(graph_def_arg);

  if (cfg.erase_noinline_attributes) {
    // Remove the _noinline attribute so the functions become inlinable.
    for (auto& func : *graph_def.mutable_library()->mutable_function()) {
      func.mutable_attr()->erase("_noinline");
    }
  }

  // Instantiate a local CPU device and a function library runtime so the
  // optimizer can process function calls.
  std::vector<std::unique_ptr<Device>> devices;
  DeviceFactory* cpu_factory = DeviceFactory::GetFactory("CPU");
  TF_RETURN_IF_ERROR(cpu_factory->CreateDevices(
      options, "/job:localhost/replica:0/task:0", &devices));
  Device* cpu_device = devices[0].get();
  auto dvc_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices));
  FunctionLibraryDefinition function_library(OpRegistry::Global(),
                                             graph_def.library());
  Env* env = Env::Default();

  // Configure the optimizer options from the ItemConfig.
  OptimizerOptions* optimizer_opts =
      options.config.mutable_graph_options()->mutable_optimizer_options();
  if (cfg.apply_optimizations) {
    optimizer_opts->set_opt_level(::tensorflow::OptimizerOptions::L1);
  } else {
    optimizer_opts->set_opt_level(::tensorflow::OptimizerOptions::L0);
  }
  optimizer_opts->set_do_function_inlining(cfg.inline_functions);

  std::unique_ptr<ProcessFunctionLibraryRuntime> pflr(
      new ProcessFunctionLibraryRuntime(dvc_mgr.get(), env, &options.config,
                                        graph_def.versions().producer(),
                                        &function_library, *optimizer_opts));
  FunctionLibraryRuntime* flr = pflr->GetFLR(cpu_device->name());

  // Convert the GraphDef to a Graph, optimize it, and convert it back.
  GraphConstructorOptions graph_ctor_opts;
  graph_ctor_opts.allow_internal_ops = true;
  graph_ctor_opts.expect_device_spec = false;
  std::unique_ptr<Graph> graphptr(new Graph(function_library));

  TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(
      graph_ctor_opts, std::move(graph_def), graphptr.get()));

  ::tensorflow::GraphOptimizer optimizer(*optimizer_opts);
  optimizer.Optimize(flr, env, cpu_device, &graphptr,
                     tensorflow::GraphOptimizer::Options());
  graphptr->ToGraphDef(output_graph_def);

  // Fill in default attrs so the output graph is valid for registered ops.
  return AddDefaultAttrsToGraphDef(output_graph_def, *graphptr->op_registry(),
                                   0, true);
}
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDef(
const string& id, const MetaGraphDef& meta_graph, const ItemConfig& cfg) {
if (id.empty()) {
LOG(ERROR) << "id must be non-empty.";
return nullptr;
}
std::unique_ptr<GrapplerItem> new_item(new GrapplerItem());
new_item->id = id;
new_item->graph = meta_graph.graph_def();
for (const auto& feed_node : cfg.feed_nodes) {
const string feed_name = NodeName(feed_node);
new_item->feed.emplace_back(feed_name, Tensor());
}
for (const auto& fetch_node : cfg.fetch_nodes) {
new_item->fetch.emplace_back(NodeName(fetch_node));
}
if (new_item->fetch.empty() &&
meta_graph.collection_def().count("train_op") > 0) {
const CollectionDef& nodes = meta_graph.collection_def().at("train_op");
if (nodes.has_node_list()) {
for (const auto& node : nodes.node_list().value()) {
new_item->fetch.push_back(NodeName(node));
}
}
}
std::unordered_set<string> signature_feed_nodes;
std::unordered_set<string> signature_fetch_nodes;
for (const auto& name_and_signature : meta_graph.signature_def()) {
for (const auto& name_and_input : name_and_signature.second.inputs()) {
const TensorInfo& input = name_and_input.second;
if (input.has_coo_sparse()) {
int64_t dim = std::max(1, cfg.placeholder_unknown_output_shape_dim);
TensorShape shape_1d({dim});
TensorShape shape_2d({dim, dim});
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().values_tensor_name()))) {
Tensor value_tensor(input.dtype(), shape_1d);
InitializeTensor(input.dtype(), &value_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().values_tensor_name()), value_tensor);
}
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().indices_tensor_name()))) {
Tensor indices_tensor(DT_INT64, shape_2d);
InitializeTensor(input.dtype(), &indices_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().indices_tensor_name()),
indices_tensor);
}
if (gtl::InsertIfNotPresent(
&signature_feed_nodes,
NodeName(input.coo_sparse().dense_shape_tensor_name()))) {
Tensor dense_shape_tensor(DT_INT64, shape_1d);
InitializeTensor(input.dtype(), &dense_shape_tensor);
new_item->feed.emplace_back(
NodeName(input.coo_sparse().dense_shape_tensor_name()),
dense_shape_tensor);
}
} else {
if (gtl::InsertIfNotPresent(&signature_feed_nodes,
NodeName(input.name()))) {
TensorShape shape;
TensorShapeProto shape_proto;
Status s = ReplaceUnknownShapeDim(cfg, input.tensor_shape(),
&shape_proto, &shape);
if (!s.ok()) {
LOG(ERROR) << "Invalid shape for signature input " << input.name()
<< ": " << s << ", skipping this input";
return nullptr;
}
Tensor fake_input(input.dtype(), shape);
InitializeTensor(input.dtype(), &fake_input);
new_item->feed.emplace_back(NodeName(input.name()), fake_input);
}
}
}
for (const auto& name_and_output : name_and_signature.second.outputs()) {
const TensorInfo& output = name_and_output.second;
if (output.has_coo_sparse()) {
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().values_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().values_tensor_name()));
}
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().indices_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().indices_tensor_name()));
}
if (gtl::InsertIfNotPresent(
&signature_fetch_nodes,
NodeName(output.coo_sparse().dense_shape_tensor_name()))) {
new_item->fetch.push_back(
NodeName(output.coo_sparse().dense_shape_tensor_name()));
}
} else {
if (gtl::InsertIfNotPresent(&signature_fetch_nodes,
NodeName(output.name()))) {
new_item->fetch.push_back(NodeName(output.name()));
}
}
}
}
for (const auto& feed : new_item->feed) {
if (feed.first.empty()) {
LOG(ERROR) << "Invalid feed node name skipping this input";
return nullptr;
} else {
VLOG(1) << "Will use feed node " << feed.first;
}
}
for (const auto& fetch : new_item->fetch) {
if (fetch.empty()) {
LOG(ERROR) << "Invalid fetch node name skipping this input";
return nullptr;
} else {
VLOG(1) << "Will use fetch node " << fetch;
}
}
if (new_item->fetch.empty()) {
LOG(ERROR) << "Failed to detect the fetch node(s), skipping this input";
return nullptr;
}
for (const string& var_collection :
{"variables", "local_variables", "model_variables",
"trainable_variables"}) {
if (meta_graph.collection_def().count(var_collection) == 0) {
continue;
}
const CollectionDef& vars = meta_graph.collection_def().at(var_collection);
for (const auto& raw_var : vars.bytes_list().value()) {
VariableDef var;
var.ParseFromString(raw_var);
if (!var.initializer_name().empty()) {
new_item->init_ops.push_back(NodeName(var.initializer_name()));
}
}
}
if (meta_graph.collection_def().count("table_initializer") > 0) {
const CollectionDef& inits =
meta_graph.collection_def().at("table_initializer");
if (inits.has_node_list()) {
for (const auto& node : inits.node_list().value()) {
new_item->init_ops.push_back(NodeName(node));
new_item->expected_init_time += 30 * 60;
}
}
}
std::unordered_map<string, string> asset_node_to_value;
if (!cfg.assets_directory_override.empty()) {
if (meta_graph.collection_def().count("saved_model_assets") > 0) {
const CollectionDef& collection =
meta_graph.collection_def().at("saved_model_assets");
const auto& any_assets = collection.any_list().value();
if (!any_assets.empty()) {
if (std::is_base_of<protobuf::Message, AssetFileDef>()) {
for (const auto& any_asset : any_assets) {
AssetFileDef asset_file_def;
if (!ParseAny(any_asset, &asset_file_def, "tensorflow.AssetFileDef")
.ok()) {
LOG(ERROR) << "Failed to parse AssetFile.";
continue;
}
string asset_filepath = io::JoinPath(cfg.assets_directory_override,
asset_file_def.filename());
if (!FilesExist({asset_filepath}, nullptr)) {
LOG(ERROR) << "Can't access one or more of the asset files "
<< asset_filepath << ", skipping this input";
return nullptr;
}
asset_node_to_value[NodeName(asset_file_def.tensor_info().name())] =
asset_filepath;
}
} else {
LOG(ERROR) << "Can't parse AssetFileDef when using lite protos.";
return nullptr;
}
}
}
} else if (meta_graph.collection_def().count("asset_filepaths") > 0) {
const CollectionDef& file_paths =
meta_graph.collection_def().at("asset_filepaths");
std::vector<string> paths;
for (const auto& raw_path : file_paths.bytes_list().value()) {
paths.push_back(raw_path);
}
if (!FilesExist(paths, nullptr)) {
LOG(ERROR) << "Can't access one or more of the asset files, skipping "
"this input";
return nullptr;
}
}
if (meta_graph.collection_def().count("queue_runners") > 0) {
const CollectionDef& vars = meta_graph.collection_def().at("queue_runners");
for (const auto& raw : vars.bytes_list().value()) {
QueueRunnerDef queue_runner;
if (!queue_runner.ParseFromString(raw)) {
LOG(ERROR) << "Could not parse queue_runners, skipping this input";
return nullptr;
}
if (queue_runner.cancel_op_name().empty()) {
LOG(ERROR) << "Queue without a cancel op, skipping this input";
return nullptr;
}
new_item->queue_runners.push_back(queue_runner);
}
}
for (const auto& col : meta_graph.collection_def()) {
const CollectionDef& collection = col.second;
for (const string& node : collection.node_list().value()) {
new_item->keep_ops.push_back(NodeName(node));
}
}
for (auto& node : *new_item->graph.mutable_node()) {
if (IsPlaceholder(node) && node.op() != "PlaceholderWithDefault") {
Status s = UpdatePlaceholderShape(cfg, signature_feed_nodes,
new_item.get(), &node);
if (!s.ok()) return nullptr;
} else if (IsConstant(node)) {
auto it = asset_node_to_value.find(node.name());
if (it != asset_node_to_value.end()) {
auto iter = node.mutable_attr()->find("value");
if (iter == node.attr().end()) {
LOG(ERROR) << "Value attribute expected in const op for asset files";
return nullptr;
}
if (!iter->second.has_tensor() ||
iter->second.tensor().string_val_size() != 1) {
LOG(INFO) << "Unexpected AttrValue proto: "
<< iter->second.DebugString();
return nullptr;
}
LOG(INFO) << "Using asset file " << it->second << " for node "
<< node.name();
*(iter->second.mutable_tensor()->mutable_string_val(0)) = it->second;
}
}
node.mutable_attr()->erase("_output_shapes");
if (cfg.ignore_user_placement) {
node.clear_device();
}
if (cfg.ignore_colocation) {
auto attr = node.mutable_attr();
auto it = attr->find("_class");
if (it != attr->end()) {
attr->erase(it);
}
}
}
if (meta_graph.collection_def().count("savers") > 0) {
const CollectionDef& savers = meta_graph.collection_def().at("savers");
for (const auto& raw : savers.bytes_list().value()) {
SaverDef saver;
if (!saver.ParseFromString(raw)) {
continue;
}
if (saver.filename_tensor_name().empty()) {
continue;
}
new_item->save_op = saver.save_tensor_name();
new_item->restore_op = saver.restore_op_name();
new_item->save_restore_loc_tensor = saver.filename_tensor_name();
break;
}
} else {
const SaverDef& saver = meta_graph.saver_def();
new_item->save_op = saver.save_tensor_name();
new_item->restore_op = saver.restore_op_name();
new_item->save_restore_loc_tensor = saver.filename_tensor_name();
}
Status attr_status = AddDefaultAttrsToGraphDef(
&new_item->graph,
FunctionLibraryDefinition(OpRegistry::Global(),
new_item->graph.library()),
0, true);
if (!attr_status.ok()) {
LOG(ERROR) << "Failed to instantiate default attribute values: "
<< attr_status.message();
return nullptr;
}
VLOG(1) << "Number of nodes in graph before RuntimeGraphOptimizer: "
<< new_item->graph.node_size();
Status optimize_status =
RuntimeGraphOptimizer(new_item->graph, &new_item->graph, cfg);
if (!optimize_status.ok()) {
LOG(ERROR) << "Graph preprocessing failed: " << optimize_status;
return nullptr;
}
VLOG(1) << "Number of nodes in graph after RuntimeGraphOptimizer: "
<< new_item->graph.node_size();
if (cfg.prune_graph) {
VLOG(1) << "Pruning graph...";
auto status = PruneGraph(new_item.get());
if (!status.ok()) {
LOG(ERROR) << "Pruning failed: " << status.message();
return nullptr;
}
VLOG(1) << "Number of nodes in graph after pruning: "
<< new_item->graph.node_size();
}
std::unordered_set<string> nodes;
for (const auto& node : new_item->graph.node()) {
nodes.insert(node.name());
}
for (const auto& feed : new_item->feed) {
if (nodes.find(feed.first) == nodes.end()) {
LOG(ERROR) << "Feed node " << feed.first << " doesn't exist in graph";
return nullptr;
}
}
for (const auto& fetch : new_item->fetch) {
if (nodes.find(fetch) == nodes.end()) {
LOG(ERROR) << "Fetch node " << fetch << " doesn't exist in graph";
return nullptr;
}
}
for (const auto& init : new_item->init_ops) {
if (nodes.find(init) == nodes.end()) {
LOG(ERROR) << "Init node " << init << " doesn't exist in graph";
return nullptr;
}
}
return new_item;
}
std::unique_ptr<GrapplerItem> GrapplerItemFromMetaGraphDefFile(
const string& id, const string& meta_graph_file, const ItemConfig& cfg) {
MetaGraphDef meta_graph;
if (!ReadMetaGraphDefFromFile(meta_graph_file, &meta_graph).ok()) {
LOG(ERROR) << "Failed to read " << meta_graph_file;
return nullptr;
}
return GrapplerItemFromMetaGraphDef(id, meta_graph, cfg);
}
}
} | #include "tensorflow/core/grappler/grappler_item_builder.h"
#include "google/protobuf/any.pb.h"
#include "tensorflow/cc/framework/gradients.h"
#include "tensorflow/cc/gradients/grad_testutil.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/core/framework/function_testlib.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/grappler/inputs/trivial_test_graph_input_yielder.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/protobuf/meta_graph.pb.h"
namespace tensorflow {
namespace grappler {
namespace {
class GrapplerItemBuilderTest : public ::testing::Test {};
TEST_F(GrapplerItemBuilderTest, AssetFilepathOverrideTest) {
MetaGraphDef meta_graph;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output var =
ops::Variable(s.WithOpName("var"), TensorShape(), DataType::DT_FLOAT);
Output filename_node =
ops::Const(s.WithOpName("filename"), string("model"), TensorShape());
Output tensor_name =
ops::Const(s.WithOpName("tensorname"), string("var"), TensorShape());
Output restore = ops::Restore(s.WithOpName("restore"), filename_node,
tensor_name, DataType::DT_FLOAT);
Output assign = ops::Assign(s.WithOpName("assign"), var, restore);
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
string temp_dir = testing::TmpDir();
Env *env = Env::Default();
string filename =
io::JoinPath(temp_dir, "grappler_item_builder_test_filename");
env->DeleteFile(filename).IgnoreError();
std::unique_ptr<WritableFile> file_to_write;
TF_CHECK_OK(env->NewWritableFile(filename, &file_to_write));
TF_CHECK_OK(file_to_write->Close());
TF_CHECK_OK(env->FileExists(filename));
LOG(INFO) << filename;
AssetFileDef asset_file_def;
*asset_file_def.mutable_tensor_info()->mutable_name() = "filename";
*asset_file_def.mutable_filename() = "grappler_item_builder_test_filename";
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def);
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign";
ItemConfig cfg;
cfg.assets_directory_override = temp_dir;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
for (const NodeDef &node : item->graph.node()) {
if (node.name() == "filename") {
const auto iter = node.attr().find("value");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_tensor());
ASSERT_EQ(1, iter->second.tensor().string_val_size());
string tensor_string_val = iter->second.tensor().string_val(0);
EXPECT_EQ(tensor_string_val, filename);
}
}
}
TEST_F(GrapplerItemBuilderTest, AssetFilepathOverrideTest_FileNotAccessible) {
MetaGraphDef meta_graph;
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
Output var =
ops::Variable(s.WithOpName("var"), TensorShape(), DataType::DT_FLOAT);
Output filename_node1 =
ops::Const(s.WithOpName("filename1"), string("model1"), TensorShape());
Output filename_node2 =
ops::Const(s.WithOpName("filename2"), string("model2"), TensorShape());
Output tensor_name =
ops::Const(s.WithOpName("tensorname"), string("var"), TensorShape());
Output restore1 = ops::Restore(s.WithOpName("restore1"), filename_node1,
tensor_name, DataType::DT_FLOAT);
Output restore2 = ops::Restore(s.WithOpName("restore2"), filename_node1,
tensor_name, DataType::DT_FLOAT);
Output assign1 = ops::Assign(s.WithOpName("assign1"), var, restore1);
Output assign2 = ops::Assign(s.WithOpName("assign2"), var, restore2);
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
string temp_dir = testing::TmpDir();
Env *env = Env::Default();
string filename1 =
io::JoinPath(temp_dir, "grappler_item_builder_test_filename1");
env->DeleteFile(filename1).IgnoreError();
std::unique_ptr<WritableFile> file_to_write;
TF_CHECK_OK(env->NewWritableFile(filename1, &file_to_write));
TF_CHECK_OK(file_to_write->Close());
TF_CHECK_OK(env->FileExists(filename1));
AssetFileDef asset_file_def1;
*asset_file_def1.mutable_tensor_info()->mutable_name() = "filename1";
*asset_file_def1.mutable_filename() = "grappler_item_builder_test_filename1";
string filename2 =
io::JoinPath(temp_dir, "grappler_item_builder_test_filename1");
env->DeleteFile(filename2).IgnoreError();
EXPECT_FALSE(env->FileExists(filename2).ok());
AssetFileDef asset_file_def2;
*asset_file_def2.mutable_tensor_info()->mutable_name() = "filename2";
*asset_file_def2.mutable_filename() = "grappler_item_builder_test_filename2";
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def1);
(*meta_graph.mutable_collection_def())["saved_model_assets"]
.mutable_any_list()
->add_value()
->PackFrom(asset_file_def2);
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign1";
*((*meta_graph.mutable_collection_def())["train_op"]
.mutable_node_list()
->add_value()) = "assign2";
ItemConfig cfg;
cfg.assets_directory_override = temp_dir;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item == nullptr);
}
TEST_F(GrapplerItemBuilderTest, GraphWithFunctions) {
MetaGraphDef meta_graph;
constexpr char device[] = "/cpu:0";
*meta_graph.mutable_graph_def() = test::function::GDef(
{test::function::NDef("x", "Const", {}, {{"dtype", DT_FLOAT}}, device),
test::function::NDef("y", "XTimesTwo", {"x"}, {{"T", DT_FLOAT}},
device)},
{
test::function::XTimesTwo(),
});
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
}
TEST_F(GrapplerItemBuilderTest, GraphWithCustomOps) {
MetaGraphDef meta_graph;
constexpr char device[] = "/cpu:0";
*meta_graph.mutable_graph_def() = test::function::GDef(
{test::function::NDef("x", "Const", {}, {{"dtype", DT_FLOAT}}, device),
test::function::NDef("y", "CustomOp", {"x"}, {{"T", DT_FLOAT}}, device)},
{});
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithSignatureDef) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
output.set_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
TensorInfo input2, output2;
input.set_name("x");
input.set_dtype(DT_FLOAT);
output.set_name("z");
SignatureDef serving_signature2;
(*serving_signature.mutable_inputs())["input2"] = input2;
(*serving_signature.mutable_outputs())["output2"] = output2;
(*meta_graph.mutable_signature_def())["serving2"] = serving_signature2;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item != nullptr);
EXPECT_EQ(item->feed.size(), 1);
EXPECT_EQ(item->fetch.size(), 1);
EXPECT_EQ(item->feed[0].first, "x");
EXPECT_EQ(item->fetch[0], "z");
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithIncompleteSignatureDef) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
CollectionDef train_op;
train_op.mutable_node_list()->add_value("y");
(*meta_graph.mutable_collection_def())["train_op"] = train_op;
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
output.mutable_coo_sparse()->set_values_tensor_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item == nullptr);
}
TEST_F(GrapplerItemBuilderTest, FromGraphWithUnknownDimInSignatureInput) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto shape_1d = PartialTensorShape({-1});
auto x = ops::Placeholder(s.WithOpName("x"), DT_FLOAT,
ops::Placeholder::Shape(shape_1d));
auto y = ops::Const(s.WithOpName("y"), static_cast<float>(1.0));
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
TensorInfo input, output;
input.set_name("x");
input.set_dtype(DT_FLOAT);
shape_1d.AsProto(input.mutable_tensor_shape());
output.set_name("z");
SignatureDef serving_signature;
(*serving_signature.mutable_inputs())["input"] = input;
(*serving_signature.mutable_outputs())["output"] = output;
(*meta_graph.mutable_signature_def())["serving"] = serving_signature;
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item1 =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item1 != nullptr);
ASSERT_EQ(item1->feed.size(), 1);
EXPECT_EQ(item1->feed[0].second.NumElements(), 64);
std::unique_ptr<GrapplerItem> item2 =
GrapplerItemFromMetaGraphDef("0", meta_graph, ItemConfig());
ASSERT_TRUE(item2 != nullptr);
ASSERT_EQ(item2->feed.size(), 1);
EXPECT_EQ(item2->feed[0].second.NumElements(), 1);
}
TEST_F(GrapplerItemBuilderTest, ExplicitFeedAndFetch) {
tensorflow::Scope s = tensorflow::Scope::NewRootScope();
auto x = ops::Const(s.WithOpName("x"), 0);
auto y = ops::Const(s.WithOpName("y"), 1);
auto z = ops::Add(s.WithOpName("z"), x, y);
MetaGraphDef meta_graph;
TF_CHECK_OK(s.ToGraphDef(meta_graph.mutable_graph_def()));
ItemConfig config;
config.feed_nodes.insert("x");
config.fetch_nodes.insert("z");
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, config);
ASSERT_TRUE(item != nullptr);
EXPECT_EQ(item->feed.size(), 1);
EXPECT_EQ(item->fetch.size(), 1);
EXPECT_EQ(item->feed[0].first, "x");
EXPECT_EQ(item->fetch[0], "z");
}
TEST_F(GrapplerItemBuilderTest, UnknownRankPlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { unknown_rank: true } } }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_TRUE(shape.unknown_rank());
}
TEST_F(GrapplerItemBuilderTest, ConfigPlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value {
shape {
dim {
size: -1
}
dim {
size: -1
}
}
} }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_EQ(shape.dim_size(), 2);
EXPECT_EQ(shape.dim(0).size(), 64);
EXPECT_EQ(shape.dim(1).size(), 64);
}
TEST_F(GrapplerItemBuilderTest, OutputShapePlaceholderTest) {
MetaGraphDef meta_graph;
const char* text_proto = R"EOF(
graph_def {
node {
name: "x"
op: "Placeholder"
attr { key: "dtype" value { type: DT_FLOAT } }
attr { key: "shape" value { shape { unknown_rank: true } } }
attr { key: "_output_shapes" value { list {
shape {
dim {
size: -1
}
dim {
size: 32
}
}
} } }
}
versions {
producer: 51
}
}
collection_def {
key: "train_op"
value {
node_list {
value: "x:0"
}
}
}
)EOF";
CHECK(protobuf::TextFormat::ParseFromString(text_proto, &meta_graph));
ItemConfig cfg;
cfg.placeholder_unknown_output_shape_dim = 64;
std::unique_ptr<GrapplerItem> item =
GrapplerItemFromMetaGraphDef("0", meta_graph, cfg);
ASSERT_TRUE(item != nullptr);
const NodeDef& node = item->graph.node(0);
const auto iter = node.attr().find("shape");
ASSERT_TRUE(iter != node.attr().end());
ASSERT_TRUE(iter->second.has_shape());
const auto& shape = iter->second.shape();
EXPECT_EQ(shape.dim_size(), 2);
EXPECT_EQ(shape.dim(0).size(), 64);
EXPECT_EQ(shape.dim(1).size(), 32);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/grappler_item_builder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/grappler/grappler_item_builder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
509e353a-35db-4466-9316-62db78131fc1 | cpp | tensorflow/tensorflow | multinomial_op | tensorflow/core/kernels/multinomial_op.cc | tensorflow/core/kernels/multinomial_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/multinomial_op.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include "unsupported/Eigen/CXX11/Tensor"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/kernels/stateless_random_ops.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/lib/random/simple_philox.h"
#include "tensorflow/core/util/guarded_philox_random.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace functor {
template <typename Device, typename T, typename OutputType>
struct MultinomialFunctor {
void operator()(OpKernelContext* ctx, const Device& d,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<float>::Flat noises,
typename TTypes<float>::Flat scores,
typename TTypes<float>::Flat scratch, int batch_size,
int num_classes, int num_samples,
const random::PhiloxRandom& gen,
typename TTypes<OutputType>::Matrix output);
};
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
extern template struct MultinomialFunctor<GPUDevice, Eigen::half, int32>;
extern template struct MultinomialFunctor<GPUDevice, float, int32>;
extern template struct MultinomialFunctor<GPUDevice, double, int32>;
extern template struct MultinomialFunctor<GPUDevice, int32, int32>;
extern template struct MultinomialFunctor<GPUDevice, int64_t, int32>;
extern template struct MultinomialFunctor<GPUDevice, Eigen::half, int64_t>;
extern template struct MultinomialFunctor<GPUDevice, float, int64_t>;
extern template struct MultinomialFunctor<GPUDevice, double, int64_t>;
extern template struct MultinomialFunctor<GPUDevice, int32, int64_t>;
extern template struct MultinomialFunctor<GPUDevice, int64_t, int64_t>;
#endif
template <typename T, typename OutputType>
struct MultinomialFunctor<CPUDevice, T, OutputType> {
void operator()(OpKernelContext* ctx, const CPUDevice& d,
typename TTypes<T>::ConstMatrix logits,
typename TTypes<float>::Flat ,
typename TTypes<float>::Flat ,
typename TTypes<float>::Flat , int batch_size,
int num_classes, int num_samples,
const random::PhiloxRandom& gen,
typename TTypes<OutputType>::Matrix output) {
auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
auto DoWork = [ctx, num_samples, num_classes, &gen, &output, &logits](
int64_t start_row, int64_t limit_row) {
random::PhiloxRandom gen_copy = gen;
gen_copy.Skip(start_row * (num_samples + 3) / 4);
random::SimplePhilox simple_philox(&gen_copy);
Tensor cdf_tensor;
OP_REQUIRES_OK(ctx,
ctx->allocate_temp(DT_DOUBLE, TensorShape({num_classes}),
&cdf_tensor));
auto cdf = cdf_tensor.flat<double>();
for (int64_t b = start_row; b < limit_row; ++b) {
const auto* logits_row = &logits(b, 0);
T max = std::numeric_limits<T>::lowest();
for (int64_t j = 0; j < num_classes; ++j) {
if (Eigen::numext::isfinite(logits_row[j])) {
max = std::max(max, logits_row[j]);
}
}
const double max_logit = static_cast<double>(max);
cdf = (logits.template chip<0>(b).template cast<double>() - max_logit)
.exp();
double running_total = 0;
for (int64_t j = 0; j < num_classes; ++j) {
if (Eigen::numext::isfinite(logits_row[j])) {
running_total += cdf(j);
}
cdf(j) = running_total;
}
const double* cdf_begin = cdf.data();
const double* cdf_end = cdf.data() + num_classes;
for (int64_t j = 0; j < num_samples; ++j) {
const double to_find = simple_philox.RandDouble() * running_total;
auto found_iter = std::upper_bound(cdf_begin, cdf_end, to_find);
output(b, j) = std::distance(cdf_begin, found_iter);
}
}
};
const int64_t cost =
50 * (num_samples * std::log(num_classes) / std::log(2) + num_classes);
Shard(worker_threads.num_threads, worker_threads.workers, batch_size, cost,
DoWork);
}
};
}
namespace {
template <typename Device, typename T, typename OutputType>
class MultinomialOp : public OpKernel {
public:
explicit MultinomialOp(OpKernelConstruction* context) : OpKernel(context) {}
void DoCompute(OpKernelContext* ctx, const Tensor& logits_t,
const Tensor& num_samples_t, GuardedPhiloxRandom* generator) {
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(logits_t.shape()),
errors::InvalidArgument("logits should be a matrix, got shape ",
logits_t.shape().DebugString()));
OP_REQUIRES(
ctx, TensorShapeUtils::IsScalar(num_samples_t.shape()),
errors::InvalidArgument("num_samples should be a scalar, got shape ",
num_samples_t.shape().DebugString()));
const int num_samples = num_samples_t.scalar<int>()();
OP_REQUIRES(ctx, num_samples >= 0,
errors::InvalidArgument(
"num_samples should be nonnegative, got ", num_samples));
for (int i = 0; i < 2; i++) {
const int64_t dim = logits_t.dim_size(i);
OP_REQUIRES(ctx, static_cast<int>(dim) == dim,
errors::InvalidArgument(
"logits.shape = ", logits_t.shape().DebugString(),
" too large for int"));
}
const int batch_size = static_cast<int>(logits_t.dim_size(0));
const int num_classes = static_cast<int>(logits_t.dim_size(1));
OP_REQUIRES(ctx, num_classes > 0,
errors::InvalidArgument("num_classes should be positive, got ",
num_classes));
Tensor* samples_t;
OP_REQUIRES_OK(
ctx, ctx->allocate_output(0, TensorShape({batch_size, num_samples}),
&samples_t));
if (samples_t->NumElements() > 0) {
Tensor noises, scores, scratch;
if (std::is_same<Device, GPUDevice>::value) {
OP_REQUIRES_OK(
ctx,
ctx->allocate_temp(
DT_FLOAT, TensorShape({batch_size, num_samples, num_classes}),
&noises));
OP_REQUIRES_OK(
ctx,
ctx->allocate_temp(
DT_FLOAT, TensorShape({batch_size, num_samples, num_classes}),
&scores));
OP_REQUIRES_OK(
ctx,
ctx->allocate_temp(DT_FLOAT, TensorShape({batch_size, num_samples}),
&scratch));
}
int num_samples_ceil_4 = (num_samples + 3) / 4 * 4;
if (std::is_same<Device, CPUDevice>::value) num_samples_ceil_4 *= 2;
auto rng =
generator->ReserveRandomOutputs(batch_size * num_samples_ceil_4, 256);
functor::MultinomialFunctor<Device, T, OutputType>()(
ctx, ctx->eigen_device<Device>(), logits_t.matrix<T>(),
noises.flat<float>(), scores.flat<float>(), scratch.flat<float>(),
batch_size, num_classes, num_samples, rng,
samples_t->matrix<OutputType>());
}
}
};
template <typename Device, typename T, typename OutputType>
class StatefulMultinomialOp : public MultinomialOp<Device, T, OutputType> {
public:
explicit StatefulMultinomialOp(OpKernelConstruction* ctx)
: MultinomialOp<Device, T, OutputType>(ctx) {
OP_REQUIRES_OK(ctx, generator_.Init(ctx));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& logits_t = ctx->input(0);
const Tensor& num_samples_t = ctx->input(1);
this->DoCompute(ctx, logits_t, num_samples_t, &generator_);
}
private:
GuardedPhiloxRandom generator_;
};
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("Multinomial") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT32), \
StatefulMultinomialOp<CPUDevice, TYPE, int32>); \
REGISTER_KERNEL_BUILDER(Name("Multinomial") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT64), \
StatefulMultinomialOp<CPUDevice, TYPE, int64>);
TF_CALL_half(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("Multinomial") \
.Device(DEVICE_GPU) \
.HostMemory("num_samples") \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT32), \
StatefulMultinomialOp<GPUDevice, TYPE, int32>) \
REGISTER_KERNEL_BUILDER(Name("Multinomial") \
.Device(DEVICE_GPU) \
.HostMemory("num_samples") \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT64), \
StatefulMultinomialOp<GPUDevice, TYPE, int64>)
TF_CALL_half(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#endif
template <typename Device, typename T, typename OutputType>
class StatelessMultinomialOp : public MultinomialOp<Device, T, OutputType> {
public:
explicit StatelessMultinomialOp(OpKernelConstruction* ctx)
: MultinomialOp<Device, T, OutputType>(ctx) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& logits_t = ctx->input(0);
const Tensor& num_samples_t = ctx->input(1);
const Tensor& seed_t = ctx->input(2);
OP_REQUIRES(ctx, seed_t.dims() == 1 && seed_t.dim_size(0) == 2,
errors::InvalidArgument("seed must have shape [2], not ",
seed_t.shape().DebugString()));
random::PhiloxRandom::Key key;
random::PhiloxRandom::ResultType counter;
OP_REQUIRES_OK(ctx, GenerateKey(seed_t, &key, &counter));
GuardedPhiloxRandom generator;
generator.Init(counter, key);
this->DoCompute(ctx, logits_t, num_samples_t, &generator);
}
private:
GuardedPhiloxRandom generator_;
};
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT32), \
StatelessMultinomialOp<CPUDevice, TYPE, int32>); \
REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT64), \
StatelessMultinomialOp<CPUDevice, TYPE, int64>);
TF_CALL_half(REGISTER);
TF_CALL_bfloat16(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \
.Device(DEVICE_GPU) \
.HostMemory("num_samples") \
.HostMemory("seed") \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT32), \
StatelessMultinomialOp<GPUDevice, TYPE, int32>) \
REGISTER_KERNEL_BUILDER(Name("StatelessMultinomial") \
.Device(DEVICE_GPU) \
.HostMemory("num_samples") \
.HostMemory("seed") \
.TypeConstraint<TYPE>("T") \
.TypeConstraint("output_dtype", DT_INT64), \
StatelessMultinomialOp<GPUDevice, TYPE, int64>)
TF_CALL_half(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#endif
}
} | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
static Graph* Multinomial(int batch_size, int num_classes, int num_samples) {
Graph* g = new Graph(OpRegistry::Global());
Tensor logits_t(DT_FLOAT, TensorShape({batch_size, num_classes}));
Tensor num_samples_t(DT_INT32, TensorShape());
logits_t.flat<float>().setRandom();
num_samples_t.scalar<int32>().setConstant(num_samples);
Node* ret;
TF_CHECK_OK(NodeBuilder(g->NewName("multinomial"), "Multinomial")
.Input(test::graph::Constant(g, logits_t))
.Input(test::graph::Constant(g, num_samples_t))
.Attr("T", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
#define BM_MultinomialDev(DEVICE, B, C, S) \
static void BM_Multinomial_##DEVICE##_##B##_##C##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, Multinomial(B, C, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * C * S * \
state.iterations()); \
} \
BENCHMARK(BM_Multinomial_##DEVICE##_##B##_##C##_##S);
#define BM_MultinomialBCS(B, C, S) \
BM_MultinomialDev(cpu, B, C, S); \
BM_MultinomialDev(gpu, B, C, S);
BM_MultinomialBCS(1, 10000, 4);
BM_MultinomialBCS(1, 10000, 128);
BM_MultinomialBCS(1, 10000, 10000);
BM_MultinomialBCS(1, 100000, 4);
BM_MultinomialBCS(32, 10000, 4);
BM_MultinomialBCS(32, 10000, 128);
BM_MultinomialBCS(32, 100000, 4);
BM_MultinomialBCS(128, 100000, 1);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/multinomial_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/multinomial_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
04076f27-7215-4ac2-a0ad-d0a45b77807a | cpp | tensorflow/tensorflow | build_xla_ops_pass | tensorflow/compiler/jit/build_xla_ops_pass.cc | tensorflow/compiler/jit/build_xla_ops_pass_test.cc | #include "tensorflow/compiler/jit/build_xla_ops_pass.h"
#include "absl/algorithm/container.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/framework/scope_internal.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/control_flow_ops.h"
#include "tensorflow/cc/ops/functional_ops.h"
#include "tensorflow/cc/ops/logging_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/device_util.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_cluster_util.h"
#include "tensorflow/compiler/tf2xla/cc/ops/xla_jit_ops.h"
#include "tensorflow/compiler/tf2xla/xla_op_registry.h"
#include "xla/status_macros.h"
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/graph_constructor.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/framework/graph_def_util.h"
#include "tensorflow/core/framework/memory_types.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/hash/hash.h"
#include "tensorflow/core/public/version.h"
#include "tensorflow/core/util/dump_graph.h"
namespace tensorflow {
namespace {
// Options controlling the optional debugging instrumentation this pass can
// insert around XLA cluster launch ops.  Fields default to false so a
// default-constructed instance disables all instrumentation instead of
// reading indeterminate values.
struct DebuggingOpts {
  // If true, insert Print ops that dump each cluster output tensor.
  bool print_outputs = false;

  // If true, insert CheckNumerics ops on floating-point cluster inputs.
  bool check_input_numerics = false;

  // If true, insert CheckNumerics ops on floating-point cluster outputs.
  bool check_output_numerics = false;
};
void MoveOutgoingEdges(Graph* g, Node* old_node, Node* new_node) {
std::vector<const Edge*> out_edges(old_node->out_edges().begin(),
old_node->out_edges().end());
for (const Edge* edge : out_edges) {
g->AddEdge(new_node, edge->src_output(), edge->dst(), edge->dst_input());
g->RemoveEdge(edge);
}
}
Output ControlToData(const Scope& scope, Node* control) {
Output data = ops::Const(scope.WithOpName("ctrl_as_data"),
Tensor(DT_INT32, TensorShape({0})));
scope.graph()->AddControlEdge(control, data.node());
return Output(data.node());
}
Operation DataToControl(const Scope& scope, Output data) {
return Operation(
ops::Identity(scope.WithOpName("data_as_ctrl"), data).node());
}
void MergeOutgoingDataEdges(const Scope& s, Node* old_node, Node* new_node,
absl::string_view cluster_name,
const DebuggingOpts& debugging_opts) {
if (!s.status().ok()) {
return;
}
std::vector<Output> merged_outputs(old_node->num_outputs(), Output(nullptr));
std::vector<const Edge*> data_edges;
absl::c_copy_if(old_node->out_edges(), std::back_inserter(data_edges),
[](const Edge* e) { return !e->IsControlEdge(); });
for (const Edge* e : data_edges) {
int oidx = e->src_output();
Output merged_output = merged_outputs[oidx];
if (merged_output.node() == nullptr) {
Output new_output(new_node, oidx);
if (debugging_opts.print_outputs) {
string cpu_device = "/job:localhost/replica:0/task:0/device:CPU:0";
ops::Print print_op(s.WithOpName("print_", oidx)
.WithDevice(cpu_device)
.WithAssignedDevice(cpu_device),
new_output, {new_output},
ops::Print::Attrs{}
.Message(absl::StrCat("output ", oidx, " from ",
old_node->name(), " is "))
.FirstN(1000)
.Summarize(-1));
new_output = print_op;
}
if (debugging_opts.check_output_numerics &&
DataTypeIsFloating(new_output.type())) {
ops::CheckNumerics check_numerics_op(
s.WithOpName("check_output_", oidx)
.WithDevice(new_node->requested_device())
.WithAssignedDevice(new_node->assigned_device_name()),
new_output,
absl::StrCat("CheckNumerics failed for output ", oidx, "(",
new_output.name(), ") from cluster ", cluster_name));
new_output = check_numerics_op;
}
ops::_XlaMerge xla_merge_op(s.WithOpName("merge_oidx_", oidx),
Output(old_node, oidx), new_output);
merged_output = merged_outputs[oidx] = xla_merge_op.output;
}
Node* dst = e->dst();
int dst_idx = e->dst_input();
s.graph()->RemoveEdge(e);
s.graph()->AddEdge(merged_output.node(), merged_output.index(), dst,
dst_idx);
}
}
void MergeOutgoingControlEdges(const Scope& s, Node* old_node, Node* new_node) {
if (!s.status().ok()) {
return;
}
std::vector<const Edge*> ctrl_edges;
absl::c_copy_if(old_node->out_edges(), std::back_inserter(ctrl_edges),
[](const Edge* e) { return e->IsControlEdge(); });
if (ctrl_edges.empty()) {
return;
}
if (ctrl_edges.size() == 1 && ctrl_edges.front()->dst()->IsSink()) {
s.graph()->AddControlEdge(new_node, s.graph()->sink_node());
return;
}
Output old_ctrl_as_data = ControlToData(s, old_node);
Output new_ctrl_as_data = ControlToData(s, new_node);
ops::Merge ctrl_merge_as_data(s.WithOpName("ctrl_merge"),
{old_ctrl_as_data, new_ctrl_as_data});
Operation ctrl_merge = DataToControl(s, ctrl_merge_as_data.output);
for (const Edge* e : ctrl_edges) {
s.graph()->AddControlEdge(ctrl_merge.node(), e->dst());
s.graph()->RemoveControlEdge(e);
}
}
struct XlaClusterInfo {
std::vector<Output> constant_inputs;
std::vector<Output> non_constant_inputs;
std::vector<Output> resource_inputs;
NameAttrList function;
};
Output IncomingEdgeAsOutput(const Edge* e) {
return Output(e->src(), e->src_output());
}
Status GetXlaClusterInfo(Node* n, XlaClusterInfo* result) {
int num_constant_inputs, num_resource_inputs;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), kXlaNumConstantArgsAttr, &num_constant_inputs));
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), kXlaNumResourceArgsAttr, &num_resource_inputs));
if (num_constant_inputs < 0 || num_resource_inputs < 0 ||
num_constant_inputs + num_resource_inputs > n->num_inputs()) {
return errors::InvalidArgument(
"Invalid number of constant/resource arguments to XLA kernel.");
}
int num_non_constant_inputs =
n->num_inputs() - num_constant_inputs - num_resource_inputs;
std::vector<const Edge*> input_edges_vector;
TF_RETURN_IF_ERROR(n->input_edges(&input_edges_vector));
absl::Span<const Edge*> input_edges(input_edges_vector);
absl::c_transform(input_edges.subspan(0, num_constant_inputs),
std::back_inserter(result->constant_inputs),
IncomingEdgeAsOutput);
absl::c_transform(
input_edges.subspan(num_constant_inputs, num_non_constant_inputs),
std::back_inserter(result->non_constant_inputs), IncomingEdgeAsOutput);
absl::c_transform(
input_edges.subspan(num_constant_inputs + num_non_constant_inputs,
num_resource_inputs),
std::back_inserter(result->resource_inputs), IncomingEdgeAsOutput);
result->function.set_name(n->type_string());
*result->function.mutable_attr() = n->def().attr();
return absl::OkStatus();
}
Status CopyIncomingControlEdges(Graph* g, Node* from, Node* to) {
for (const Edge* e : from->in_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(e->src(), to);
}
}
return absl::OkStatus();
}
void RemoveAllIncomingControlEdges(Graph* g, Node* n) {
std::vector<const Edge*> incoming_ctrl_edges;
absl::c_copy_if(n->in_edges(), std::back_inserter(incoming_ctrl_edges),
[](const Edge* e) { return e->IsControlEdge(); });
for (const Edge* e : incoming_ctrl_edges) {
g->RemoveControlEdge(e);
}
}
Status DeviceRequiresCompilation(const jit::DeviceInfoCache& device_info_cache,
jit::DeviceId device, bool* result) {
const XlaOpRegistry::DeviceRegistration* registration =
device_info_cache.GetCompilationDevice(device);
*result = registration->autoclustering_policy ==
XlaOpRegistry::AutoclusteringPolicy::kAlways;
return absl::OkStatus();
}
absl::StatusOr<Node*> ReplaceFunctionCallWithPartitionedCall(
const GraphOptimizationPassOptions& options,
const FunctionLibraryDefinition& flib_def, Node* n, Graph* g,
const NameAttrList& func, const Scope& root) {
string config_string = options.session_options->config.SerializeAsString();
int input_count = absl::c_count_if(
n->in_edges(), [](const Edge* e) { return !e->IsControlEdge(); });
std::vector<Output> args(input_count);
for (const Edge* e : n->in_edges()) {
if (!e->IsControlEdge()) {
args[e->dst_input()] = Output(e->src(), e->src_output());
}
}
ops::StatefulPartitionedCall call(
root.WithOpName("stateful_partitioned_call"), args, n->output_types(),
func, ops::StatefulPartitionedCall::Attrs{}.ConfigProto(config_string));
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
g->AddControlEdge(e->src(), call.operation.node());
}
}
std::vector<const Edge*> edges_to_delete;
for (const Edge* e : n->out_edges()) {
edges_to_delete.push_back(e);
if (e->IsControlEdge()) {
g->AddControlEdge(call.operation.node(), e->dst());
} else {
g->AddEdge(call.operation.node(), e->src_output(), e->dst(),
e->dst_input());
}
}
for (const Edge* e : edges_to_delete) {
g->RemoveEdge(e);
}
g->RemoveNode(n);
return call.operation.node();
}
absl::StatusOr<jit::DeviceId> InferDeviceForCluster(
jit::DeviceInfoCache* device_info_cache, Node* n,
const string& function_name, const FunctionLibraryDefinition& flib_def) {
const FunctionDef* func_def = flib_def.Find(function_name);
TF_RET_CHECK(func_def) << "Could not find " << function_name;
jit::DeviceSet device_set;
for (const NodeDef& ndef : func_def->node_def()) {
VLOG(3) << ndef.DebugString();
if (!ndef.device().empty()) {
TF_ASSIGN_OR_RETURN(jit::DeviceId device_id,
device_info_cache->GetIdFor(ndef.device()));
device_set.Insert(device_id);
}
}
if (!n->assigned_device_name().empty()) {
TF_ASSIGN_OR_RETURN(jit::DeviceId device_id,
device_info_cache->GetIdFor(n->assigned_device_name()));
device_set.Insert(device_id);
}
TF_ASSIGN_OR_RETURN(jit::DeviceId result,
PickDeviceForXla(*device_info_cache, device_set,
true));
VLOG(2) << "For " << function_name << " PickDeviceForXla("
<< device_info_cache->DebugString(device_set) << ") -> "
<< device_info_cache->GetNameFor(result);
return result;
}
std::vector<Output> GetXlaRunArgs(const Scope& s,
const XlaClusterInfo& cluster_info,
const DebuggingOpts& debugging_opts) {
std::vector<Output> xla_run_args;
xla_run_args.reserve(cluster_info.non_constant_inputs.size() +
cluster_info.resource_inputs.size());
int input_idx = 0;
for (const Output& o : cluster_info.non_constant_inputs) {
if (debugging_opts.check_input_numerics && DataTypeIsFloating(o.type())) {
ops::CheckNumerics check_numerics_op(
s.WithOpName("check_input_", input_idx), o,
absl::StrCat("CheckNumerics failed for input ", input_idx, "(",
o.name(), ") into ", cluster_info.function.name()));
xla_run_args.push_back(check_numerics_op);
} else {
xla_run_args.push_back(o);
}
input_idx++;
}
absl::c_copy(cluster_info.resource_inputs, std::back_inserter(xla_run_args));
return xla_run_args;
}
absl::StatusOr<MemoryTypeVector> GetOutputMemoryTypes(const Scope& root,
Node* n) {
MemoryTypeVector input_mtypes, output_mtypes;
DeviceType device_type("");
TF_RETURN_IF_ERROR(
DeviceNameToDeviceType(n->assigned_device_name(), &device_type));
TF_RETURN_IF_ERROR(MemoryTypesForNode(root.graph()->op_registry(),
device_type, n->def(), &input_mtypes,
&output_mtypes));
return output_mtypes;
}
Status PredicateInt32Inputs(const Scope& root, Node* n,
Operation predicate_as_control) {
std::vector<Output> int32_inputs;
std::vector<int> int32_inputs_input_idxs;
for (const Edge* e : n->in_edges()) {
if (e->IsControlEdge()) {
continue;
}
if (e->src()->output_type(e->src_output()) == DT_INT32) {
TF_ASSIGN_OR_RETURN(MemoryTypeVector source_output_mem_types,
GetOutputMemoryTypes(root, e->src()));
if (source_output_mem_types[e->src_output()] == DEVICE_MEMORY) {
int32_inputs.push_back(Output(e->src(), e->src_output()));
int32_inputs_input_idxs.push_back(e->dst_input());
}
}
}
if (int32_inputs.empty()) {
return absl::OkStatus();
}
ops::IdentityN identity_n(root.WithOpName("int32_id_n"), int32_inputs);
root.graph()->AddControlEdge(predicate_as_control.node(),
identity_n.operation.node());
for (int i = 0, end = int32_inputs.size(); i < end; i++) {
TF_RETURN_IF_ERROR(root.graph()->UpdateEdge(identity_n[i].node(), i, n,
int32_inputs_input_idxs[i]));
}
return absl::OkStatus();
}
Status ReplaceNodeWithXlaCompileAndXlaRun(
jit::DeviceInfoCache* device_info_cache,
const GraphOptimizationPassOptions& options,
const FunctionLibraryDefinition& flib_def, bool lazy_compilation_enabled,
const DebuggingOpts& debugging_opts, Graph* g, Node* n) {
XlaClusterInfo cluster_info;
TF_RETURN_IF_ERROR(GetXlaClusterInfo(n, &cluster_info));
TF_ASSIGN_OR_RETURN(
jit::DeviceId device,
InferDeviceForCluster(device_info_cache, n, cluster_info.function.name(),
flib_def));
bool requires_compilation;
TF_RETURN_IF_ERROR(DeviceRequiresCompilation(*device_info_cache, device,
&requires_compilation));
if (!lazy_compilation_enabled) {
requires_compilation = true;
}
string device_name_str = string(device_info_cache->GetNameFor(device));
Status status;
Scope root = NewInternalScope(g, &status, nullptr)
.NewSubScope(n->name())
.WithDevice(n->requested_device())
.WithAssignedDevice(device_name_str);
ops::_XlaCompile xla_compile(root.WithOpName("xla_compile"),
cluster_info.constant_inputs,
cluster_info.non_constant_inputs,
cluster_info.resource_inputs,
requires_compilation,
cluster_info.function);
bool has_ref_attr;
TF_RETURN_IF_ERROR(
GetNodeAttr(n->attrs(), kXlaHasReferenceVarsAttr, &has_ref_attr));
xla_compile.operation.node()->AddAttr(kXlaHasReferenceVarsAttr, has_ref_attr);
TF_RETURN_IF_ERROR(
CopyIncomingControlEdges(g, n, xla_compile.key.node()));
std::vector<Output> xla_run_args =
GetXlaRunArgs(root, cluster_info, debugging_opts);
if (requires_compilation) {
ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args,
xla_compile.key, n->output_types());
MoveOutgoingEdges(g, n,
xla_run.operation.node());
g->RemoveNode(n);
} else {
ops::Switch s(root.WithOpName("predicated_compilation_key"),
xla_compile.key, xla_compile.compilation_successful);
Output predicated_compilation_key = s.output_true;
Output inverse_predicated_compilation_key = s.output_false;
ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args,
predicated_compilation_key, n->output_types());
MergeOutgoingControlEdges(root, n,
xla_run.operation.node());
MergeOutgoingDataEdges(root, n,
xla_run.operation.node(),
cluster_info.function.name(), debugging_opts);
TF_RETURN_IF_ERROR(root.status());
RemoveAllIncomingControlEdges(g, n);
Operation inverse_predicate_as_control =
DataToControl(root, inverse_predicated_compilation_key);
g->AddControlEdge(inverse_predicate_as_control.node(), n);
n->ClearAttr(kXlaCompiledKernelAttr);
TF_ASSIGN_OR_RETURN(Node* const pco, ReplaceFunctionCallWithPartitionedCall(
options, flib_def, n, g,
cluster_info.function, root));
TF_RETURN_IF_ERROR(
PredicateInt32Inputs(root, pco, inverse_predicate_as_control));
}
return absl::OkStatus();
}
}
Status BuildXlaOpsPass::Run(const GraphOptimizationPassOptions& options) {
Graph* graph = options.graph->get();
std::vector<Node*> xla_compiled_kernels;
absl::c_copy_if(graph->op_nodes(), std::back_inserter(xla_compiled_kernels),
[](const Node* n) {
if (n->IsSend() || n->IsRecv() || n->IsControlFlow()) {
return false;
}
return IsXlaCompiledKernel(*n);
});
bool lazy_compilation_enabled =
enable_lazy_compilation_
? *enable_lazy_compilation_
: GetBuildXlaOpsPassFlags()->tf_xla_enable_lazy_compilation;
jit::DeviceInfoCache device_info_cache;
const BuildXlaOpsPassFlags& flags = *GetBuildXlaOpsPassFlags();
DebuggingOpts debugging_opts;
debugging_opts.print_outputs = flags.tf_xla_print_cluster_outputs;
debugging_opts.check_input_numerics =
flags.tf_xla_check_cluster_input_numerics;
debugging_opts.check_output_numerics =
flags.tf_xla_check_cluster_output_numerics;
VLOG(1) << "print_outputs = " << debugging_opts.print_outputs;
VLOG(1) << "check_input_numerics = " << debugging_opts.check_input_numerics;
VLOG(1) << "check_output_numerics = " << debugging_opts.check_output_numerics;
for (Node* n : xla_compiled_kernels) {
TF_RETURN_IF_ERROR(ReplaceNodeWithXlaCompileAndXlaRun(
&device_info_cache, options, *options.flib_def,
lazy_compilation_enabled, debugging_opts, graph, n));
}
if (VLOG_IS_ON(1)) {
DumpGraphToFile("build_xla_ops", *graph, options.flib_def);
}
return absl::OkStatus();
}
} | #include "tensorflow/compiler/jit/build_xla_ops_pass.h"
#include "tensorflow/cc/framework/ops.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/resource_variable_ops.h"
#include "tensorflow/cc/ops/standard_ops.h"
#include "tensorflow/compiler/jit/defs.h"
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h"
#include "tensorflow/compiler/jit/node_matchers.h"
#include "tensorflow/compiler/jit/test_util.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/graph/algorithm.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
namespace tensorflow {
namespace {
class BuildXlaOpsTest : public ::testing::Test {
protected:
void SetUp() override {
CHECK(DeviceFactory::AddDevices(
SessionOptions(), "/job:localhost/replica:0/task:0", &devices_)
.ok());
}
private:
std::vector<std::unique_ptr<Device>> devices_;
};
using ::tensorflow::testing::FindNodeByName;
using ::tensorflow::testing::matchers::Attr;
using ::tensorflow::testing::matchers::CtrlDeps;
using ::tensorflow::testing::matchers::Inputs;
using ::tensorflow::testing::matchers::NodeWith;
using ::tensorflow::testing::matchers::Op;
using ::tensorflow::testing::matchers::Out;
using ::testing::_;
Status BuildXlaOps(const Scope& s, const FunctionDefLibrary& fdef_lib,
std::unique_ptr<Graph>* result) {
auto graph = std::make_unique<Graph>(OpRegistry::Global());
TF_RETURN_IF_ERROR(s.ToGraph(graph.get()));
FunctionLibraryDefinition flib_def(graph->op_registry(), fdef_lib);
static const char* kCpuDevice = "/job:localhost/replica:0/task:0/cpu:0";
for (Node* n : graph->nodes()) {
if (n->requested_device().empty()) {
n->set_assigned_device_name(kCpuDevice);
} else {
n->set_assigned_device_name(n->requested_device());
}
}
FixupSourceAndSinkEdges(graph.get());
GraphOptimizationPassWrapper wrapper;
GraphOptimizationPassOptions opt_options =
wrapper.CreateGraphOptimizationPassOptions(&graph);
opt_options.flib_def = &flib_def;
BuildXlaOpsPass pass(true);
TF_RETURN_IF_ERROR(pass.Run(opt_options));
VLOG(3) << graph->ToGraphDefDebug().DebugString();
*result = std::move(graph);
return absl::OkStatus();
}
Status MakeXlaCompiledKernel(Graph* graph, const string& callee_name,
const string& node_name, int num_constant_args,
int num_resource_args, Node** result) {
NodeDef call_node;
call_node.set_name(node_name);
call_node.set_op(callee_name);
AddNodeAttr(kXlaCompiledKernelAttr, true, &call_node);
AddNodeAttr(kXlaNumConstantArgsAttr, num_constant_args, &call_node);
AddNodeAttr(kXlaNumResourceArgsAttr, num_resource_args, &call_node);
TF_ASSIGN_OR_RETURN(*result, graph->AddNode(call_node));
return absl::OkStatus();
}
Status MakeXlaCompiledKernel(Graph* graph, const string& callee_name,
const string& node_name, Node** result) {
return MakeXlaCompiledKernel(graph, callee_name, node_name,
0, 0,
result);
}
Node* MakeWrite(const Scope& scope, Output value_to_write, const string& id) {
Output var_handle = ops::VarHandleOp(scope.WithOpName("Var_" + id), DT_FLOAT,
TensorShape({}));
ops::AssignVariableOp assign_op(scope.WithOpName("Assignee_" + id),
var_handle, value_to_write);
return assign_op.operation.node();
}
Node* MakeWrite(const Scope& scope, const string& id) {
return MakeWrite(
scope, ops::Const(scope.WithOpName("ValueToAssign" + id), 1.0f), id);
}
FunctionDefLibrary CreateFunctionDefLibWithConstFunction(const string& name) {
FunctionDefLibrary fdef_lib;
FunctionDef func = FunctionDefHelper::Create(
name, {}, {"out: float"},
{}, {FunctionDefHelper::Const("one", 1.0f)},
{{"out", "out:output:0"}});
*fdef_lib.add_function() = std::move(func);
return fdef_lib;
}
TEST_F(BuildXlaOpsTest, ControlDepsPreserved) {
const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:XLA_CPU:0";
Scope root = Scope::NewRootScope().WithDevice(kXlaDeviceName).ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call));
call->AddAttr(kXlaHasReferenceVarsAttr, false);
call->set_requested_device(kXlaDeviceName);
Node* write_op = MakeWrite(root, "write");
write_op->AddAttr(kXlaHasReferenceVarsAttr, false);
root.graph()->AddControlEdge(call, write_op);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
Node* write_op_new = FindNodeByName(graph.get(), write_op->name());
ASSERT_NE(write_op_new, nullptr);
EXPECT_THAT(write_op_new, NodeWith(CtrlDeps(NodeWith(Op("_XlaRun")))));
}
TEST_F(BuildXlaOpsTest, CleanFailureOnBogusAttr) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(
MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", 100, 100, &call));
Node* write_op = MakeWrite(root, "write");
root.graph()->AddControlEdge(call, write_op);
std::unique_ptr<Graph> graph;
Status failure_status = BuildXlaOps(root, fdef_lib, &graph);
ASSERT_FALSE(failure_status.ok());
EXPECT_EQ(failure_status.code(), error::INVALID_ARGUMENT);
}
TEST_F(BuildXlaOpsTest, OnNonXlaDevice) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call));
TF_ASSERT_OK(root.DoShapeInference(call));
call->AddAttr(kXlaHasReferenceVarsAttr, false);
Node* write_op = MakeWrite(root, Output(call), "write_result");
write_op->AddAttr(kXlaHasReferenceVarsAttr, false);
auto xla_compile = NodeWith(Op("_XlaCompile"), Attr("must_compile", false));
auto predicated_compilation_key =
NodeWith(Op("Switch"), Inputs(Out(0, xla_compile), Out(1, xla_compile)));
auto xla_run =
NodeWith(Op("_XlaRun"), Inputs(Out(1, predicated_compilation_key)));
auto tf_call =
NodeWith(Op("StatefulPartitionedCall"),
CtrlDeps(NodeWith(Op("Identity"),
Inputs(Out(0, predicated_compilation_key)))));
auto merge = NodeWith(Op("_XlaMerge"), Inputs(Out(tf_call), Out(xla_run)));
auto assign_var = NodeWith(Op("AssignVariableOp"), Inputs(_, Out(merge)));
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
Node* write_op_new = FindNodeByName(graph.get(), write_op->name());
ASSERT_NE(write_op_new, nullptr);
EXPECT_THAT(write_op_new, assign_var);
}
TEST_F(BuildXlaOpsTest, OnXlaDevice) {
const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:XLA_CPU:0";
Scope root = Scope::NewRootScope().WithDevice(kXlaDeviceName).ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call));
call->set_requested_device(kXlaDeviceName);
TF_ASSERT_OK(root.DoShapeInference(call));
call->AddAttr(kXlaHasReferenceVarsAttr, false);
Node* write_op = MakeWrite(root, Output(call), "write_result");
write_op->AddAttr(kXlaHasReferenceVarsAttr, false);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
auto xla_op =
NodeWith(Op("_XlaRun"), Inputs(Out(NodeWith(Op("_XlaCompile")))));
auto assign_var =
NodeWith(Op("AssignVariableOp"), Inputs(Out(NodeWith()), Out(xla_op)));
Node* write_op_new = FindNodeByName(graph.get(), write_op->name());
ASSERT_NE(write_op_new, nullptr);
EXPECT_THAT(write_op_new, assign_var);
}
TEST_F(BuildXlaOpsTest, NoExtraMergeForEdgeToSink) {
Scope root = Scope::NewRootScope().ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithConstFunction("cluster_0");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call));
call->AddAttr(kXlaHasReferenceVarsAttr, false);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
Node* sink_node = graph->sink_node();
EXPECT_THAT(sink_node,
NodeWith(CtrlDeps(NodeWith(Op("_XlaRun")),
NodeWith(Op("StatefulPartitionedCall")),
NodeWith(Op("NoOp")))));
}
#ifdef GOOGLE_CUDA
FunctionDefLibrary CreateFunctionDefLibWithInt32Input(const string& name) {
FunctionDefLibrary fdef_lib;
FunctionDef func = FunctionDefHelper::Create(
name, {"in: int32"},
{"out: int32"},
{}, {{{"out"}, "Identity", {"in"}}},
{{"out", "out:output:0"}});
*fdef_lib.add_function() = std::move(func);
return fdef_lib;
}
TEST_F(BuildXlaOpsTest, NoDeviceToHostCopiesForClustersWithInt32Inputs) {
const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:GPU:0";
Scope root = Scope::NewRootScope()
.WithDevice(kXlaDeviceName)
.WithAssignedDevice(kXlaDeviceName)
.ExitOnError();
FunctionDefLibrary fdef_lib =
CreateFunctionDefLibWithInt32Input("cluster_int32");
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib));
Node* call;
TF_ASSERT_OK(
MakeXlaCompiledKernel(root.graph(), "cluster_int32", "C", &call));
call->set_requested_device(kXlaDeviceName);
call->AddAttr(kXlaHasReferenceVarsAttr, false);
auto var =
ops::VarHandleOp(root.WithOpName("var"), DT_INT32, TensorShape({}));
auto int32_on_device =
ops::ReadVariableOp(root.WithOpName("int32_on_device"), var, DT_INT32);
root.graph()->AddEdge(int32_on_device.node(), 0, call, 0);
std::unique_ptr<Graph> graph;
TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph));
Node* stateful_partitioned_call_op = nullptr;
for (Node* n : graph->op_nodes()) {
if (n->type_string() == "StatefulPartitionedCall") {
ASSERT_EQ(stateful_partitioned_call_op, nullptr);
stateful_partitioned_call_op = n;
}
}
ASSERT_NE(stateful_partitioned_call_op, nullptr);
auto xla_compile = NodeWith(Op("_XlaCompile"));
auto switch_on_compilation_pred =
NodeWith(Op("Switch"), Inputs(Out(0, xla_compile), Out(1, xla_compile)));
auto ctrl_dep =
NodeWith(Op("Identity"), Inputs(Out(0, switch_on_compilation_pred)));
EXPECT_THAT(
stateful_partitioned_call_op,
NodeWith(Inputs(Out(NodeWith(Op("IdentityN"), CtrlDeps(ctrl_dep))))));
}
#endif
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/build_xla_ops_pass.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/jit/build_xla_ops_pass_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
90cb12a6-a116-4867-b829-8f9408189ca5 | cpp | google/quiche | random_decoder_test_base | quiche/http2/test_tools/random_decoder_test_base.cc | quiche/http2/test_tools/random_decoder_test_base_test.cc | #include "quiche/http2/test_tools/random_decoder_test_base.h"
#include <stddef.h>
#include <algorithm>
#include <memory>
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/http2_constants.h"
#include "quiche/http2/test_tools/verify_macros.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
using ::testing::AssertionResult;
namespace http2 {
namespace test {
RandomDecoderTest::RandomDecoderTest() = default;
bool RandomDecoderTest::StopDecodeOnDone() { return stop_decode_on_done_; }
DecodeStatus RandomDecoderTest::DecodeSegments(DecodeBuffer* original,
const SelectSize& select_size) {
DecodeStatus status = DecodeStatus::kDecodeInProgress;
bool first = true;
QUICHE_VLOG(2) << "DecodeSegments: input size=" << original->Remaining();
while (first || original->HasData()) {
size_t remaining = original->Remaining();
size_t size =
std::min(remaining, select_size(first, original->Offset(), remaining));
DecodeBuffer db(original->cursor(), size);
QUICHE_VLOG(2) << "Decoding " << size << " bytes of " << remaining
<< " remaining";
if (first) {
first = false;
status = StartDecoding(&db);
} else {
status = ResumeDecoding(&db);
}
if (db.Offset() == 0 && db.HasData() &&
status != DecodeStatus::kDecodeError) {
ADD_FAILURE() << "Decoder didn't make any progress; db.FullSize="
<< db.FullSize()
<< " original.Offset=" << original->Offset();
return DecodeStatus::kDecodeError;
}
original->AdvanceCursor(db.Offset());
switch (status) {
case DecodeStatus::kDecodeDone:
if (original->Empty() || StopDecodeOnDone()) {
return DecodeStatus::kDecodeDone;
}
continue;
case DecodeStatus::kDecodeInProgress:
continue;
case DecodeStatus::kDecodeError:
return DecodeStatus::kDecodeError;
}
}
return status;
}
AssertionResult RandomDecoderTest::DecodeAndValidateSeveralWays(
DecodeBuffer* original, bool return_non_zero_on_first,
const Validator& validator) {
const uint32_t original_remaining = original->Remaining();
QUICHE_VLOG(1) << "DecodeAndValidateSeveralWays - Start, remaining = "
<< original_remaining;
uint32_t first_consumed;
{
DecodeBuffer input(original->cursor(), original_remaining);
QUICHE_VLOG(2) << "DecodeSegmentsAndValidate with SelectRemaining";
HTTP2_VERIFY_SUCCESS(
DecodeSegmentsAndValidate(&input, SelectRemaining(), validator))
<< "\nFailed with SelectRemaining; input.Offset=" << input.Offset()
<< "; input.Remaining=" << input.Remaining();
first_consumed = input.Offset();
}
if (original_remaining <= 30) {
DecodeBuffer input(original->cursor(), original_remaining);
QUICHE_VLOG(2) << "DecodeSegmentsAndValidate with SelectOne";
HTTP2_VERIFY_SUCCESS(
DecodeSegmentsAndValidate(&input, SelectOne(), validator))
<< "\nFailed with SelectOne; input.Offset=" << input.Offset()
<< "; input.Remaining=" << input.Remaining();
HTTP2_VERIFY_EQ(first_consumed, input.Offset())
<< "\nFailed with SelectOne";
}
if (original_remaining <= 20) {
DecodeBuffer input(original->cursor(), original_remaining);
QUICHE_VLOG(2) << "DecodeSegmentsAndValidate with SelectZeroAndOne";
HTTP2_VERIFY_SUCCESS(DecodeSegmentsAndValidate(
&input, SelectZeroAndOne(return_non_zero_on_first), validator))
<< "\nFailed with SelectZeroAndOne";
HTTP2_VERIFY_EQ(first_consumed, input.Offset())
<< "\nFailed with SelectZeroAndOne; input.Offset=" << input.Offset()
<< "; input.Remaining=" << input.Remaining();
}
{
DecodeBuffer input(original->cursor(), original_remaining);
QUICHE_VLOG(2) << "DecodeSegmentsAndValidate with SelectRandom";
HTTP2_VERIFY_SUCCESS(DecodeSegmentsAndValidate(
&input, SelectRandom(return_non_zero_on_first), validator))
<< "\nFailed with SelectRandom; input.Offset=" << input.Offset()
<< "; input.Remaining=" << input.Remaining();
HTTP2_VERIFY_EQ(first_consumed, input.Offset())
<< "\nFailed with SelectRandom";
}
HTTP2_VERIFY_EQ(original_remaining, original->Remaining());
original->AdvanceCursor(first_consumed);
QUICHE_VLOG(1) << "DecodeAndValidateSeveralWays - SUCCESS";
return ::testing::AssertionSuccess();
}
RandomDecoderTest::SelectSize RandomDecoderTest::SelectZeroAndOne(
bool return_non_zero_on_first) {
std::shared_ptr<bool> zero_next(new bool);
*zero_next = !return_non_zero_on_first;
return [zero_next](bool , size_t ,
size_t ) -> size_t {
if (*zero_next) {
*zero_next = false;
return 0;
} else {
*zero_next = true;
return 1;
}
};
}
RandomDecoderTest::SelectSize RandomDecoderTest::SelectRandom(
bool return_non_zero_on_first) {
return [this, return_non_zero_on_first](bool first, size_t ,
size_t remaining) -> size_t {
uint32_t r = random_.Rand32();
if (first && return_non_zero_on_first) {
QUICHE_CHECK_LT(0u, remaining);
if (remaining == 1) {
return 1;
}
return 1 + (r % remaining);
}
return r % (remaining + 1);
};
}
uint32_t RandomDecoderTest::RandStreamId() {
return random_.Rand32() & StreamIdMask();
}
}
} | #include "quiche/http2/test_tools/random_decoder_test_base.h"
#include <stddef.h>
#include <functional>
#include <ios>
#include <set>
#include <type_traits>
#include "quiche/http2/decoder/decode_buffer.h"
#include "quiche/http2/decoder/decode_status.h"
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/quiche_callbacks.h"
namespace http2 {
namespace test {
namespace {
const char kData[]{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
const bool kReturnNonZeroOnFirst = true;
const bool kMayReturnZeroOnFirst = false;
// Exercises the RandomDecoderTest harness itself: the decoding callbacks are
// scripted per-test via callback members, and counters record how often the
// harness invoked each hook.
class RandomDecoderTestTest : public RandomDecoderTest {
 public:
  RandomDecoderTestTest() : data_db_(kData) {
    // Every test in this file assumes exactly 8 bytes of input.
    QUICHE_CHECK_EQ(sizeof kData, 8u);
  }
 protected:
  typedef quiche::MultiUseCallback<DecodeStatus(DecodeBuffer* db)> DecodingFn;
  // Counts the call, then delegates to the injected callback; reports an
  // error when the test did not install one.
  DecodeStatus StartDecoding(DecodeBuffer* db) override {
    ++start_decoding_calls_;
    if (start_decoding_fn_) {
      return start_decoding_fn_(db);
    }
    return DecodeStatus::kDecodeError;
  }
  // Same pattern as StartDecoding, for resuming after a partial decode.
  DecodeStatus ResumeDecoding(DecodeBuffer* db) override {
    ++resume_decoding_calls_;
    if (resume_decoding_fn_) {
      return resume_decoding_fn_(db);
    }
    return DecodeStatus::kDecodeError;
  }
  // Counts the call; optionally overrides the base-class answer so a test
  // can force the harness to stop (or continue) on an early "done".
  bool StopDecodeOnDone() override {
    ++stop_decode_on_done_calls_;
    if (override_stop_decode_on_done_) {
      return sub_stop_decode_on_done_;
    }
    return RandomDecoderTest::StopDecodeOnDone();
  }
  // Invocation counters inspected by the tests.
  size_t start_decoding_calls_ = 0;
  size_t resume_decoding_calls_ = 0;
  size_t stop_decode_on_done_calls_ = 0;
  // Per-test scripted behavior; an empty callback means kDecodeError.
  DecodingFn start_decoding_fn_;
  DecodingFn resume_decoding_fn_;
  DecodeBuffer data_db_;  // Wraps kData for the duration of a test.
  // Answer returned by StopDecodeOnDone() while the override is active.
  bool sub_stop_decode_on_done_ = true;
  bool override_stop_decode_on_done_ = true;
};
TEST_F(RandomDecoderTestTest, StopOnStartPartiallyDone) {
start_decoding_fn_ = [this](DecodeBuffer* db) {
EXPECT_EQ(1u, start_decoding_calls_);
EXPECT_EQ(kData, db->cursor());
EXPECT_EQ(sizeof kData, db->Remaining());
db->DecodeUInt8();
return DecodeStatus::kDecodeDone;
};
EXPECT_EQ(DecodeStatus::kDecodeDone,
DecodeSegments(&data_db_, SelectRemaining()));
EXPECT_EQ(1u, data_db_.Offset());
EXPECT_EQ(1u, start_decoding_calls_);
EXPECT_EQ(0u, resume_decoding_calls_);
EXPECT_EQ(1u, stop_decode_on_done_calls_);
}
TEST_F(RandomDecoderTestTest, StopOnResumePartiallyDone) {
start_decoding_fn_ = [this](DecodeBuffer* db) {
EXPECT_EQ(1u, start_decoding_calls_);
db->DecodeUInt8();
return DecodeStatus::kDecodeInProgress;
};
resume_decoding_fn_ = [this](DecodeBuffer* db) {
EXPECT_EQ(1u, resume_decoding_calls_);
EXPECT_EQ(data_db_.cursor(), db->cursor());
db->DecodeUInt16();
return DecodeStatus::kDecodeDone;
};
override_stop_decode_on_done_ = false;
stop_decode_on_done_ = true;
EXPECT_EQ(DecodeStatus::kDecodeDone,
DecodeSegments(&data_db_, SelectRemaining()));
EXPECT_EQ(3u, data_db_.Offset());
EXPECT_EQ(1u, start_decoding_calls_);
EXPECT_EQ(1u, resume_decoding_calls_);
EXPECT_EQ(1u, stop_decode_on_done_calls_);
}
TEST_F(RandomDecoderTestTest, InProgressWhenEmpty) {
start_decoding_fn_ = [this](DecodeBuffer* db) {
EXPECT_EQ(1u, start_decoding_calls_);
if (db->HasData()) {
db->DecodeUInt8();
if (db->HasData()) {
db->DecodeUInt8();
}
}
return DecodeStatus::kDecodeInProgress;
};
resume_decoding_fn_ = [](DecodeBuffer* db) {
if (db->HasData()) {
db->AdvanceCursor(db->Remaining());
}
return DecodeStatus::kDecodeInProgress;
};
EXPECT_EQ(DecodeStatus::kDecodeInProgress,
DecodeSegments(&data_db_, SelectRandom(kMayReturnZeroOnFirst)));
EXPECT_TRUE(data_db_.Empty());
EXPECT_EQ(1u, start_decoding_calls_);
EXPECT_LE(1u, resume_decoding_calls_);
EXPECT_EQ(0u, stop_decode_on_done_calls_);
}
TEST_F(RandomDecoderTestTest, DoneExactlyAtEnd) {
start_decoding_fn_ = [this](DecodeBuffer* db) {
EXPECT_EQ(1u, start_decoding_calls_);
EXPECT_EQ(1u, db->Remaining());
EXPECT_EQ(1u, db->FullSize());
db->DecodeUInt8();
return DecodeStatus::kDecodeInProgress;
};
resume_decoding_fn_ = [this](DecodeBuffer* db) {
EXPECT_EQ(1u, db->Remaining());
EXPECT_EQ(1u, db->FullSize());
db->DecodeUInt8();
if (data_db_.Remaining() == 1) {
return DecodeStatus::kDecodeDone;
}
return DecodeStatus::kDecodeInProgress;
};
override_stop_decode_on_done_ = true;
sub_stop_decode_on_done_ = true;
EXPECT_EQ(DecodeStatus::kDecodeDone, DecodeSegments(&data_db_, SelectOne()));
EXPECT_EQ(0u, data_db_.Remaining());
EXPECT_EQ(1u, start_decoding_calls_);
EXPECT_EQ((sizeof kData) - 1, resume_decoding_calls_);
EXPECT_EQ(0u, stop_decode_on_done_calls_);
}
TEST_F(RandomDecoderTestTest, DecodeSeveralWaysToEnd) {
size_t decoded_since_start = 0;
auto shared_fn = [&decoded_since_start, this](DecodeBuffer* db) {
decoded_since_start += db->Remaining();
db->AdvanceCursor(db->Remaining());
EXPECT_EQ(0u, db->Remaining());
if (decoded_since_start == data_db_.FullSize()) {
return DecodeStatus::kDecodeDone;
}
return DecodeStatus::kDecodeInProgress;
};
start_decoding_fn_ = [&decoded_since_start, shared_fn](DecodeBuffer* db) {
decoded_since_start = 0;
return shared_fn(db);
};
resume_decoding_fn_ = shared_fn;
Validator validator = ValidateDoneAndEmpty();
EXPECT_TRUE(DecodeAndValidateSeveralWays(&data_db_, kMayReturnZeroOnFirst,
validator));
EXPECT_EQ(0u, data_db_.Remaining());
EXPECT_EQ(4u, start_decoding_calls_);
EXPECT_EQ(0u, stop_decode_on_done_calls_);
}
TEST_F(RandomDecoderTestTest, DecodeTwoWaysAndStopEarly) {
size_t decoded_since_start = 0;
auto shared_fn = [&decoded_since_start, this](DecodeBuffer* db) {
uint32_t amount = db->Remaining();
if (start_decoding_calls_ == 2 && amount > 1) {
amount = 1;
}
decoded_since_start += amount;
db->AdvanceCursor(amount);
if (decoded_since_start == data_db_.FullSize()) {
return DecodeStatus::kDecodeDone;
}
if (decoded_since_start > 1 && start_decoding_calls_ == 2) {
return DecodeStatus::kDecodeDone;
}
return DecodeStatus::kDecodeInProgress;
};
start_decoding_fn_ = [&decoded_since_start, shared_fn](DecodeBuffer* db) {
decoded_since_start = 0;
return shared_fn(db);
};
resume_decoding_fn_ = shared_fn;
Validator validator = [this](const DecodeBuffer& ,
DecodeStatus status) -> AssertionResult {
if (start_decoding_calls_ <= 2 && status != DecodeStatus::kDecodeDone) {
return ::testing::AssertionFailure()
<< "Expected DecodeStatus::kDecodeDone, not " << status;
}
if (start_decoding_calls_ > 2) {
return ::testing::AssertionFailure()
<< "How did we get to pass " << start_decoding_calls_;
}
return ::testing::AssertionSuccess();
};
EXPECT_FALSE(DecodeAndValidateSeveralWays(&data_db_, kMayReturnZeroOnFirst,
validator));
EXPECT_EQ(2u, start_decoding_calls_);
EXPECT_EQ(1u, stop_decode_on_done_calls_);
}
TEST_F(RandomDecoderTestTest, DecodeThreeWaysAndError) {
size_t decoded_since_start = 0;
auto shared_fn = [&decoded_since_start, this](DecodeBuffer* db) {
if (start_decoding_calls_ == 3 && decoded_since_start > 0) {
return DecodeStatus::kDecodeError;
}
uint32_t amount = db->Remaining();
if (start_decoding_calls_ == 3 && amount > 1) {
amount = 1;
}
decoded_since_start += amount;
db->AdvanceCursor(amount);
if (decoded_since_start == data_db_.FullSize()) {
return DecodeStatus::kDecodeDone;
}
return DecodeStatus::kDecodeInProgress;
};
start_decoding_fn_ = [&decoded_since_start, shared_fn](DecodeBuffer* db) {
decoded_since_start = 0;
return shared_fn(db);
};
resume_decoding_fn_ = shared_fn;
Validator validator = ValidateDoneAndEmpty();
EXPECT_FALSE(DecodeAndValidateSeveralWays(&data_db_, kReturnNonZeroOnFirst,
validator));
EXPECT_EQ(3u, start_decoding_calls_);
EXPECT_EQ(0u, stop_decode_on_done_calls_);
}
// Calling CorruptEnum repeatedly should produce many (mostly out-of-range)
// values without crashing; the set just collects the observed spread.
TEST(CorruptEnumTest, ManyValues) {
  std::set<uint64_t> values;
  DecodeStatus status;
  QUICHE_LOG(INFO) << "sizeof status = " << sizeof status;
  Http2Random rng;
  for (int ndx = 0; ndx < 256; ++ndx) {
    CorruptEnum(&status, &rng);
    values.insert(static_cast<uint64_t>(status));
  }
}
// Underlying integer type of DecodeStatus; used below to build a struct
// whose filler fields sandwich the enum so out-of-bounds writes by
// CorruptEnum would be detectable.
typedef typename std::underlying_type<DecodeStatus>::type DecodeStatusUT;
struct CorruptEnumTestStruct {
  DecodeStatusUT filler1;
  DecodeStatus status;
  DecodeStatusUT filler2;
};
// CorruptEnum must write only within the enum's own storage: the filler
// fields on either side must keep their original bit patterns. Tried with
// both all-zero and all-one fillers so a stray write in either direction
// would flip observable bits.
TEST(CorruptEnumTest, CorruptsOnlyEnum) {
  Http2Random rng;
  for (const DecodeStatusUT filler : {DecodeStatusUT(), ~DecodeStatusUT()}) {
    QUICHE_LOG(INFO) << "filler=0x" << std::hex << filler;
    CorruptEnumTestStruct s;
    s.filler1 = filler;
    s.filler2 = filler;
    for (int ndx = 0; ndx < 256; ++ndx) {
      CorruptEnum(&s.status, &rng);
      EXPECT_EQ(s.filler1, filler);
      EXPECT_EQ(s.filler2, filler);
    }
  }
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/random_decoder_test_base.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/test_tools/random_decoder_test_base_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
813f4cdc-b499-41a1-9d6d-a182f641d0ef | cpp | tensorflow/tensorflow | bf16 | tensorflow/lite/experimental/shlo/legacy/src/bf16.h | tensorflow/lite/experimental/shlo/bf16_test.cc | #ifndef TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_BF16_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_SHLO_LEGACY_SRC_BF16_H_
#include "tensorflow/lite/experimental/shlo/legacy/src/has_keyword.h"
#if defined(__STDCPP_BFLOAT16_T__)
#include <stdfloat>
namespace stablehlo {
using BF16 = bfloat16_t;
}
#elif __has_keyword(__bf16) && __x86_64__
namespace stablehlo {
using BF16 = __bf16;
}
#elif __has_keyword(__bf16) && __aarch64__
#include <cmath>
#include <cstdint>
#include <cstring>
namespace stablehlo {
// Software bfloat16 emulation used on AArch64 when the compiler's __bf16
// keyword exists but cannot be used as an arithmetic type. The value is
// stored as the upper 16 bits of an IEEE-754 binary32; all arithmetic is
// performed by widening to float and rounding the result back.
class BF16 {
 public:
  // Converts from float with round-to-nearest-even. NaN inputs are
  // canonicalized to a quiet NaN, preserving the sign bit.
  BF16(float f = 0.0f) {
    if (std::isnan(f)) {
      value_ = std::signbit(f) ? 0xFFC0 : 0x7FC0;
    } else {
      // Use memcpy for the bit pattern: reinterpret_cast between float* and
      // uint32_t* violates strict aliasing and is undefined behavior.
      uint32_t input;
      std::memcpy(&input, &f, sizeof(input));
      // Round to nearest even: bias by 0x7FFF plus the LSB of the retained
      // mantissa so exact ties round toward the even representation.
      uint32_t lsb = (input >> 16) & 1;
      uint32_t rounding_bias = 0x7fff + lsb;
      input += rounding_bias;
      value_ = static_cast<uint16_t>(input >> 16u);
    }
  }
  BF16& operator=(BF16 other) {
    value_ = other.value_;
    return *this;
  }
  // NOTE(review): equality compares raw bits, so NaN == NaN is true and
  // +0.0 != -0.0, unlike IEEE float semantics; confirm whether callers
  // depend on exact-bit comparison before changing this.
  bool operator==(BF16 other) const { return value_ == other.value_; }
  bool operator!=(BF16 other) const { return !(*this == other); }
  // Widening to float is exact: the stored bits form the upper half of the
  // binary32 representation and the low 16 mantissa bits are zero.
  operator float() const {
    uint32_t tmp = static_cast<uint32_t>(value_) << 16;
    float result;
    std::memcpy(&result, &tmp, sizeof(result));
    return result;
  }
  BF16 operator-() const { return BF16(-static_cast<float>(*this)); }
  // Compound assignments compute in float, then round back to bfloat16.
  BF16& operator+=(BF16 other) {
    value_ = BF16(static_cast<float>(*this) + static_cast<float>(other)).value_;
    return *this;
  }
  BF16& operator-=(BF16 other) {
    value_ = BF16(static_cast<float>(*this) - static_cast<float>(other)).value_;
    return *this;
  }
  BF16& operator*=(BF16 other) {
    value_ = BF16(static_cast<float>(*this) * static_cast<float>(other)).value_;
    return *this;
  }
  BF16& operator/=(BF16 other) {
    value_ = BF16(static_cast<float>(*this) / static_cast<float>(other)).value_;
    return *this;
  }

 private:
  uint16_t value_;  // bfloat16 bit pattern: 1 sign, 8 exponent, 7 mantissa.
};
// Binary operators are defined in terms of the compound assignments.
inline BF16 operator+(BF16 x, BF16 y) {
  x += y;
  return x;
}
inline BF16 operator-(BF16 x, BF16 y) {
  x -= y;
  return x;
}
inline BF16 operator*(BF16 x, BF16 y) {
  x *= y;
  return x;
}
inline BF16 operator/(BF16 x, BF16 y) {
  x /= y;
  return x;
}
}
#else
#error Type BF16 is not available
#endif
#endif | #include "tensorflow/lite/experimental/shlo/bf16.h"
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/casts.h"
namespace shlo_ref {
namespace {
::testing::Matcher<BF16> MatchesBits(uint16_t bits) {
return ::testing::ResultOf([](BF16 y) { return absl::bit_cast<uint16_t>(y); },
::testing::Eq(bits));
}
::testing::Matcher<float> NearFloat(float x, float relative_error = 1e-3) {
return ::testing::FloatNear(x, std::abs(x) * relative_error);
}
float BinaryToFloat(uint32_t sign, uint32_t exponent, uint32_t high_mantissa,
uint32_t low_mantissa) {
float dest;
uint32_t src =
(sign << 31) + (exponent << 23) + (high_mantissa << 16) + low_mantissa;
memcpy(static_cast<void*>(&dest), static_cast<const void*>(&src),
sizeof(dest));
return dest;
}
template <typename T>
void TestRoundtrips() {
for (T value : {
-std::numeric_limits<T>::infinity(),
std::numeric_limits<T>::infinity(),
T(-1.0),
T(-0.5),
T(-0.0),
T(1.0),
T(0.5),
T(0.0),
}) {
EXPECT_EQ(value, static_cast<T>(static_cast<BF16>(value)));
}
}
TEST(BF16Test, FloatRoundtrips) { TestRoundtrips<float>(); }
TEST(BF16Test, DoubleRoundtrips) { TestRoundtrips<double>(); }
TEST(BF16Test, Float16Roundtrips) { TestRoundtrips<BF16>(); }
TEST(BF16Test, ConversionFromFloat) {
EXPECT_THAT(BF16(1.0f), MatchesBits(0x3f80));
EXPECT_THAT(BF16(0.5f), MatchesBits(0x3f00));
EXPECT_THAT(BF16(0.33333f), MatchesBits(0x3eab));
EXPECT_THAT(BF16(3.38e38f), MatchesBits(0x7f7e));
EXPECT_THAT(BF16(3.40e38f), MatchesBits(0x7f80));
}
TEST(BF16Test, RoundToNearestEven) {
float val1 = static_cast<float>(absl::bit_cast<BF16>(uint16_t{0x3c00}));
float val2 = static_cast<float>(absl::bit_cast<BF16>(uint16_t{0x3c01}));
float val3 = static_cast<float>(absl::bit_cast<BF16>(uint16_t{0x3c02}));
EXPECT_THAT(BF16(0.5f * (val1 + val2)), MatchesBits(0x3c00));
EXPECT_THAT(BF16(0.5f * (val2 + val3)), MatchesBits(0x3c02));
}
TEST(BF16Test, ConversionFromInt) {
EXPECT_THAT(BF16(-1), MatchesBits(0xbf80));
EXPECT_THAT(BF16(0), MatchesBits(0x0000));
EXPECT_THAT(BF16(1), MatchesBits(0x3f80));
EXPECT_THAT(BF16(2), MatchesBits(0x4000));
EXPECT_THAT(BF16(3), MatchesBits(0x4040));
EXPECT_THAT(BF16(12), MatchesBits(0x4140));
}
TEST(BF16Test, ConversionFromBool) {
EXPECT_THAT(BF16(false), MatchesBits(0x0000));
EXPECT_THAT(BF16(true), MatchesBits(0x3f80));
}
TEST(BF16Test, ConversionToBool) {
EXPECT_EQ(static_cast<bool>(BF16(3)), true);
EXPECT_EQ(static_cast<bool>(BF16(0.33333f)), true);
EXPECT_EQ(BF16(-0.0), false);
EXPECT_EQ(static_cast<bool>(BF16(0.0)), false);
}
TEST(BF16Test, ExplicitConversionToFloat) {
EXPECT_EQ(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0x0000)), 0.0f);
EXPECT_EQ(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0x3f80)), 1.0f);
}
TEST(BF16Test, ImplicitConversionToFloat) {
EXPECT_EQ((absl::bit_cast<BF16, uint16_t>(0x0000)), 0.0f);
EXPECT_EQ((absl::bit_cast<BF16, uint16_t>(0x3f80)), 1.0f);
}
TEST(BF16Test, Zero) {
EXPECT_EQ(BF16(0.0f), BF16(0.0f));
EXPECT_EQ(BF16(-0.0f), BF16(0.0f));
EXPECT_EQ(BF16(-0.0f), BF16(-0.0f));
EXPECT_THAT(BF16(0.0f), MatchesBits(0x0000));
EXPECT_THAT(BF16(-0.0f), MatchesBits(0x8000));
}
TEST(BF16Test, DefaultConstruct) {
EXPECT_EQ(static_cast<float>(BF16()), 0.0f);
}
TEST(BF16Test, Conversion) {
for (int i = 0; i < 100; ++i) {
float a = i + 1.25;
BF16 b = static_cast<BF16>(a);
float c = static_cast<float>(b);
EXPECT_LE(std::abs(c - a), a / 128);
}
}
TEST(BF16Test, Epsilon) {
EXPECT_LE(1.0f, static_cast<float>(std::numeric_limits<BF16>::epsilon() +
BF16(1.0f)));
EXPECT_EQ(1.0f, static_cast<float>(std::numeric_limits<BF16>::epsilon() /
BF16(2.0f) +
BF16(1.0f)));
}
TEST(BF16Test, Negate) {
EXPECT_EQ(static_cast<float>(-BF16(3.0f)), -3.0f);
EXPECT_EQ(static_cast<float>(-BF16(-4.5f)), 4.5f);
}
TEST(BF16Test, DivisionByZero) {
EXPECT_TRUE(std::isnan(static_cast<float>(BF16(0.0 / 0.0))));
EXPECT_TRUE(std::isinf(static_cast<float>(BF16(1.0 / 0.0))));
EXPECT_TRUE(std::isinf(static_cast<float>(BF16(-1.0 / 0.0))));
EXPECT_TRUE(std::isnan(BF16(0.0 / 0.0)));
EXPECT_TRUE(std::isinf(BF16(1.0 / 0.0)));
EXPECT_TRUE(std::isinf(BF16(-1.0 / 0.0)));
}
TEST(BF16Test, NonFinite) {
EXPECT_FALSE(std::isinf(
static_cast<float>(BF16(3.38e38f))));
EXPECT_FALSE(std::isnan(static_cast<float>(BF16(0.0f))));
EXPECT_TRUE(
std::isinf(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0xff80))));
EXPECT_TRUE(
std::isnan(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0xffc0))));
EXPECT_TRUE(
std::isinf(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0x7f80))));
EXPECT_TRUE(
std::isnan(static_cast<float>(absl::bit_cast<BF16, uint16_t>(0x7fc0))));
EXPECT_FALSE(isinf(absl::bit_cast<BF16, uint16_t>(0x7bff)));
EXPECT_FALSE(isnan(absl::bit_cast<BF16, uint16_t>(0x0000)));
EXPECT_TRUE(isinf(absl::bit_cast<BF16, uint16_t>(0xff80)));
EXPECT_TRUE(isnan(absl::bit_cast<BF16, uint16_t>(0xffc0)));
EXPECT_TRUE(isinf(absl::bit_cast<BF16, uint16_t>(0x7f80)));
EXPECT_TRUE(isnan(absl::bit_cast<BF16, uint16_t>(0x7fc0)));
EXPECT_THAT(BF16(BinaryToFloat(0x0, 0xff, 0x40, 0x0)),
MatchesBits(0x7fe0));
EXPECT_THAT(BF16(BinaryToFloat(0x1, 0xff, 0x40, 0x0)),
MatchesBits(0xffe0));
}
TEST(BF16Test, NumericLimits) {
static_assert(std::numeric_limits<BF16>::is_signed);
EXPECT_EQ(
absl::bit_cast<uint16_t>(std::numeric_limits<BF16>::infinity()),
absl::bit_cast<uint16_t>(BF16(std::numeric_limits<float>::infinity())));
constexpr uint16_t BFLOAT16_QUIET_BIT = 0x0040;
EXPECT_TRUE(isnan(std::numeric_limits<BF16>::quiet_NaN()));
EXPECT_TRUE(isnan(BF16(std::numeric_limits<float>::quiet_NaN())));
EXPECT_GT((absl::bit_cast<uint16_t>(std::numeric_limits<BF16>::quiet_NaN()) &
BFLOAT16_QUIET_BIT),
0);
EXPECT_GT(
(absl::bit_cast<uint16_t>(BF16(std::numeric_limits<float>::quiet_NaN())) &
BFLOAT16_QUIET_BIT),
0);
EXPECT_TRUE(isnan(std::numeric_limits<BF16>::signaling_NaN()));
EXPECT_TRUE(isnan(BF16(std::numeric_limits<float>::signaling_NaN())));
EXPECT_EQ(
0, (absl::bit_cast<uint16_t>(std::numeric_limits<BF16>::signaling_NaN()) &
BFLOAT16_QUIET_BIT));
EXPECT_EQ(0, (absl::bit_cast<uint16_t>(
BF16(std::numeric_limits<float>::signaling_NaN())) &
BFLOAT16_QUIET_BIT));
EXPECT_GT(std::numeric_limits<BF16>::min(), BF16(0.f));
EXPECT_GT(std::numeric_limits<BF16>::denorm_min(), BF16(0.f));
EXPECT_EQ(std::numeric_limits<BF16>::denorm_min() / BF16(2), BF16(0.f));
}
TEST(BF16Test, Arithmetic) {
EXPECT_EQ(static_cast<float>(BF16(2) + BF16(2)), 4);
EXPECT_EQ(static_cast<float>(BF16(2) + BF16(-2)), 0);
EXPECT_THAT(static_cast<float>(BF16(0.33333f) + BF16(0.66667f)),
NearFloat(1.0f));
EXPECT_EQ(static_cast<float>(BF16(2.0f) * BF16(-5.5f)), -11.0f);
EXPECT_THAT(static_cast<float>(BF16(1.0f) / BF16(3.0f)), NearFloat(0.3339f));
EXPECT_EQ(static_cast<float>(-BF16(4096.0f)), -4096.0f);
EXPECT_EQ(static_cast<float>(-BF16(-4096.0f)), 4096.0f);
}
TEST(BF16Test, Comparison) {
EXPECT_TRUE(BF16(1.0f) > BF16(0.5f));
EXPECT_TRUE(BF16(0.5f) < BF16(1.0f));
EXPECT_FALSE((BF16(1.0f) < BF16(0.5f)));
EXPECT_FALSE((BF16(0.5f) > BF16(1.0f)));
EXPECT_FALSE((BF16(4.0f) > BF16(4.0f)));
EXPECT_FALSE((BF16(4.0f) < BF16(4.0f)));
EXPECT_FALSE((BF16(0.0f) < BF16(-0.0f)));
EXPECT_FALSE((BF16(-0.0f) < BF16(0.0f)));
EXPECT_FALSE((BF16(0.0f) > BF16(-0.0f)));
EXPECT_FALSE((BF16(-0.0f) > BF16(0.0f)));
EXPECT_TRUE(BF16(0.2f) > BF16(-1.0f));
EXPECT_TRUE(BF16(-1.0f) < BF16(0.2f));
EXPECT_TRUE(BF16(-16.0f) < BF16(-15.0f));
EXPECT_TRUE(BF16(1.0f) == BF16(1.0f));
EXPECT_TRUE(BF16(1.0f) != BF16(2.0f));
EXPECT_FALSE((BF16(0.0 / 0.0) == BF16(0.0 / 0.0)));
EXPECT_TRUE(BF16(0.0 / 0.0) != BF16(0.0 / 0.0));
EXPECT_FALSE((BF16(1.0) == BF16(0.0 / 0.0)));
EXPECT_FALSE((BF16(1.0) < BF16(0.0 / 0.0)));
EXPECT_FALSE((BF16(1.0) > BF16(0.0 / 0.0)));
EXPECT_TRUE(BF16(1.0) != BF16(0.0 / 0.0));
EXPECT_TRUE(BF16(1.0) < BF16(1.0 / 0.0));
EXPECT_TRUE(BF16(1.0) > BF16(-1.0 / 0.0));
}
constexpr float PI = 3.14159265358979323846f;
TEST(BF16Test, BasicFunctions) {
EXPECT_EQ(static_cast<float>(abs(BF16(3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(BF16(3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(BF16(-3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(abs(BF16(-3.5f))), 3.5f);
EXPECT_EQ(static_cast<float>(floor(BF16(3.5f))), 3.0f);
EXPECT_EQ(static_cast<float>(floor(BF16(3.5f))), 3.0f);
EXPECT_EQ(static_cast<float>(floor(BF16(-3.5f))), -4.0f);
EXPECT_EQ(static_cast<float>(floor(BF16(-3.5f))), -4.0f);
EXPECT_EQ(static_cast<float>(ceil(BF16(3.5f))), 4.0f);
EXPECT_EQ(static_cast<float>(ceil(BF16(3.5f))), 4.0f);
EXPECT_EQ(static_cast<float>(ceil(BF16(-3.5f))), -3.0f);
EXPECT_EQ(static_cast<float>(ceil(BF16(-3.5f))), -3.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(BF16(0.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(BF16(0.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(BF16(4.0f))), 2.0f);
EXPECT_FLOAT_EQ(static_cast<float>(sqrt(BF16(4.0f))), 2.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(BF16(0.0f), BF16(1.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(BF16(0.0f), BF16(1.0f))), 0.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(BF16(2.0f), BF16(2.0f))), 4.0f);
EXPECT_FLOAT_EQ(static_cast<float>(pow(BF16(2.0f), BF16(2.0f))), 4.0f);
EXPECT_EQ(static_cast<float>(exp(BF16(0.0f))), 1.0f);
EXPECT_EQ(static_cast<float>(exp(BF16(0.0f))), 1.0f);
EXPECT_THAT(static_cast<float>(exp(BF16(PI))),
NearFloat(20.f + static_cast<float>(PI)));
EXPECT_THAT(static_cast<float>(exp(BF16(PI))),
NearFloat(20.f + static_cast<float>(PI)));
EXPECT_EQ(static_cast<float>(expm1(BF16(0.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(expm1(BF16(0.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(expm1(BF16(2.0f))), NearFloat(6.375f));
EXPECT_THAT(static_cast<float>(expm1(BF16(2.0f))), NearFloat(6.375f));
EXPECT_EQ(static_cast<float>(log(BF16(1.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(log(BF16(1.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(log(BF16(10.0f))), NearFloat(2.296875f));
EXPECT_THAT(static_cast<float>(log(BF16(10.0f))), NearFloat(2.296875f));
EXPECT_EQ(static_cast<float>(log1p(BF16(0.0f))), 0.0f);
EXPECT_EQ(static_cast<float>(log1p(BF16(0.0f))), 0.0f);
EXPECT_THAT(static_cast<float>(log1p(BF16(10.0f))), NearFloat(2.390625f));
EXPECT_THAT(static_cast<float>(log1p(BF16(10.0f))), NearFloat(2.390625f));
}
TEST(BF16Test, TrigonometricFunctions) {
EXPECT_THAT(cos(BF16(0.0f)), NearFloat(BF16(std::cos(0.0f))));
EXPECT_THAT(cos(BF16(0.0f)), NearFloat(BF16(std::cos(0.0f))));
EXPECT_FLOAT_EQ(cos(BF16(PI)), BF16(std::cos(PI)));
EXPECT_NEAR(cos(BF16(PI / 2)), BF16(std::cos(PI / 2)), 1e-3);
EXPECT_NEAR(cos(BF16(3 * PI / 2)), BF16(std::cos(3 * PI / 2)), 1e-2);
EXPECT_THAT(cos(BF16(3.5f)), NearFloat(BF16(std::cos(3.5f))));
EXPECT_FLOAT_EQ(sin(BF16(0.0f)), BF16(std::sin(0.0f)));
EXPECT_FLOAT_EQ(sin(BF16(0.0f)), BF16(std::sin(0.0f)));
EXPECT_NEAR(sin(BF16(PI)), BF16(std::sin(PI)), 1e-3);
EXPECT_THAT(sin(BF16(PI / 2)), NearFloat(BF16(std::sin(PI / 2))));
EXPECT_THAT(sin(BF16(3 * PI / 2)), NearFloat(BF16(std::sin(3 * PI / 2))));
EXPECT_THAT(sin(BF16(3.5f)), NearFloat(BF16(std::sin(3.5f))));
EXPECT_FLOAT_EQ(tan(BF16(0.0f)), BF16(std::tan(0.0f)));
EXPECT_FLOAT_EQ(tan(BF16(0.0f)), BF16(std::tan(0.0f)));
EXPECT_NEAR(tan(BF16(PI)), BF16(std::tan(PI)), 1e-3);
EXPECT_THAT(tan(BF16(3.5f)), NearFloat(BF16(std::tan(3.5f))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/bf16.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/bf16_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
56045f28-85a8-4780-80eb-1f1726626053 | cpp | google/cel-cpp | string_extension_func_registrar | eval/public/string_extension_func_registrar.cc | eval/public/string_extension_func_registrar_test.cc | #include "eval/public/string_extension_func_registrar.h"
#include "absl/status/status.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_options.h"
#include "extensions/strings.h"
namespace google::api::expr::runtime {
// Registers the CEL string extension functions (split, join, lowerAscii,
// etc.) on `registry` by delegating to the canonical implementation in
// cel::extensions::RegisterStringsFunctions.
absl::Status RegisterStringExtensionFunctions(
    CelFunctionRegistry* registry, const InterpreterOptions& options) {
  return cel::extensions::RegisterStringsFunctions(registry, options);
}
} | #include "eval/public/string_extension_func_registrar.h"
#include <cstdint>
#include <string>
#include <vector>
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "absl/types/span.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_function_registry.h"
#include "eval/public/cel_value.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "internal/testing.h"
#include "google/protobuf/arena.h"
namespace google::api::expr::runtime {
namespace {
using google::protobuf::Arena;
class StringExtensionTest : public ::testing::Test {
protected:
StringExtensionTest() = default;
void SetUp() override {
ASSERT_OK(RegisterBuiltinFunctions(®istry_));
ASSERT_OK(RegisterStringExtensionFunctions(®istry_));
}
void PerformSplitStringTest(Arena* arena, std::string* value,
std::string* delimiter, CelValue* result) {
auto function = registry_.FindOverloads(
"split", true, {CelValue::Type::kString, CelValue::Type::kString});
ASSERT_EQ(function.size(), 1);
auto func = function[0];
std::vector<CelValue> args = {CelValue::CreateString(value),
CelValue::CreateString(delimiter)};
absl::Span<CelValue> arg_span(&args[0], args.size());
auto status = func->Evaluate(arg_span, result, arena);
ASSERT_OK(status);
}
void PerformSplitStringWithLimitTest(Arena* arena, std::string* value,
std::string* delimiter, int64_t limit,
CelValue* result) {
auto function = registry_.FindOverloads(
"split", true,
{CelValue::Type::kString, CelValue::Type::kString,
CelValue::Type::kInt64});
ASSERT_EQ(function.size(), 1);
auto func = function[0];
std::vector<CelValue> args = {CelValue::CreateString(value),
CelValue::CreateString(delimiter),
CelValue::CreateInt64(limit)};
absl::Span<CelValue> arg_span(&args[0], args.size());
auto status = func->Evaluate(arg_span, result, arena);
ASSERT_OK(status);
}
void PerformJoinStringTest(Arena* arena, std::vector<std::string>& values,
CelValue* result) {
auto function =
registry_.FindOverloads("join", true, {CelValue::Type::kList});
ASSERT_EQ(function.size(), 1);
auto func = function[0];
std::vector<CelValue> cel_list;
cel_list.reserve(values.size());
for (const std::string& value : values) {
cel_list.push_back(
CelValue::CreateString(Arena::Create<std::string>(arena, value)));
}
std::vector<CelValue> args = {CelValue::CreateList(
Arena::Create<ContainerBackedListImpl>(arena, cel_list))};
absl::Span<CelValue> arg_span(&args[0], args.size());
auto status = func->Evaluate(arg_span, result, arena);
ASSERT_OK(status);
}
void PerformJoinStringWithSeparatorTest(Arena* arena,
std::vector<std::string>& values,
std::string* separator,
CelValue* result) {
auto function = registry_.FindOverloads(
"join", true, {CelValue::Type::kList, CelValue::Type::kString});
ASSERT_EQ(function.size(), 1);
auto func = function[0];
std::vector<CelValue> cel_list;
cel_list.reserve(values.size());
for (const std::string& value : values) {
cel_list.push_back(
CelValue::CreateString(Arena::Create<std::string>(arena, value)));
}
std::vector<CelValue> args = {
CelValue::CreateList(
Arena::Create<ContainerBackedListImpl>(arena, cel_list)),
CelValue::CreateString(separator)};
absl::Span<CelValue> arg_span(&args[0], args.size());
auto status = func->Evaluate(arg_span, result, arena);
ASSERT_OK(status);
}
void PerformLowerAsciiTest(Arena* arena, std::string* value,
CelValue* result) {
auto function =
registry_.FindOverloads("lowerAscii", true, {CelValue::Type::kString});
ASSERT_EQ(function.size(), 1);
auto func = function[0];
std::vector<CelValue> args = {CelValue::CreateString(value)};
absl::Span<CelValue> arg_span(&args[0], args.size());
auto status = func->Evaluate(arg_span, result, arena);
ASSERT_OK(status);
}
CelFunctionRegistry registry_;
Arena arena_;
};
TEST_F(StringExtensionTest, TestStringSplit) {
Arena arena;
CelValue result;
std::string value = "This!!Is!!Test";
std::string delimiter = "!!";
std::vector<std::string> expected = {"This", "Is", "Test"};
ASSERT_NO_FATAL_FAILURE(
PerformSplitStringTest(&arena, &value, &delimiter, &result));
ASSERT_EQ(result.type(), CelValue::Type::kList);
EXPECT_EQ(result.ListOrDie()->size(), 3);
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(result.ListOrDie()->Get(&arena, i).StringOrDie().value(),
expected[i]);
}
}
TEST_F(StringExtensionTest, TestStringSplitEmptyDelimiter) {
Arena arena;
CelValue result;
std::string value = "TEST";
std::string delimiter = "";
std::vector<std::string> expected = {"T", "E", "S", "T"};
ASSERT_NO_FATAL_FAILURE(
PerformSplitStringTest(&arena, &value, &delimiter, &result));
ASSERT_EQ(result.type(), CelValue::Type::kList);
EXPECT_EQ(result.ListOrDie()->size(), 4);
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(result.ListOrDie()->Get(&arena, i).StringOrDie().value(),
expected[i]);
}
}
TEST_F(StringExtensionTest, TestStringSplitWithLimitTwo) {
Arena arena;
CelValue result;
int64_t limit = 2;
std::string value = "This!!Is!!Test";
std::string delimiter = "!!";
std::vector<std::string> expected = {"This", "Is!!Test"};
ASSERT_NO_FATAL_FAILURE(PerformSplitStringWithLimitTest(
&arena, &value, &delimiter, limit, &result));
ASSERT_EQ(result.type(), CelValue::Type::kList);
EXPECT_EQ(result.ListOrDie()->size(), 2);
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(result.ListOrDie()->Get(&arena, i).StringOrDie().value(),
expected[i]);
}
}
TEST_F(StringExtensionTest, TestStringSplitWithLimitOne) {
Arena arena;
CelValue result;
int64_t limit = 1;
std::string value = "This!!Is!!Test";
std::string delimiter = "!!";
ASSERT_NO_FATAL_FAILURE(PerformSplitStringWithLimitTest(
&arena, &value, &delimiter, limit, &result));
ASSERT_EQ(result.type(), CelValue::Type::kList);
EXPECT_EQ(result.ListOrDie()->size(), 1);
EXPECT_EQ(result.ListOrDie()->Get(&arena, 0).StringOrDie().value(), value);
}
TEST_F(StringExtensionTest, TestStringSplitWithLimitZero) {
Arena arena;
CelValue result;
int64_t limit = 0;
std::string value = "This!!Is!!Test";
std::string delimiter = "!!";
ASSERT_NO_FATAL_FAILURE(PerformSplitStringWithLimitTest(
&arena, &value, &delimiter, limit, &result));
ASSERT_EQ(result.type(), CelValue::Type::kList);
EXPECT_EQ(result.ListOrDie()->size(), 0);
}
TEST_F(StringExtensionTest, TestStringSplitWithLimitNegative) {
Arena arena;
CelValue result;
int64_t limit = -1;
std::string value = "This!!Is!!Test";
std::string delimiter = "!!";
std::vector<std::string> expected = {"This", "Is", "Test"};
ASSERT_NO_FATAL_FAILURE(PerformSplitStringWithLimitTest(
&arena, &value, &delimiter, limit, &result));
ASSERT_EQ(result.type(), CelValue::Type::kList);
EXPECT_EQ(result.ListOrDie()->size(), 3);
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(result.ListOrDie()->Get(&arena, i).StringOrDie().value(),
expected[i]);
}
}
TEST_F(StringExtensionTest, TestStringSplitWithLimitAsMaxPossibleSplits) {
Arena arena;
CelValue result;
int64_t limit = 3;
std::string value = "This!!Is!!Test";
std::string delimiter = "!!";
std::vector<std::string> expected = {"This", "Is", "Test"};
ASSERT_NO_FATAL_FAILURE(PerformSplitStringWithLimitTest(
&arena, &value, &delimiter, limit, &result));
ASSERT_EQ(result.type(), CelValue::Type::kList);
EXPECT_EQ(result.ListOrDie()->size(), 3);
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(result.ListOrDie()->Get(&arena, i).StringOrDie().value(),
expected[i]);
}
}
TEST_F(StringExtensionTest,
TestStringSplitWithLimitGreaterThanMaxPossibleSplits) {
Arena arena;
CelValue result;
int64_t limit = 4;
std::string value = "This!!Is!!Test";
std::string delimiter = "!!";
std::vector<std::string> expected = {"This", "Is", "Test"};
ASSERT_NO_FATAL_FAILURE(PerformSplitStringWithLimitTest(
&arena, &value, &delimiter, limit, &result));
ASSERT_EQ(result.type(), CelValue::Type::kList);
EXPECT_EQ(result.ListOrDie()->size(), 3);
for (int i = 0; i < expected.size(); ++i) {
EXPECT_EQ(result.ListOrDie()->Get(&arena, i).StringOrDie().value(),
expected[i]);
}
}
TEST_F(StringExtensionTest, TestStringJoin) {
Arena arena;
CelValue result;
std::vector<std::string> value = {"This", "Is", "Test"};
std::string expected = "ThisIsTest";
ASSERT_NO_FATAL_FAILURE(PerformJoinStringTest(&arena, value, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
TEST_F(StringExtensionTest, TestStringJoinEmptyInput) {
Arena arena;
CelValue result;
std::vector<std::string> value = {};
std::string expected = "";
ASSERT_NO_FATAL_FAILURE(PerformJoinStringTest(&arena, value, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
TEST_F(StringExtensionTest, TestStringJoinWithSeparator) {
Arena arena;
CelValue result;
std::vector<std::string> value = {"This", "Is", "Test"};
std::string separator = "-";
std::string expected = "This-Is-Test";
ASSERT_NO_FATAL_FAILURE(
PerformJoinStringWithSeparatorTest(&arena, value, &separator, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
TEST_F(StringExtensionTest, TestStringJoinWithMultiCharSeparator) {
Arena arena;
CelValue result;
std::vector<std::string> value = {"This", "Is", "Test"};
std::string separator = "--";
std::string expected = "This--Is--Test";
ASSERT_NO_FATAL_FAILURE(
PerformJoinStringWithSeparatorTest(&arena, value, &separator, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
TEST_F(StringExtensionTest, TestStringJoinWithEmptySeparator) {
Arena arena;
CelValue result;
std::vector<std::string> value = {"This", "Is", "Test"};
std::string separator = "";
std::string expected = "ThisIsTest";
ASSERT_NO_FATAL_FAILURE(
PerformJoinStringWithSeparatorTest(&arena, value, &separator, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
TEST_F(StringExtensionTest, TestStringJoinWithSeparatorEmptyInput) {
Arena arena;
CelValue result;
std::vector<std::string> value = {};
std::string separator = "-";
std::string expected = "";
ASSERT_NO_FATAL_FAILURE(
PerformJoinStringWithSeparatorTest(&arena, value, &separator, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
TEST_F(StringExtensionTest, TestLowerAscii) {
Arena arena;
CelValue result;
std::string value = "ThisIs@Test!-5";
std::string expected = "thisis@test!-5";
ASSERT_NO_FATAL_FAILURE(PerformLowerAsciiTest(&arena, &value, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
TEST_F(StringExtensionTest, TestLowerAsciiWithEmptyInput) {
Arena arena;
CelValue result;
std::string value = "";
std::string expected = "";
ASSERT_NO_FATAL_FAILURE(PerformLowerAsciiTest(&arena, &value, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
TEST_F(StringExtensionTest, TestLowerAsciiWithNonAsciiCharacter) {
Arena arena;
CelValue result;
std::string value = "TacoCÆt";
std::string expected = "tacocÆt";
ASSERT_NO_FATAL_FAILURE(PerformLowerAsciiTest(&arena, &value, &result));
ASSERT_EQ(result.type(), CelValue::Type::kString);
EXPECT_EQ(result.StringOrDie().value(), expected);
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/string_extension_func_registrar.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/string_extension_func_registrar_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
3109da71-abb0-461e-846c-6d61ac6ece58 | cpp | google/arolla | id_filter | arolla/array/id_filter.cc | arolla/array/id_filter_test.cc | #include "arolla/array/id_filter.h"
#include <algorithm>
#include <cstdint>
#include <utility>
#include "arolla/memory/buffer.h"
#include "arolla/memory/raw_buffer_factory.h"
#include "arolla/util/fingerprint.h"
namespace arolla {
// Merges two id filters into an upper bound of their union.
// Trivial cases (empty/full/identical inputs) are answered without work;
// when either id list is already dense relative to `size`, a full filter is
// returned instead of materializing the merged id list. Otherwise the two
// sorted id lists are merged, emitting each id once.
IdFilter IdFilter::UpperBoundMergeImpl(int64_t size,
                                       RawBufferFactory* buf_factory,
                                       const IdFilter& a, const IdFilter& b) {
  // An empty filter contributes nothing; a full filter already covers
  // everything; identical filters need no merge.
  if (a.type() == kEmpty || b.type() == kFull) return b;
  if (b.type() == kEmpty || a.type() == kFull) return a;
  if (a.IsSame(b)) return a;
  // Dense inputs: fall back to a full filter rather than merging.
  if (std::max(a.ids().size(), b.ids().size()) >= size * kDenseSparsityLimit) {
    return kFull;
  }
  Buffer<int64_t>::Builder builder(a.ids().size() + b.ids().size(),
                                   buf_factory);
  auto inserter = builder.GetInserter();
  auto iter_a = a.ids().begin();
  auto iter_b = b.ids().begin();
  // Classic merge of two sorted sequences. Ids are normalized by each
  // filter's offset; when both sides hold the same id, both advance and the
  // id is emitted exactly once.
  while (iter_a != a.ids().end() && iter_b != b.ids().end()) {
    int64_t id_a = *iter_a - a.ids_offset();
    int64_t id_b = *iter_b - b.ids_offset();
    int64_t smallest = std::min(id_a, id_b);
    if (id_a == smallest) ++iter_a;
    if (id_b == smallest) ++iter_b;
    inserter.Add(smallest);
  }
  // Drain whichever side still has ids remaining.
  while (iter_a != a.ids().end()) inserter.Add(*(iter_a++) - a.ids_offset());
  while (iter_b != b.ids().end()) inserter.Add(*(iter_b++) - b.ids_offset());
  return IdFilter(size, std::move(builder).Build(inserter));
}
// Folds an IdFilter into a fingerprint hash. The filter type always
// participates; the concrete id buffer participates only for non-full
// filters (a full filter carries no meaningful id list).
void FingerprintHasherTraits<IdFilter>::operator()(
    FingerprintHasher* hasher, const IdFilter& value) const {
  hasher->Combine(value.type());
  if (value.type() == IdFilter::Type::kFull) {
    return;
  }
  hasher->Combine(value.ids());
}
} | #include "arolla/array/id_filter.h"
#include <cstdint>
#include <tuple>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "arolla/memory/buffer.h"
#include "arolla/memory/raw_buffer_factory.h"
namespace arolla {
namespace {
// UpperBoundIntersect semantics: kEmpty absorbs any other argument, kFull
// is the identity, and for two sparse filters the one with fewer candidate
// ids is returned as the (upper bound) intersection.
TEST(IdFilterTest, UpperBoundIntersect) {
  IdFilter empty(IdFilter::kEmpty);
  IdFilter full(IdFilter::kFull);
  // Three sparse filters over a universe of size 5.
  IdFilter a = IdFilter(5, CreateBuffer<int64_t>({3, 4}));
  IdFilter b = IdFilter(5, CreateBuffer<int64_t>({0, 2, 3}));
  IdFilter c = IdFilter(5, CreateBuffer<int64_t>({0, 1, 3, 4}));
  // Single argument is returned unchanged.
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(a).IsSame(a));
  // kEmpty dominates; kFull is neutral.
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(a, empty).IsSame(empty));
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(empty, a).IsSame(empty));
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(a, full).IsSame(a));
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(full, a).IsSame(a));
  // Among sparse filters the smaller id set wins, independent of order.
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(a, b).IsSame(a));
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(b, a).IsSame(a));
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(a, b, c).IsSame(a));
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(c, b, a).IsSame(a));
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(a, empty, c).IsSame(empty));
  EXPECT_TRUE(IdFilter::UpperBoundIntersect(full, b, c).IsSame(b));
}
// UpperBoundMerge semantics: kEmpty is neutral, kFull dominates; merging
// two sparse filters yields the sorted union of their ids, unless the
// result would be dense relative to `size` (size=5 below), in which case
// kFull is returned instead.
TEST(IdFilterTest, UpperBoundMerge) {
  IdFilter empty(IdFilter::kEmpty);
  IdFilter full(IdFilter::kFull);
  IdFilter a = IdFilter(5, CreateBuffer<int64_t>({3, 4}));
  IdFilter b = IdFilter(5, CreateBuffer<int64_t>({0, 2, 3}));
  RawBufferFactory* bf = GetHeapBufferFactory();
  EXPECT_TRUE(IdFilter::UpperBoundMerge(5, bf, a).IsSame(a));
  EXPECT_TRUE(IdFilter::UpperBoundMerge(5, bf, a, empty).IsSame(a));
  EXPECT_TRUE(IdFilter::UpperBoundMerge(5, bf, empty, a).IsSame(a));
  EXPECT_TRUE(IdFilter::UpperBoundMerge(25, bf, a, full).IsSame(full));
  EXPECT_TRUE(IdFilter::UpperBoundMerge(25, bf, a, full, b).IsSame(full));
  // With size=5 the merged id list is dense enough to degrade to kFull.
  EXPECT_TRUE(IdFilter::UpperBoundMerge(5, bf, a, b).IsSame(full));
  // With a larger universe the explicit sorted union is kept (3 appears in
  // both inputs but is emitted once).
  EXPECT_THAT(IdFilter::UpperBoundMerge(25, bf, a, b).ids(),
              testing::ElementsAre(0, 2, 3, 4));
}
// IntersectPartial_ForEach invokes the callback once per id present in BOTH
// filters, passing the common id plus each side's position (offset) of that
// id within its own id list.
TEST(IdFilterTest, IntersectPartial_ForEach) {
  IdFilter a = IdFilter(5, CreateBuffer<int64_t>({3, 4}));
  IdFilter b = IdFilter(5, CreateBuffer<int64_t>({0, 2, 3}));
  IdFilter c = IdFilter(5, CreateBuffer<int64_t>({0, 1, 3, 4}));
  // (id, offset-in-first-filter, offset-in-second-filter)
  using FnArgs = std::tuple<int64_t, int64_t, int64_t>;
  std::vector<FnArgs> res;
  auto fn = [&](int64_t id, int64_t offset1, int64_t offset2) {
    res.push_back({id, offset1, offset2});
  };
  // a ∩ b = {3}: position 0 in a, position 2 in b.
  IdFilter::IntersectPartial_ForEach(a, b, fn);
  EXPECT_EQ(res, (std::vector<FnArgs>{{3, 0, 2}}));
  res.clear();
  // Swapped arguments swap the reported offsets.
  IdFilter::IntersectPartial_ForEach(b, a, fn);
  EXPECT_EQ(res, (std::vector<FnArgs>{{3, 2, 0}}));
  res.clear();
  // a ∩ c = {3, 4}.
  IdFilter::IntersectPartial_ForEach(a, c, fn);
  EXPECT_EQ(res, (std::vector<FnArgs>{{3, 0, 2}, {4, 1, 3}}));
  res.clear();
  IdFilter::IntersectPartial_ForEach(c, a, fn);
  EXPECT_EQ(res, (std::vector<FnArgs>{{3, 2, 0}, {4, 3, 1}}));
  res.clear();
  // b ∩ c = {0, 3}; here the offsets happen to be symmetric.
  IdFilter::IntersectPartial_ForEach(b, c, fn);
  EXPECT_EQ(res, (std::vector<FnArgs>{{0, 0, 0}, {3, 2, 2}}));
  res.clear();
  IdFilter::IntersectPartial_ForEach(c, b, fn);
  EXPECT_EQ(res, (std::vector<FnArgs>{{0, 0, 0}, {3, 2, 2}}));
  res.clear();
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/array/id_filter.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/array/id_filter_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
69fb686e-9d31-45fd-992b-c343fb30b34f | cpp | tensorflow/tensorflow | prefetch_interval_picker | third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker.cc | third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker_test.cc | #include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/heap_simulator/heap_simulator.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/memory_space_assignment.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace memory_space_assignment {
namespace {
// Multiplier applied per retry to the preferred eviction overlap: each
// retry accepts a progressively longer eviction window (see
// PreferredEvictionEndTime, which scales the threshold by
// 1 + kEvictionRetryMultiplier * retry_number_).
constexpr float kEvictionRetryMultiplier = 2.0;

// Number of decreasing-interval candidates explored by
// CostAnalysisPrefetchIntervalPicker::Next(): the earliest-to-end elapsed
// time is divided into this many target steps.
constexpr int kNumExploredDecreasingIntervals = 100;
}  // namespace
// --- InstructionCountPrefetchIntervalPicker --------------------------------
// Measures overlap purely in instruction counts: an interval is acceptable
// when it overlaps at least min_overlap_count_ and at most
// max_overlap_count_ instructions.

// A no-copy alternate-memory allocation is allowed when the live range spans
// no more than max_overlap_count_ instructions.
bool InstructionCountPrefetchIntervalPicker::CanAllocateInAlternateMemoryNoCopy(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  return end_time - start_time <= max_overlap_count_;
}

// Prefer to end an eviction min_overlap_count_ instructions after it starts,
// clamped to the latest permissible end time.
int64_t InstructionCountPrefetchIntervalPicker::PreferredEvictionEndTime(
    const Shape& shape, int64_t start_time, int64_t latest_end_time) const {
  return std::min(start_time + min_overlap_count_, latest_end_time);
}

// A prefetch must start at least min_overlap_count_ instructions before the
// use. `shape`, `start_time` and `use` are unused by this picker.
int64_t InstructionCountPrefetchIntervalPicker::LatestPrefetchStartTime(
    const Shape& shape, int64_t start_time, int64_t end_time,
    const HloUse* use) const {
  return end_time - min_overlap_count_;
}

// Prefer the latest start that still keeps the overlap within
// max_overlap_count_ instructions, but never earlier than allowed.
int64_t InstructionCountPrefetchIntervalPicker::PreferredPrefetchStartTime(
    const Shape& shape, int64_t earliest_prefetch_start_time,
    int64_t latest_prefetch_start_time, int64_t prefetch_end_time) const {
  return std::max(earliest_prefetch_start_time,
                  prefetch_end_time - max_overlap_count_);
}

// This picker has no cost model, so the estimated completion time of a
// prefetch is simply the end of the interval.
int64_t InstructionCountPrefetchIntervalPicker::EstimatedPrefetchEndTime(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  return end_time;
}

// "Elapsed" time for this picker is the number of instructions strictly
// between start and end.
float InstructionCountPrefetchIntervalPicker::GetLogicalIntervalElapsed(
    int64_t start_time, int64_t end_time) const {
  return static_cast<float>(end_time - start_time - 1);
}

// Starts iterating candidate prefetch start times for `use`, beginning at
// `preferred_time` when given, otherwise at the preferred start computed
// from the overlap bounds. Candidates advance forward via Next().
void InstructionCountPrefetchIntervalPicker::Begin(
    const HloUse& use, int64_t start_time, int64_t end_time,
    std::optional<int64_t> preferred_time) {
  end_time_ = end_time;
  const Shape& shape = ShapeUtil::GetSubshape(
      use.instruction->operand(use.operand_number)->shape(), use.operand_index);
  if (preferred_time) {
    current_prefetch_time_ = *preferred_time;
  } else {
    current_prefetch_time_ =
        PreferredPrefetchStartTime(shape, start_time, end_time, end_time);
  }
}

// Returns the current candidate and advances to the next (later) one.
int64_t InstructionCountPrefetchIntervalPicker::Next() {
  CHECK(!Done()) << "Prefetch interval picker's Next() is called even though "
                    "Done() is false";
  return current_prefetch_time_++;
}

// Iteration stops once the remaining overlap drops to min_overlap_count_.
bool InstructionCountPrefetchIntervalPicker::Done() const {
  return end_time_ - current_prefetch_time_ <= min_overlap_count_;
}

// Latest start time that still satisfies the minimum overlap.
int64_t InstructionCountPrefetchIntervalPicker::latest_time() const {
  return end_time_ - min_overlap_count_ - 1;
}

std::string InstructionCountPrefetchIntervalPicker::ToDebugString() const {
  return absl::StrCat("Overlapped HLOs = ", end_time_ - current_prefetch_time_);
}

std::string InstructionCountPrefetchIntervalPicker::ToNoCopyDebugString(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  return absl::StrCat("Overlapped HLOs = ", end_time - start_time);
}
// Builds the per-logical-time lookup tables used by the cost-based picker:
// while/computation nest levels, a cumulative sum of per-instruction elapsed
// times (scaled by while-nest execution multipliers), and a table that lets
// GetMinWhileNestLevel skip backwards to nest-level change points.
// max_async_copy_elapsed_ is the copy time of a buffer that fills alternate
// memory (mem_size_bytes), scaled by max_overlap_to_mem_size_async_copy_ratio.
CostAnalysisPrefetchIntervalPicker::CostAnalysisPrefetchIntervalPicker(
    const CostAnalysis& cost_analysis, float min_overlap_to_async_copy_ratio,
    float preferred_overlap_to_async_copy_ratio,
    float max_overlap_to_mem_size_async_copy_ratio, int64_t mem_size_bytes,
    const Shape* shape_override)
    : while_nest_level_(
          cost_analysis.hlo_live_range().instruction_schedule().size() + 1, 0),
      computation_nest_level_(
          cost_analysis.hlo_live_range().instruction_schedule().size() + 1, 0),
      cost_analysis_(cost_analysis),
      min_overlap_to_async_copy_ratio_(min_overlap_to_async_copy_ratio),
      preferred_overlap_to_async_copy_ratio_(
          preferred_overlap_to_async_copy_ratio),
      max_async_copy_elapsed_(
          cost_analysis_.GetAsyncCopyElapsed(
              // mem_size_bytes / 4 S32 elements == mem_size_bytes bytes.
              ShapeUtil::MakeShape(S32, {mem_size_bytes / 4})) *
          max_overlap_to_mem_size_async_copy_ratio),
      shape_override_(shape_override ? std::optional(*shape_override)
                                     : std::nullopt) {
  instruction_schedule_ =
      &cost_analysis_.hlo_live_range().instruction_schedule();
  // Pass 1: record nest levels and per-instruction elapsed time, indexed by
  // logical schedule time.
  std::vector<float> instructions_elapsed_time(
      instruction_schedule_->size() + 1, 0.0);
  int max_while_nest_level = 0;
  for (const auto& instruction_and_logical_time : *instruction_schedule_) {
    const HloInstruction* instruction = instruction_and_logical_time.first;
    int64_t logical_time = instruction_and_logical_time.second;
    // Logical times can exceed the schedule size; grow the tables lazily.
    if (logical_time >= instructions_elapsed_time.size()) {
      instructions_elapsed_time.resize(logical_time + 1, 0.0);
      while_nest_level_.resize(logical_time + 1, 0);
    }
    int while_nest_level = cost_analysis_.CalculateComputationNestLevel(
        instruction_and_logical_time.first, true);
    while_nest_level_[logical_time] = while_nest_level;
    max_while_nest_level = std::max(max_while_nest_level, while_nest_level);
    int computation_nest_level = cost_analysis_.CalculateComputationNestLevel(
        instruction_and_logical_time.first, false);
    computation_nest_level_[logical_time] = computation_nest_level;
    // The bodies of while/conditional ops are scheduled separately; skip the
    // ops themselves so their cost is not double counted.
    if (instruction->opcode() == HloOpcode::kWhile ||
        instruction->opcode() == HloOpcode::kConditional) {
      continue;
    }
    float elapsed_time = cost_analysis_.GetInstructionElapsed(
        *instruction_and_logical_time.first);
    // Weight by how many times the enclosing while nest executes.
    instructions_elapsed_time[logical_time] =
        elapsed_time * cost_analysis_.GetWhileNestMultiplier(while_nest_level);
  }
  // Pass 2: prefix sums, so any interval's elapsed time is a subtraction.
  float cumsum = 0.0;
  elapsed_time_cumsum_.reserve(instructions_elapsed_time.size());
  for (float elapsed_time : instructions_elapsed_time) {
    cumsum += elapsed_time;
    elapsed_time_cumsum_.push_back(cumsum);
  }
  // Pass 3: for each time i, while_nest_level_change_[i] is the most recent
  // index (< i) at a strictly smaller nest level, enabling
  // GetMinWhileNestLevel to walk change points instead of every index.
  const int64_t size = instructions_elapsed_time.size();
  CHECK_EQ(size, while_nest_level_.size());
  std::vector<int> most_recent_by_level(while_nest_level_.size(), -1);
  int prev_nest_level = 0;
  int change_idx = -1;
  while_nest_level_change_.reserve(size);
  for (int i = 0; i < size; ++i) {
    int nest_level = while_nest_level_[i];
    if (nest_level != prev_nest_level) {
      prev_nest_level = nest_level;
      change_idx = -1;
      for (int smaller_level = 0; smaller_level < nest_level; smaller_level++) {
        change_idx = std::max(change_idx, most_recent_by_level[smaller_level]);
      }
    }
    most_recent_by_level[nest_level] = i;
    while_nest_level_change_.push_back(change_idx);
  }
  // Cache the execution-count multiplier for every nest level seen.
  for (int i = 0; i <= max_while_nest_level; ++i) {
    while_execution_counts_.push_back(cost_analysis_.GetWhileNestMultiplier(i));
  }
}
// Upper bound on how long a buffer may stay in alternate memory. Currently a
// constant computed in the constructor; the per-copy elapsed argument is
// ignored.
float CostAnalysisPrefetchIntervalPicker::GetMaxElapsedInAlternateMemory(
    float async_copy_elapsed) const {
  return max_async_copy_elapsed_;
}

// A buffer may live in alternate memory without a copy when its interval's
// elapsed time is below the maximum allowed residency.
bool CostAnalysisPrefetchIntervalPicker::CanAllocateInAlternateMemoryNoCopy(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  float logical_interval_elapsed =
      GetLogicalIntervalElapsed(start_time, end_time);
  return GetMaxElapsedInAlternateMemory(async_copy_elapsed) >
         logical_interval_elapsed;
}

// Finds the first end time whose elapsed interval covers the preferred
// overlap ratio of the copy time. Each retry (retry_number_) scales the
// threshold up by kEvictionRetryMultiplier, asking for a longer window.
// Note: may return latest_end_time + 1 if no end time qualifies.
int64_t CostAnalysisPrefetchIntervalPicker::PreferredEvictionEndTime(
    const Shape& shape, int64_t start_time, int64_t latest_end_time) const {
  float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  int64_t end_time;
  for (end_time = start_time + 1; end_time <= latest_end_time; ++end_time) {
    float logical_interval_elapsed =
        GetLogicalIntervalElapsed(start_time, end_time);
    if (logical_interval_elapsed >=
        (1 + kEvictionRetryMultiplier * retry_number_) *
            preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed) {
      break;
    }
  }
  return end_time;
}
// Latest time a prefetch may start and still overlap at least
// min_overlap_to_async_copy_ratio_ of the copy time before `end_time`. If
// `use` is given, the elapsed-time saving of serving that use from alternate
// memory is credited toward the overlap. Candidates must sit at the same
// computation nest level as the end time. May return start_time - 1 when no
// candidate qualifies.
int64_t CostAnalysisPrefetchIntervalPicker::LatestPrefetchStartTime(
    const Shape& shape, int64_t start_time, int64_t end_time,
    const HloUse* use) const {
  float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  float inst_elapsed_reduction = 0.0f;
  if (use) {
    // How much faster the using instruction runs when this operand is in
    // alternate memory.
    float elapsed_time =
        cost_analysis_.GetInstructionElapsed(*use->instruction);
    float elapsed_time_in_alternate_mem =
        cost_analysis_.GetInstructionElapsedInAlternateMemory(
            *use->instruction,
            {std::make_pair(use->operand_number, use->operand_index)},
            {});
    inst_elapsed_reduction = elapsed_time - elapsed_time_in_alternate_mem;
  }
  int end_nest_level = computation_nest_level_[end_time];
  float min_interval = min_overlap_to_async_copy_ratio_ * async_copy_elapsed;
  int latest_prefetch_time;
  // Walk backwards from just before the use until the overlap requirement
  // and the nest-level constraint are both satisfied.
  for (latest_prefetch_time = end_time - 1;
       latest_prefetch_time >= start_time &&
       (computation_nest_level_[latest_prefetch_time] != end_nest_level ||
        min_interval >
            GetLogicalIntervalElapsed(latest_prefetch_time, end_time) +
                inst_elapsed_reduction);
       --latest_prefetch_time) {
  }
  return latest_prefetch_time;
}

// Scans [earliest, latest] for the start time (at the end time's nest level)
// whose interval is closest to preferred_overlap_to_async_copy_ratio_ times
// the copy time.
int64_t CostAnalysisPrefetchIntervalPicker::PreferredPrefetchStartTime(
    const Shape& shape, int64_t earliest_prefetch_start_time,
    int64_t latest_prefetch_start_time, int64_t prefetch_end_time) const {
  float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  int64_t preferred_prefetch_start_time = earliest_prefetch_start_time;
  float preferred_interval =
      preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed;
  float best_interval = GetLogicalIntervalElapsed(earliest_prefetch_start_time,
                                                  prefetch_end_time);
  int end_nest_level = computation_nest_level_[prefetch_end_time];
  for (int64_t prefetch_start_time = earliest_prefetch_start_time + 1;
       prefetch_start_time <= latest_prefetch_start_time;
       ++prefetch_start_time) {
    float interval =
        GetLogicalIntervalElapsed(prefetch_start_time, prefetch_end_time);
    // Keep the candidate whose interval is nearest the preferred one.
    if (computation_nest_level_[prefetch_start_time] == end_nest_level &&
        std::abs(preferred_interval - interval) <
            std::abs(preferred_interval - best_interval)) {
      best_interval = interval;
      preferred_prefetch_start_time = prefetch_start_time;
    }
  }
  return preferred_prefetch_start_time;
}

// Walks the proposed end time backwards until it lands on the same
// computation nest level as the original end time, so the copy-done stays in
// the original computation.
int64_t CostAnalysisPrefetchIntervalPicker::LatestPrefetchEndTime(
    int64_t original_prefetch_end_time,
    int64_t proposed_prefetch_end_time) const {
  int64_t original_nest_level =
      computation_nest_level_[original_prefetch_end_time];
  int64_t new_prefetch_end_time;
  for (new_prefetch_end_time = proposed_prefetch_end_time;
       computation_nest_level_[new_prefetch_end_time] != original_nest_level;
       --new_prefetch_end_time) {
  }
  return new_prefetch_end_time;
}
// Estimates when a prefetch started at start_time completes: the first
// logical time whose elapsed interval covers the async copy time, capped at
// end_time.
int64_t CostAnalysisPrefetchIntervalPicker::EstimatedPrefetchEndTime(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  int64_t estimated_end_time;
  for (estimated_end_time = start_time + 1; estimated_end_time < end_time;
       ++estimated_end_time) {
    float interval = GetLogicalIntervalElapsed(start_time, estimated_end_time);
    if (interval >= async_copy_elapsed) {
      break;
    }
  }
  return estimated_end_time;
}
// Initializes the candidate iteration for `use`: computes the copy elapsed
// time, the saving from reading the operand in alternate memory, and the
// [earliest, latest] window of viable prefetch start times. Iteration then
// explores outward from a starting point (the caller's preferred time if it
// is not too late, else the computed preferred start) via two iterators —
// one increasing toward the latest time, one decreasing toward the earliest.
void CostAnalysisPrefetchIntervalPicker::Begin(
    const HloUse& use, int64_t start_time, int64_t end_time,
    std::optional<int64_t> preferred_time) {
  const Shape& shape = ShapeUtil::GetSubshape(
      use.instruction->operand(use.operand_number)->shape(), use.operand_index);
  async_copy_elapsed_ = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  float elapsed_time = cost_analysis_.GetInstructionElapsed(*use.instruction);
  float elapsed_time_in_alternate_mem =
      cost_analysis_.GetInstructionElapsedInAlternateMemory(
          *use.instruction,
          {std::make_pair(use.operand_number, use.operand_index)},
          {});
  inst_elapsed_reduction_ = elapsed_time - elapsed_time_in_alternate_mem;
  end_logical_time_ = end_time;
  int end_nest_level = computation_nest_level_[end_logical_time_];
  // Minimum overlap a prefetch must have before the use.
  float min_interval = min_overlap_to_async_copy_ratio_ * async_copy_elapsed_;
  latest_prefetch_time_ =
      LatestPrefetchStartTime(shape, start_time, end_time, &use);
  // Maximum time the buffer may occupy alternate memory; bounds how early
  // the prefetch may start.
  float max_interval = GetMaxElapsedInAlternateMemory(async_copy_elapsed_);
  for (earliest_prefetch_time_ = start_time;
       earliest_prefetch_time_ < latest_prefetch_time_ &&
       (computation_nest_level_[earliest_prefetch_time_] != end_nest_level ||
        max_interval < GetLogicalIntervalElapsed(earliest_prefetch_time_,
                                                 end_logical_time_));
       ++earliest_prefetch_time_) {
  }
  // Empty window: set the iterators so Done() is immediately true.
  if (earliest_prefetch_time_ > latest_prefetch_time_) {
    increasing_prefetch_time_iterator_ = earliest_prefetch_time_;
    decreasing_prefetch_time_iterator_ = latest_prefetch_time_;
    CHECK(Done());
    return;
  }
  int64_t starting_prefetch_time;
  if (preferred_time && *preferred_time <= latest_prefetch_time_) {
    starting_prefetch_time = *preferred_time;
  } else {
    starting_prefetch_time =
        PreferredPrefetchStartTime(shape, earliest_prefetch_time_,
                                   latest_prefetch_time_, end_logical_time_);
  }
  float preferred_interval =
      preferred_overlap_to_async_copy_ratio_ * async_copy_elapsed_;
  VLOG(4) << "Interval min/max/preferred = " << min_interval << " "
          << max_interval << " " << preferred_interval
          << " prefetch time earliest/latest/starting = "
          << earliest_prefetch_time_ << " " << latest_prefetch_time_ << " "
          << starting_prefetch_time;
  increasing_prefetch_time_iterator_ = starting_prefetch_time;
  decreasing_prefetch_time_iterator_ = starting_prefetch_time;
  using_increasing_prefetch_time_iterator_ = true;
  // Consume the duplicate starting candidate shared by both iterators.
  Next();
}
// Returns the next candidate prefetch start time, alternating between the
// increasing and decreasing iterators set up by Begin(). Both directions
// skip times whose computation nest level differs from the end time's. Once
// the increasing side is exhausted, the decreasing side additionally jumps
// in elapsed-time steps of 1/kNumExploredDecreasingIntervals of the full
// window, rather than visiting every time.
int64_t CostAnalysisPrefetchIntervalPicker::Next() {
  CHECK(!Done()) << "Prefetch interval picker's Next() is called even though "
                    "Done() is false";
  if (using_increasing_prefetch_time_iterator_) {
    int64_t prefetch_time = increasing_prefetch_time_iterator_++;
    // Skip times at a different nest level than the use.
    while (increasing_prefetch_time_iterator_ <= latest_prefetch_time_ &&
           computation_nest_level_[increasing_prefetch_time_iterator_] !=
               computation_nest_level_[end_logical_time_]) {
      ++increasing_prefetch_time_iterator_;
    }
    // Alternate to the decreasing side if it still has candidates.
    if (decreasing_prefetch_time_iterator_ >= earliest_prefetch_time_) {
      using_increasing_prefetch_time_iterator_ = false;
    }
    return prefetch_time;
  } else {
    int64_t prefetch_time = decreasing_prefetch_time_iterator_--;
    float next_target_interval_elapsed = 0;
    // With the increasing side exhausted, sample the remaining (earlier)
    // times at fixed elapsed-time increments instead of one by one.
    if (increasing_prefetch_time_iterator_ > latest_prefetch_time_) {
      next_target_interval_elapsed =
          GetLogicalIntervalElapsed(prefetch_time, end_logical_time_) +
          (GetLogicalIntervalElapsed(earliest_prefetch_time_,
                                     end_logical_time_) /
           kNumExploredDecreasingIntervals);
      VLOG(3) << "Next target interval elapsed: "
              << next_target_interval_elapsed;
    }
    while (decreasing_prefetch_time_iterator_ >= earliest_prefetch_time_ &&
           (computation_nest_level_[decreasing_prefetch_time_iterator_] !=
                computation_nest_level_[end_logical_time_] ||
            GetLogicalIntervalElapsed(decreasing_prefetch_time_iterator_,
                                      end_logical_time_) <
                next_target_interval_elapsed)) {
      --decreasing_prefetch_time_iterator_;
    }
    // Alternate back to the increasing side if it still has candidates.
    if (increasing_prefetch_time_iterator_ <= latest_prefetch_time_) {
      using_increasing_prefetch_time_iterator_ = true;
    }
    return prefetch_time;
  }
}

// Iteration is finished once both iterators have left the viable window.
bool CostAnalysisPrefetchIntervalPicker::Done() const {
  return increasing_prefetch_time_iterator_ > latest_prefetch_time_ &&
         decreasing_prefetch_time_iterator_ < earliest_prefetch_time_;
}
// Latest viable prefetch start time computed by Begin().
int64_t CostAnalysisPrefetchIntervalPicker::latest_time() const {
  return latest_prefetch_time_;
}

// Records the allocation retry count; PreferredEvictionEndTime widens its
// eviction window proportionally to this value.
void CostAnalysisPrefetchIntervalPicker::SetRetryNumber(int retry_number) {
  retry_number_ = retry_number;
}

// Minimum while-nest level over [start_time, end_time]. Instead of scanning
// every index, follows while_nest_level_change_ (built in the constructor)
// backwards through the nest-level change points inside the interval.
int CostAnalysisPrefetchIntervalPicker::GetMinWhileNestLevel(
    int64_t start_time, int64_t end_time) const {
  int min_nest_level =
      std::min(while_nest_level_[start_time], while_nest_level_[end_time]);
  int change_idx = while_nest_level_change_[end_time];
  while (change_idx >= start_time) {
    min_nest_level = std::min(min_nest_level, while_nest_level_[change_idx]);
    change_idx = while_nest_level_change_[change_idx];
  }
  return min_nest_level;
}
// Elapsed wall time between two logical times, read from the prefix-sum
// table built in the constructor. Because per-instruction times were scaled
// up by their while-nest multipliers, the result is scaled back down by the
// execution count of the shallowest while nest spanning the interval.
float CostAnalysisPrefetchIntervalPicker::GetLogicalIntervalElapsed(
    int64_t start_time, int64_t end_time) const {
  CHECK_LE(start_time, end_time);
  if (start_time == end_time) {
    return 0.0;
  }
  // Negative start times are clamped to the beginning of the schedule.
  if (start_time < 0) {
    start_time = 0;
  }
  int interval_while_nest_level = GetMinWhileNestLevel(start_time, end_time);
  return (elapsed_time_cumsum_[end_time - 1] -
          elapsed_time_cumsum_[start_time]) /
         while_execution_counts_[interval_while_nest_level];
}

// Debug string describing the candidate currently under consideration.
std::string CostAnalysisPrefetchIntervalPicker::ToDebugString() const {
  int current_logical_prefetch_time = using_increasing_prefetch_time_iterator_
                                          ? increasing_prefetch_time_iterator_
                                          : decreasing_prefetch_time_iterator_;
  float logical_interval_elapsed = GetLogicalIntervalElapsed(
      current_logical_prefetch_time, end_logical_time_);
  return absl::StrCat(
      "Async copy elapsed (s) = ", async_copy_elapsed_,
      ", inst elapsed reduction (s) = ", inst_elapsed_reduction_,
      ", logical interval elapsed (s) = ", logical_interval_elapsed,
      ", interval = (", current_logical_prefetch_time, ", ", end_logical_time_,
      ")");
}

// Debug string for a prospective no-copy allocation over [start, end).
std::string CostAnalysisPrefetchIntervalPicker::ToNoCopyDebugString(
    const Shape& shape, int64_t start_time, int64_t end_time) const {
  float async_copy_elapsed = cost_analysis_.GetAsyncCopyElapsed(
      shape_override_ ? *shape_override_ : shape);
  float logical_interval_elapsed =
      GetLogicalIntervalElapsed(start_time, end_time);
  return absl::StrCat(
      "Async copy elapsed (s) = ", async_copy_elapsed,
      ", logical interval elapsed (s) = ", logical_interval_elapsed);
}

// Delegates the benefit estimate for placing this buffer interval in
// alternate memory to the cost analysis' memory-boundedness metric.
std::optional<float>
CostAnalysisPrefetchIntervalPicker::BufferIntervalAlternateMemoryBenefit(
    const GlobalDecreasingSizeBestFitHeap<HloValue>::BufferInterval& interval)
    const {
  return cost_analysis_.GetMemoryBoundedness(interval);
}
}
} | #include "xla/service/memory_space_assignment/prefetch_interval_picker.h"
#include <cstdint>
#include <optional>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/service/memory_space_assignment/cost_analysis.h"
#include "xla/service/memory_space_assignment/testing_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace memory_space_assignment {
namespace {
// Pointer width assumed when sizing buffers in these tests.
constexpr int64_t kPointerSize = 8;

// Shape-size callback handed to HloCostAnalysis below.
int64_t ShapeSize(const Shape& shape) {
  return ShapeUtil::ByteSizeOf(shape, kPointerSize);
}

// All tests share the plain HloTestBase fixture.
using CostAnalysisPrefetchIntervalPickerTest = HloTestBase;
// On a long chain of cheap negates, candidates are expected to be explored
// outward from the preferred start (15), alternating later/earlier until
// both sides of the viable window are exhausted. A window that is too small
// (19..22 with min ratio 1.0) yields no candidates at all.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, PrefetchIntervalOrder) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true

  ENTRY Entry {
    param0 = f32[2,4] parameter(0)
    a = f32[2,4] negate(param0)
    b = f32[2,4] negate(a)
    c = f32[2,4] negate(b)
    d = f32[2,4] negate(c)
    e = f32[2,4] negate(d)
    f = f32[2,4] negate(e)
    g = f32[2,4] negate(f)
    h = f32[2,4] negate(g)
    i = f32[2,4] negate(h)
    j = f32[2,4] negate(i)
    k = f32[2,4] negate(j)
    l = f32[2,4] negate(k)
    m = f32[2,4] negate(l)
    n = f32[2,4] negate(m)
    o = f32[2,4] negate(n)
    p = f32[2,4] negate(o)
    q = f32[2,4] negate(p)
    r = f32[2,4] negate(q)
    s = f32[2,4] negate(r)
    t = f32[2,4] negate(s)
    u = f32[2,4] negate(t)
    ROOT v = f32[2,4] add(u, param0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      /*min_overlap_to_async_copy_ratio=*/1.0,
      /*preferred_overlap_to_async_copy_ratio=*/2.0,
      /*max_overlap_to_mem_size_async_copy_ratio=*/4.0,
      /*mem_size_bytes=*/32);
  HloInstruction* root = module->entry_computation()->root_instruction();
  // Pick intervals for param0's use as operand 1 of the root add.
  const HloUse use{root, /*operand_number=*/1, /*operand_index=*/{}};
  interval_picker.Begin(use, /*start_time=*/0, /*end_time=*/22, std::nullopt);
  // Candidates alternate around the preferred start time.
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 15);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 16);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 14);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 17);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 13);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 18);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 12);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 11);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 10);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 9);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_TRUE(interval_picker.Done());
  // A window too short for the minimum overlap produces no candidates.
  interval_picker.Begin(use, /*start_time=*/19, /*end_time=*/22, std::nullopt);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_TRUE(interval_picker.Done());
}
// Same exploration, but with a while loop in the schedule: times inside the
// while body are at a different computation nest level than the use and are
// skipped, so the candidate sequence jumps over the loop.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, PrefetchIntervalOrderWhile) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true

  while_condition {
    param1 = (f32[2,4]) parameter(0)
    ROOT cond = pred[] constant(true)
  }

  while_body {
    param2 = (f32[2,4]) parameter(0)
    gte2 = f32[2,4] get-tuple-element(param2), index=0
    add = f32[2,4] add(gte2, gte2)
    ROOT tuple2 = (f32[2,4]) tuple(add)
  }

  ENTRY Entry {
    param0 = f32[2,4] parameter(0)
    a = f32[2,4] negate(param0)
    b = f32[2,4] negate(a)
    c = f32[2,4] negate(b)
    d = f32[2,4] negate(c)
    e = f32[2,4] negate(d)
    f = f32[2,4] negate(e)
    g = f32[2,4] negate(f)
    h = f32[2,4] negate(g)
    i = f32[2,4] negate(h)
    j = f32[2,4] negate(i)
    k = f32[2,4] negate(j)
    l = f32[2,4] negate(k)
    m = f32[2,4] negate(l)
    n = f32[2,4] negate(m)
    o = f32[2,4] negate(n)
    p = f32[2,4] negate(o)
    q = f32[2,4] negate(p)
    tuple = (f32[2,4]) tuple(q)
    while = (f32[2,4]) while(tuple), condition=while_condition, body=while_body
    gte1 = f32[2,4] get-tuple-element(while), index=0
    r = f32[2,4] negate(gte1)
    s = f32[2,4] negate(r)
    t = f32[2,4] negate(s)
    u = f32[2,4] negate(t)
    ROOT v = f32[2,4] add(u, param0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      /*min_overlap_to_async_copy_ratio=*/1.0,
      /*preferred_overlap_to_async_copy_ratio=*/2.0,
      /*max_overlap_to_mem_size_async_copy_ratio=*/12.0,
      /*mem_size_bytes=*/32);
  // Sanity-check the fake cost model's while multiplier.
  EXPECT_EQ(cost_analysis->GetWhileNestMultiplier(1), 5.0);
  HloInstruction* root = module->entry_computation()->root_instruction();
  const HloUse use{root, /*operand_number=*/1, /*operand_index=*/{}};
  interval_picker.Begin(use, /*start_time=*/0, /*end_time=*/31, std::nullopt);
  // Times inside the while body are skipped: candidates jump from 26 to 18.
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 25);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 26);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 18);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 27);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_EQ(interval_picker.Next(), 17);
  LOG(INFO) << interval_picker.ToDebugString();
  EXPECT_TRUE(interval_picker.Done());
}
// With two nested while loops between the parameter and the use, the latest
// viable prefetch start must fall before the outer while (time 4) rather
// than anywhere inside the loop bodies.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, NestedWhile) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true

  while_condition.2 {
    param1 = (f32[2,4]) parameter(0)
    ROOT cond = pred[] constant(true)
  }

  while_body.2 {
    param2 = (f32[2,4]) parameter(0)
    gte2 = f32[2,4] get-tuple-element(param2), index=0
    add = f32[2,4] add(gte2, gte2)
    ROOT tuple2 = (f32[2,4]) tuple(add)
  }

  while_condition.1 {
    param3 = (f32[2,4]) parameter(0)
    ROOT cond = pred[] constant(true)
  }

  while_body.1 {
    param4 = (f32[2,4]) parameter(0)
    gte1 = f32[2,4] get-tuple-element(param4), index=0
    add1 = f32[2,4] add(gte1, gte1)
    tuple1 = (f32[2,4]) tuple(add1)
    while = (f32[2,4]) while(tuple1), condition=while_condition.2, body=while_body.2
    gte2 = f32[2,4] get-tuple-element(while), index=0
    add2 = f32[2,4] add(gte2, gte2)
    ROOT tuple2 = (f32[2,4]) tuple(add2)
  }

  ENTRY Entry {
    param0 = f32[2,4] parameter(0)
    a = f32[2,4] negate(param0)
    b = f32[2,4] negate(a)
    c = f32[2,4] negate(b)
    tuple = (f32[2,4]) tuple(c)
    while = (f32[2,4]) while(tuple), condition=while_condition.1, body=while_body.1
    gte1 = f32[2,4] get-tuple-element(while), index=0
    ROOT root = f32[2,4] add(gte1, param0)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      /*min_overlap_to_async_copy_ratio=*/1.0,
      /*preferred_overlap_to_async_copy_ratio=*/2.0,
      /*max_overlap_to_mem_size_async_copy_ratio=*/12.0,
      /*mem_size_bytes=*/32);
  HloInstruction* root = module->entry_computation()->root_instruction();
  const HloUse use{root, /*operand_number=*/1, /*operand_index=*/{}};
  const Shape& shape = root->operand(1)->shape();
  // The latest start must be outside the nested whiles, i.e. at time 4.
  EXPECT_EQ(interval_picker.LatestPrefetchStartTime(shape, /*start_time=*/0,
                                                    /*end_time=*/23, &use),
            4);
}
// Two back-to-back conditionals: for an operand of the second conditional,
// the latest prefetch start must land before the first conditional's body
// (strictly earlier than time 5), not inside it.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, ConsecutiveConditionals) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true

  true_computation.0 {
    p0 = (f32[3]{0}) parameter(0)
    gte = f32[3]{0} get-tuple-element(p0), index=0
    ROOT neg1 = f32[3]{0} negate(gte)
  }

  false_computation.0 {
    p0 = (f32[3]{0}) parameter(0)
    gte = f32[3]{0} get-tuple-element(p0), index=0
    ROOT neg2 = f32[3]{0} negate(gte)
  }

  true_computation.1 {
    p0 = (f32[3]{0}) parameter(0)
    gte = f32[3]{0} get-tuple-element(p0), index=0
    ROOT neg1 = f32[3]{0} negate(gte)
  }

  false_computation.1 {
    p0 = (f32[3]{0}) parameter(0)
    gte = f32[3]{0} get-tuple-element(p0), index=0
    ROOT neg2 = f32[3]{0} negate(gte)
  }

  ENTRY entry {
    p0 = f32[3]{0} parameter(0)
    p1 = f32[3]{0} parameter(1)
    p2 = pred[] parameter(2)
    tuple0 = (f32[3]{0}) tuple(p0)
    tuple1 = (f32[3]{0}) tuple(p1)
    conditional0 = f32[3]{0} conditional(p2, tuple0, tuple0), true_computation=true_computation.0, false_computation=false_computation.0
    conditional1 = f32[3]{0} conditional(p2, tuple1, tuple1), true_computation=true_computation.1, false_computation=false_computation.1
    ROOT tuple2 = (f32[3]{0}, f32[3]{0}) tuple(conditional0, conditional1)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      /*min_overlap_to_async_copy_ratio=*/1.0,
      /*preferred_overlap_to_async_copy_ratio=*/2.0,
      /*max_overlap_to_mem_size_async_copy_ratio=*/12.0,
      /*mem_size_bytes=*/32);
  LOG(INFO) << module->ToString();
  HloInstruction* conditional1 =
      module->entry_computation()->GetInstructionWithName("conditional1");
  // Use of the tuple operand (index {0}) of the second conditional.
  const HloUse use{conditional1, /*operand_number=*/1, /*operand_index=*/{0}};
  const Shape& shape =
      module->entry_computation()->parameter_instruction(0)->shape();
  // Must start before the first conditional's body, i.e. earlier than 5.
  EXPECT_LT(interval_picker.LatestPrefetchStartTime(shape, /*start_time=*/0,
                                                    /*end_time=*/11, &use),
            5);
}
// When the requested window is shorter than the copy needs, the picker must
// still propose the earliest time in the window (rather than reporting
// Done() with no candidates). The tanh is made artificially expensive so
// that a single overlapped instruction covers the copy.
TEST_F(CostAnalysisPrefetchIntervalPickerTest, EarliestLatestWindowTooSmall) {
  absl::string_view hlo_string = R"(
  HloModule bug, is_scheduled=true

  ENTRY Entry {
    param0 = f32[2,4] parameter(0)
    negate = f32[2,4] negate(param0)
    tanh = f32[2,4] tanh(param0)
    ROOT add = f32[2,4] add(tanh, negate)
  }
  )";
  TF_ASSERT_OK_AND_ASSIGN(auto module,
                          ParseAndReturnVerifiedModule(hlo_string));
  HloCostAnalysis hlo_cost_analysis(ShapeSize);
  CostAnalysisOptions options;
  HloCostAnalysisCosts hlo_cost_analysis_costs(hlo_cost_analysis);
  TF_ASSERT_OK_AND_ASSIGN(
      auto cost_analysis,
      FakeCostAnalysis::Create(hlo_cost_analysis_costs, *module, options));
  // Make tanh 20x as expensive as every other instruction.
  cost_analysis->SetOverrideForGetInstructionElapsed(
      [](const HloInstruction& hlo) {
        if (hlo.opcode() == HloOpcode::kTanh) {
          return 20.0;
        }
        return 1.0;
      });
  CostAnalysisPrefetchIntervalPicker interval_picker(
      *cost_analysis,
      /*min_overlap_to_async_copy_ratio=*/1.0,
      /*preferred_overlap_to_async_copy_ratio=*/2.0,
      /*max_overlap_to_mem_size_async_copy_ratio=*/12.0,
      /*mem_size_bytes=*/32);
  HloInstruction* root = module->entry_computation()->root_instruction();
  const HloUse use{root, /*operand_number=*/1, /*operand_index=*/{}};
  interval_picker.Begin(use, /*start_time=*/1, /*end_time=*/3, std::nullopt);
  LOG(INFO) << interval_picker.ToDebugString();
  // Exactly one candidate — the earliest time in the window — is proposed.
  EXPECT_FALSE(interval_picker.Done());
  EXPECT_EQ(interval_picker.Next(), 1);
  EXPECT_TRUE(interval_picker.Done());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/memory_space_assignment/prefetch_interval_picker_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
46d571f3-161a-4386-84a9-90482896c9ef | cpp | tensorflow/tensorflow | sparse_add_op | tensorflow/core/kernels/sparse_add_op.cc | tensorflow/core/kernels/sparse_add_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/op_requires.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
template <typename T, typename Treal>
class SparseAddOp : public OpKernel {
public:
explicit SparseAddOp(OpKernelConstruction *ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext *ctx) override {
const Tensor *a_indices, *b_indices, *a_values_t, *b_values_t, *a_shape,
*b_shape, *thresh_t;
OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices));
OP_REQUIRES_OK(ctx, ctx->input("b_indices", &b_indices));
OP_REQUIRES(ctx,
TensorShapeUtils::IsMatrix(a_indices->shape()) &&
TensorShapeUtils::IsMatrix(b_indices->shape()),
errors::InvalidArgument(
"Input indices should be matrices but received shapes: ",
a_indices->shape().DebugString(), " and ",
b_indices->shape().DebugString()));
const int64_t a_nnz = a_indices->dim_size(0);
const int64_t b_nnz = b_indices->dim_size(0);
const int num_dims = a_indices->dim_size(1);
OP_REQUIRES(ctx, b_indices->dim_size(1) == num_dims,
errors::InvalidArgument(
"Input indices must have the same dimension, got ",
num_dims, " and ", b_indices->dim_size(1)));
OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t));
OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(a_values_t->shape()) &&
TensorShapeUtils::IsVector(b_values_t->shape()),
errors::InvalidArgument(
"Input values should be vectors but received shapes: ",
a_values_t->shape().DebugString(), " and ",
b_values_t->shape().DebugString()));
auto a_values = ctx->input(1).vec<T>();
auto b_values = ctx->input(4).vec<T>();
OP_REQUIRES(
ctx, a_values.size() == a_nnz && b_values.size() == b_nnz,
errors::InvalidArgument("Expected ", a_nnz, " and ", b_nnz,
" non-empty input values, got ",
a_values.size(), " and ", b_values.size()));
OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape));
OP_REQUIRES_OK(ctx, ctx->input("b_shape", &b_shape));
OP_REQUIRES(ctx,
TensorShapeUtils::IsVector(a_shape->shape()) &&
TensorShapeUtils::IsVector(b_shape->shape()),
errors::InvalidArgument(
"Input shapes should be a vector but received shapes ",
a_shape->shape().DebugString(), " and ",
b_shape->shape().DebugString()));
OP_REQUIRES(
ctx, a_shape->NumElements() == num_dims,
errors::InvalidArgument("Second dimension of a_indices and length of "
"a_shape must match, got ",
num_dims, " and ", a_shape->NumElements()));
OP_REQUIRES(ctx, num_dims > 0,
errors::InvalidArgument("Tesors must not be empty"));
OP_REQUIRES(
ctx, a_shape->IsSameSize(*b_shape),
errors::InvalidArgument(
"Operands do not have the same ranks; got shapes: ",
a_shape->SummarizeValue(10), " and ", b_shape->SummarizeValue(10)));
const auto a_shape_flat = a_shape->flat<int64_t>();
const auto b_shape_flat = b_shape->flat<int64_t>();
for (int i = 0; i < a_shape->NumElements(); ++i) {
OP_REQUIRES(ctx, a_shape_flat(i) == b_shape_flat(i),
errors::InvalidArgument(
"Operands' shapes do not match: got ", a_shape_flat(i),
" and ", b_shape_flat(i), " for dimension ", i));
}
OP_REQUIRES_OK(ctx, ctx->input("thresh", &thresh_t));
OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(thresh_t->shape()),
errors::InvalidArgument(
"The magnitude threshold must be a scalar: got shape ",
thresh_t->shape().DebugString()));
const Treal thresh = thresh_t->scalar<Treal>()();
auto a_indices_mat = a_indices->matrix<int64_t>();
auto b_indices_mat = b_indices->matrix<int64_t>();
std::vector<std::pair<bool, int64>> entries_to_copy;
entries_to_copy.reserve(a_nnz + b_nnz);
std::vector<T> out_values;
int64_t i = 0, j = 0;
T s;
while (i < a_nnz && j < b_nnz) {
switch (sparse::DimComparator::cmp(a_indices_mat, b_indices_mat, i, j,
num_dims)) {
case -1:
entries_to_copy.emplace_back(true, i);
out_values.push_back(a_values(i));
++i;
break;
case 0:
s = a_values(i) + b_values(j);
if (thresh <= std::abs(s)) {
entries_to_copy.emplace_back(true, i);
out_values.push_back(s);
}
++i;
++j;
break;
case 1:
entries_to_copy.emplace_back(false, j);
out_values.push_back(b_values(j));
++j;
break;
}
}
#define HANDLE_LEFTOVERS(A_OR_B, IDX, IS_A) \
while (IDX < A_OR_B##_nnz) { \
entries_to_copy.emplace_back(IS_A, IDX); \
out_values.push_back(A_OR_B##_values(IDX)); \
++IDX; \
}
HANDLE_LEFTOVERS(a, i, true);
HANDLE_LEFTOVERS(b, j, false);
#undef HANDLE_LEFTOVERS
const int64_t sum_nnz = out_values.size();
Tensor *out_indices_t, *out_values_t;
OP_REQUIRES_OK(ctx,
ctx->allocate_output(0, TensorShape({sum_nnz, num_dims}),
&out_indices_t));
OP_REQUIRES_OK(
ctx, ctx->allocate_output(1, TensorShape({sum_nnz}), &out_values_t));
auto out_indices_mat = out_indices_t->matrix<int64_t>();
auto out_values_flat = out_values_t->vec<T>();
for (i = 0; i < sum_nnz; ++i) {
const bool from_a = entries_to_copy[i].first;
const int64_t idx = entries_to_copy[i].second;
out_indices_mat.chip<0>(i) =
from_a ? a_indices_mat.chip<0>(idx) : b_indices_mat.chip<0>(idx);
}
if (sum_nnz > 0) {
std::copy_n(out_values.begin(), sum_nnz, &out_values_flat(0));
}
ctx->set_output(2, *a_shape);
}
};
#define REGISTER_KERNELS(type, thresh_type) \
REGISTER_KERNEL_BUILDER( \
Name("SparseAdd").Device(DEVICE_CPU).TypeConstraint<type>("T"), \
SparseAddOp<type, thresh_type>)
REGISTER_KERNELS(float, float);
REGISTER_KERNELS(double, double);
REGISTER_KERNELS(int64_t, int64);
REGISTER_KERNELS(int32, int32);
REGISTER_KERNELS(int16, int16);
REGISTER_KERNELS(int8, int8);
REGISTER_KERNELS(complex64, float);
REGISTER_KERNELS(complex128, double);
#undef REGISTER_KERNELS
} | #include "tensorflow/core/framework/allocator.h"
#include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class SparseAddOpTest : public OpsTestBase {
protected:
template <typename T>
void MakeOp() {
DataType value_type = tensorflow::DataTypeToEnum<T>::value;
DataType thresh_type = value_type;
if (std::is_same<T, std::complex<float>>::value) {
thresh_type = DT_FLOAT;
} else if (std::is_same<T, std::complex<double>>::value) {
thresh_type = DT_DOUBLE;
}
TF_ASSERT_OK(NodeDefBuilder("sparseadd", "SparseAdd")
.Input(FakeInput(DT_INT64))
.Input(FakeInput(value_type))
.Input(FakeInput(DT_INT64))
.Input(FakeInput(DT_INT64))
.Input(FakeInput(value_type))
.Input(FakeInput(DT_INT64))
.Input(FakeInput(thresh_type))
.Attr("Treal", thresh_type)
.Finalize(node_def()));
TF_ASSERT_OK(InitOp());
}
};
TEST_F(SparseAddOpTest, TwoD_AddSparseTensorWithSelf) {
MakeOp<float>();
const auto indices_shape = TensorShape({4, 2});
std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1};
const absl::Span<const int64_t> indices(in);
std::initializer_list<int64_t> sh{3, 2};
const absl::Span<const int64_t> shape(sh);
#define ADD_TENSOR_INPUT() \
AddInputFromArray<int64_t>(indices_shape, indices); \
AddInputFromArray<float>(TensorShape({4}), {1, 2, 3, 4}); \
AddInputFromArray<int64_t>(TensorShape({2}), shape);
ADD_TENSOR_INPUT();
ADD_TENSOR_INPUT();
AddInputFromArray<float>(TensorShape({}), {0.0});
#undef ADD_TENSOR_INPUT
TF_ASSERT_OK(RunOpKernel());
Tensor expected_indices(allocator(), DT_INT64, indices_shape);
test::FillValues<int64_t>(&expected_indices, indices);
test::ExpectTensorEqual<int64_t>(expected_indices, *GetOutput(0));
Tensor expected_values(allocator(), DT_FLOAT, {4});
test::FillValues<float>(&expected_values, {2, 4, 6, 8});
test::ExpectTensorEqual<float>(expected_values, *GetOutput(1));
Tensor expected_shape(allocator(), DT_INT64,
{static_cast<int64_t>(shape.size())});
test::FillValues<int64_t>(&expected_shape, shape);
test::ExpectTensorEqual<int64_t>(expected_shape, *GetOutput(2));
}
#define RUN_TEST(VALTYPE) \
TEST_F(SparseAddOpTest, TwoD_AddSparseTensorsWithDiffIndices_##VALTYPE) { \
MakeOp<VALTYPE>(); \
DataType val_dtype = tensorflow::DataTypeToEnum<VALTYPE>::value; \
\
const auto indices_shape = TensorShape({4, 2}); \
std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; \
const gtl::ArraySlice<int64_t> indices(in); \
std::initializer_list<int64_t> sh{3, 2}; \
const gtl::ArraySlice<int64_t> shape(sh); \
\
AddInputFromArray<int64_t>(indices_shape, indices); \
AddInputFromArray<VALTYPE>(TensorShape({4}), {1, 2, 3, 4}); \
AddInputFromArray<int64_t>(TensorShape({2}), shape); \
\
AddInputFromArray<int64_t>(TensorShape({2, 2}), {0, 0, 1, 1}); \
AddInputFromArray<VALTYPE>(TensorShape({2}), {5, 6}); \
AddInputFromArray<int64_t>(TensorShape({2}), shape); \
\
if (val_dtype == DT_COMPLEX64) { \
AddInputFromArray<float>(TensorShape({}), {0}); \
} else if (val_dtype == DT_COMPLEX128) { \
AddInputFromArray<double>(TensorShape({}), {0}); \
} else { \
AddInputFromArray<VALTYPE>(TensorShape({}), {0}); \
} \
\
TF_ASSERT_OK(RunOpKernel()); \
\
const int expected_nnz = 6; \
Tensor expected_indices(allocator(), DT_INT64, \
TensorShape({expected_nnz, 2})); \
test::FillValues<int64_t>(&expected_indices, \
{0, 0, 0, 1, 1, 0, 1, 1, 2, 0, 2, 1}); \
test::ExpectTensorEqual<int64_t>(expected_indices, *GetOutput(0)); \
\
Tensor expected_values(allocator(), val_dtype, {expected_nnz}); \
test::FillValues<VALTYPE>(&expected_values, {5, 1, 2, 6, 3, 4}); \
test::ExpectTensorEqual<VALTYPE>(expected_values, *GetOutput(1)); \
\
Tensor expected_shape(allocator(), DT_INT64, \
{static_cast<int64_t>(shape.size())}); \
test::FillValues<int64_t>(&expected_shape, shape); \
test::ExpectTensorEqual<int64_t>(expected_shape, *GetOutput(2)); \
}
RUN_TEST(int64_t);
RUN_TEST(float);
RUN_TEST(double);
RUN_TEST(complex64);
RUN_TEST(complex128);
#undef RUN_TEST
#define RUN_TEST(VALTYPE, THRESH) \
TEST_F(SparseAddOpTest, TwoD_SmallValuesShouldVanish_##VALTYPE) { \
MakeOp<VALTYPE>(); \
DataType val_dtype = tensorflow::DataTypeToEnum<VALTYPE>::value; \
const auto indices_shape = TensorShape({4, 2}); \
std::initializer_list<int64_t> in{0, 1, 1, 0, 2, 0, 2, 1}; \
const gtl::ArraySlice<int64_t> indices(in); \
std::initializer_list<int64_t> sh{3, 2}; \
const gtl::ArraySlice<int64_t> shape(sh); \
\
auto AddSparseTensor = [indices, indices_shape, shape, \
this](bool negate) { \
AddInputFromArray<int64_t>(indices_shape, indices); \
if (!negate) { \
AddInputFromArray<VALTYPE>(TensorShape({4}), {1, 2, 3, 4}); \
} else { \
AddInputFromArray<VALTYPE>(TensorShape({4}), {-1, -2, -3, -4}); \
} \
AddInputFromArray<int64_t>(TensorShape({2}), shape); \
}; \
AddSparseTensor(false); \
AddSparseTensor(true); \
if (val_dtype == DT_COMPLEX64) { \
AddInputFromArray<float>(TensorShape({}), {THRESH}); \
} else if (val_dtype == DT_COMPLEX128) { \
AddInputFromArray<double>(TensorShape({}), {THRESH}); \
} else { \
AddInputFromArray<VALTYPE>(TensorShape({}), {THRESH}); \
} \
\
TF_ASSERT_OK(RunOpKernel()); \
\
Tensor expected_indices(allocator(), DT_INT64, TensorShape({0, 2})); \
test::ExpectTensorEqual<int64_t>(expected_indices, *GetOutput(0)); \
\
Tensor expected_values(allocator(), val_dtype, TensorShape({0})); \
test::ExpectTensorEqual<VALTYPE>(expected_values, *GetOutput(1)); \
\
Tensor expected_shape(allocator(), DT_INT64, \
{static_cast<int64_t>(shape.size())}); \
test::FillValues<int64_t>(&expected_shape, shape); \
test::ExpectTensorEqual<int64_t>(expected_shape, *GetOutput(2)); \
}
RUN_TEST(int64_t, 1);
RUN_TEST(float, 1e-3f);
RUN_TEST(double, 1e-3f);
RUN_TEST(complex64, 1e-3f);
RUN_TEST(complex128, 1e-3f);
#undef RUN_TEST
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_add_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/sparse_add_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
b05f06be-edfe-47f9-b53c-9ba3c2f004b0 | cpp | google/cel-cpp | exercise1 | codelab/solutions/exercise1.cc | codelab/exercise1_test.cc | #include "codelab/exercise1.h"
#include <memory>
#include <string>
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/arena.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_expression.h"
#include "eval/public/cel_options.h"
#include "eval/public/cel_value.h"
#include "internal/status_macros.h"
#include "parser/parser.h"
namespace google::api::expr::codelab {
namespace {
using ::google::api::expr::v1alpha1::ParsedExpr;
using ::google::api::expr::parser::Parse;
using ::google::api::expr::runtime::Activation;
using ::google::api::expr::runtime::CelExpression;
using ::google::api::expr::runtime::CelExpressionBuilder;
using ::google::api::expr::runtime::CelValue;
using ::google::api::expr::runtime::CreateCelExpressionBuilder;
using ::google::api::expr::runtime::InterpreterOptions;
using ::google::api::expr::runtime::RegisterBuiltinFunctions;
absl::StatusOr<std::string> ConvertResult(const CelValue& value) {
if (CelValue::StringHolder inner_value; value.GetValue(&inner_value)) {
return std::string(inner_value.value());
} else {
return absl::InvalidArgumentError(absl::StrCat(
"expected string result got '", CelValue::TypeName(value.type()), "'"));
}
}
}
absl::StatusOr<std::string> ParseAndEvaluate(absl::string_view cel_expr) {
InterpreterOptions options;
std::unique_ptr<CelExpressionBuilder> builder =
CreateCelExpressionBuilder(options);
CEL_RETURN_IF_ERROR(
RegisterBuiltinFunctions(builder->GetRegistry(), options));
ParsedExpr parsed_expr;
CEL_ASSIGN_OR_RETURN(parsed_expr, Parse(cel_expr));
google::protobuf::Arena arena;
Activation activation;
CEL_ASSIGN_OR_RETURN(std::unique_ptr<CelExpression> expression_plan,
builder->CreateExpression(&parsed_expr.expr(),
&parsed_expr.source_info()));
CEL_ASSIGN_OR_RETURN(CelValue result,
expression_plan->Evaluate(activation, &arena));
return ConvertResult(result);
}
} | #include "codelab/exercise1.h"
#include "internal/testing.h"
namespace google::api::expr::codelab {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
TEST(Exercise1, PrintHelloWorld) {
EXPECT_THAT(ParseAndEvaluate("'Hello, World!'"),
IsOkAndHolds("Hello, World!"));
}
TEST(Exercise1, WrongTypeResultError) {
EXPECT_THAT(ParseAndEvaluate("true"),
StatusIs(absl::StatusCode::kInvalidArgument,
"expected string result got 'bool'"));
}
TEST(Exercise1, Conditional) {
EXPECT_THAT(ParseAndEvaluate("(1 < 0)? 'Hello, World!' : '¡Hola, Mundo!'"),
IsOkAndHolds("¡Hola, Mundo!"));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/codelab/solutions/exercise1.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/codelab/exercise1_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
08070099-b020-4907-8983-05a3e1f40a7b | cpp | tensorflow/tensorflow | stablehlo_reduce_window | tensorflow/lite/kernels/stablehlo_reduce_window.cc | tensorflow/lite/kernels/stablehlo_reduce_window_test.cc | #include <algorithm>
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <type_traits>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/builtin_op_data.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace {
constexpr int32_t kMaxReduceWindowRank = 6;
void StridedCopy(const int rank, const char* input, const int64_t* input_shape,
const int64_t* input_strides, char* output,
const int64_t* output_strides, const int64_t element_size,
const int depth) {
if (depth + 1 == rank) {
for (int64_t i = 0; i < input_shape[depth]; ++i) {
std::memcpy(output, input, element_size);
input += input_strides[depth];
output += output_strides[depth];
}
} else {
for (int64_t i = 0; i < input_shape[depth]; ++i) {
StridedCopy(rank, input, input_shape, input_strides, output,
output_strides, element_size, depth + 1);
input += input_strides[depth];
output += output_strides[depth];
}
}
}
}
namespace dilate {
namespace {
const int64_t kTFLiteDefaultBaseDilation[kMaxReduceWindowRank] = {1, 1, 1,
1, 1, 1};
struct DilateData {
DilateData() = default;
DilateData(const int rank, const int64_t* input_shape,
const int64_t* dilation, const int64_t element_size)
: rank(rank), init_element_size(element_size) {
std::copy_n(input_shape, rank, shape);
std::copy_n(dilation, rank, base_dilations);
ComputeOutputShapeAndSize(element_size);
skip = std::all_of(dilation, dilation + rank,
[](int64_t d) { return d == 1; });
if (skip) {
return;
}
MergeTrailingDilations(element_size);
ComputeInputStrides();
ComputeOutputStridesAndSizes();
}
void MergeTrailingDilations(int64_t element_size) {
for (int i = rank - 2; i >= 0; --i) {
if (base_dilations[i + 1] == 1) {
element_size *= shape[i + 1];
--rank;
} else {
break;
}
}
if (rank == 1 && base_dilations[0] == 1) {
element_size *= shape[0];
shape[0] = 1;
}
input_strides[rank - 1] = element_size;
}
void ComputeInputStrides() {
assert(input_strides[rank - 1] != 0);
for (int i = rank - 2; i >= 0; --i) {
input_strides[i] = shape[i + 1] * input_strides[i + 1];
}
}
void ComputeOutputStridesAndSizes() {
output_dimension_sizes[rank - 1] = input_strides[rank - 1];
output_strides[rank - 1] =
base_dilations[rank - 1] * output_dimension_sizes[rank - 1];
for (int i = rank - 2; i >= 0; --i) {
output_dimension_sizes[i] = ((shape[i + 1] - 1) * output_strides[i + 1] +
output_dimension_sizes[i + 1]);
output_strides[i] = base_dilations[i] * output_dimension_sizes[i];
}
}
void ComputeOutputShapeAndSize(const int64_t element_size) {
output_size = element_size;
for (int i = 0; i < rank; ++i) {
output_shape[i] = (shape[i] - 1) * base_dilations[i] + 1;
output_size *= output_shape[i];
}
}
int64_t ElementSize() const { return input_strides[rank - 1]; }
bool skip = true;
int rank = 0;
int64_t init_element_size = 0;
int64_t shape[kMaxReduceWindowRank] = {};
int64_t base_dilations[kMaxReduceWindowRank] = {};
int64_t output_strides[kMaxReduceWindowRank] = {};
int64_t output_dimension_sizes[kMaxReduceWindowRank] = {};
int64_t input_strides[kMaxReduceWindowRank] = {};
int64_t output_shape[kMaxReduceWindowRank] = {};
int64_t output_size = 1;
};
void Dilate(const DilateData& ctx, const char* input, const char* init_value,
char* output) {
assert(!ctx.skip);
{
std::memcpy(output, init_value, ctx.init_element_size);
int64_t remaining_bytes = ctx.output_size - ctx.init_element_size;
int64_t copied_bytes = ctx.init_element_size;
while (remaining_bytes) {
int64_t bytes = std::min(remaining_bytes, copied_bytes);
std::memcpy(output + copied_bytes, output, bytes);
remaining_bytes -= bytes;
copied_bytes += bytes;
}
}
StridedCopy(ctx.rank, input, ctx.shape, ctx.input_strides, output,
ctx.output_strides, ctx.ElementSize(), 0);
}
}
}
namespace pad {
namespace {
const int64_t kTFLiteDefaultPadding[kMaxReduceWindowRank] = {0, 0, 0, 0, 0, 0};
struct PadCropData {
PadCropData() = default;
PadCropData(int rank, const int64_t* dims, const int64_t* padding,
const int64_t element_size)
: rank(rank), element_size(element_size) {
assert(rank > 0);
assert(rank < kMaxReduceWindowRank);
output_size = element_size;
for (int i = 0; i < rank; ++i) {
output_shape[i] = dims[i] + padding[2 * i] + padding[2 * i + 1];
output_size *= output_shape[i];
}
skip = std::all_of(padding, padding + 2 * rank,
[](int64_t v) { return v == 0; });
if (skip) {
return;
}
output_strides[rank - 1] = element_size;
input_strides[rank - 1] = element_size;
for (int i = rank - 2; i >= 0; --i) {
output_strides[i] = output_shape[i + 1] * output_strides[i + 1];
input_strides[i] = dims[i + 1] * input_strides[i + 1];
}
for (int i = 0; i < rank; ++i) {
input_offset += std::max<int64_t>(-padding[2 * i], 0) * input_strides[i];
output_offset += std::max<int64_t>(padding[2 * i], 0) * output_strides[i];
cropped_input_shape[i] = dims[i] + std::min<int64_t>(padding[2 * i], 0) +
std::min<int64_t>(padding[2 * i + 1], 0);
}
}
bool skip = true;
int rank = 0;
int64_t element_size = 0;
int64_t cropped_input_shape[kMaxReduceWindowRank];
int64_t input_strides[kMaxReduceWindowRank];
int64_t output_shape[kMaxReduceWindowRank];
int64_t output_strides[kMaxReduceWindowRank];
int64_t input_offset = 0;
int64_t output_offset = 0;
int64_t output_size = 0;
};
void PadCrop(const PadCropData& ctx, const char* input, const char* init_value,
char* output) {
assert(!ctx.skip);
{
std::memcpy(output, init_value, ctx.element_size);
int64_t remaining_bytes = ctx.output_size - ctx.element_size;
int64_t copied_bytes = ctx.element_size;
while (remaining_bytes) {
int64_t bytes = std::min(remaining_bytes, copied_bytes);
std::memcpy(output + copied_bytes, output, bytes);
remaining_bytes -= bytes;
copied_bytes += bytes;
}
}
StridedCopy(ctx.rank, input + ctx.input_offset, ctx.cropped_input_shape,
ctx.input_strides, output + ctx.output_offset, ctx.output_strides,
ctx.element_size, 0);
}
}
}
namespace reduce_window {
namespace {
template <class Op, class Type>
void StridedReduce(const Type* input, const int64_t* const shape,
const int64_t* const strides, Type& accu, const int rank,
const int depth) {
const int64_t stride = strides[depth];
const int64_t size = shape[depth];
if (depth + 1 == rank) {
const Op op;
for (int64_t i = 0; i < size; ++i) {
accu = op(accu, *input);
input += stride;
}
} else {
for (int64_t i = 0; i < size; ++i) {
StridedReduce<Op, Type>(input, shape, strides, accu, rank, depth + 1);
input += stride;
}
}
}
template <class Op, class Type>
void ReduceWindowImpl(const Type* input, Type* output,
const int64_t* const output_shape,
const int64_t* const output_strides,
const int64_t* const window_offset_strides,
const int64_t* const window_shape,
const int64_t* const window_reduce_strides,
const Type init, const int rank, const int depth) {
if (depth + 1 == rank) {
for (int32_t dim = 0; dim < output_shape[depth]; ++dim) {
*output = init;
StridedReduce<Op, Type>(input, window_shape, window_reduce_strides,
*output, rank, 0);
input += window_offset_strides[depth];
output += output_strides[depth];
}
} else {
for (int32_t dim = 0; dim < output_shape[depth]; ++dim) {
ReduceWindowImpl<Op, Type>(input, output, output_shape, output_strides,
window_offset_strides, window_shape,
window_reduce_strides, init, rank, depth + 1);
input += window_offset_strides[depth];
output += output_strides[depth];
}
}
}
struct ReduceWindowData {
ReduceWindowData() = default;
ReduceWindowData(const int rank, const int64_t* input_shape,
const int64_t* window_shape, const int64_t* window_strides,
const int64_t* window_dilations)
: rank(rank),
input_shape(input_shape),
window_shape(window_shape),
window_dilations(window_dilations),
window_strides(window_strides) {
ComputeStrides(input_strides, input_shape);
Multiply(window_reduce_strides, input_strides, window_dilations);
Multiply(window_offset_strides, input_strides, window_strides);
ComputeOutputShape();
ComputeStrides(output_strides, output_shape);
}
void ComputeStrides(int64_t* strides, const int64_t* const shape) {
strides[rank - 1] = 1;
for (int64_t i = rank - 2; i >= 0; --i) {
strides[i] = shape[i + 1] * strides[i + 1];
}
}
void Multiply(int64_t* dst, const int64_t* const vec1,
const int64_t* const vec2) {
for (int64_t i = 0; i < rank; ++i) {
dst[i] = vec2[i] * vec1[i];
}
}
void ComputeOutputShape() {
int64_t dilated_window_shape[kMaxReduceWindowRank];
for (int64_t i = 0; i < rank; ++i) {
dilated_window_shape[i] = (window_shape[i] - 1) * window_dilations[i] + 1;
}
for (int64_t i = 0; i < rank; ++i) {
if (input_shape[i] < dilated_window_shape[i]) {
output_shape[i] = 0;
} else {
output_shape[i] =
(input_shape[i] - dilated_window_shape[i]) / window_strides[i] + 1;
}
}
}
int rank = 0;
const int64_t* input_shape;
const int64_t* window_shape;
const int64_t* window_dilations;
const int64_t* window_strides;
int64_t input_strides[kMaxReduceWindowRank] = {};
int64_t window_offset_strides[kMaxReduceWindowRank] = {};
int64_t window_reduce_strides[kMaxReduceWindowRank] = {};
int64_t output_shape[kMaxReduceWindowRank] = {};
int64_t output_strides[kMaxReduceWindowRank] = {};
};
template <class Op, class Type>
void ReduceWindow(const ReduceWindowData& ctx, const Type* const input,
const Type init, Type* output) {
ReduceWindowImpl<Op, Type>(input, output, ctx.output_shape,
ctx.output_strides, ctx.window_offset_strides,
ctx.window_shape, ctx.window_reduce_strides, init,
ctx.rank, 0);
}
}
}
namespace reduce_window_op {
namespace {
struct NodeData {
enum { kDilateOutput, kPadOutput, kTempTensorCount };
int temporary_tensor_offset = -1;
pad::PadCropData pad_ctx;
dilate::DilateData dilate_ctx;
reduce_window::ReduceWindowData reduce_window_ctx;
TfLiteReduceWindowFunction body;
};
struct OpData {
OpData(TfLiteContext* context, TfLiteNode* node)
: context(context), node(node) {}
TfLiteContext* context;
TfLiteNode* node;
TfLiteType type;
int rank;
int64_t element_size;
int64_t input_dims[kMaxReduceWindowRank];
const char* input;
const char* init_value;
const int64_t* window_dimensions;
const int64_t* window_strides;
const int64_t* base_dilations;
const int64_t* window_dilations;
const int64_t* padding;
char* dilate_output = nullptr;
char* pad_output = nullptr;
char* output;
TfLiteStatus ResizeTensor(TfLiteTensor* const tensor,
const int64_t* const shape) {
auto dims = BuildTfLiteArray<int32_t>(rank, shape);
return context->ResizeTensor(context, tensor, dims.release());
}
TfLiteStatus SetElementType(TfLiteType t) {
type = t;
size_t unsigned_element_size;
TF_LITE_ENSURE_OK(context,
GetSizeOfType(context, type, &unsigned_element_size));
TF_LITE_ENSURE_MSG(
context,
sizeof(unsigned_element_size) < sizeof(int64_t) ||
unsigned_element_size <= std::numeric_limits<int64_t>::max(),
"The element size cannot be contained in an int64_t value.");
element_size = unsigned_element_size;
return kTfLiteOk;
}
template <class Semantic>
TfLiteStatus InitializeBase() {
init_value = reinterpret_cast<const char*>(
GetInput(context, node, Semantic::kInitValue)->data.data);
const TfLiteTensor* const input_tensor =
GetInput(context, node, Semantic::kInput);
SetElementType(input_tensor->type);
rank = input_tensor->dims->size;
std::copy_n(input_tensor->dims->data, rank, input_dims);
input = reinterpret_cast<const char*>(input_tensor->data.data);
TfLiteTensor* const output_tensor =
GetOutput(context, node, Semantic::kOutput);
output = reinterpret_cast<char*>(output_tensor->data.data);
return kTfLiteOk;
}
};
struct StablehloData : public OpData {
enum InputTensorId { kInput, kInitValue, kNumInputTensors };
enum OutputTensorId { kOutput, kNumOutputTensors };
using OpData::OpData;
TfLiteTensor* GetTemporary(int id) {
return tflite::GetTemporary(context, node, id);
}
TfLiteStatus Check() const {
TF_LITE_ENSURE_EQ(context, NumInputs(node), kNumInputTensors);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), kNumOutputTensors);
const TfLiteTensor* const input_tensor = GetInput(context, node, kInput);
const TfLiteTensor* const output_tensor = GetOutput(context, node, kOutput);
const TfLiteTensor* const init_value_tensor =
GetInput(context, node, kInitValue);
TF_LITE_ENSURE_EQ(context, input_tensor->type, output_tensor->type);
TF_LITE_ENSURE_EQ(context, input_tensor->type, init_value_tensor->type);
TF_LITE_ENSURE(context, input_tensor->dims != nullptr);
TF_LITE_ENSURE(context, input_tensor->dims->size > 0);
TF_LITE_ENSURE(context, input_tensor->dims->size <= kMaxReduceWindowRank);
return kTfLiteOk;
}
TfLiteStatus Initialize() {
TF_LITE_ENSURE_OK(context, InitializeBase<StablehloData>());
const auto& params = *reinterpret_cast<TfLiteStablehloReduceWindowParams*>(
node->builtin_data);
window_dimensions = params.window_dimensions;
window_strides = params.window_strides;
base_dilations = params.base_dilations;
window_dilations = params.window_dilations;
padding = params.padding;
auto AllGtThanZero = [&](const int64_t* const attr) {
return std::all_of(attr, attr + rank, [](int64_t d) { return d > 0; });
};
TF_LITE_ENSURE(context, AllGtThanZero(base_dilations));
TF_LITE_ENSURE(context, AllGtThanZero(window_dimensions));
TF_LITE_ENSURE(context, AllGtThanZero(window_strides));
TF_LITE_ENSURE(context, AllGtThanZero(window_dilations));
if (node->temporaries &&
node->temporaries->size >= NodeData::kTempTensorCount) {
TfLiteTensor* const dilated_tensor =
GetTemporary(NodeData::kDilateOutput);
TfLiteTensor* const padded_tensor = GetTemporary(NodeData::kPadOutput);
TF_LITE_ENSURE(context, dilated_tensor != nullptr);
TF_LITE_ENSURE(context, padded_tensor != nullptr);
dilate_output = dilated_tensor->data.raw;
pad_output = padded_tensor->data.raw;
}
return kTfLiteOk;
}
TfLiteStatus Setup() {
NodeData& node_data = *reinterpret_cast<NodeData*>(node->user_data);
TfLiteIntArrayFree(node->temporaries);
node->temporaries = TfLiteIntArrayCreate(NodeData::kTempTensorCount);
for (int i = 0; i < NodeData::kTempTensorCount; ++i) {
node->temporaries->data[i] = node_data.temporary_tensor_offset + i;
}
node_data.body = GetBodyFunction();
node_data.dilate_ctx =
dilate::DilateData(rank, input_dims, base_dilations, element_size);
node_data.pad_ctx = pad::PadCropData(
rank, node_data.dilate_ctx.output_shape, padding, element_size);
node_data.reduce_window_ctx = reduce_window::ReduceWindowData(
rank, node_data.pad_ctx.output_shape, window_dimensions, window_strides,
window_dilations);
TfLiteTensor* const dilated_tensor = GetTemporary(NodeData::kDilateOutput);
TfLiteTensor* const padded_tensor = GetTemporary(NodeData::kPadOutput);
TfLiteTensor* const output_tensor = GetOutput(context, node, kOutput);
dilated_tensor->type = type;
dilated_tensor->allocation_type = kTfLiteArenaRw;
padded_tensor->type = type;
padded_tensor->allocation_type = kTfLiteArenaRw;
TF_LITE_ENSURE_OK(context, ResizeTensor(dilated_tensor,
node_data.dilate_ctx.output_shape));
TF_LITE_ENSURE_OK(
context, ResizeTensor(padded_tensor, node_data.pad_ctx.output_shape));
TF_LITE_ENSURE_OK(
context,
ResizeTensor(output_tensor, node_data.reduce_window_ctx.output_shape));
return kTfLiteOk;
}
// Resolves the reduction body of the stablehlo.reduce_window op to one of
// the known TfLiteReduceWindowFunction values.
//
// The body is provided as a subgraph; it is only supported when that
// subgraph holds exactly one kernel mapping to a recognized binary
// reduction. Returns TfLiteReduceWindowFunctionUnsupported (and logs)
// otherwise.
TfLiteReduceWindowFunction GetBodyFunction() {
  const TfLiteStablehloReduceWindowParams& params =
      *reinterpret_cast<TfLiteStablehloReduceWindowParams*>(
          node->builtin_data);
  const int body_subgraph_index = params.body_subgraph_index;
  const Subgraph& parent_subgraph =
      *reinterpret_cast<Subgraph*>(context->impl_);
  const std::vector<std::unique_ptr<Subgraph>>& subgraphs =
      *parent_subgraph.GetSubgraphs();
  if (body_subgraph_index >= subgraphs.size()) {
    TF_LITE_KERNEL_LOG(
        context, "Body subgraph not found for stablehlo.reduce_window: %d.",
        body_subgraph_index);
    return TfLiteReduceWindowFunctionUnsupported;
  }
  const Subgraph& body_subgraph = *subgraphs[body_subgraph_index];
  // Prefer the pre-delegation execution plan so delegation cannot hide the
  // original single-kernel body.
  const std::vector<int>& execution_plan =
      body_subgraph.pre_delegation_execution_plan().empty()
          ? body_subgraph.execution_plan()
          : body_subgraph.pre_delegation_execution_plan();
  if (execution_plan.size() != 1) {
    TF_LITE_KERNEL_LOG(context,
                       "Only one kernel is allowed within "
                       "stablehlo.reduce_window body. (%zu) kernels found.\n",
                       execution_plan.size());
    return TfLiteReduceWindowFunctionUnsupported;
  }
  const int body_kernel_index = execution_plan[0];
  const TfLiteRegistration& body_kernel_registration =
      body_subgraph.node_and_registration(body_kernel_index)->second;
  // Both the TFLite builtins and their stablehlo counterparts are accepted.
  switch (body_kernel_registration.builtin_code) {
    case kTfLiteBuiltinAdd:
    case kTfLiteBuiltinStablehloAdd:
      return TfLiteReduceWindowFunctionAdd;
    case kTfLiteBuiltinMul:
    case kTfLiteBuiltinStablehloMultiply:
      return TfLiteReduceWindowFunctionMul;
    case kTfLiteBuiltinMaximum:
    case kTfLiteBuiltinStablehloMaximum:
      return TfLiteReduceWindowFunctionMax;
    case kTfLiteBuiltinMinimum:
    case kTfLiteBuiltinStablehloMinimum:
      return TfLiteReduceWindowFunctionMin;
    case kTfLiteBuiltinLogicalAnd:
    case kTfLiteBuiltinStablehloAnd:
      return TfLiteReduceWindowFunctionAll;
    case kTfLiteBuiltinLogicalOr:
    case kTfLiteBuiltinStablehloOr:
      return TfLiteReduceWindowFunctionAny;
    default:
      TF_LITE_KERNEL_LOG(
          context, "%s:%d unsupported reduction body builtin code: %d.\n",
          __FILE__, __LINE__, body_kernel_registration.builtin_code);
      return TfLiteReduceWindowFunctionUnsupported;
  }
}
};
// Specialization of OpData for the built-in (non-stablehlo) REDUCE_WINDOW
// op. This variant takes the window parameters as constant input tensors,
// skips the dilate and pad stages, and reads the reduction body from the
// builtin params.
struct TFLiteData : public OpData {
  enum InputTensorId {
    kInput,
    kInitValue,
    kWindowShape,
    kWindowStrides,
    kWindowDilations,
    kNumInputTensors
  };
  enum OutputTensorId { kOutput, kNumOutputTensors };
  using OpData::OpData;
  // Validates tensor counts, types, constness of the window attribute
  // tensors, and the supported input rank.
  TfLiteStatus Check() const {
    TF_LITE_ENSURE_EQ(context, NumInputs(node), kNumInputTensors);
    TF_LITE_ENSURE_EQ(context, NumOutputs(node), kNumOutputTensors);
    const TfLiteTensor* const input_tensor = GetInput(context, node, kInput);
    const TfLiteTensor* const init_value_tensor =
        GetInput(context, node, kInitValue);
    const TfLiteTensor* const window_dimensions_tensor =
        GetInput(context, node, kWindowShape);
    const TfLiteTensor* const window_strides_tensor =
        GetInput(context, node, kWindowStrides);
    const TfLiteTensor* const window_dilations_tensor =
        GetInput(context, node, kWindowDilations);
    const TfLiteTensor* const output_tensor = GetOutput(context, node, kOutput);
    // The window attributes must be known at prepare time.
    TF_LITE_ENSURE(context, IsConstantTensor(window_dimensions_tensor));
    TF_LITE_ENSURE(context, IsConstantTensor(window_strides_tensor));
    TF_LITE_ENSURE(context, IsConstantTensor(window_dilations_tensor));
    TF_LITE_ENSURE_EQ(context, input_tensor->type, output_tensor->type);
    TF_LITE_ENSURE_EQ(context, input_tensor->type, init_value_tensor->type);
    TF_LITE_ENSURE_EQ(context, window_dimensions_tensor->type, kTfLiteInt64);
    TF_LITE_ENSURE_EQ(context, window_strides_tensor->type, kTfLiteInt64);
    TF_LITE_ENSURE_EQ(context, window_dilations_tensor->type, kTfLiteInt64);
    TF_LITE_ENSURE(context, input_tensor->dims != nullptr);
    TF_LITE_ENSURE(context, input_tensor->dims->size > 0);
    TF_LITE_ENSURE(context, input_tensor->dims->size <= kMaxReduceWindowRank);
    return kTfLiteOk;
  }
  // Caches pointers to the attribute tensors. Base dilation and padding use
  // the library defaults since the TFLite op does not expose them.
  TfLiteStatus Initialize() {
    TF_LITE_ENSURE_OK(context, InitializeBase<TFLiteData>());
    window_dimensions = reinterpret_cast<const int64_t*>(
        GetInput(context, node, kWindowShape)->data.data);
    window_strides = reinterpret_cast<const int64_t*>(
        GetInput(context, node, kWindowStrides)->data.data);
    base_dilations = dilate::kTFLiteDefaultBaseDilation;
    window_dilations = reinterpret_cast<const int64_t*>(
        GetInput(context, node, kWindowDilations)->data.data);
    padding = pad::kTFLiteDefaultPadding;
    return kTfLiteOk;
  }
  // Marks the dilate and pad stages as skipped and resizes the output tensor
  // to the reduce-window output shape.
  TfLiteStatus Setup() {
    NodeData& node_data = *reinterpret_cast<NodeData*>(node->user_data);
    const auto& params =
        *reinterpret_cast<TfLiteReduceWindowParams*>(node->builtin_data);
    node_data.body = params.reduce_function;
    node_data.dilate_ctx.skip = true;
    node_data.pad_ctx.skip = true;
    node_data.reduce_window_ctx = reduce_window::ReduceWindowData(
        rank, input_dims, window_dimensions, window_strides, window_dilations);
    TfLiteTensor* const output_tensor = GetOutput(context, node, kOutput);
    return context->ResizeTensor(
        context, output_tensor,
        BuildTfLiteArray<int32_t>(rank,
                                  node_data.reduce_window_ctx.output_shape)
            .release());
  }
};
// Runs the dilate -> pad/crop -> reduce-window pipeline for one tensor.
//
// Stages whose context is flagged `skip` are bypassed; otherwise each stage
// writes into its dedicated scratch buffer which becomes the next stage's
// input. (The original kept a running `input_shape` local alongside `input`,
// but it was never read; it has been removed.)
template <class Op, class Type>
void PadCropReduceWindow(const OpData& op_ctx) {
  NodeData& node_data = *reinterpret_cast<NodeData*>(op_ctx.node->user_data);
  const char* input = op_ctx.input;
  if (!node_data.dilate_ctx.skip) {
    dilate::Dilate(node_data.dilate_ctx, input, op_ctx.init_value,
                   op_ctx.dilate_output);
    input = op_ctx.dilate_output;
  }
  if (!node_data.pad_ctx.skip) {
    pad::PadCrop(node_data.pad_ctx, input, op_ctx.init_value,
                 op_ctx.pad_output);
    input = op_ctx.pad_output;
  }
  reduce_window::ReduceWindow<Op, Type>(
      node_data.reduce_window_ctx, reinterpret_cast<const Type*>(input),
      *reinterpret_cast<const Type*>(op_ctx.init_value),
      reinterpret_cast<Type*>(op_ctx.output));
}
// Dispatches to the typed pipeline implementation based on the tensor
// element type. Returns kTfLiteError (and logs) for unsupported types.
template <class Op>
TfLiteStatus DispatchReduceWindowType(OpData& ctx) {
// Expands to one switch case that runs the pipeline with the given C++ type.
#define REDUCE_WINDOW_TYPE_CASE(CPP_TYPE, TENSOR_TYPE) \
  case TENSOR_TYPE:                                    \
    PadCropReduceWindow<Op, CPP_TYPE>(ctx);            \
    break;
  switch (ctx.type) {
    // kTfLiteBool values are processed with the int8_t implementation.
    REDUCE_WINDOW_TYPE_CASE(int8_t, kTfLiteBool);
    REDUCE_WINDOW_TYPE_CASE(int8_t, kTfLiteInt8);
    REDUCE_WINDOW_TYPE_CASE(int16_t, kTfLiteInt16);
    REDUCE_WINDOW_TYPE_CASE(int32_t, kTfLiteInt32);
    REDUCE_WINDOW_TYPE_CASE(int64_t, kTfLiteInt64);
    REDUCE_WINDOW_TYPE_CASE(uint8_t, kTfLiteUInt8);
    REDUCE_WINDOW_TYPE_CASE(float, kTfLiteFloat32);
    REDUCE_WINDOW_TYPE_CASE(double, kTfLiteFloat64);
    default:
      TF_LITE_KERNEL_LOG(
          ctx.context,
          "%s:%d unsupported kernel data type (TfliteType: %d a.k.a %s).",
          __FILE__, __LINE__, ctx.type, TfLiteTypeGetName(ctx.type));
      return kTfLiteError;
  }
#undef REDUCE_WINDOW_TYPE_CASE
  return kTfLiteOk;
}
// Maximum reduction functor. Keeps the left operand on ties; when the
// comparison is false (including unordered operands) the right one wins,
// matching `a >= b ? a : b`.
struct Max {
  template <class T>
  constexpr T operator()(const T& lhs, const T& rhs) const {
    if (lhs >= rhs) {
      return lhs;
    }
    return rhs;
  }
};
// Minimum reduction functor. Keeps the left operand on ties; when the
// comparison is false (including unordered operands) the right one wins,
// matching `a <= b ? a : b`.
struct Min {
  template <class T>
  constexpr T operator()(const T& lhs, const T& rhs) const {
    if (lhs <= rhs) {
      return lhs;
    }
    return rhs;
  }
};
// Dispatches to the implementation matching the reduction body function
// stored in the node data.
TfLiteStatus DispatchReduceWindowBody(OpData& ctx) {
  const NodeData& node_data = *static_cast<NodeData*>(ctx.node->user_data);
  switch (node_data.body) {
    case TfLiteReduceWindowFunctionUnsupported:
      TF_LITE_KERNEL_LOG(ctx.context, "%s:%d unsupported reduction body.\n",
                         __FILE__, __LINE__);
      return kTfLiteError;
    case TfLiteReduceWindowFunctionAdd:
      return DispatchReduceWindowType<std::plus<>>(ctx);
    case TfLiteReduceWindowFunctionMul:
      return DispatchReduceWindowType<std::multiplies<>>(ctx);
    case TfLiteReduceWindowFunctionAll:
      return DispatchReduceWindowType<std::logical_and<>>(ctx);
    case TfLiteReduceWindowFunctionAny:
      return DispatchReduceWindowType<std::logical_or<>>(ctx);
    case TfLiteReduceWindowFunctionMin:
      return DispatchReduceWindowType<Min>(ctx);
    case TfLiteReduceWindowFunctionMax:
      return DispatchReduceWindowType<Max>(ctx);
  }
  // Unreachable when `body` holds a valid enum value; kept as a guard
  // against corrupted node data.
  TF_LITE_KERNEL_LOG(ctx.context, "%s:%d unhandled reduction body case.\n",
                     __FILE__, __LINE__);
  return kTfLiteError;
}
// Allocates the node user data and reserves the scratch tensor slots used by
// the dilate and pad stages of the stablehlo variant.
void* StablehloInit(TfLiteContext* context, const char* options,
                    size_t options_len) {
  NodeData* node_data = new NodeData();
  // NOTE(review): the AddTensors status is ignored — Init cannot report
  // failure; presumably Prepare's temporaries check catches a shortfall.
  // Confirm.
  context->AddTensors(context, NodeData::kTempTensorCount,
                      &node_data->temporary_tensor_offset);
  return node_data;
}
// Allocates the node user data for the built-in REDUCE_WINDOW variant.
// No scratch tensors are reserved: this path skips dilation and padding.
void* TFLiteInit(TfLiteContext* context, const char* options,
                 size_t options_len) {
  NodeData* const data = new NodeData();
  return data;
}
// Releases the NodeData allocated by StablehloInit / TFLiteInit.
void Free(TfLiteContext* context, void* node_data) {
  NodeData* const data = static_cast<NodeData*>(node_data);
  delete data;
}
// Generic Prepare: validates the node, reads its attributes and resizes the
// output (and any scratch) tensors. `Semantic` is either StablehloData or
// TFLiteData; the call order Check -> Initialize -> Setup is the contract.
template <class Semantic>
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  Semantic ctx(context, node);
  TF_LITE_ENSURE_OK(context, ctx.Check());
  TF_LITE_ENSURE_OK(context, ctx.Initialize());
  return ctx.Setup();
}
// Generic Eval: re-reads the op attributes and dispatches to the typed
// reduce-window implementation for the configured reduction body.
template <class Semantic>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  Semantic ctx(context, node);
  TF_LITE_ENSURE_OK(context, ctx.Initialize());
  NodeData& node_data = *reinterpret_cast<NodeData*>(node->user_data);
  // A padding spec that crops everything yields an empty intermediate
  // tensor; reject it instead of reducing over nothing.
  TF_LITE_ENSURE_MSG(
      context, node_data.pad_ctx.skip || node_data.pad_ctx.output_size > 0,
      "The padding specification of stablehlo.reduce_window gives an empty "
      "tensor.");
  return DispatchReduceWindowBody(ctx);
}
}
}
// Returns the registration for the STABLEHLO_REDUCE_WINDOW builtin, wired to
// the stablehlo semantics (scratch tensors, subgraph body resolution).
TfLiteRegistration* Register_STABLEHLO_REDUCE_WINDOW() {
  static TfLiteRegistration registration = {
      /*init=*/reduce_window_op::StablehloInit,
      /*free=*/reduce_window_op::Free,
      /*prepare=*/reduce_window_op::Prepare<reduce_window_op::StablehloData>,
      /*invoke=*/reduce_window_op::Eval<reduce_window_op::StablehloData>};
  return &registration;
}
// Returns the registration for the built-in REDUCE_WINDOW op, wired to the
// TFLite semantics (window parameters passed as constant input tensors).
TfLiteRegistration* Register_REDUCE_WINDOW() {
  static TfLiteRegistration registration = {
      /*init=*/reduce_window_op::TFLiteInit,
      /*free=*/reduce_window_op::Free,
      /*prepare=*/reduce_window_op::Prepare<reduce_window_op::TFLiteData>,
      /*invoke=*/reduce_window_op::Eval<reduce_window_op::TFLiteData>};
  return &registration;
}
}
}
} | #include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <limits>
#include <ostream>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/absl_log.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/distributions.h"
#include "absl/random/random.h"
#include "absl/types/span.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/stablehlo_reduce_window_test_util.h"
#include "tensorflow/lite/kernels/subgraph_test_util.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace reduce_window {
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
// Returns `status` from the enclosing function when `expr` does not evaluate
// to kTfLiteOk.
#define REDUCE_WINDOW_ENSURE_OK(expr)                        \
  do {                                                       \
    if (TfLiteStatus status = (expr); status != kTfLiteOk) { \
      ABSL_LOG(ERROR) << #expr " failed.\n";                 \
      return status;                                         \
    }                                                        \
  } while (false)
// Returns kTfLiteError from the enclosing function when `expr` is false,
// logging the stringified `msg`.
#define REDUCE_WINDOW_ENSURE_IMPL(expr, msg) \
  do {                                       \
    if (!(expr)) {                           \
      ABSL_LOG(ERROR) << #msg " failed.\n";  \
      return kTfLiteError;                   \
    }                                        \
  } while (false)
// Convenience wrappers around REDUCE_WINDOW_ENSURE_IMPL for the common
// boolean and comparison checks.
#define REDUCE_WINDOW_ENSURE(expr) REDUCE_WINDOW_ENSURE_IMPL((expr), #expr)
#define REDUCE_WINDOW_ENSURE_EQ(a, b) \
  REDUCE_WINDOW_ENSURE_IMPL((a) == (b), #a " == " #b)
#define REDUCE_WINDOW_ENSURE_NE(a, b) \
  REDUCE_WINDOW_ENSURE_IMPL((a) != (b), #a " != " #b)
#define REDUCE_WINDOW_ENSURE_GE(a, b) \
  REDUCE_WINDOW_ENSURE_IMPL((a) >= (b), #a " >= " #b)
#define REDUCE_WINDOW_ENSURE_LE(a, b) \
  REDUCE_WINDOW_ENSURE_IMPL((a) <= (b), #a " <= " #b)
#define REDUCE_WINDOW_ENSURE_GT(a, b) \
  REDUCE_WINDOW_ENSURE_IMPL((a) > (b), #a " > " #b)
#define REDUCE_WINDOW_ENSURE_LT(a, b) \
  REDUCE_WINDOW_ENSURE_IMPL((a) < (b), #a " < " #b)
// Always fails; used for switch default branches that must not be reached.
#define REDUCE_WINDOW_ENSURE_UNREACHABLE(msg) \
  REDUCE_WINDOW_ENSURE_IMPL(false, msg)
// Maps a C++ scalar type to the corresponding TensorType enum value at
// compile time. Only specialized for the types listed below.
template <class T>
struct TensorTypeFor;
// Registers a TensorTypeFor specialization for CPP_TYPE.
#define TENSOR_TYPE_ASSOC(CPP_TYPE, TENSORTYPE_VALUE) \
  template <>                                         \
  struct TensorTypeFor<CPP_TYPE> {                    \
    static constexpr TensorType value = TENSORTYPE_VALUE; \
  };
TENSOR_TYPE_ASSOC(int8_t, TensorType_INT8);
TENSOR_TYPE_ASSOC(int16_t, TensorType_INT16);
TENSOR_TYPE_ASSOC(int32_t, TensorType_INT32);
TENSOR_TYPE_ASSOC(int64_t, TensorType_INT64);
TENSOR_TYPE_ASSOC(uint8_t, TensorType_UINT8);
TENSOR_TYPE_ASSOC(uint16_t, TensorType_UINT16);
TENSOR_TYPE_ASSOC(uint32_t, TensorType_UINT32);
TENSOR_TYPE_ASSOC(uint64_t, TensorType_UINT64);
TENSOR_TYPE_ASSOC(float, TensorType_FLOAT32);
static_assert(sizeof(float) == 4, "float type is expected to be 32 bit long");
TENSOR_TYPE_ASSOC(double, TensorType_FLOAT64);
static_assert(sizeof(double) == 8, "double type is expected to be 64 bit long");
// Reduction body selector for the tests. Mirrors the reductions supported by
// the kernel, plus sentinel values for unset/unsupported configurations.
enum class BodyFunction {
  kUnset,
  kUnsupported,
  kAdd,
  kMul,
  kMax,
  kMin,
  kAll,
  kAny
};
// Streams a human-readable name for the given body function. Values outside
// the enum range produce no output.
std::ostream& operator<<(std::ostream& os, const BodyFunction& f) {
  const char* name = nullptr;
  switch (f) {
    case BodyFunction::kUnset:
      name = "unset";
      break;
    case BodyFunction::kUnsupported:
      name = "unsupported";
      break;
    case BodyFunction::kAdd:
      name = "add";
      break;
    case BodyFunction::kMul:
      name = "mul";
      break;
    case BodyFunction::kMax:
      name = "max";
      break;
    case BodyFunction::kMin:
      name = "min";
      break;
    case BodyFunction::kAll:
      name = "all";
      break;
    case BodyFunction::kAny:
      name = "any";
      break;
  }
  if (name != nullptr) {
    os << name;
  }
  return os;
}
// Test helper that builds and runs a single STABLEHLO_REDUCE_WINDOW op.
//
// The window/stride/dilation/padding attributes are configured through the
// Set* methods before Build()/BuildAndInvoke(). The reduction body is
// attached as a secondary subgraph created by SubgraphBuilder.
template <class T>
class ReduceWindowOpModel : public SingleOpModel {
  static constexpr TensorType kTensorType = TensorTypeFor<T>::value;

 public:
  // Fills the input with 1, 2, 3, ... for the given shape.
  void SetInput(absl::Span<const int64_t> shape) {
    input_shape_.assign(shape.begin(), shape.end());
    input_data_.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
    absl::c_iota(input_data_, 1);
  }
  // Sets the input from explicit data.
  void SetInput(absl::Span<const int64_t> shape, absl::Span<const T> data) {
    input_shape_.assign(shape.begin(), shape.end());
    input_data_.assign(data.begin(), data.end());
  }
  // Fills the input with random values drawn uniformly from [min, max].
  void SetInput(absl::Span<const int64_t> shape, absl::BitGenRef bitgen, T min,
                T max) {
    input_shape_.assign(shape.begin(), shape.end());
    input_data_.resize(absl::c_accumulate(shape, 1, std::multiplies<>()));
    absl::c_generate(input_data_, [&] {
      return absl::Uniform(absl::IntervalClosed, bitgen, min, max);
    });
  }
  void SetWindowDimensions(absl::Span<const int64_t> dimensions) {
    window_dimensions_.assign(dimensions.begin(), dimensions.end());
  }
  void SetWindowStrides(absl::Span<const int64_t> strides) {
    window_strides_.assign(strides.begin(), strides.end());
  }
  void SetBaseDilations(absl::Span<const int64_t> dilations) {
    base_dilations_.assign(dilations.begin(), dilations.end());
  }
  void SetWindowDilations(absl::Span<const int64_t> dilations) {
    window_dilations_.assign(dilations.begin(), dilations.end());
  }
  // Padding is given as (low, high) pairs per dimension, so its size must be
  // 2 * rank.
  void SetPadding(absl::Span<const int64_t> padding) {
    padding_.assign(padding.begin(), padding.end());
  }
  void SetInitValue(const T& val) { init_value_ = val; }
  void SetBody(const BodyFunction func) { body_function_ = func; }
  // Validates the configuration, builds the model and its body subgraph, and
  // populates the input tensor. Returns kTfLiteError on misconfiguration.
  TfLiteStatus Build() {
    constexpr int kBodySubGraphIndex = 1;
    REDUCE_WINDOW_ENSURE(!input_shape_.empty());
    REDUCE_WINDOW_ENSURE_EQ(window_dimensions_.size(), input_shape_.size());
    REDUCE_WINDOW_ENSURE_EQ(window_strides_.size(), input_shape_.size());
    REDUCE_WINDOW_ENSURE_EQ(base_dilations_.size(), input_shape_.size());
    REDUCE_WINDOW_ENSURE_EQ(window_dilations_.size(), input_shape_.size());
    REDUCE_WINDOW_ENSURE_EQ(padding_.size(), 2 * input_shape_.size());
    REDUCE_WINDOW_ENSURE_NE(body_function_, BodyFunction::kUnset);
    REDUCE_WINDOW_ENSURE_NE(body_function_, BodyFunction::kUnsupported);
    input_tensor_id_ =
        AddInput({kTensorType,
                  std::vector<int>(input_shape_.begin(), input_shape_.end())});
    init_value_tensor_id_ = AddConstInput(kTensorType, {init_value_}, {1});
    output_tensor_id_ = AddOutput(kTensorType);
    SetBuiltinOp(BuiltinOperator_STABLEHLO_REDUCE_WINDOW,
                 BuiltinOptions2_StablehloReduceWindowOptions,
                 CreateStablehloReduceWindowOptions(
                     builder_, builder_.CreateVector(window_dimensions_),
                     builder_.CreateVector(window_strides_),
                     builder_.CreateVector(base_dilations_),
                     builder_.CreateVector(window_dilations_),
                     builder_.CreateVector(padding_), kBodySubGraphIndex)
                     .Union());
    BuildInterpreter(
        {std::vector<int>(input_shape_.begin(),
                          input_shape_.end())},
        -1, false,
        true, false,
        false);
    // The body subgraph must land at index kBodySubGraphIndex, which is what
    // the builtin options reference.
    int body_subgraph_index;
    AddSubgraphs(1, &body_subgraph_index);
    REDUCE_WINDOW_ENSURE_EQ(body_subgraph_index, kBodySubGraphIndex);
    switch (body_function_) {
      case BodyFunction::kAdd:
        subgraph_builder_.BuildAddSubgraph(
            interpreter_->subgraph(body_subgraph_index));
        break;
      case BodyFunction::kMul:
        subgraph_builder_.BuildMulSubgraph(
            interpreter_->subgraph(body_subgraph_index));
        break;
      case BodyFunction::kMax:
        subgraph_builder_.BuildMaximumSubgraph(
            interpreter_->subgraph(body_subgraph_index));
        break;
      case BodyFunction::kMin:
        subgraph_builder_.BuildMinimumSubgraph(
            interpreter_->subgraph(body_subgraph_index));
        break;
      case BodyFunction::kAll:
        subgraph_builder_.BuildLogicalAndSubgraph(
            interpreter_->subgraph(body_subgraph_index));
        break;
      case BodyFunction::kAny:
        subgraph_builder_.BuildLogicalOrSubgraph(
            interpreter_->subgraph(body_subgraph_index));
        break;
      default:
        REDUCE_WINDOW_ENSURE_UNREACHABLE("Unhandled body function enum value.");
    }
    AllocateAndDelegate(true);
    PopulateTensor(input_tensor_id_, input_data_);
    return kTfLiteOk;
  }
  TfLiteStatus BuildAndInvoke() {
    REDUCE_WINDOW_ENSURE_OK(Build());
    return Invoke();
  }
  // Views over the interpreter-owned output tensor; valid until the next
  // build/invoke.
  absl::Span<const T> GetOutputData() {
    return absl::Span<const T>(interpreter_->typed_tensor<T>(output_tensor_id_),
                               GetTensorSize(output_tensor_id_));
  }
  absl::Span<const int> GetOutputShape() {
    const TfLiteIntArray& shape =
        *(interpreter_->tensor(output_tensor_id_)->dims);
    return absl::Span<const int>(shape.data, shape.size);
  }
  const std::vector<T>& GetInput() const { return input_data_; }
  const std::vector<int64_t>& GetInputShape() const { return input_shape_; }
  const std::vector<int64_t>& GetWindowDimensions() const {
    return window_dimensions_;
  }
  const std::vector<int64_t>& GetWindowStrides() const {
    return window_strides_;
  }
  const std::vector<int64_t>& GetBaseDilations() const {
    return base_dilations_;
  }
  const std::vector<int64_t>& GetWindowDilations() const {
    return window_dilations_;
  }
  const std::vector<int64_t>& GetPadding() const { return padding_; }
  const T& GetInitValue() const { return init_value_; }
  const BodyFunction& GetBodyFunction() const { return body_function_; }
  // Dumps the full configuration; used in test failure messages.
  friend std::ostream& operator<<(std::ostream& os,
                                  const ReduceWindowOpModel& model) {
    using Adapt = ReduceWindowOpModel::VectorOutputAdapter;
    os << "input dimensions: {" << Adapt{model.GetInputShape()} << "}\n";
    os << "  base dilations: {" << Adapt{model.GetBaseDilations()} << "}\n";
    os << "  padding: {" << Adapt{model.GetPadding()} << "}\n";
    os << "  window dimensions: {" << Adapt{model.GetWindowDimensions()}
       << "}\n";
    os << "  window dilations: {" << Adapt{model.GetWindowDilations()} << "}\n";
    os << "  window strides: {" << Adapt{model.GetWindowStrides()} << "}\n";
    os << "  init value: " << +model.GetInitValue() << "\n";
    os << "  body function: " << model.GetBodyFunction() << "\n";
    return os;
  }

 protected:
  // Prints an int64_t vector as a comma-separated list.
  struct VectorOutputAdapter {
    const std::vector<int64_t>& data;
    friend std::ostream& operator<<(std::ostream& os,
                                    const VectorOutputAdapter& vec) {
      if (!vec.data.empty()) {
        os << +vec.data[0];
        for (size_t i = 1; i < vec.data.size(); ++i) {
          os << ", " << +vec.data[i];
        }
      }
      return os;
    }
  };
  int input_tensor_id_ = -1;
  int init_value_tensor_id_ = -1;
  int output_tensor_id_ = -1;
  std::vector<T> input_data_;
  T init_value_;
  std::vector<int64_t> input_shape_;
  std::vector<int64_t> window_dimensions_;
  std::vector<int64_t> window_strides_;
  std::vector<int64_t> base_dilations_;
  std::vector<int64_t> window_dilations_;
  std::vector<int64_t> padding_;
  BodyFunction body_function_{};
  subgraph_test_util::SubgraphBuilder subgraph_builder_;
};
// Typed test fixture: each TYPED_TEST below runs once per storage type in
// TestList.
template <class StorageType>
class StablehloReduceWindowTest : public testing::Test {};
using TestList =
    testing::Types<int8_t, int16_t, int32_t, int64_t, uint8_t, float, double>;
TYPED_TEST_SUITE(StablehloReduceWindowTest, TestList);
// A 1x1 window with unit strides/dilations and no padding returns the input
// unchanged.
TYPED_TEST(StablehloReduceWindowTest, Identity) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, 0});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 3));
  EXPECT_THAT(model.GetOutputData(), ElementsAre(1, 2, 3, 4, 5, 6, 7, 8, 9));
}
// Base dilation of 2 interleaves the init value between input elements.
TYPED_TEST(StablehloReduceWindowTest, Dilate) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({2, 2});
  model.SetPadding({0, 0, 0, 0});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(5, 5));
  EXPECT_THAT(model.GetOutputData(),
              ElementsAreArray({1, 0, 2, 0, 3, 0, 0, 0, 0, 0, 4, 0, 5,
                                0, 6, 0, 0, 0, 0, 0, 7, 0, 8, 0, 9}));
}
// The following tests apply positive padding on each side in turn (and all
// at once) while keeping an identity window, so the padded tensor itself is
// the expected output.
TYPED_TEST(StablehloReduceWindowTest, IdentityPadTop) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({1, 0, 0, 0});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3));
  EXPECT_THAT(model.GetOutputData(),
              ElementsAreArray({0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadBottom) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 1, 0, 0});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(4, 3));
  EXPECT_THAT(model.GetOutputData(),
              ElementsAreArray({1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadLeft) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 1, 0});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 4));
  EXPECT_THAT(model.GetOutputData(),
              ElementsAreArray({0, 1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadRight) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, 1});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 4));
  EXPECT_THAT(model.GetOutputData(),
              ElementsAreArray({1, 2, 3, 0, 4, 5, 6, 0, 7, 8, 9, 0}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityPadAll) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({1, 1, 1, 1});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(5, 5));
  EXPECT_THAT(model.GetOutputData(),
              ElementsAreArray({0, 0, 0, 0, 0, 0, 1, 2, 3, 0, 0, 4, 5,
                                6, 0, 0, 7, 8, 9, 0, 0, 0, 0, 0, 0}));
}
// The following tests use negative padding (cropping) on each side in turn
// (and all at once) with an identity window, so the cropped tensor itself is
// the expected output.
TYPED_TEST(StablehloReduceWindowTest, IdentityCropTop) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({-1, 0, 0, 0});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
  EXPECT_THAT(model.GetOutputData(), ElementsAreArray({4, 5, 6, 7, 8, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropBottom) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, -1, 0, 0});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 3));
  EXPECT_THAT(model.GetOutputData(), ElementsAreArray({1, 2, 3, 4, 5, 6}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropLeft) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, -1, 0});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
  EXPECT_THAT(model.GetOutputData(), ElementsAreArray({2, 3, 5, 6, 8, 9}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropRight) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, -1});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(3, 2));
  EXPECT_THAT(model.GetOutputData(), ElementsAreArray({1, 2, 4, 5, 7, 8}));
}
TYPED_TEST(StablehloReduceWindowTest, IdentityCropAll) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({-1, -1, -1, -1});
  model.SetWindowDimensions({1, 1});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1));
  EXPECT_THAT(model.GetOutputData(), ElementsAre(5));
}
// A window covering the whole input reduces it to a single element.
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowFullWindow) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, 0});
  model.SetWindowDimensions({3, 3});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  model.SetBody(BodyFunction::kAdd);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1));
  EXPECT_THAT(model.GetOutputData(), ElementsAre(45));
}
// 2x2 sliding sum without dilation.
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowNoDilation) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, 0});
  model.SetBody(BodyFunction::kAdd);
  model.SetWindowDimensions({2, 2});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2));
  EXPECT_THAT(model.GetOutputData(), ElementsAre(12, 16, 24, 28));
}
// Window dilation of 2 makes the 2x2 window span the full 3x3 input.
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowFullWindowWithDilation) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({3, 3});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, 0});
  model.SetBody(BodyFunction::kAdd);
  model.SetWindowDimensions({2, 2});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({2, 2});
  model.SetInitValue(0);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 1));
  EXPECT_THAT(model.GetOutputData(), ElementsAre(20));
}
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowWithDilation) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({4, 4});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, 0});
  model.SetBody(BodyFunction::kAdd);
  model.SetWindowDimensions({2, 2});
  model.SetWindowStrides({1, 1});
  model.SetWindowDilations({2, 2});
  model.SetInitValue(0);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2));
  EXPECT_THAT(model.GetOutputData(), ElementsAre(24, 28, 40, 44));
}
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowWithStrides) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({4, 4});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, 0});
  model.SetBody(BodyFunction::kAdd);
  model.SetWindowDimensions({2, 2});
  model.SetWindowStrides({2, 2});
  model.SetWindowDilations({1, 1});
  model.SetInitValue(0);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2));
  EXPECT_THAT(model.GetOutputData(), ElementsAre(14, 22, 46, 54));
}
// Non-zero init value (2) is added into every window reduction here.
TYPED_TEST(StablehloReduceWindowTest, ReduceWindowWithDilationAndStrides) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({5, 5});
  model.SetBaseDilations({1, 1});
  model.SetPadding({0, 0, 0, 0});
  model.SetBody(BodyFunction::kAdd);
  model.SetWindowDimensions({2, 2});
  model.SetWindowStrides({2, 2});
  model.SetWindowDilations({2, 2});
  model.SetInitValue(2);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(2, 2));
  EXPECT_THAT(model.GetOutputData(), ElementsAre(30, 38, 70, 78));
}
// Regression-style check: 114 with a size-3 stride-2 window must floor to 56.
TYPED_TEST(StablehloReduceWindowTest,
           ReduceWindowOutputShapeRoundingIsCorrect) {
  ReduceWindowOpModel<TypeParam> model;
  model.SetInput({1, 64, 114, 114});
  model.SetBaseDilations({1, 1, 1, 1});
  model.SetPadding({0, 0, 0, 0, 0, 0, 0, 0});
  model.SetBody(BodyFunction::kAdd);
  model.SetWindowDimensions({1, 1, 3, 3});
  model.SetWindowStrides({1, 1, 2, 2});
  model.SetWindowDilations({1, 1, 1, 1});
  model.SetInitValue(2);
  ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
  EXPECT_THAT(model.GetOutputShape(), ElementsAre(1, 64, 56, 56));
}
// Builds a vector of `size` values drawn uniformly from the closed interval
// [min, max] using the provided bit generator.
template <class T>
std::vector<T> RandomVector(absl::BitGen& bitgen, size_t size, T min, T max) {
  std::vector<T> values;
  values.reserve(size);
  for (size_t i = 0; i < size; ++i) {
    values.push_back(absl::Uniform(absl::IntervalClosed, bitgen, min, max));
  }
  return values;
}
// Reference implementation of the reduction body, used to compute expected
// values in the fuzz test.
struct Body {
  // Picks a random supported body function. When `allow_mul` is false
  // (integer storage types, where products overflow quickly), kMul is
  // replaced by kAdd.
  static Body GetRandomSupported(absl::BitGen& bitgen, bool allow_mul) {
    Body b;
    b = Body{static_cast<BodyFunction>(absl::Uniform<int>(
        absl::IntervalClosed, bitgen, static_cast<int>(BodyFunction::kAdd),
        static_cast<int>(BodyFunction::kAny)))};
    if (!allow_mul && b.func == BodyFunction::kMul) {
      b.func = BodyFunction::kAdd;
    }
    return b;
  }
  // Applies the selected reduction to two values. Sentinel functions return
  // -1 so a misconfigured test fails visibly.
  template <class T>
  T operator()(const T& a, const T& b) const noexcept {
    switch (func) {
      case BodyFunction::kUnset:
      case BodyFunction::kUnsupported:
        return -1;
      case BodyFunction::kAdd:
        return a + b;
      case BodyFunction::kMul:
        return a * b;
      case BodyFunction::kMin:
        return a <= b ? a : b;
      case BodyFunction::kMax:
        return a >= b ? a : b;
      case BodyFunction::kAll:
        return a && b;
      case BodyFunction::kAny:
        return a || b;
    }
  }
  // Returns the identity element of the selected reduction for type T.
  template <class T>
  T init_value() const noexcept {
    switch (func) {
      case BodyFunction::kUnset:
      case BodyFunction::kUnsupported:
        return -1;
      case BodyFunction::kAdd:
        return 0;
      case BodyFunction::kMul:
        return 1;
      case BodyFunction::kMin:
        return std::numeric_limits<T>::max();
      case BodyFunction::kMax:
        return std::numeric_limits<T>::lowest();
      case BodyFunction::kAll:
        return true;
      case BodyFunction::kAny:
        return false;
    }
  }
  BodyFunction func;
};
// Randomized end-to-end check of the kernel against the reference
// implementation from stablehlo_reduce_window_test_util.h.
TYPED_TEST(StablehloReduceWindowTest, FuzzyTest) {
  absl::BitGen bitgen;
  for (size_t iteration = 0; iteration < 1000; ++iteration) {
    const int rank = absl::Uniform(absl::IntervalClosed, bitgen, 1, 3);
    ReduceWindowOpModel<TypeParam> model;
    Body body = Body::GetRandomSupported(
        bitgen, std::is_floating_point<TypeParam>::value);
    model.SetInput(
        RandomVector<int64_t>(bitgen, rank, 1, 10),
        bitgen, -5, 5);
    model.SetBaseDilations(
        RandomVector<int64_t>(bitgen, rank, 1, 3));
    model.SetPadding(
        RandomVector<int64_t>(bitgen, 2 * rank, -5, 5));
    model.SetWindowDimensions(
        RandomVector<int64_t>(bitgen, rank, 1, 3));
    model.SetWindowStrides(
        RandomVector<int64_t>(bitgen, rank, 1, 3));
    model.SetWindowDilations(
        RandomVector<int64_t>(bitgen, rank, 1, 3));
    model.SetInitValue(body.init_value<TypeParam>());
    model.SetBody(body.func);
    const std::vector<int64_t> padded_shape = reference::PadCropShape(
        reference::DilateShape(model.GetInputShape(), model.GetBaseDilations()),
        model.GetPadding());
    // Retry (without consuming an iteration) when cropping would produce an
    // empty tensor, which the kernel rejects by design.
    if (absl::c_any_of(padded_shape, [](int64_t d) { return d <= 0; })) {
      iteration = iteration > 1 ? iteration - 1 : 0;
      continue;
    }
    const reference::Tensor<TypeParam> expected = reference::ReduceWindow(
        reference::Tensor<TypeParam>{model.GetInputShape(),
                                     model.GetInput()},
        model.GetBaseDilations(), model.GetPadding(), model.GetInitValue(),
        model.GetWindowDimensions(), model.GetWindowDilations(),
        model.GetWindowStrides(), body);
    ASSERT_EQ(model.BuildAndInvoke(), kTfLiteOk);
    EXPECT_THAT(model.GetOutputShape(), ElementsAreArray(expected.shape))
        << model;
    EXPECT_THAT(model.GetOutputData(), ElementsAreArray(expected.data))
        << model;
  }
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_reduce_window.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/stablehlo_reduce_window_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ceb21061-0cca-4cc6-964c-1a8e710c929c | cpp | tensorflow/tensorflow | maximum | tensorflow/lite/experimental/shlo/ops/maximum.cc | tensorflow/lite/delegates/xnnpack/maximum_test.cc | #include "tensorflow/lite/experimental/shlo/ops/maximum.h"
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
struct Maximum {
template <class T>
constexpr auto operator()(const T a, const T b) {
return a > b ? a : b;
}
};
MaximumOp Create(MaximumOp::Attributes) { return {}; }
absl::Status Prepare(MaximumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
SHLO_REF_RETURN_ON_ERROR(
CheckSupportedTypes(CheckCtx("maximum"), lhs, IsBoolTensor, IsIntTensor,
IsFloatTensor, IsQuantizedPerTensorTensor));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("maximum"), lhs, output));
SHLO_REF_RETURN_ON_ERROR(
CheckSameBaselineType(CheckCtx("maximum"), rhs, output));
return absl::OkStatus();
}
absl::Status Evaluate(MaximumOp& op, const Tensor& lhs, const Tensor& rhs,
Tensor& output) {
Maximum maximum;
if (IsBoolTensor(lhs) || IsIntTensor(lhs) || IsFloatTensor(lhs)) {
DISPATCH_BOOL_INT_FLOAT(detail::EvaluateNoQuantization,
lhs.tensor_element_type(), maximum, lhs, rhs,
output);
} else if (IsQuantizedPerTensorTensor(lhs)) {
DISPATCH_QUANTIZED(detail::DequantizeOpQuantizePerTensor,
lhs.quantized_per_tensor_element_type().StorageType(),
lhs.quantized_per_tensor_element_type().ExpressedType(),
maximum, lhs, rhs, output)
}
return absl::FailedPreconditionError(
"stablehlo.maximum: Unsupported tensor type.");
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace xnnpack {
TEST(Maximum, 4DBy4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DBy0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, 1, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, 1, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastWidth) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, 1, width, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, 1, width, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastHeight) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastBatch) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, 1, 1, 1})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, 1, 1, 1})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic4DBroadcastHeightWidthChannels) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({1, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({1, height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic3D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({height, width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({width, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 4DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic2D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({batch, channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic1D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({channels})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({channels})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, 2DByStatic0D) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({})
.Input2Shape({batch, channels})
.Input1Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, channels})
.Input2Shape({})
.Input2Static(true)
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, FP16Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.FP16Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.FP16Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, INT8Weights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8Weights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, INT8ChannelWiseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.INT8ChannelWiseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, SparseWeights) {
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input1Static(true)
.SparseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Input2Static(true)
.SparseWeights()
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
TEST(Maximum, MultiThreading) {
TfLiteXNNPackDelegateOptions delegate_options =
TfLiteXNNPackDelegateOptionsDefault();
delegate_options.num_threads = 2;
std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
TfLiteXNNPackDelegateDelete);
std::random_device random_device;
auto rng = std::mt19937(random_device());
auto shape_rng =
std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
const auto batch = shape_rng();
const auto height = shape_rng();
const auto width = shape_rng();
const auto channels = shape_rng();
BinaryElementwiseTester()
.Input1Shape({batch, height, width, channels})
.Input2Shape({batch, height, width, channels})
.Test(BuiltinOperator_MAXIMUM, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/maximum.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/maximum_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
189ba346-65b2-4a18-9e26-4f064de7f3b7 | cpp | google/tensorstore | dump | tensorstore/kvstore/ocdbt/format/dump.cc | tensorstore/kvstore/ocdbt/format/dump_test.cc | #include "tensorstore/kvstore/ocdbt/format/dump.h"
#include <map>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/escaping.h"
#include <nlohmann/json.hpp>
#include "re2/re2.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_array.h"
#include "tensorstore/internal/json_binding/std_variant.h"
#include "tensorstore/internal/path.h"
#include "tensorstore/internal/uri_utils.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/kvstore/ocdbt/config.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_ocdbt {
Result<LabeledIndirectDataReference> LabeledIndirectDataReference::Parse(
std::string_view s) {
LabeledIndirectDataReference r;
static LazyRE2 kPattern = {"([^:]+):([^:]*):([^:]*):([0-9]+):([0-9]+)"};
std::string_view label, encoded_base_path, encoded_relative_path;
if (!RE2::FullMatch(s, *kPattern, &label, &encoded_base_path,
&encoded_relative_path, &r.location.offset,
&r.location.length)) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid indirect data reference: ", tensorstore::QuoteString(s)));
}
TENSORSTORE_ASSIGN_OR_RETURN(r.kind, ParseIndirectDataKind(label));
r.location.file_id.base_path = internal::PercentDecode(encoded_base_path);
r.location.file_id.relative_path =
internal::PercentDecode(encoded_relative_path);
TENSORSTORE_RETURN_IF_ERROR(r.location.Validate(false));
return r;
}
namespace {
namespace jb = tensorstore::internal_json_binding;
// Binds `Config` <-> JSON via `ConfigConstraints`.  When loading, the
// constraints must fully determine the config; otherwise an error is
// returned.  When saving, the config is reduced back to its constraints.
constexpr auto ConfigBinder = jb::Compose<ConfigConstraints>(
    [](auto is_loading, const auto& options, auto* obj, auto* constraints) {
      if constexpr (is_loading) {
        CreateConfig(constraints, *obj);
        if (ConfigConstraints(*obj) != *constraints) {
          return absl::InvalidArgumentError("Config is not fully specified");
        }
      } else {
        *constraints = ConfigConstraints(*obj);
      }
      return absl::OkStatus();
    });
// Characters left unescaped when percent-encoding the base/relative path
// components of the "<kind>:<base>:<relative>:<offset>:<length>" string form.
// Notably ':' is NOT unreserved, so encoded paths never contain the separator.
static inline constexpr internal::AsciiSet
    kLabeledIndirectDataReferenceUnreservedChars{
        "abcdefghijklmnopqrstuvwxyz"
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        "0123456789"
        "-_./"};
// Binds `LabeledIndirectDataReference` <-> its string form.  Loading accepts
// only a JSON string and delegates to `Parse`.  Saving emits the labeled
// string, or a discarded JSON value if the location is missing.
constexpr auto LabeledIndirectDataReferenceBinder =
    [](auto is_loading, const auto& options, auto* obj, auto* j) {
      if constexpr (is_loading) {
        if (auto* s = j->template get_ptr<const std::string*>()) {
          TENSORSTORE_ASSIGN_OR_RETURN(*obj,
                                       LabeledIndirectDataReference::Parse(*s));
        } else {
          return internal_json::ExpectedError(*j, "string");
        }
      } else {
        if (obj->location.IsMissing()) {
          // Discarded values are omitted entirely from the parent object.
          *j = ::nlohmann::json::value_t::discarded;
        } else {
          *j = tensorstore::StrCat(
              IndirectDataKindToString(obj->kind), ":",
              internal::PercentEncodeReserved(
                  obj->location.file_id.base_path,
                  kLabeledIndirectDataReferenceUnreservedChars),
              ":",
              internal::PercentEncodeReserved(
                  obj->location.file_id.relative_path,
                  kLabeledIndirectDataReferenceUnreservedChars),
              ":", obj->location.offset, ":", obj->location.length);
        }
      }
      return absl::OkStatus();
    };
// Returns a binder for a plain `IndirectDataReference` whose kind label is
// fixed by context: saving stamps `kind` into the labeled form; loading keeps
// only the location (the label was already validated by `Parse`).
constexpr auto IndirectDataReferenceBinder(IndirectDataKind kind) {
  return jb::Compose<LabeledIndirectDataReference>(
      [kind](auto is_loading, const auto& options, auto* obj, auto* j) {
        if constexpr (is_loading) {
          *obj = j->location;
        } else {
          j->location = *obj;
          j->kind = kind;
        }
        return absl::OkStatus();
      },
      LabeledIndirectDataReferenceBinder);
}
// `CommitTime` is serialized as its raw integer value.
constexpr auto CommitTimeBinder = jb::Projection<&CommitTime::value>();
// JSON object binder for per-subtree `BtreeNodeStatistics`.
constexpr auto BtreeNodeStatisticsBinder = jb::Object(
    jb::Member(
        "num_indirect_value_bytes",
        jb::Projection<&BtreeNodeStatistics::num_indirect_value_bytes>()),
    jb::Member("num_tree_bytes",
               jb::Projection<&BtreeNodeStatistics::num_tree_bytes>()),
    jb::Member("num_keys", jb::Projection<&BtreeNodeStatistics::num_keys>()));
// Reference to a b-tree node: its storage location (labeled "btreenode")
// plus aggregate statistics.
constexpr auto BtreeNodeReferenceBinder = jb::Object(
    jb::Member("location",
               jb::Projection<&BtreeNodeReference::location>(
                   IndirectDataReferenceBinder(IndirectDataKind::kBtreeNode))),
    jb::Member("statistics", jb::Projection<&BtreeNodeReference::statistics>(
                                 BtreeNodeStatisticsBinder)));
// One b-tree generation: root node reference, generation number, tree height,
// and commit timestamp.
constexpr auto BtreeGenerationReferenceBinder = jb::Object(
    jb::Member("root", jb::Projection<&BtreeGenerationReference::root>(
                           BtreeNodeReferenceBinder)),
    jb::Member("generation_number",
               jb::Projection<&BtreeGenerationReference::generation_number>()),
    jb::Member("root_height",
               jb::Projection<&BtreeGenerationReference::root_height>()),
    jb::Member("commit_time",
               jb::Projection<&BtreeGenerationReference::commit_time>(
                   CommitTimeBinder)));
// Reference to a version-tree node (labeled "versionnode" in the location).
constexpr auto VersionNodeReferenceBinder = jb::Object(
    jb::Member("location", jb::Projection<&VersionNodeReference::location>(
                               IndirectDataReferenceBinder(
                                   IndirectDataKind::kVersionNode))),
    jb::Member("generation_number",
               jb::Projection<&VersionNodeReference::generation_number>()),
    jb::Member("height", jb::Projection<&VersionNodeReference::height>()),
    jb::Member("num_generations",
               jb::Projection<&VersionNodeReference::num_generations>()),
    jb::Member(
        "commit_time",
        jb::Projection<&VersionNodeReference::commit_time>(CommitTimeBinder)));
// Top-level manifest: database config, inline version list, and references
// to out-of-line version-tree nodes.
constexpr auto ManifestBinder = jb::Object(
    jb::Member("config", jb::Projection<&Manifest::config>(ConfigBinder)),
    jb::Member("versions", jb::Projection<&Manifest::versions>(
                               jb::Array(BtreeGenerationReferenceBinder))),
    jb::Member("version_tree_nodes",
               jb::Projection<&Manifest::version_tree_nodes>(
                   jb::Array(VersionNodeReferenceBinder))));
// Binds `absl::Cord` <-> JSON.  Loading accepts either a JSON byte string or
// a plain string; saving always emits a byte string (flattening the cord's
// chunks into one contiguous buffer).
constexpr auto BinaryCordBinder = [](auto is_loading, const auto& options,
                                     auto* obj, auto* j) {
  if constexpr (is_loading) {
    if (auto* b = j->template get_ptr<const ::nlohmann::json::binary_t*>()) {
      *obj = absl::Cord(std::string_view(
          reinterpret_cast<const char*>(b->data()), b->size()));
      return absl::OkStatus();
    } else if (auto* s = j->template get_ptr<const std::string*>()) {
      *obj = absl::Cord(*s);
      return absl::OkStatus();
    } else {
      return internal_json::ExpectedError(*j, "string or byte string");
    }
  } else {
    ::nlohmann::json::binary_t v;
    v.reserve(obj->size());
    for (std::string_view chunk : obj->Chunks()) {
      v.insert(v.end(), chunk.begin(), chunk.end());
    }
    *j = std::move(v);
    return absl::OkStatus();
  }
};
// Leaf value: either stored inline ("inline_value") or out-of-line via an
// indirect reference labeled "value" ("indirect_value").
constexpr auto LeafNodeValueReferenceBinder = jb::Variant(
    jb::Member("inline_value", BinaryCordBinder),
    jb::Member("indirect_value",
               IndirectDataReferenceBinder(IndirectDataKind::kValue)));
// Serialize-only binder for a leaf entry (the `std::false_type is_loading`
// parameter restricts it to saving).  The node-level `key_prefix` is
// prepended so the dumped key is the full key.
constexpr auto BtreeLeafNodeEntryBinder(std::string_view key_prefix) {
  return
      [=](std::false_type is_loading, const auto& options, auto* obj, auto* j) {
        ::nlohmann::json::binary_t key;
        key.insert(key.end(), key_prefix.begin(), key_prefix.end());
        key.insert(key.end(), obj->key.begin(), obj->key.end());
        ::nlohmann::json::object_t x{{"key", key}};
        TENSORSTORE_RETURN_IF_ERROR(LeafNodeValueReferenceBinder(
            std::false_type{}, IncludeDefaults{}, &obj->value_reference, &x));
        *j = std::move(x);
        return absl::OkStatus();
      };
}
// Serialize-only binder for an interior entry: full key (prefix + entry key),
// the common prefix shared by the child subtree, and the child node reference.
constexpr auto BtreeInteriorNodeEntryBinder(std::string_view key_prefix) {
  return [=](std::false_type is_loading, const auto& options, auto* obj,
             auto* j) {
    ::nlohmann::json::binary_t key;
    key.insert(key.end(), key_prefix.begin(), key_prefix.end());
    key.insert(key.end(), obj->key.begin(), obj->key.end());
    auto common_prefix = key;
    // `subtree_common_prefix_length` counts bytes of `obj->key` only, so the
    // node-level prefix length is added back here.
    common_prefix.resize(obj->subtree_common_prefix_length + key_prefix.size());
    ::nlohmann::json::object_t x;
    TENSORSTORE_RETURN_IF_ERROR(BtreeNodeReferenceBinder(
        std::false_type{}, IncludeDefaults{}, &obj->node, &x));
    x["key"] = key;
    x["subtree_common_prefix"] = common_prefix;
    *j = std::move(x);
    return absl::OkStatus();
  };
}
// Whole b-tree node: entry representation (leaf vs. interior) is chosen by
// the active alternative of `BtreeNode::entries`; both entry binders receive
// the node's `key_prefix` so dumped keys are fully expanded.
constexpr auto BtreeNodeBinder = jb::Object(
    jb::Member("height", jb::Projection<&BtreeNode::height>()),
    jb::Member("entries",
               [](auto is_loading, const auto& options, auto* obj, auto* j) {
                 return jb::Variant(
                     jb::Array(BtreeLeafNodeEntryBinder(obj->key_prefix)),
                     jb::Array(BtreeInteriorNodeEntryBinder(obj->key_prefix)))(
                     is_loading, options, &obj->entries, j);
               }));
// Version-tree node: height-0 nodes hold generation references directly;
// higher nodes hold references to child version nodes.
constexpr auto VersionTreeNodeBinder = jb::Object(
    jb::Member("height", jb::Projection<&VersionTreeNode::height>()),
    jb::Member("version_tree_arity_log2",
               jb::Projection<&VersionTreeNode::version_tree_arity_log2>()),
    jb::Member("entries", jb::Projection<&VersionTreeNode::entries>(jb::Variant(
                              jb::Array(BtreeGenerationReferenceBinder),
                              jb::Array(VersionNodeReferenceBinder)))));
}  // namespace
// Renders `manifest` as JSON for debugging.  Serialization of a valid
// manifest cannot fail, hence the unconditional `.value()`.
::nlohmann::json Dump(const Manifest& manifest) {
  auto json_result = jb::ToJson(manifest, ManifestBinder);
  return json_result.value();
}
// Renders a b-tree node as JSON for debugging; serialization cannot fail.
::nlohmann::json Dump(const BtreeNode& node) {
  auto json_result = jb::ToJson(node, BtreeNodeBinder);
  return json_result.value();
}
// Renders a version-tree node as JSON for debugging; serialization cannot fail.
::nlohmann::json Dump(const VersionTreeNode& node) {
  auto json_result = jb::ToJson(node, VersionTreeNodeBinder);
  return json_result.value();
}
}
} | #include "tensorstore/kvstore/ocdbt/format/dump.h"
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/cord.h"
#include <nlohmann/json.hpp>
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/kvstore/ocdbt/format/btree.h"
#include "tensorstore/kvstore/ocdbt/format/config.h"
#include "tensorstore/kvstore/ocdbt/format/indirect_data_reference.h"
#include "tensorstore/kvstore/ocdbt/format/manifest.h"
#include "tensorstore/kvstore/ocdbt/format/version_tree.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status_testutil.h"
namespace {
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal_ocdbt::BtreeNode;
using ::tensorstore::internal_ocdbt::CommitTime;
using ::tensorstore::internal_ocdbt::DataFileId;
using ::tensorstore::internal_ocdbt::Dump;
using ::tensorstore::internal_ocdbt::IndirectDataKind;
using ::tensorstore::internal_ocdbt::IndirectDataReference;
using ::tensorstore::internal_ocdbt::LabeledIndirectDataReference;
using ::tensorstore::internal_ocdbt::Manifest;
// Each kind label round-trips to the corresponding IndirectDataKind; "%20"
// in a path component is percent-decoded.
TEST(LabeledIndirectDataReferenceTest, ParseBtreeNode) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value,
      LabeledIndirectDataReference::Parse("btreenode:abc:def%20:1:36"));
  EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(1, value.location.offset);
  EXPECT_EQ(36, value.location.length);
}
TEST(LabeledIndirectDataReferenceTest, ParseValue) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value, LabeledIndirectDataReference::Parse("value:abc:def%20:1:36"));
  EXPECT_EQ(IndirectDataKind::kValue, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(1, value.location.offset);
  EXPECT_EQ(36, value.location.length);
}
TEST(LabeledIndirectDataReferenceTest, ParseVersionNode) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value,
      LabeledIndirectDataReference::Parse("versionnode:abc:def%20:1:36"));
  EXPECT_EQ(IndirectDataKind::kVersionNode, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(1, value.location.offset);
  EXPECT_EQ(36, value.location.length);
}
// Boundary: offset == INT64_MAX with zero length is still valid.
TEST(LabeledIndirectDataReferenceTest, MaxOffset) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value, LabeledIndirectDataReference::Parse(
                      "btreenode:abc:def%20:9223372036854775807:0"));
  EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(9223372036854775807, value.location.offset);
  EXPECT_EQ(0, value.location.length);
}
// Boundary: offset + length == INT64_MAX is still valid.
TEST(LabeledIndirectDataReferenceTest, MaxOffsetAndLength) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto value, LabeledIndirectDataReference::Parse(
                      "btreenode:abc:def%20:9223372036854775806:1"));
  EXPECT_EQ(IndirectDataKind::kBtreeNode, value.kind);
  EXPECT_EQ((DataFileId{"abc", "def "}), value.location.file_id);
  EXPECT_EQ(9223372036854775806, value.location.offset);
  EXPECT_EQ(1, value.location.length);
}
// offset > INT64_MAX is rejected by Validate (reported as data loss).
TEST(LabeledIndirectDataReferenceTest, OffsetTooLarge) {
  EXPECT_THAT(
      LabeledIndirectDataReference::Parse(
          "btreenode:abc:def%20:9223372036854775808:0"),
      MatchesStatus(absl::StatusCode::kDataLoss, "Invalid offset/length .*"));
}
// An unrecognized kind label is an invalid-argument error.
TEST(LabeledIndirectDataReferenceTest, InvalidKind) {
  EXPECT_THAT(LabeledIndirectDataReference::Parse("abc:abc:def:0:10"),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Invalid indirect data kind: abc"));
}
// offset + length overflowing INT64_MAX is rejected.
TEST(LabeledIndirectDataReferenceTest, LengthTooLarge) {
  EXPECT_THAT(
      LabeledIndirectDataReference::Parse(
          "btreenode:abc:def%20:9223372036854775807:1"),
      MatchesStatus(absl::StatusCode::kDataLoss, "Invalid offset/length .*"));
}
// Dumps a manifest with one b-tree generation and three version-tree node
// references, and checks the exact JSON, including the "versionnode:"- and
// "btreenode:"-labeled location strings.
TEST(DumpTest, Manifest) {
  Manifest manifest;
  manifest.config.uuid = {
      {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}};
  manifest.config.version_tree_arity_log2 = 1;
  {
    auto& x = manifest.versions.emplace_back();
    x.root.location.file_id = {"abc", "def"};
    x.root.location.offset = 10;
    x.root.location.length = 42;
    x.generation_number = 15;
    x.root.statistics.num_indirect_value_bytes = 101;
    x.root.statistics.num_tree_bytes = 220;
    x.root.statistics.num_keys = 8;
    x.root_height = 0;
    x.commit_time = CommitTime{10};
  }
  {
    auto& x = manifest.version_tree_nodes.emplace_back();
    x.location.file_id = {"abc", "def"};
    x.location.offset = 10;
    x.location.length = 42;
    x.generation_number = 8;
    x.height = 3;
    x.commit_time = CommitTime{1};
    x.num_generations = 8;
  }
  {
    auto& x = manifest.version_tree_nodes.emplace_back();
    x.location.file_id = {"abc", "def"};
    x.location.offset = 10;
    x.location.length = 42;
    x.generation_number = 12;
    x.height = 2;
    x.commit_time = CommitTime{5};
    x.num_generations = 4;
  }
  {
    auto& x = manifest.version_tree_nodes.emplace_back();
    x.location.file_id = {"abc", "def"};
    x.location.offset = 10;
    x.location.length = 42;
    x.generation_number = 14;
    x.height = 1;
    x.commit_time = CommitTime{8};
    x.num_generations = 2;
  }
  // Config fields not set above ("compression", "max_decoded_node_bytes",
  // "max_inline_value_bytes") appear with their default values.
  EXPECT_THAT(Dump(manifest),
              MatchesJson({
                  {"config",
                   {{"uuid", "000102030405060708090a0b0c0d0e0f"},
                    {"compression", {{"id", "zstd"}}},
                    {"max_decoded_node_bytes", 8388608},
                    {"max_inline_value_bytes", 100},
                    {"version_tree_arity_log2", 1}}},
                  {"version_tree_nodes",
                   {{
                        {"commit_time", 1},
                        {"generation_number", 8},
                        {"height", 3},
                        {"location", "versionnode:abc:def:10:42"},
                        {"num_generations", 8},
                    },
                    {
                        {"commit_time", 5},
                        {"generation_number", 12},
                        {"height", 2},
                        {"location", "versionnode:abc:def:10:42"},
                        {"num_generations", 4},
                    },
                    {
                        {"commit_time", 8},
                        {"generation_number", 14},
                        {"height", 1},
                        {"location", "versionnode:abc:def:10:42"},
                        {"num_generations", 2},
                    }}},
                  {"versions",
                   {{{"commit_time", 10},
                     {"root",
                      {{"location", "btreenode:abc:def:10:42"},
                       {"statistics",
                        {{"num_indirect_value_bytes", 101},
                         {"num_keys", 8},
                         {"num_tree_bytes", 220}}}}},
                     {"generation_number", 15},
                     {"root_height", 0}}}},
              }));
}
// Leaf-node dump: keys are emitted as byte strings with the node's
// key_prefix ("ab") prepended; inline values dump as byte strings and
// indirect values as "value:"-labeled location strings.
TEST(DumpTest, BtreeLeafNode) {
  BtreeNode node;
  node.height = 0;
  node.key_prefix = "ab";
  auto& entries = node.entries.emplace<BtreeNode::LeafNodeEntries>();
  entries.push_back({"c",
                     absl::Cord("value1")});
  entries.push_back({"d",
                     absl::Cord("value2")});
  entries.push_back({"e",
                     IndirectDataReference{{"abc", "def"}, 1, 25}});
  EXPECT_THAT(
      Dump(node),
      MatchesJson({
          {"entries",
           {
               {
                   {"inline_value",
                    ::nlohmann::json::binary_t{
                        std::vector<uint8_t>{'v', 'a', 'l', 'u', 'e', '1'}}},
                   {"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
                               'a', 'b', 'c'}}},
               },
               {
                   {"inline_value",
                    ::nlohmann::json::binary_t{
                        std::vector<uint8_t>{'v', 'a', 'l', 'u', 'e', '2'}}},
                   {"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
                               'a', 'b', 'd'}}},
               },
               {
                   {"indirect_value", "value:abc:def:1:25"},
                   {"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
                               'a', 'b', 'e'}}},
               },
           }},
          {"height", 0},
      }));
}
// Interior-node dump: each entry carries the child "location"
// ("btreenode:"-labeled), full "key", "subtree_common_prefix" (truncated to
// subtree_common_prefix_length, here 1), and child "statistics".
TEST(DumpTest, BtreeInteriorNode) {
  BtreeNode node;
  node.height = 2;
  auto& entries = node.entries.emplace<BtreeNode::InteriorNodeEntries>();
  entries.push_back({"abc",
                     1,
                     {
                         {
                             {"abc", "def"},
                             5,
                             6,
                         },
                         {
                             100,
                             200,
                             5,
                         },
                     }});
  entries.push_back({"def",
                     1,
                     {
                         {
                             {"ghi", "jkl"},
                             42,
                             9,
                         },
                         {
                             101,
                             220,
                             8,
                         },
                     }});
  EXPECT_THAT(
      Dump(node),
      MatchesJson({
          {"entries",
           {
               {{"location", "btreenode:abc:def:5:6"},
                {"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
                            'a', 'b', 'c'}}},
                {"subtree_common_prefix",
                 ::nlohmann::json::binary_t{std::vector<uint8_t>{'a'}}},
                {
                    "statistics",
                    {{"num_indirect_value_bytes", 100},
                     {"num_keys", 5},
                     {"num_tree_bytes", 200}},
                }},
               {
                   {"location", "btreenode:ghi:jkl:42:9"},
                   {"key", ::nlohmann::json::binary_t{std::vector<uint8_t>{
                               'd', 'e', 'f'}}},
                   {"subtree_common_prefix",
                    ::nlohmann::json::binary_t{std::vector<uint8_t>{'d'}}},
                   {"statistics",
                    {{"num_indirect_value_bytes", 101},
                     {"num_keys", 8},
                     {"num_tree_bytes", 220}}},
               },
           }},
          {"height", 2},
      }));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/dump.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/kvstore/ocdbt/format/dump_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
84e73c58-4c71-4603-8fd2-133a99b2e99a | cpp | tensorflow/tensorflow | squared_difference | tensorflow/lite/delegates/hexagon/builders/squared_difference.cc | tensorflow/lite/delegates/xnnpack/squared_difference_test.cc | #include "hexagon/hexagon_nn_ops.h"
#include "tensorflow/lite/c/c_api_types.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/hexagon/builders/op_builder.h"
namespace tflite {
namespace delegates {
namespace hexagon {
// Builds the Hexagon graph for TFLite's SQUARED_DIFFERENCE op as two nodes:
// this builder's own node (op_type is supplied by the delegate registry --
// presumably a quantized subtract; confirm against the registration) produces
// the element-wise difference, and a QuantizedMul node squares that result.
class SquaredDifferenceOpBuilder : public OpBuilder {
 public:
  explicit SquaredDifferenceOpBuilder(GraphBuilder* graph_builder, int op_type)
      : OpBuilder(graph_builder, op_type) {}
  TfLiteStatus PopulateSubGraph(const TfLiteIntArray* inputs,
                                const TfLiteIntArray* outputs,
                                TfLiteContext* context) override;
  TfLiteStatus RegisterOutputs(const TfLiteIntArray* outputs,
                               TfLiteContext* context) override;
 private:
  // Output tensor of the final (squaring) mul node; registered as the op's
  // TFLite output in RegisterOutputs.
  TensorID node_output_;
};
// Wires up: diff = <this op>(a, b); out = QuantizedMul_8x8to8(diff, diff),
// with the mul clamped to the TFLite output tensor's quantization range.
TfLiteStatus SquaredDifferenceOpBuilder::PopulateSubGraph(
    const TfLiteIntArray* inputs, const TfLiteIntArray* outputs,
    TfLiteContext* context) {
  const int tensor_a_index = inputs->data[0];
  const int tensor_b_index = inputs->data[1];
  const auto& tensor_a = context->tensors[tensor_a_index];
  const auto& tensor_b = context->tensors[tensor_b_index];
  // Inputs to this builder's own node: the two operands followed by each
  // operand's (min, max) quantization range.
  AddInput(graph_builder_->GetHexagonTensorId(tensor_a_index));
  AddInput(graph_builder_->GetHexagonTensorId(tensor_b_index));
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, tensor_a));
  TF_LITE_ENSURE_STATUS(ComputeAndAddMinAndMax(context, tensor_b));
  // Const scalar nodes holding the output tensor's quantization range; the
  // mul node uses them to clamp its result.
  float output_min = -1, output_max = -1;
  TF_LITE_ENSURE_STATUS(ComputeMinAndMaxQuantValues(
      context->tensors[outputs->data[0]], &output_min, &output_max));
  auto* output_min_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_min), sizeof(output_min));
  auto* output_max_const = graph_builder_->AddConstNodeWithData(
      kScalarShape, reinterpret_cast<char*>(&output_max), sizeof(output_max));
  int output_batch_size, output_height_size, output_width_size,
      output_depth_size;
  GetDims(&output_batch_size, &output_height_size, &output_width_size,
          &output_depth_size, context->tensors[outputs->data[0]].dims);
  // Outputs of the difference node: quantized values plus its (min, max).
  auto sub_out = AddOutput(sizeof(uint8_t), 4,
                           {output_batch_size, output_height_size,
                            output_width_size, output_depth_size});
  auto sub_min = AddOutput(sizeof(float), 4, kScalarShape);
  auto sub_max = AddOutput(sizeof(float), 4, kScalarShape);
  // Squaring node: multiplies the difference by itself.  Both operands are
  // sub_out, so both operand ranges are (sub_min, sub_max).
  auto* mul_op = graph_builder_->AddNode(GetTFLiteNodeID());
  mul_op->SetOpType(OP_QuantizedMul_8x8to8);
  mul_op->AddInput(sub_out);
  mul_op->AddInput(sub_out);
  mul_op->AddInput(sub_min);
  mul_op->AddInput(sub_max);
  mul_op->AddInput(sub_min);
  mul_op->AddInput(sub_max);
  mul_op->AddInput(TensorID(output_min_const->GetID(), 0));
  mul_op->AddInput(TensorID(output_max_const->GetID(), 0));
  // First mul output is the op's result; the trailing two are its range.
  node_output_ = mul_op->AddOutput(sizeof(uint8_t), 4,
                                   {output_batch_size, output_height_size,
                                    output_width_size, output_depth_size});
  mul_op->AddOutput(sizeof(float), 4, kScalarShape);
  mul_op->AddOutput(sizeof(float), 4, kScalarShape);
  return kTfLiteOk;
}
// Maps the TFLite output tensor to the squaring node's output tensor.
TfLiteStatus SquaredDifferenceOpBuilder::RegisterOutputs(
    const TfLiteIntArray* outputs, TfLiteContext* context) {
  const TensorID& squared_output = node_output_;
  graph_builder_->AddTensorWithID(outputs->data[0], squared_output.first,
                                  squared_output.second);
  return kTfLiteOk;
}
// Factory used by the delegate's op-builder registry; the caller takes
// ownership of the returned builder.
OpBuilder* CreateSquaredDifferenceOpBuilder(GraphBuilder* graph_builder,
                                            int op_type) {
  auto* builder = new SquaredDifferenceOpBuilder(graph_builder, op_type);
  return builder;
}
}
}
} | #include <cstdint>
#include <functional>
#include <memory>
#include <random>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/xnnpack/binary_elementwise_tester.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
namespace tflite {
namespace xnnpack {
// Same-shape 4D inputs: no broadcasting.  Shapes are random in [2, 5].
TEST(SquaredDifference, 4DBy4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Broadcast along all dims except channels, in both operand orders.
TEST(SquaredDifference, 4DBy4DBroadcastChannels) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({1, 1, 1, channels})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({1, 1, 1, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Broadcast along all dims except width, in both operand orders.
TEST(SquaredDifference, 4DBy4DBroadcastWidth) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({1, 1, width, 1})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({1, 1, width, 1})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Broadcast along all dims except height, in both operand orders.
TEST(SquaredDifference, 4DBy4DBroadcastHeight) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({1, height, 1, 1})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({1, height, 1, 1})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Broadcast along all dims except batch, in both operand orders.
TEST(SquaredDifference, 4DBy4DBroadcastBatch) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, 1, 1, 1})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, 1, 1, 1})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Broadcast along the batch dim only, in both operand orders.
TEST(SquaredDifference, 4DBy4DBroadcastHeightWidthChannels) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({1, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({1, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Rank-mixing broadcasts: a lower-rank operand is broadcast against a 4D
// (or 2D) operand, in both operand orders.
TEST(SquaredDifference, 4DBy3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({width, channels})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DBy1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({channels})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// {} denotes a 0D scalar operand.
TEST(SquaredDifference, 4DBy0D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DBy2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, channels})
      .Input2Shape({batch, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DBy1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({channels})
      .Input2Shape({batch, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, channels})
      .Input2Shape({channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 2DBy0D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({})
      .Input2Shape({batch, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, channels})
      .Input2Shape({})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Static-input variants: one operand is a constant (weights) tensor, which
// exercises the delegate's constant-folding/static-tensor path.  Each test
// runs with input 1 static, then input 2 static.
TEST(SquaredDifference, 4DByStatic4D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get())
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastChannels) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({1, 1, 1, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({1, 1, 1, channels})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastWidth) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({1, 1, width, 1})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({1, 1, width, 1})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastHeight) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({1, height, 1, 1})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({1, height, 1, 1})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastBatch) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, 1, 1, 1})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, 1, 1, 1})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
TEST(SquaredDifference, 4DByStatic4DBroadcastHeightWidthChannels) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({1, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({1, height, width, channels})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Rank-broadcast tests: a static lower-rank operand (3-D down to 0-D scalar)
// is combined with a full 4-D tensor, in both operand orders.
TEST(SquaredDifference, 4DByStatic3D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({height, width, channels})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// 4-D tensor against a static 2-D {width, channels} operand.
TEST(SquaredDifference, 4DByStatic2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({width, channels})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// 4-D tensor against a static 1-D per-channel operand.
TEST(SquaredDifference, 4DByStatic1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({channels})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// 4-D tensor against a static 0-D (scalar) operand.
TEST(SquaredDifference, 4DByStatic0D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// 2-D variants: a {batch, channels} tensor against an equal-rank,
// lower-rank, or scalar static operand, in both operand orders.
TEST(SquaredDifference, 2DByStatic2D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, channels})
      .Input2Shape({batch, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, channels})
      .Input2Shape({batch, channels})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// 2-D tensor against a static 1-D per-channel operand.
TEST(SquaredDifference, 2DByStatic1D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({channels})
      .Input2Shape({batch, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, channels})
      .Input2Shape({channels})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// 2-D tensor against a static 0-D (scalar) operand.
TEST(SquaredDifference, 2DByStatic0D) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({})
      .Input2Shape({batch, channels})
      .Input1Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, channels})
      .Input2Shape({})
      .Input2Static(true)
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Static-operand encoding variants: FP16, INT8 (per-tensor and per-channel
// quantized), and sparse weights must all be handled by the delegate for a
// static input to SQUARED_DIFFERENCE.
TEST(SquaredDifference, FP16Weights) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .FP16Weights()
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input2Static(true)
      .FP16Weights()
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Static operand stored as per-tensor quantized INT8 weights.
TEST(SquaredDifference, INT8Weights) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .INT8Weights()
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input2Static(true)
      .INT8Weights()
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Static operand stored as per-channel quantized INT8 weights.
TEST(SquaredDifference, INT8ChannelWiseWeights) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .INT8ChannelWiseWeights()
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input2Static(true)
      .INT8ChannelWiseWeights()
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Static operand stored with sparse encoding.
TEST(SquaredDifference, SparseWeights) {
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(nullptr),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input1Static(true)
      .SparseWeights()
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Input2Static(true)
      .SparseWeights()
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
// Exercises the delegate with a 2-thread pool to catch any threading issues
// in the SQUARED_DIFFERENCE kernel dispatch.
TEST(SquaredDifference, MultiThreading) {
  TfLiteXNNPackDelegateOptions delegate_options =
      TfLiteXNNPackDelegateOptionsDefault();
  delegate_options.num_threads = 2;
  std::unique_ptr<TfLiteDelegate, decltype(&TfLiteXNNPackDelegateDelete)>
      xnnpack_delegate(TfLiteXNNPackDelegateCreate(&delegate_options),
                       TfLiteXNNPackDelegateDelete);
  std::random_device random_device;
  auto rng = std::mt19937(random_device());
  auto shape_rng =
      std::bind(std::uniform_int_distribution<int32_t>(2, 5), std::ref(rng));
  const auto batch = shape_rng();
  const auto height = shape_rng();
  const auto width = shape_rng();
  const auto channels = shape_rng();
  BinaryElementwiseTester()
      .Input1Shape({batch, height, width, channels})
      .Input2Shape({batch, height, width, channels})
      .Test(BuiltinOperator_SQUARED_DIFFERENCE, xnnpack_delegate.get());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/hexagon/builders/squared_difference.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/xnnpack/squared_difference_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
96364946-0861-4cb2-be1c-71b60afa30b0 | cpp | google/quiche | tls_chlo_extractor | quiche/quic/core/tls_chlo_extractor.cc | quiche/quic/core/tls_chlo_extractor_test.cc | #include "quiche/quic/core/tls_chlo_extractor.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "openssl/ssl.h"
#include "quiche/quic/core/frames/quic_crypto_frame.h"
#include "quiche/quic/core/quic_data_reader.h"
#include "quiche/quic/core/quic_error_codes.h"
#include "quiche/quic/core/quic_framer.h"
#include "quiche/quic/core/quic_time.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/common/platform/api/quiche_logging.h"
namespace quic {
namespace {
// Returns true if |client_hello| carries the given TLS extension.
bool HasExtension(const SSL_CLIENT_HELLO* client_hello, uint16_t extension) {
  const uint8_t* unused_extension_bytes;
  size_t unused_extension_len;
  return 1 == SSL_early_callback_ctx_extension_get(client_hello, extension,
                                                   &unused_extension_bytes,
                                                   &unused_extension_len);
}
// Parses the supported_groups extension from |client_hello|. Returns the
// list of named groups, or an empty vector if the extension is absent or
// its length prefix is inconsistent with the extension body.
std::vector<uint16_t> GetSupportedGroups(const SSL_CLIENT_HELLO* client_hello) {
  const uint8_t* extension_data;
  size_t extension_len;
  int rv = SSL_early_callback_ctx_extension_get(
      client_hello, TLSEXT_TYPE_supported_groups, &extension_data,
      &extension_len);
  if (rv != 1) {
    return {};
  }
  QuicDataReader named_groups_reader(
      reinterpret_cast<const char*>(extension_data), extension_len);
  uint16_t named_groups_len;
  // The body must be exactly the 2-byte length prefix plus the list itself.
  if (!named_groups_reader.ReadUInt16(&named_groups_len) ||
      named_groups_len + sizeof(uint16_t) != extension_len) {
    QUIC_CODE_COUNT(quic_chlo_supported_groups_invalid_length);
    return {};
  }
  std::vector<uint16_t> named_groups;
  while (!named_groups_reader.IsDoneReading()) {
    uint16_t named_group;
    // An odd trailing byte means a malformed list; keep what was read so far.
    if (!named_groups_reader.ReadUInt16(&named_group)) {
      QUIC_CODE_COUNT(quic_chlo_supported_groups_odd_length);
      QUIC_LOG_FIRST_N(WARNING, 10) << "Failed to read named groups";
      break;
    }
    named_groups.push_back(named_group);
  }
  return named_groups;
}
// Parses the certificate-compression extension from |client_hello|.
// Returns the advertised algorithm IDs, or an empty vector on absence or
// any framing inconsistency (the list uses a 1-byte length prefix and
// 2-byte algorithm IDs).
std::vector<uint16_t> GetCertCompressionAlgos(
    const SSL_CLIENT_HELLO* client_hello) {
  const uint8_t* extension_data;
  size_t extension_len;
  int rv = SSL_early_callback_ctx_extension_get(
      client_hello, TLSEXT_TYPE_cert_compression, &extension_data,
      &extension_len);
  if (rv != 1) {
    return {};
  }
  QuicDataReader cert_compression_algos_reader(
      reinterpret_cast<const char*>(extension_data), extension_len);
  uint8_t algos_len;
  if (!cert_compression_algos_reader.ReadUInt8(&algos_len) || algos_len == 0 ||
      algos_len % sizeof(uint16_t) != 0 ||
      algos_len + sizeof(uint8_t) != extension_len) {
    QUIC_CODE_COUNT(quic_chlo_cert_compression_algos_invalid_length);
    return {};
  }
  size_t num_algos = algos_len / sizeof(uint16_t);
  std::vector<uint16_t> cert_compression_algos;
  cert_compression_algos.reserve(num_algos);
  for (size_t i = 0; i < num_algos; ++i) {
    uint16_t cert_compression_algo;
    if (!cert_compression_algos_reader.ReadUInt16(&cert_compression_algo)) {
      QUIC_CODE_COUNT(quic_chlo_fail_to_read_cert_compression_algo);
      return {};
    }
    cert_compression_algos.push_back(cert_compression_algo);
  }
  return cert_compression_algos;
}
}  // namespace
// Default-constructs in the kInitial state; the sequencer delivers crypto
// data back to this object via the QuicStreamSequencer::StreamInterface.
TlsChloExtractor::TlsChloExtractor()
    : crypto_stream_sequencer_(this),
      state_(State::kInitial),
      parsed_crypto_frame_in_this_packet_(false) {}
// Move construction delegates to the default constructor and then reuses the
// move-assignment operator, which rewires all back-pointers to |this|.
TlsChloExtractor::TlsChloExtractor(TlsChloExtractor&& other)
    : TlsChloExtractor() {
  *this = std::move(other);
}
// Move assignment must re-point every callback target (framer visitor,
// sequencer stream, and the SSL ex_data slot) at |this|, since the moved-from
// object registered itself with those collaborators.
TlsChloExtractor& TlsChloExtractor::operator=(TlsChloExtractor&& other) {
  framer_ = std::move(other.framer_);
  if (framer_) {
    framer_->set_visitor(this);
  }
  crypto_stream_sequencer_ = std::move(other.crypto_stream_sequencer_);
  crypto_stream_sequencer_.set_stream(this);
  ssl_ = std::move(other.ssl_);
  if (ssl_) {
    // Re-attach |this| to the SSL handle so static callbacks resolve to the
    // new owner.
    std::pair<SSL_CTX*, int> shared_handles = GetSharedSslHandles();
    int ex_data_index = shared_handles.second;
    const int rv = SSL_set_ex_data(ssl_.get(), ex_data_index, this);
    QUICHE_CHECK_EQ(rv, 1) << "Internal allocation failure in SSL_set_ex_data";
  }
  state_ = other.state_;
  error_details_ = std::move(other.error_details_);
  parsed_crypto_frame_in_this_packet_ =
      other.parsed_crypto_frame_in_this_packet_;
  supported_groups_ = std::move(other.supported_groups_);
  cert_compression_algos_ = std::move(other.cert_compression_algos_);
  alpns_ = std::move(other.alpns_);
  server_name_ = std::move(other.server_name_);
  client_hello_bytes_ = std::move(other.client_hello_bytes_);
  return *this;
}
// Feeds one received packet into the extractor. Packets are rejected after an
// unrecoverable failure, for unsupported/non-TLS versions, or when a later
// packet's version disagrees with the version the framer was created with.
void TlsChloExtractor::IngestPacket(const ParsedQuicVersion& version,
                                    const QuicReceivedPacket& packet) {
  if (state_ == State::kUnrecoverableFailure) {
    QUIC_DLOG(ERROR) << "Not ingesting packet after unrecoverable error";
    return;
  }
  if (version == UnsupportedQuicVersion()) {
    QUIC_DLOG(ERROR) << "Not ingesting packet with unsupported version";
    return;
  }
  if (version.handshake_protocol != PROTOCOL_TLS1_3) {
    QUIC_DLOG(ERROR) << "Not ingesting packet with non-TLS version " << version;
    return;
  }
  if (framer_) {
    // Subsequent packets must match the version of the first one.
    if (!framer_->IsSupportedVersion(version)) {
      QUIC_DLOG(ERROR)
          << "Not ingesting packet with version mismatch, expected "
          << framer_->version() << ", got " << version;
      return;
    }
  } else {
    // Lazily create the framer on the first ingested packet.
    framer_ = std::make_unique<QuicFramer>(
        ParsedQuicVersionVector{version}, QuicTime::Zero(),
        Perspective::IS_SERVER, /*expected_server_connection_id_length=*/0);
    framer_->set_visitor(this);
  }
  parsed_crypto_frame_in_this_packet_ = false;
  const bool parse_success = framer_->ProcessPacket(packet);
  // A CRYPTO frame without a completed ClientHello means we only have a
  // fragment so far.
  if (state_ == State::kInitial && parsed_crypto_frame_in_this_packet_) {
    state_ = State::kParsedPartialChloFragment;
  }
  if (!parse_success) {
    QUIC_DLOG(ERROR) << "Failed to process packet";
    return;
  }
}
// QuicFramerVisitor: only long-header INITIAL packets are parsed; everything
// else is skipped. Sets up the initial obfuscators (keyed on the destination
// connection ID) so the packet payload can be decrypted.
bool TlsChloExtractor::OnUnauthenticatedPublicHeader(
    const QuicPacketHeader& header) {
  if (header.form != IETF_QUIC_LONG_HEADER_PACKET) {
    QUIC_DLOG(ERROR) << "Not parsing non-long-header packet " << header;
    return false;
  }
  if (header.long_packet_type != INITIAL) {
    QUIC_DLOG(ERROR) << "Not parsing non-initial packet " << header;
    return false;
  }
  if (GetQuicRestartFlag(quic_dispatcher_ack_buffered_initial_packets)) {
    // With the flag enabled, keep the decrypter derived from the first
    // packet's connection ID instead of re-deriving per packet.
    if (framer_->GetDecrypter(ENCRYPTION_INITIAL) == nullptr) {
      framer_->SetInitialObfuscators(header.destination_connection_id);
    }
  } else {
    framer_->SetInitialObfuscators(header.destination_connection_id);
  }
  return true;
}
// Should be unreachable: IngestPacket already filters mismatched versions.
bool TlsChloExtractor::OnProtocolVersionMismatch(ParsedQuicVersion version) {
  QUIC_BUG(quic_bug_10855_1) << "Unexpected version mismatch, expected "
                             << framer_->version() << ", got " << version;
  return false;
}
// QuicStreamSequencer error hooks: funnel both overloads into
// HandleUnrecoverableError with a formatted description.
void TlsChloExtractor::OnUnrecoverableError(QuicErrorCode error,
                                            const std::string& details) {
  HandleUnrecoverableError(absl::StrCat(
      "Crypto stream error ", QuicErrorCodeToString(error), ": ", details));
}
void TlsChloExtractor::OnUnrecoverableError(
    QuicErrorCode error, QuicIetfTransportErrorCodes ietf_error,
    const std::string& details) {
  HandleUnrecoverableError(absl::StrCat(
      "Crypto stream error ", QuicErrorCodeToString(error), "(",
      QuicIetfTransportErrorCodeString(ietf_error), "): ", details));
}
// Buffers an ENCRYPTION_INITIAL CRYPTO frame into the sequencer; frames at
// any other encryption level indicate a framer bug.
bool TlsChloExtractor::OnCryptoFrame(const QuicCryptoFrame& frame) {
  if (frame.level != ENCRYPTION_INITIAL) {
    QUIC_BUG(quic_bug_10855_2) << "Parsed bad-level CRYPTO frame " << frame;
    return false;
  }
  parsed_crypto_frame_in_this_packet_ = true;
  crypto_stream_sequencer_.OnCryptoFrame(frame);
  return true;
}
// Drains contiguous crypto-stream bytes into BoringSSL, then drives the
// handshake. The handshake "result" is intentionally ignored: success is
// reported through SelectCertCallback, failure through SendAlertCallback.
void TlsChloExtractor::OnDataAvailable() {
  SetupSslHandle();
  struct iovec iov;
  while (crypto_stream_sequencer_.GetReadableRegion(&iov)) {
    const int rv = SSL_provide_quic_data(
        ssl_.get(), ssl_encryption_initial,
        reinterpret_cast<const uint8_t*>(iov.iov_base), iov.iov_len);
    if (rv != 1) {
      HandleUnrecoverableError("SSL_provide_quic_data failed");
      return;
    }
    crypto_stream_sequencer_.MarkConsumed(iov.iov_len);
  }
  (void)SSL_do_handshake(ssl_.get());
}
// Recovers the TlsChloExtractor instance previously attached to |ssl| with
// SSL_set_ex_data, using the process-wide ex-data index.
TlsChloExtractor* TlsChloExtractor::GetInstanceFromSSL(SSL* ssl) {
  const int ex_data_index = GetSharedSslHandles().second;
  void* extractor = SSL_get_ex_data(ssl, ex_data_index);
  return reinterpret_cast<TlsChloExtractor*>(extractor);
}
// The four SSL_QUIC_METHOD callbacks below should never fire while only
// parsing a ClientHello; each one records an unrecoverable error.
int TlsChloExtractor::SetReadSecretCallback(
    SSL* ssl, enum ssl_encryption_level_t /*level*/,
    const SSL_CIPHER* /*cipher*/, const uint8_t* /*secret*/,
    size_t /*secret_length*/) {
  GetInstanceFromSSL(ssl)->HandleUnexpectedCallback("SetReadSecretCallback");
  return 0;
}
int TlsChloExtractor::SetWriteSecretCallback(
    SSL* ssl, enum ssl_encryption_level_t /*level*/,
    const SSL_CIPHER* /*cipher*/, const uint8_t* /*secret*/,
    size_t /*secret_length*/) {
  GetInstanceFromSSL(ssl)->HandleUnexpectedCallback("SetWriteSecretCallback");
  return 0;
}
int TlsChloExtractor::WriteMessageCallback(
    SSL* ssl, enum ssl_encryption_level_t /*level*/, const uint8_t* /*data*/,
    size_t /*len*/) {
  GetInstanceFromSSL(ssl)->HandleUnexpectedCallback("WriteMessageCallback");
  return 0;
}
int TlsChloExtractor::FlushFlightCallback(SSL* ssl) {
  GetInstanceFromSSL(ssl)->HandleUnexpectedCallback("FlushFlightCallback");
  return 0;
}
void TlsChloExtractor::HandleUnexpectedCallback(
const std::string& callback_name) {
std::string error_details =
absl::StrCat("Unexpected callback ", callback_name);
QUIC_BUG(quic_bug_10855_3) << error_details;
HandleUnrecoverableError(error_details);
}
// Static trampoline: forwards a TLS alert from BoringSSL to the instance.
int TlsChloExtractor::SendAlertCallback(SSL* ssl,
                                        enum ssl_encryption_level_t /*level*/,
                                        uint8_t desc) {
  GetInstanceFromSSL(ssl)->SendAlert(desc);
  return 0;
}
// Handles a TLS alert from BoringSSL. A handshake_failure alert after the
// full CHLO has already been parsed is expected (SelectCertCallback returns
// ssl_select_cert_error on purpose) and is therefore ignored.
void TlsChloExtractor::SendAlert(uint8_t tls_alert_value) {
  if (tls_alert_value == SSL3_AD_HANDSHAKE_FAILURE && HasParsedFullChlo()) {
    return;
  }
  HandleUnrecoverableError(absl::StrCat(
      "BoringSSL attempted to send alert ", static_cast<int>(tls_alert_value),
      " ", SSL_alert_desc_string_long(tls_alert_value)));
  // Record the alert only if the error actually moved us into the failure
  // state (HandleUnrecoverableError is a no-op after a full CHLO).
  if (state_ == State::kUnrecoverableFailure) {
    tls_alert_ = tls_alert_value;
  }
}
// Fires once BoringSSL has a complete ClientHello. Returning
// ssl_select_cert_error deliberately aborts the handshake — extraction is
// done at this point.
enum ssl_select_cert_result_t TlsChloExtractor::SelectCertCallback(
    const SSL_CLIENT_HELLO* client_hello) {
  GetInstanceFromSSL(client_hello->ssl)->HandleParsedChlo(client_hello);
  return ssl_select_cert_error;
}
// Extracts everything of interest from the fully-parsed ClientHello: SNI,
// resumption/early-data attempts, the raw CHLO bytes, ALPN list, supported
// groups, and (behind a flag) cert-compression algorithms, then advances the
// state machine.
void TlsChloExtractor::HandleParsedChlo(const SSL_CLIENT_HELLO* client_hello) {
  const char* server_name =
      SSL_get_servername(client_hello->ssl, TLSEXT_NAMETYPE_host_name);
  if (server_name) {
    server_name_ = std::string(server_name);
  }
  resumption_attempted_ =
      HasExtension(client_hello, TLSEXT_TYPE_pre_shared_key);
  early_data_attempted_ = HasExtension(client_hello, TLSEXT_TYPE_early_data);
  // SelectCertCallback fires at most once, so the buffer must still be empty.
  QUICHE_DCHECK(client_hello_bytes_.empty());
  client_hello_bytes_.assign(
      client_hello->client_hello,
      client_hello->client_hello + client_hello->client_hello_len);
  const uint8_t* alpn_data;
  size_t alpn_len;
  int rv = SSL_early_callback_ctx_extension_get(
      client_hello, TLSEXT_TYPE_application_layer_protocol_negotiation,
      &alpn_data, &alpn_len);
  if (rv == 1) {
    // ALPN wire format: 16-bit list length, then 8-bit-length-prefixed
    // protocol names.
    QuicDataReader alpns_reader(reinterpret_cast<const char*>(alpn_data),
                                alpn_len);
    absl::string_view alpns_payload;
    if (!alpns_reader.ReadStringPiece16(&alpns_payload)) {
      QUIC_CODE_COUNT_N(quic_chlo_alpns_invalid, 1, 2);
      HandleUnrecoverableError("Failed to read alpns_payload");
      return;
    }
    QuicDataReader alpns_payload_reader(alpns_payload);
    while (!alpns_payload_reader.IsDoneReading()) {
      absl::string_view alpn_payload;
      if (!alpns_payload_reader.ReadStringPiece8(&alpn_payload)) {
        QUIC_CODE_COUNT_N(quic_chlo_alpns_invalid, 2, 2);
        HandleUnrecoverableError("Failed to read alpn_payload");
        return;
      }
      alpns_.emplace_back(std::string(alpn_payload));
    }
  }
  supported_groups_ = GetSupportedGroups(client_hello);
  if (GetQuicReloadableFlag(quic_parse_cert_compression_algos_from_chlo)) {
    cert_compression_algos_ = GetCertCompressionAlgos(client_hello);
    if (cert_compression_algos_.empty()) {
      QUIC_RELOADABLE_FLAG_COUNT_N(quic_parse_cert_compression_algos_from_chlo,
                                   1, 2);
    } else {
      QUIC_RELOADABLE_FLAG_COUNT_N(quic_parse_cert_compression_algos_from_chlo,
                                   2, 2);
    }
  }
  // Single-packet vs multi-packet CHLO is distinguished by whether a partial
  // fragment was ever recorded.
  if (state_ == State::kInitial) {
    state_ = State::kParsedFullSinglePacketChlo;
  } else if (state_ == State::kParsedPartialChloFragment) {
    state_ = State::kParsedFullMultiPacketChlo;
  } else {
    QUIC_BUG(quic_bug_10855_4)
        << "Unexpected state on successful parse " << StateToString(state_);
  }
}
// Lazily creates the process-wide SSL_CTX (configured for TLS 1.3 / QUIC with
// the static callbacks above) and the ex-data index used to attach extractor
// instances. Both are intentionally leaked: they live for the whole process.
std::pair<SSL_CTX*, int> TlsChloExtractor::GetSharedSslHandles() {
  static std::pair<SSL_CTX*, int>* shared_handles = []() {
    CRYPTO_library_init();
    SSL_CTX* ssl_ctx = SSL_CTX_new(TLS_with_buffers_method());
    SSL_CTX_set_min_proto_version(ssl_ctx, TLS1_3_VERSION);
    SSL_CTX_set_max_proto_version(ssl_ctx, TLS1_3_VERSION);
    static const SSL_QUIC_METHOD kQuicCallbacks{
        TlsChloExtractor::SetReadSecretCallback,
        TlsChloExtractor::SetWriteSecretCallback,
        TlsChloExtractor::WriteMessageCallback,
        TlsChloExtractor::FlushFlightCallback,
        TlsChloExtractor::SendAlertCallback};
    SSL_CTX_set_quic_method(ssl_ctx, &kQuicCallbacks);
    SSL_CTX_set_select_certificate_cb(ssl_ctx,
                                      TlsChloExtractor::SelectCertCallback);
    int ex_data_index =
        SSL_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr);
    return new std::pair<SSL_CTX*, int>(ssl_ctx, ex_data_index);
  }();
  return *shared_handles;
}
// Lazily creates this instance's SSL handle, attaches |this| via ex_data,
// puts it in server (accept) mode, and selects the legacy vs RFC QUIC
// transport-parameters codepoint based on the negotiated version.
void TlsChloExtractor::SetupSslHandle() {
  if (ssl_) {
    return;
  }
  std::pair<SSL_CTX*, int> shared_handles = GetSharedSslHandles();
  SSL_CTX* ssl_ctx = shared_handles.first;
  int ex_data_index = shared_handles.second;
  ssl_ = bssl::UniquePtr<SSL>(SSL_new(ssl_ctx));
  const int rv = SSL_set_ex_data(ssl_.get(), ex_data_index, this);
  QUICHE_CHECK_EQ(rv, 1) << "Internal allocation failure in SSL_set_ex_data";
  SSL_set_accept_state(ssl_.get());
  int use_legacy_extension = 0;
  if (framer_->version().UsesLegacyTlsExtension()) {
    use_legacy_extension = 1;
  }
  SSL_set_quic_use_legacy_codepoint(ssl_.get(), use_legacy_extension);
}
// Transitions to the failure state and records |error_details|. Errors that
// arrive after a full CHLO has been parsed cannot invalidate it, so they are
// only logged.
void TlsChloExtractor::HandleUnrecoverableError(
    const std::string& error_details) {
  if (HasParsedFullChlo()) {
    QUIC_DLOG(ERROR) << "Ignoring error: " << error_details;
    return;
  }
  QUIC_DLOG(ERROR) << "Handling error: " << error_details;
  state_ = State::kUnrecoverableFailure;
  // Accumulate details from successive failures, separated by "; ".
  if (!error_details_.empty()) {
    error_details_ = absl::StrCat(error_details_, "; ", error_details);
  } else {
    error_details_ = error_details;
  }
}
// Human-readable name for a State value (used in logs and QUIC_BUG messages).
std::string TlsChloExtractor::StateToString(State state) {
  switch (state) {
    case State::kInitial:
      return "Initial";
    case State::kParsedFullSinglePacketChlo:
      return "ParsedFullSinglePacketChlo";
    case State::kParsedFullMultiPacketChlo:
      return "ParsedFullMultiPacketChlo";
    case State::kParsedPartialChloFragment:
      return "ParsedPartialChloFragment";
    case State::kUnrecoverableFailure:
      return "UnrecoverableFailure";
  }
  // Defensive fallback for out-of-range enum values.
  return absl::StrCat("Unknown(", static_cast<int>(state), ")");
}
// Streams the textual name of |state| (see StateToString).
std::ostream& operator<<(std::ostream& os,
                         const TlsChloExtractor::State& state) {
  return os << TlsChloExtractor::StateToString(state);
}
} | #include "quiche/quic/core/tls_chlo_extractor.h"
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "openssl/ssl.h"
#include "quiche/quic/core/http/quic_spdy_client_session.h"
#include "quiche/quic/core/quic_connection.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/crypto_test_utils.h"
#include "quiche/quic/test_tools/first_flight.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simple_session_cache.h"
#include "quiche/common/print_elements.h"
namespace quic {
namespace test {
namespace {
// No-op certificate-compression callbacks: registered only so the client
// advertises the cert_compression extension; they are never expected to run
// in these tests.
static int DummyCompressFunc(SSL* /*ssl*/, CBB* /*out*/, const uint8_t* /*in*/,
                             size_t /*in_len*/) {
  return 1;
}
static int DummyDecompressFunc(SSL* /*ssl*/, CRYPTO_BUFFER** /*out*/,
                               size_t /*uncompressed_len*/,
                               const uint8_t* /*in*/, size_t /*in_len*/) {
  return 1;
}
using testing::_;
using testing::AnyNumber;
// Parameterized fixture: each test runs against one TLS-capable QUIC version.
class TlsChloExtractorTest : public QuicTestWithParam<ParsedQuicVersion> {
 protected:
  TlsChloExtractorTest() : version_(GetParam()), server_id_(TestServerId()) {}
  // Builds a fresh extractor and a client first flight using the default
  // crypto config.
  void Initialize() {
    tls_chlo_extractor_ = std::make_unique<TlsChloExtractor>();
    AnnotatedPackets packets =
        GetAnnotatedFirstFlightOfPackets(version_, config_);
    packets_ = std::move(packets.packets)
    crypto_stream_size_ = packets.crypto_stream_size;
    QUIC_DLOG(INFO) << "Initialized with " << packets_.size()
                    << " packets with crypto_stream_size:"
                    << crypto_stream_size_;
  }
  // Same, but with a caller-provided client crypto config (e.g. one holding
  // a resumption ticket).
  void Initialize(std::unique_ptr<QuicCryptoClientConfig> crypto_config) {
    tls_chlo_extractor_ = std::make_unique<TlsChloExtractor>();
    AnnotatedPackets packets = GetAnnotatedFirstFlightOfPackets(
        version_, config_, TestConnectionId(), EmptyQuicConnectionId(),
        std::move(crypto_config));
    packets_ = std::move(packets.packets);
    crypto_stream_size_ = packets.crypto_stream_size;
    QUIC_DLOG(INFO) << "Initialized with " << packets_.size()
                    << " packets with crypto_stream_size:"
                    << crypto_stream_size_;
  }
  // Runs a complete client/fake-server handshake so that |crypto_config|'s
  // session cache is populated with a resumption ticket.
  void PerformFullHandshake(QuicCryptoClientConfig* crypto_config) const {
    ASSERT_NE(crypto_config->session_cache(), nullptr);
    MockQuicConnectionHelper client_helper, server_helper;
    MockAlarmFactory alarm_factory;
    ParsedQuicVersionVector supported_versions = {version_};
    PacketSavingConnection* client_connection =
        new PacketSavingConnection(&client_helper, &alarm_factory,
                                   Perspective::IS_CLIENT, supported_versions);
    client_connection->AdvanceTime(QuicTime::Delta::FromSeconds(1));
    QuicSpdyClientSession client_session(config_, supported_versions,
                                         client_connection, server_id_,
                                         crypto_config);
    client_session.Initialize();
    std::unique_ptr<QuicCryptoServerConfig> server_crypto_config =
        crypto_test_utils::CryptoServerConfigForTesting();
    QuicConfig server_config;
    EXPECT_CALL(*client_connection, SendCryptoData(_, _, _)).Times(AnyNumber());
    client_session.GetMutableCryptoStream()->CryptoConnect();
    crypto_test_utils::HandshakeWithFakeServer(
        &server_config, server_crypto_config.get(), &server_helper,
        &alarm_factory, client_connection,
        client_session.GetMutableCryptoStream(),
        AlpnForVersion(client_connection->version()));
    // Provide server application state so 0-RTT resumption is possible.
    SettingsFrame server_settings;
    server_settings.values[SETTINGS_QPACK_MAX_TABLE_CAPACITY] =
        kDefaultQpackMaxDynamicTableCapacity;
    std::string settings_frame =
        HttpEncoder::SerializeSettingsFrame(server_settings);
    client_session.GetMutableCryptoStream()
        ->SetServerApplicationStateForResumption(
            std::make_unique<ApplicationState>(
                settings_frame.data(),
                settings_frame.data() + settings_frame.length()));
  }
  // Parses each buffered packet's public header and feeds it to the
  // extractor, then clears the buffer.
  void IngestPackets() {
    for (const std::unique_ptr<QuicReceivedPacket>& packet : packets_) {
      ReceivedPacketInfo packet_info(
          QuicSocketAddress(TestPeerIPAddress(), kTestPort),
          QuicSocketAddress(TestPeerIPAddress(), kTestPort), *packet);
      std::string detailed_error;
      std::optional<absl::string_view> retry_token;
      const QuicErrorCode error = QuicFramer::ParsePublicHeaderDispatcher(
          *packet, /*expected_destination_connection_id_length=*/0,
          &packet_info.form, &packet_info.long_packet_type,
          &packet_info.version_flag, &packet_info.use_length_prefix,
          &packet_info.version_label, &packet_info.version,
          &packet_info.destination_connection_id,
          &packet_info.source_connection_id, &retry_token, &detailed_error);
      ASSERT_THAT(error, IsQuicNoError()) << detailed_error;
      tls_chlo_extractor_->IngestPacket(packet_info.version,
                                        packet_info.packet);
    }
    packets_.clear();
  }
  // Verifies the extractor parsed a full CHLO with the expected ALPN and SNI.
  void ValidateChloDetails(const TlsChloExtractor* extractor = nullptr) const {
    if (extractor == nullptr) {
      extractor = tls_chlo_extractor_.get();
    }
    EXPECT_TRUE(extractor->HasParsedFullChlo());
    std::vector<std::string> alpns = extractor->alpns();
    ASSERT_EQ(alpns.size(), 1u);
    EXPECT_EQ(alpns[0], AlpnForVersion(version_));
    EXPECT_EQ(extractor->server_name(), TestHostname());
    // CHLO bytes exclude the 4-byte TLS handshake message header.
    EXPECT_EQ(extractor->client_hello_bytes().size(), crypto_stream_size_ - 4);
  }
  // Pads the CHLO with a 2000-byte custom transport parameter so the first
  // flight spans multiple packets.
  void IncreaseSizeOfChlo() {
    constexpr auto kCustomParameterId =
        static_cast<TransportParameters::TransportParameterId>(0xff33);
    std::string kCustomParameterValue(2000, '-');
    config_.custom_transport_parameters_to_send()[kCustomParameterId] =
        kCustomParameterValue;
  }
  ParsedQuicVersion version_;
  QuicServerId server_id_;
  std::unique_ptr<TlsChloExtractor> tls_chlo_extractor_;
  QuicConfig config_;
  std::vector<std::unique_ptr<QuicReceivedPacket>> packets_;
  uint64_t crypto_stream_size_;
};
INSTANTIATE_TEST_SUITE_P(TlsChloExtractorTests, TlsChloExtractorTest,
::testing::ValuesIn(AllSupportedVersionsWithTls()),
::testing::PrintToStringParamName());
// A default-size CHLO fits into a single packet; the extractor must reach the
// single-packet terminal state and report neither resumption nor 0-RTT, since
// this client has no cached session to resume.
TEST_P(TlsChloExtractorTest, Simple) {
  Initialize();
  EXPECT_EQ(packets_.size(), 1u);
  IngestPackets();
  ValidateChloDetails();
  EXPECT_EQ(tls_chlo_extractor_->state(),
            TlsChloExtractor::State::kParsedFullSinglePacketChlo);
  EXPECT_FALSE(tls_chlo_extractor_->resumption_attempted());
  EXPECT_FALSE(tls_chlo_extractor_->early_data_attempted());
}
TEST_P(TlsChloExtractorTest, TlsExtensionInfo_ResumptionOnly) {
auto crypto_client_config = std::make_unique<QuicCryptoClientConfig>(
crypto_test_utils::ProofVerifierForTesting(),
std::make_unique<SimpleSessionCache>());
PerformFullHandshake(crypto_client_config.get());
SSL_CTX_set_early_data_enabled(crypto_client_config->ssl_ctx(), 0);
Initialize(std::move(crypto_client_config));
EXPECT_GE(packets_.size(), 1u);
IngestPackets();
ValidateChloDetails();
EXPECT_EQ(tls_chlo_extractor_->state(),
TlsChloExtractor::State::kParsedFullSinglePacketChlo);
EXPECT_TRUE(tls_chlo_extractor_->resumption_attempted());
EXPECT_FALSE(tls_chlo_extractor_->early_data_attempted());
}
TEST_P(TlsChloExtractorTest, TlsExtensionInfo_ZeroRtt) {
auto crypto_client_config = std::make_unique<QuicCryptoClientConfig>(
crypto_test_utils::ProofVerifierForTesting(),
std::make_unique<SimpleSessionCache>());
PerformFullHandshake(crypto_client_config.get());
IncreaseSizeOfChlo();
Initialize(std::move(crypto_client_config));
EXPECT_GE(packets_.size(), 1u);
IngestPackets();
ValidateChloDetails();
EXPECT_EQ(tls_chlo_extractor_->state(),
TlsChloExtractor::State::kParsedFullMultiPacketChlo);
EXPECT_TRUE(tls_chlo_extractor_->resumption_attempted());
EXPECT_TRUE(tls_chlo_extractor_->early_data_attempted());
}
TEST_P(TlsChloExtractorTest, TlsExtensionInfo_SupportedGroups) {
const std::vector<std::vector<uint16_t>> preferred_groups_to_test = {
{SSL_GROUP_X25519},
{SSL_GROUP_X25519_KYBER768_DRAFT00, SSL_GROUP_X25519},
};
for (const std::vector<uint16_t>& preferred_groups :
preferred_groups_to_test) {
auto crypto_client_config = std::make_unique<QuicCryptoClientConfig>(
crypto_test_utils::ProofVerifierForTesting());
crypto_client_config->set_preferred_groups(preferred_groups);
Initialize(std::move(crypto_client_config));
IngestPackets();
ValidateChloDetails();
EXPECT_EQ(tls_chlo_extractor_->supported_groups(), preferred_groups);
}
}
TEST_P(TlsChloExtractorTest, TlsExtensionInfo_CertCompressionAlgos) {
const std::vector<std::vector<uint16_t>> supported_groups_to_test = {
{},
{1},
{1, 2},
{1, 2, 3},
{1, 2, 3, 65535},
};
for (const std::vector<uint16_t>& supported_cert_compression_algos :
supported_groups_to_test) {
auto crypto_client_config = std::make_unique<QuicCryptoClientConfig>(
crypto_test_utils::ProofVerifierForTesting());
for (uint16_t cert_compression_algo : supported_cert_compression_algos) {
ASSERT_TRUE(SSL_CTX_add_cert_compression_alg(
crypto_client_config->ssl_ctx(), cert_compression_algo,
DummyCompressFunc, DummyDecompressFunc));
}
Initialize(std::move(crypto_client_config));
IngestPackets();
ValidateChloDetails();
if (GetQuicReloadableFlag(quic_parse_cert_compression_algos_from_chlo)) {
EXPECT_EQ(tls_chlo_extractor_->cert_compression_algos(),
supported_cert_compression_algos)
<< quiche::PrintElements(
tls_chlo_extractor_->cert_compression_algos());
} else {
EXPECT_TRUE(tls_chlo_extractor_->cert_compression_algos().empty());
}
}
}
TEST_P(TlsChloExtractorTest, MultiPacket) {
IncreaseSizeOfChlo();
Initialize();
EXPECT_EQ(packets_.size(), 2u);
IngestPackets();
ValidateChloDetails();
EXPECT_EQ(tls_chlo_extractor_->state(),
TlsChloExtractor::State::kParsedFullMultiPacketChlo);
}
TEST_P(TlsChloExtractorTest, MultiPacketReordered) {
IncreaseSizeOfChlo();
Initialize();
ASSERT_EQ(packets_.size(), 2u);
std::swap(packets_[0], packets_[1]);
IngestPackets();
ValidateChloDetails();
EXPECT_EQ(tls_chlo_extractor_->state(),
TlsChloExtractor::State::kParsedFullMultiPacketChlo);
}
TEST_P(TlsChloExtractorTest, MoveAssignment) {
Initialize();
EXPECT_EQ(packets_.size(), 1u);
TlsChloExtractor other_extractor;
*tls_chlo_extractor_ = std::move(other_extractor);
IngestPackets();
ValidateChloDetails();
EXPECT_EQ(tls_chlo_extractor_->state(),
TlsChloExtractor::State::kParsedFullSinglePacketChlo);
}
TEST_P(TlsChloExtractorTest, MoveAssignmentAfterExtraction) {
Initialize();
EXPECT_EQ(packets_.size(), 1u);
IngestPackets();
ValidateChloDetails();
EXPECT_EQ(tls_chlo_extractor_->state(),
TlsChloExtractor::State::kParsedFullSinglePacketChlo);
TlsChloExtractor other_extractor = std::move(*tls_chlo_extractor_);
EXPECT_EQ(other_extractor.state(),
TlsChloExtractor::State::kParsedFullSinglePacketChlo);
ValidateChloDetails(&other_extractor);
}
TEST_P(TlsChloExtractorTest, MoveAssignmentBetweenPackets) {
IncreaseSizeOfChlo();
Initialize();
ASSERT_EQ(packets_.size(), 2u);
TlsChloExtractor other_extractor;
ReceivedPacketInfo packet_info(
QuicSocketAddress(TestPeerIPAddress(), kTestPort),
QuicSocketAddress(TestPeerIPAddress(), kTestPort), *packets_[0]);
std::string detailed_error;
std::optional<absl::string_view> retry_token;
const QuicErrorCode error = QuicFramer::ParsePublicHeaderDispatcher(
*packets_[0], 0,
&packet_info.form, &packet_info.long_packet_type,
&packet_info.version_flag, &packet_info.use_length_prefix,
&packet_info.version_label, &packet_info.version,
&packet_info.destination_connection_id, &packet_info.source_connection_id,
&retry_token, &detailed_error);
ASSERT_THAT(error, IsQuicNoError()) << detailed_error;
other_extractor.IngestPacket(packet_info.version, packet_info.packet);
packets_.erase(packets_.begin());
EXPECT_EQ(packets_.size(), 1u);
*tls_chlo_extractor_ = std::move(other_extractor);
IngestPackets();
ValidateChloDetails();
EXPECT_EQ(tls_chlo_extractor_->state(),
TlsChloExtractor::State::kParsedFullMultiPacketChlo);
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/tls_chlo_extractor.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/tls_chlo_extractor_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
346fb862-6591-4707-9ae5-a1961ad6018b | cpp | abseil/abseil-cpp | cord_data_edge | absl/strings/internal/cord_data_edge.h | absl/strings/internal/cord_data_edge_test.cc | #ifndef ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
#define ABSL_STRINGS_INTERNAL_CORD_DATA_EDGE_H_
#include <cassert>
#include <cstddef>
#include "absl/base/config.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_flat.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
inline bool IsDataEdge(const CordRep* edge) {
assert(edge != nullptr);
if (edge->tag == EXTERNAL || edge->tag >= FLAT) return true;
if (edge->tag == SUBSTRING) edge = edge->substring()->child;
return edge->tag == EXTERNAL || edge->tag >= FLAT;
}
// Returns the character data referenced by a data edge as a string_view.
// Requires IsDataEdge(edge); the view aliases memory owned by the edge and
// is only valid while the edge is alive.
inline absl::string_view EdgeData(const CordRep* edge) {
  assert(IsDataEdge(edge));
  size_t offset = 0;
  // The length of the edge (for a substring, the substring node's length —
  // read before descending into the child, which may be longer).
  const size_t length = edge->length;
  if (edge->IsSubstring()) {
    // A substring node contributes only an offset; the child holds the bytes.
    offset = edge->substring()->start;
    edge = edge->substring()->child;
  }
  return edge->tag >= FLAT
             ? absl::string_view{edge->flat()->Data() + offset, length}
             : absl::string_view{edge->external()->base + offset, length};
}
}
ABSL_NAMESPACE_END
}
#endif | #include "absl/strings/internal/cord_data_edge.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_test_util.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::absl::cordrep_testing::MakeExternal;
using ::absl::cordrep_testing::MakeFlat;
using ::absl::cordrep_testing::MakeSubstring;
TEST(CordDataEdgeTest, IsDataEdgeOnFlat) {
CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
EXPECT_TRUE(IsDataEdge(rep));
CordRep::Unref(rep);
}
TEST(CordDataEdgeTest, IsDataEdgeOnExternal) {
CordRep* rep = MakeExternal("Lorem ipsum dolor sit amet, consectetur ...");
EXPECT_TRUE(IsDataEdge(rep));
CordRep::Unref(rep);
}
TEST(CordDataEdgeTest, IsDataEdgeOnSubstringOfFlat) {
CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
CordRep* substr = MakeSubstring(1, 20, rep);
EXPECT_TRUE(IsDataEdge(substr));
CordRep::Unref(substr);
}
TEST(CordDataEdgeTest, IsDataEdgeOnSubstringOfExternal) {
CordRep* rep = MakeExternal("Lorem ipsum dolor sit amet, consectetur ...");
CordRep* substr = MakeSubstring(1, 20, rep);
EXPECT_TRUE(IsDataEdge(substr));
CordRep::Unref(substr);
}
TEST(CordDataEdgeTest, IsDataEdgeOnBtree) {
CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
CordRepBtree* tree = CordRepBtree::New(rep);
EXPECT_FALSE(IsDataEdge(tree));
CordRep::Unref(tree);
}
TEST(CordDataEdgeTest, IsDataEdgeOnBadSubstr) {
CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
CordRep* substr = MakeSubstring(1, 18, MakeSubstring(1, 20, rep));
EXPECT_FALSE(IsDataEdge(substr));
CordRep::Unref(substr);
}
TEST(CordDataEdgeTest, EdgeDataOnFlat) {
absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
CordRep* rep = MakeFlat(value);
EXPECT_EQ(EdgeData(rep), value);
CordRep::Unref(rep);
}
TEST(CordDataEdgeTest, EdgeDataOnExternal) {
absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
CordRep* rep = MakeExternal(value);
EXPECT_EQ(EdgeData(rep), value);
CordRep::Unref(rep);
}
TEST(CordDataEdgeTest, EdgeDataOnSubstringOfFlat) {
absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
CordRep* rep = MakeFlat(value);
CordRep* substr = MakeSubstring(1, 20, rep);
EXPECT_EQ(EdgeData(substr), value.substr(1, 20));
CordRep::Unref(substr);
}
TEST(CordDataEdgeTest, EdgeDataOnSubstringOfExternal) {
absl::string_view value = "Lorem ipsum dolor sit amet, consectetur ...";
CordRep* rep = MakeExternal(value);
CordRep* substr = MakeSubstring(1, 20, rep);
EXPECT_EQ(EdgeData(substr), value.substr(1, 20));
CordRep::Unref(substr);
}
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
TEST(CordDataEdgeTest, IsDataEdgeOnNullPtr) {
EXPECT_DEATH(IsDataEdge(nullptr), ".*");
}
TEST(CordDataEdgeTest, EdgeDataOnNullPtr) {
EXPECT_DEATH(EdgeData(nullptr), ".*");
}
TEST(CordDataEdgeTest, EdgeDataOnBtree) {
CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
CordRepBtree* tree = CordRepBtree::New(rep);
EXPECT_DEATH(EdgeData(tree), ".*");
CordRep::Unref(tree);
}
TEST(CordDataEdgeTest, EdgeDataOnBadSubstr) {
CordRep* rep = MakeFlat("Lorem ipsum dolor sit amet, consectetur ...");
CordRep* substr = MakeSubstring(1, 18, MakeSubstring(1, 20, rep));
EXPECT_DEATH(EdgeData(substr), ".*");
CordRep::Unref(substr);
}
#endif
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_data_edge.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_data_edge_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
5130d7d4-9f23-4265-be36-863bc3775da5 | cpp | tensorflow/tensorflow | api | tensorflow/lite/delegates/gpu/cl/api.cc | tensorflow/core/api_def/api_test.cc | #include "tensorflow/lite/delegates/gpu/cl/api.h"
#include <utility>
#ifndef CL_DELEGATE_NO_GL
#define CL_DELEGATE_ALLOW_GL
#endif
#include <algorithm>
#include <cstring>
#include <memory>
#include <variant>
#include <vector>
#include "absl/memory/memory.h"
#include "absl/types/span.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_command_queue.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_errors.h"
#include "tensorflow/lite/delegates/gpu/cl/cl_event.h"
#include "tensorflow/lite/delegates/gpu/cl/environment.h"
#include "tensorflow/lite/delegates/gpu/cl/inference_context.h"
#include "tensorflow/lite/delegates/gpu/cl/kernels/converter.h"
#include "tensorflow/lite/delegates/gpu/cl/opencl_wrapper.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor.h"
#include "tensorflow/lite/delegates/gpu/cl/tensor_type_util.h"
#include "tensorflow/lite/delegates/gpu/common/data_type.h"
#include "tensorflow/lite/delegates/gpu/common/precision.h"
#include "tensorflow/lite/delegates/gpu/common/shape.h"
#include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h"
#include "tensorflow/lite/delegates/gpu/common/tensor.h"
#include "tensorflow/lite/delegates/gpu/tflite_profile.h"
#ifdef CL_DELEGATE_ALLOW_GL
#include <EGL/eglext.h>
#include "tensorflow/lite/delegates/gpu/cl/egl_sync.h"
#include "tensorflow/lite/delegates/gpu/cl/gl_interop.h"
#endif
namespace tflite {
namespace gpu {
namespace cl {
namespace {
// Tie used when the external and internal tensor definitions are identical:
// no conversion or copy is needed, the tie merely stores the object handle.
class NoopTensorTie : public TensorTie {
 public:
  NoopTensorTie(const TensorTieDef& def, TensorObject obj)
      : TensorTie(def), obj_(obj) {}
  // Applicable only when the external definition matches the internal one.
  static bool IsSupported(const TensorTieDef& def) {
    return def.external_def == def.internal_def;
  }
  absl::Status SetExternalObject(TensorObject obj) final {
    // Only user-provided objects may be replaced by the caller.
    if (!def().external_def.object_def.user_provided) {
      return absl::InvalidArgumentError("Tensor object is readonly.");
    }
    if (!IsValid(def().external_def, obj)) {
      return absl::InvalidArgumentError("Given object is not valid");
    }
    obj_ = obj;
    return absl::OkStatus();
  }
  TensorObject GetExternalObject() final { return obj_; }
  // External and internal objects coincide, so copies are no-ops.
  absl::Status CopyToExternalObject() final { return absl::OkStatus(); }
  absl::Status CopyFromExternalObject() final { return absl::OkStatus(); }

 private:
  TensorObject obj_;
};
class DefaultTensorTie : public TensorTie {
public:
DefaultTensorTie(const TensorTieDef& def, TensorObject internal_obj)
: TensorTie(def), internal_obj_(internal_obj) {}
static bool IsSupported(
const TensorTieDef& def,
const TensorObjectConverterBuilder& converter_builder) {
auto object_type = def.external_def.object_def.object_type;
#ifdef CL_DELEGATE_ALLOW_GL
if (def.external_def.object_def.user_provided &&
GlClBufferCopier::IsSupported(def.external_def.object_def,
def.internal_def.object_def)) {
return true;
}
#endif
return (object_type == ObjectType::OPENCL_BUFFER ||
object_type == ObjectType::OPENCL_TEXTURE ||
object_type == ObjectType::CPU_MEMORY) &&
converter_builder.IsSupported(def.internal_def, def.external_def) &&
converter_builder.IsSupported(def.external_def, def.internal_def);
}
static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
TensorObjectConverterBuilder* converter_builder,
Environment* env, std::unique_ptr<TensorTie>* tie) {
auto tie_impl = std::make_unique<DefaultTensorTie>(def, internal_object);
RETURN_IF_ERROR(tie_impl->Init(converter_builder, env));
*tie = std::move(tie_impl);
return absl::OkStatus();
}
absl::Status CopyToExternalObject() final {
if (!converter_to_) {
return absl::UnavailableError("Conversion is not available");
}
return converter_to_->Convert(internal_obj_, GetExternalObject());
}
absl::Status CopyFromExternalObject() final {
if (!converter_from_) {
return absl::UnavailableError("Conversion is not available");
}
return converter_from_->Convert(GetExternalObject(), internal_obj_);
}
absl::Status SetExternalObject(TensorObject obj) final {
if (!def().external_def.object_def.user_provided) {
return absl::InvalidArgumentError("External object is read-only");
}
if (!IsValid(def().external_def, obj)) {
return absl::InvalidArgumentError("Given object is not valid");
}
external_obj_ = obj;
return absl::OkStatus();
}
TensorObject GetExternalObject() final { return external_obj_; }
private:
absl::Status Init(TensorObjectConverterBuilder* converter_builder,
Environment* env) {
#ifdef CL_DELEGATE_ALLOW_GL
if (def().external_def.object_def.user_provided &&
GlClBufferCopier::IsSupported(def().external_def.object_def,
def().internal_def.object_def)) {
converter_from_ = std::make_unique<GlClBufferCopier>(
def().internal_def, def().external_def, env);
} else {
RETURN_IF_ERROR(converter_builder->MakeConverter(
def().external_def, def().internal_def, &converter_from_));
}
if (def().external_def.object_def.user_provided &&
GlClBufferCopier::IsSupported(def().internal_def.object_def,
def().external_def.object_def)) {
converter_to_ = std::make_unique<GlClBufferCopier>(
def().internal_def, def().external_def, env);
} else {
RETURN_IF_ERROR(converter_builder->MakeConverter(
def().internal_def, def().external_def, &converter_to_));
}
#else
RETURN_IF_ERROR(converter_builder->MakeConverter(
def().external_def, def().internal_def, &converter_from_));
RETURN_IF_ERROR(converter_builder->MakeConverter(
def().internal_def, def().external_def, &converter_to_));
#endif
return MaybeAllocateExternalObject(env);
}
absl::Status MaybeAllocateExternalObject(Environment* env) {
const TensorObjectDef& d = def().external_def;
if (d.object_def.user_provided) {
return absl::OkStatus();
}
switch (d.object_def.object_type) {
case ObjectType::CPU_MEMORY: {
size_t bytes_size = NumElements(d) * SizeOf(d.object_def.data_type);
cpu_memory_.resize(bytes_size);
external_obj_ = CpuMemory{cpu_memory_.data(), cpu_memory_.size()};
break;
}
case ObjectType::OPENCL_TEXTURE:
case ObjectType::OPENCL_BUFFER: {
auto& dims = d.dimensions;
const BHWC shape(dims.b, dims.h, dims.w, dims.c);
TensorStorageType storage_type = ToTensorStorageType(
d.object_def.object_type, d.object_def.data_layout);
TensorDescriptor desc = CreateBhwcTensorDescriptor(
d.object_def.data_type, storage_type, shape);
RETURN_IF_ERROR(
AllocateTensorMemory(env->context(), desc, &cl_memory_));
if (d.object_def.object_type == ObjectType::OPENCL_TEXTURE) {
external_obj_ = OpenClTexture{cl_memory_.memory()};
} else {
external_obj_ = OpenClBuffer{cl_memory_.memory()};
}
break;
}
default:
return absl::InternalError("Unexpected object type");
}
return absl::OkStatus();
}
const TensorObject internal_obj_;
TensorObject external_obj_;
CLMemory cl_memory_;
std::vector<uint8_t> cpu_memory_;
std::unique_ptr<TensorObjectConverter> converter_to_;
std::unique_ptr<TensorObjectConverter> converter_from_;
};
class TwoStepTensorTie : public TensorTie {
public:
explicit TwoStepTensorTie(const TensorTieDef& def) : TensorTie(def) {}
static bool IsSupported(
const TensorTieDef& def,
const TensorObjectConverterBuilder& converter_builder) {
auto defs = MakeOuterInnerDefs(def);
return DefaultTensorTie::IsSupported(defs.first, converter_builder) &&
DefaultTensorTie::IsSupported(defs.second, converter_builder);
}
static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
TensorObjectConverterBuilder* converter_builder,
Environment* env, std::unique_ptr<TensorTie>* tie) {
auto tie_impl = std::make_unique<TwoStepTensorTie>(def);
RETURN_IF_ERROR(tie_impl->Init(internal_object, converter_builder, env));
*tie = std::move(tie_impl);
return absl::OkStatus();
}
absl::Status CopyToExternalObject() final {
RETURN_IF_ERROR(inner_tie_->CopyToExternalObject());
return outer_tie_->CopyToExternalObject();
}
absl::Status CopyFromExternalObject() final {
RETURN_IF_ERROR(outer_tie_->CopyFromExternalObject());
return inner_tie_->CopyFromExternalObject();
}
absl::Status SetExternalObject(TensorObject obj) final {
return outer_tie_->SetExternalObject(obj);
}
TensorObject GetExternalObject() final {
return outer_tie_->GetExternalObject();
}
private:
static std::pair<TensorTieDef, TensorTieDef> MakeOuterInnerDefs(
const TensorTieDef& def) {
TensorTieDef outer_def;
outer_def.external_def = def.external_def;
outer_def.internal_def = def.external_def;
outer_def.internal_def.object_def.object_type = ObjectType::OPENCL_BUFFER;
outer_def.internal_def.object_def.user_provided = true;
TensorTieDef inner_def;
inner_def.external_def = outer_def.internal_def;
inner_def.external_def.object_def.user_provided = false;
inner_def.internal_def = def.internal_def;
return std::make_pair(outer_def, inner_def);
}
absl::Status Init(TensorObject internal_object,
TensorObjectConverterBuilder* converter_builder,
Environment* env) {
auto defs = MakeOuterInnerDefs(def());
RETURN_IF_ERROR(DefaultTensorTie::New(defs.second, internal_object,
converter_builder, env, &inner_tie_));
return DefaultTensorTie::New(defs.first, inner_tie_->GetExternalObject(),
converter_builder, env, &outer_tie_);
}
std::unique_ptr<TensorTie> inner_tie_;
std::unique_ptr<TensorTie> outer_tie_;
};
#ifdef CL_DELEGATE_ALLOW_GL
class GlBufferHolder : public TensorTie {
public:
GlBufferHolder(const TensorTieDef& def, GlInteropFabric* gl_interop_fabric,
Environment* env)
: TensorTie(def),
gl_interop_fabric_(gl_interop_fabric),
environment_(env) {}
static bool IsSupported(
const TensorTieDef& def,
const TensorObjectConverterBuilder& converter_builder) {
if (!def.external_def.object_def.user_provided ||
def.external_def.object_def.object_type != ObjectType::OPENGL_SSBO) {
return false;
}
return DefaultTensorTie::IsSupported(MakeClDef(def), converter_builder);
}
static absl::Status New(const TensorTieDef& def, TensorObject internal_object,
TensorObjectConverterBuilder* converter_builder,
GlInteropFabric* gl_interop_fabric, Environment* env,
std::unique_ptr<TensorTie>* tie) {
auto tie_impl =
std::make_unique<GlBufferHolder>(def, gl_interop_fabric, env);
RETURN_IF_ERROR(DefaultTensorTie::New(MakeClDef(def), internal_object,
converter_builder, env,
&tie_impl->tie_));
*tie = std::move(tie_impl);
return absl::OkStatus();
}
absl::Status SetExternalObject(TensorObject obj) final {
auto ssbo = std::get_if<OpenGlBuffer>(&obj);
if (!ssbo) {
return absl::InvalidArgumentError("Missing OpenGL SSBO");
}
auto old_ssbo = std::get_if<OpenGlBuffer>(&external_obj_);
if (old_ssbo && ssbo->id == old_ssbo->id) {
return absl::OkStatus();
}
if (cl_object_.memory()) {
gl_interop_fabric_->UnregisterMemory(cl_object_.memory());
}
RETURN_IF_ERROR(CreateClMemoryFromGlBuffer(
ssbo->id, def().access_type, &environment_->context(), &cl_object_));
external_obj_ = obj;
RETURN_IF_ERROR(tie_->SetExternalObject(OpenClBuffer{cl_object_.memory()}));
gl_interop_fabric_->RegisterMemory(cl_object_.memory());
return absl::OkStatus();
}
TensorObject GetExternalObject() final { return external_obj_; }
absl::Status CopyFromExternalObject() final {
return tie_->CopyFromExternalObject();
}
absl::Status CopyToExternalObject() final {
return tie_->CopyToExternalObject();
}
private:
static TensorTieDef MakeClDef(const TensorTieDef& def) {
auto cl_def = def;
cl_def.external_def.object_def.object_type = ObjectType::OPENCL_BUFFER;
cl_def.external_def.object_def.user_provided = true;
return cl_def;
}
CLMemory cl_object_;
GlInteropFabric* gl_interop_fabric_;
Environment* environment_;
std::unique_ptr<TensorTie> tie_;
TensorObject external_obj_;
};
#endif
// Wraps the tensor's underlying OpenCL memory in a TensorObject according to
// its storage type: buffer-backed storage yields OpenClBuffer, everything
// else an OpenClTexture.
TensorObject TensorToObj(const Tensor& tensor) {
  switch (tensor.GetStorageType()) {
    case TensorStorageType::BUFFER:
      return OpenClBuffer{tensor.GetMemoryPtr()};
    case TensorStorageType::IMAGE_BUFFER:
      // Image buffers expose a separate pointer for buffer-style access.
      return OpenClBuffer{tensor.GetMemoryPtrForWriting()};
    default:
      return OpenClTexture{tensor.GetMemoryPtr()};
  }
}
class TensorTieFactory {
public:
TensorTieFactory(Environment* env, InferenceContext* context
#ifdef CL_DELEGATE_ALLOW_GL
,
GlInteropFabric* gl_interop_fabric
#endif
)
: env_(*env),
context_(*context),
#ifdef CL_DELEGATE_ALLOW_GL
gl_interop_fabric_(gl_interop_fabric),
#endif
converter_builder_(NewConverterBuilder(env)) {
}
bool IsSupported(const TensorTieDef& def) const {
return IsValid(def.external_def.object_def) &&
(NoopTensorTie::IsSupported(def) ||
DefaultTensorTie::IsSupported(def, *converter_builder_) ||
#ifdef CL_DELEGATE_ALLOW_GL
(gl_interop_fabric_ &&
GlBufferHolder::IsSupported(def, *converter_builder_)) ||
#endif
TwoStepTensorTie::IsSupported(def, *converter_builder_));
}
absl::Status NewTensorTie(const TensorTieDef& def,
std::unique_ptr<TensorTie>* tie) {
TensorObject internal_object = TensorToObj(*context_.GetTensor(def.id));
auto converter = converter_builder_.get();
if (NoopTensorTie::IsSupported(def)) {
*tie = std::make_unique<NoopTensorTie>(def, internal_object);
return absl::OkStatus();
}
if (DefaultTensorTie::IsSupported(def, *converter)) {
return DefaultTensorTie::New(def, internal_object, converter, &env_, tie);
}
#ifdef CL_DELEGATE_ALLOW_GL
if (gl_interop_fabric_ && GlBufferHolder::IsSupported(def, *converter)) {
return GlBufferHolder::New(def, internal_object, converter,
gl_interop_fabric_, &env_, tie);
}
#endif
if (TwoStepTensorTie::IsSupported(def, *converter)) {
return TwoStepTensorTie::New(def, internal_object, converter, &env_, tie);
}
return absl::UnimplementedError("Unsupported tensor tie definition.");
}
private:
Environment& env_;
InferenceContext& context_;
#ifdef CL_DELEGATE_ALLOW_GL
GlInteropFabric* gl_interop_fabric_;
#endif
std::unique_ptr<TensorObjectConverterBuilder> converter_builder_;
};
// Executes GPU inference: copies data from external input objects into the
// internal tensors, runs the InferenceContext on the OpenCL command queue,
// and copies results back to the external output objects.
class InferenceRunnerImpl : public CLInferenceRunner {
 public:
  InferenceRunnerImpl(Environment* environment,
                      std::unique_ptr<InferenceContext> context
#ifdef CL_DELEGATE_ALLOW_GL
                      ,
                      std::unique_ptr<GlInteropFabric> gl_interop_fabric
#endif
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
                      ,
                      int gpu_invoke_loop_times
#endif
                      )
      : queue_(environment->queue()),
        profiling_queue_(environment->profiling_queue()),
        context_(std::move(context))
#ifdef CL_DELEGATE_ALLOW_GL
        ,
        gl_interop_fabric_(std::move(gl_interop_fabric))
#endif
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
        ,
        gpu_invoke_loop_times_(gpu_invoke_loop_times)
#endif
  {
  }
  // Builds one tensor tie per input/output definition via `factory`.
  absl::Status Initialize(const std::vector<TensorTieDef>& inputs,
                          const std::vector<TensorTieDef>& outputs,
                          TensorTieFactory* factory) {
    RETURN_IF_ERROR(LinkTensors(inputs, factory, &inputs_));
    return LinkTensors(outputs, factory, &outputs_);
  }
  std::vector<TensorObjectDef> inputs() const override {
    return GetExternalDefinitions(inputs_);
  }
  std::vector<TensorObjectDef> outputs() const override {
    return GetExternalDefinitions(outputs_);
  }
  absl::Status GetInputObject(int index, TensorObject* object) override {
    if (index < 0 || index >= inputs_.size()) {
      return absl::OutOfRangeError("Index is out of range");
    }
    *object = inputs_[index]->GetExternalObject();
    return absl::OkStatus();
  }
  absl::Status GetOutputObject(int index, TensorObject* object) override {
    if (index < 0 || index >= outputs_.size()) {
      return absl::OutOfRangeError("Index is out of range");
    }
    *object = outputs_[index]->GetExternalObject();
    return absl::OkStatus();
  }
  absl::Status SetInputObject(int index, TensorObject object) override {
    if (index < 0 || index >= inputs_.size()) {
      return absl::OutOfRangeError("Input index is out of range");
    }
    return inputs_[index]->SetExternalObject(object);
  }
  absl::Status SetOutputObject(int index, TensorObject object) override {
    if (index < 0 || index >= outputs_.size()) {
      return absl::OutOfRangeError("Output index is out of range");
    }
    return outputs_[index]->SetExternalObject(object);
  }
  absl::Status CopyFromExternalInput(int index) override {
    // Bug fix: the previous check (`index > inputs_.size()`) accepted
    // index == size() and negative indices, both of which index the vector
    // out of bounds below. Validate the same way the accessors above do.
    if (index < 0 || index >= inputs_.size()) {
      return absl::NotFoundError(
          absl::StrCat("Input id ", index, " is an invalid input index."));
    }
    return inputs_[index]->CopyFromExternalObject();
  }
  absl::Status CopyToExternalOutput(int index) override {
    // Bug fix: same off-by-one as CopyFromExternalInput.
    if (index < 0 || index >= outputs_.size()) {
      return absl::NotFoundError(
          absl::StrCat("Output id ", index, " is an invalid output index"));
    }
    return outputs_[index]->CopyToExternalObject();
  }
  // Full inference pass: input copies, kernel execution, output copies.
  absl::Status Run() override {
#ifdef CL_DELEGATE_ALLOW_GL
    if (gl_interop_fabric_) {
      RETURN_IF_ERROR(gl_interop_fabric_->Start());
    }
#endif
    for (const auto& input : inputs_) {
      RETURN_IF_ERROR(input->CopyFromExternalObject());
    }
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
    if (gpu_invoke_loop_times_ <= 0) {
      return absl::InvalidArgumentError(
          "gpu_invoke_loop_times must be positive");
    }
    for (int i = 0; i < gpu_invoke_loop_times_; i++) {
      RETURN_IF_ERROR(RunWithoutExternalBufferCopy());
    }
#else
    RETURN_IF_ERROR(RunWithoutExternalBufferCopy());
#endif
    // Copies into CPU memory are enqueued asynchronously; if any output is
    // CPU-backed we must drain the queue before returning to the caller.
    bool has_async_copies = false;
    for (const auto& output : outputs_) {
      RETURN_IF_ERROR(output->CopyToExternalObject());
      if (output->def().external_def.object_def.object_type ==
          ObjectType::CPU_MEMORY) {
        has_async_copies = true;
      }
    }
#ifdef CL_DELEGATE_ALLOW_GL
    if (gl_interop_fabric_) {
      RETURN_IF_ERROR(gl_interop_fabric_->Finish());
    }
#endif
    if (has_async_copies) {
      RETURN_IF_ERROR(queue_->WaitForCompletion());
    }
    return absl::OkStatus();
  }
  // Enqueues the model's kernels without touching external buffers; also
  // emits TFLite profiler events when profiling is active.
  absl::Status RunWithoutExternalBufferCopy() override {
    if (IsTfLiteProfilerActive()) {
      ProfilingInfo profiling_info;
      RETURN_IF_ERROR(context_->Profile(profiling_queue_, &profiling_info));
      AddTfLiteProfilerEvents(&profiling_info);
    }
    RETURN_IF_ERROR(context_->AddToQueue(queue_));
    context_->FlushQueue(queue_);
    return absl::OkStatus();
  }

 private:
  // Creates a TensorTie for each definition and appends it to `objects`.
  static absl::Status LinkTensors(
      const std::vector<TensorTieDef>& defs, TensorTieFactory* factory,
      std::vector<std::unique_ptr<TensorTie>>* objects) {
    objects->reserve(defs.size());
    for (auto& def : defs) {
      std::unique_ptr<TensorTie> object;
      RETURN_IF_ERROR(factory->NewTensorTie(def, &object));
      objects->push_back(std::move(object));
    }
    return absl::OkStatus();
  }
  static std::vector<TensorObjectDef> GetExternalDefinitions(
      const std::vector<std::unique_ptr<TensorTie>>& objects) {
    std::vector<TensorObjectDef> defs;
    defs.reserve(objects.size());
    for (auto& obj : objects) {
      defs.push_back(obj->def().external_def);
    }
    return defs;
  }
  CLCommandQueue* queue_;
  ProfilingCommandQueue* profiling_queue_;
  std::unique_ptr<InferenceContext> context_;
#ifdef CL_DELEGATE_ALLOW_GL
  std::unique_ptr<GlInteropFabric> gl_interop_fabric_;
#endif
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
  int gpu_invoke_loop_times_;
#endif
  std::vector<std::unique_ptr<TensorTie>> inputs_;
  std::vector<std::unique_ptr<TensorTie>> outputs_;
};
// Builds a TensorObjectDef describing `tensor`: BHWC dimensions plus the
// object definition derived from its storage type. The result is marked as
// not user-provided (the runtime owns the memory).
TensorObjectDef TensorToDef(const Tensor& tensor) {
  TensorObjectDef result;
  result.dimensions.b = tensor.Batch();
  result.dimensions.h = tensor.Height();
  result.dimensions.w = tensor.Width();
  result.dimensions.c = tensor.Channels();
  auto& obj = result.object_def;
  obj.data_layout = ToDataLayout(tensor.GetStorageType());
  obj.data_type = tensor.GetDataType();
  obj.object_type = ToObjectType(tensor.GetStorageType());
  obj.user_provided = false;
  return result;
}
// Maps the rank of the MAX_PRECISION priority onto a calculation precision
// (1 → F32, 2 → F32_F16, otherwise F16), then raises it towards full
// precision until the environment supports the choice.
CalculationsPrecision GetPrecision(const Environment& env,
                                   const InferenceOptions& options) {
  CalculationsPrecision precision;
  switch (GetPosition(options, InferencePriority::MAX_PRECISION)) {
    case 1:
      precision = CalculationsPrecision::F32;
      break;
    case 2:
      precision = CalculationsPrecision::F32_F16;
      break;
    default:  // Position 3 or priority not specified.
      precision = CalculationsPrecision::F16;
      break;
  }
  // Fall back step by step: F16 -> F32_F16 -> F32 (F32 is always the last
  // resort, chosen without a further support check, as before).
  for (CalculationsPrecision fallback :
       {CalculationsPrecision::F32_F16, CalculationsPrecision::F32}) {
    if (env.IsSupported(precision)) break;
    precision = fallback;
  }
  return precision;
}
// Chooses the storage type for internal tensors. When latency outranks
// memory usage the device's fastest storage type is preferred, otherwise the
// most memory-frugal one; BUFFER serves as the universally available
// fallback in both lists.
TensorStorageType GetStorageTypeFromOptions(const Environment& env,
                                            const InferenceOptions& options) {
  std::vector<TensorStorageType> preferred_storage_types;
  if (GetRelativeImportance(options, InferencePriority::MIN_LATENCY,
                            InferencePriority::MIN_MEMORY_USAGE) ==
      PriorityImportance::HIGHER) {
    preferred_storage_types = {GetFastestStorageType(env.device().GetInfo()),
                               TensorStorageType::BUFFER};
  } else {
    preferred_storage_types = {
        GetStorageTypeWithMinimalMemoryConsumption(env.device().GetInfo()),
        TensorStorageType::BUFFER};
  }
  // Return the first preferred type the environment actually supports.
  for (TensorStorageType storage_type : preferred_storage_types) {
    if (env.IsSupported(storage_type)) {
      return storage_type;
    }
  }
  return TensorStorageType::UNKNOWN;
}
// Translates user-facing InferenceOptions into the model-creation parameters:
// precision, storage type, and tuning/kernel-selection hints.
CreateGpuModelInfo GetCreateInfo(const Environment& environment,
                                 const InferenceOptions& options) {
  CreateGpuModelInfo create_info;
  create_info.precision = GetPrecision(environment, options);
  create_info.storage_type = GetStorageTypeFromOptions(environment, options);
  // Usage determines how much time may be spent on kernel selection/tuning.
  if (options.usage == InferenceUsage::FAST_SINGLE_ANSWER) {
    create_info.hints.Add(ModelHints::kReduceKernelsCount);
    create_info.hints.Add(ModelHints::kFastTuning);
  } else if (options.usage == InferenceUsage::BALANCED) {
    create_info.hints.Add(ModelHints::kReduceKernelsCount);
  } else if (options.usage == InferenceUsage::SUSTAINED_SPEED) {
    create_info.hints.Add(ModelHints::kAllowSpecialKernels);
  }
  // When memory matters more than latency, disable memory-hungry transforms.
  if (GetRelativeImportance(options, InferencePriority::MIN_MEMORY_USAGE,
                            InferencePriority::MIN_LATENCY) ==
      PriorityImportance::HIGHER) {
    create_info.hints.Add(ModelHints::kNoWinogradOptimizations);
    create_info.hints.Add(ModelHints::kReuseConvWeights);
  }
  return create_info;
}
// Concrete InferenceBuilder for the CL backend. Owns the InferenceContext
// while the caller configures input/output object definitions, then hands
// everything over to an InferenceRunnerImpl in Build().
class InferenceBuilderImpl : public InferenceBuilder {
 public:
  explicit InferenceBuilderImpl(Environment* environment)
      : environment_(environment) {}

  // Initializes the builder by compiling `graph` into an InferenceContext.
  absl::Status Initialize(const InferenceOptions& options,
                          const InferenceEnvironmentOptions& env_options,
                          const GraphFloat32& graph) {
    context_ = std::make_unique<InferenceContext>();
    CreateGpuModelInfo create_info = GetCreateInfo(*environment_, options);
    RETURN_IF_ERROR(context_->InitFromGraph(create_info, graph, environment_));
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
    gpu_invoke_loop_times_ = options.gpu_invoke_loop_times;
#endif
#ifdef CL_DELEGATE_ALLOW_GL
    // GL interop is set up only when EGL state was provided and the device
    // supports CL/GL sharing.
    if (env_options.IsGlAware() &&
        IsGlSharingSupported(environment_->device())) {
      gl_interop_fabric_ = std::make_unique<GlInteropFabric>(
          env_options.egl_display, environment_);
    }
    tie_factory_ = std::make_unique<TensorTieFactory>(
        environment_, context_.get(), gl_interop_fabric_.get());
#else
    tie_factory_ =
        std::make_unique<TensorTieFactory>(environment_, context_.get());
#endif
    inputs_ = LinkTensors(context_->GetInputIds(), AccessType::READ);
    outputs_ = LinkTensors(context_->GetOutputIds(), AccessType::WRITE);
    return absl::OkStatus();
  }

  // Initializes the builder from a previously serialized model instead of a
  // graph.
  absl::Status Initialize(const InferenceEnvironmentOptions& env_options,
                          const absl::Span<const uint8_t> serialized_model) {
    context_ = std::make_unique<InferenceContext>();
    RETURN_IF_ERROR(
        context_->RestoreDeserialized(serialized_model, environment_));
#ifdef CL_DELEGATE_ALLOW_GL
    if (env_options.IsGlAware() &&
        IsGlSharingSupported(environment_->device())) {
      gl_interop_fabric_ = std::make_unique<GlInteropFabric>(
          env_options.egl_display, environment_);
    }
    tie_factory_ = std::make_unique<TensorTieFactory>(
        environment_, context_.get(), gl_interop_fabric_.get());
#else
    tie_factory_ =
        std::make_unique<TensorTieFactory>(environment_, context_.get());
#endif
    inputs_ = LinkTensors(context_->GetInputIds(), AccessType::READ);
    outputs_ = LinkTensors(context_->GetOutputIds(), AccessType::WRITE);
    return absl::OkStatus();
  }

  std::vector<TensorObjectDef> inputs() const override {
    return GetExternalDefinitions(inputs_);
  }
  std::vector<TensorObjectDef> outputs() const override {
    return GetExternalDefinitions(outputs_);
  }

  // Dynamic input shapes are not supported by this backend; only the index
  // range check is performed before reporting Unimplemented.
  absl::Status SetInputShape(int index, const Dimensions& dimensions) override {
    if (index < 0 || index >= inputs_.size()) {
      return absl::OutOfRangeError("Index is out of range");
    }
    return absl::UnimplementedError("Changing input shapes is not supported");
  }

  // Replaces the external object definition of input `index`, after checking
  // that the tie factory can convert between it and the internal definition.
  absl::Status SetInputObjectDef(int index, ObjectDef new_def) override {
    if (index < 0 || index >= inputs_.size()) {
      return absl::OutOfRangeError("Input index is out of range");
    }
    auto def = inputs_[index];
    def.external_def.object_def = new_def;
    if (!tie_factory_->IsSupported(def)) {
      return absl::InvalidArgumentError(
          "New input object definition is not supported.");
    }
    inputs_[index] = def;
    return absl::OkStatus();
  }

  // Same as SetInputObjectDef, but for output `index`.
  absl::Status SetOutputObjectDef(int index, ObjectDef new_def) override {
    if (index < 0 || index >= outputs_.size()) {
      return absl::OutOfRangeError("Output index is out of range");
    }
    auto def = outputs_[index];
    def.external_def.object_def = new_def;
    if (!tie_factory_->IsSupported(def)) {
      return absl::InvalidArgumentError(
          "New output object definition is not supported.");
    }
    outputs_[index] = def;
    return absl::OkStatus();
  }

  // Creates the runner, transferring ownership of the context (and, when
  // enabled, the GL interop fabric) to it. The builder must not be reused
  // afterwards.
  absl::Status Build(std::unique_ptr<InferenceRunner>* runner) override {
#ifdef CL_DELEGATE_ALLOW_GL
    // Drop the interop fabric if no GL objects were actually requested.
    if (gl_interop_fabric_ && !HasGlObjects()) {
      gl_interop_fabric_.reset(nullptr);
    }
    auto runner_impl = std::make_unique<InferenceRunnerImpl>(
        environment_, std::move(context_), std::move(gl_interop_fabric_)
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
        ,
        gpu_invoke_loop_times_
#endif
    );
#else
    auto runner_impl =
        std::make_unique<InferenceRunnerImpl>(environment_, std::move(context_)
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
                                                  ,
                                                  gpu_invoke_loop_times_
#endif
        );
#endif
    RETURN_IF_ERROR(
        runner_impl->Initialize(inputs_, outputs_, tie_factory_.get()));
    *runner = std::move(runner_impl);
    return absl::OkStatus();
  }

 private:
  // Builds a tie definition per tensor id; the external definition starts out
  // identical to the internal one until overridden via SetInput/OutputObjectDef.
  std::vector<TensorTieDef> LinkTensors(const std::vector<ValueId>& ids,
                                        AccessType access) {
    std::vector<TensorTieDef> links;
    links.reserve(ids.size());
    for (const auto& id : ids) {
      TensorObjectDef def = TensorToDef(*context_->GetTensor(id));
      links.push_back({id, access, def, def});
    }
    return links;
  }

  // True if any input or output external definition refers to an OpenGL
  // object (SSBO or texture). Always false when GL support is compiled out.
  bool HasGlObjects() const {
#ifdef CL_DELEGATE_ALLOW_GL
    auto is_gl = [](ObjectType t) {
      return t == ObjectType::OPENGL_SSBO || t == ObjectType::OPENGL_TEXTURE;
    };
    for (const TensorTieDef& def : inputs_) {
      if (is_gl(def.external_def.object_def.object_type)) {
        return true;
      }
    }
    for (const TensorTieDef& def : outputs_) {
      if (is_gl(def.external_def.object_def.object_type)) {
        return true;
      }
    }
#endif
    return false;
  }

  // Extracts the external-facing definitions from a list of ties.
  static std::vector<TensorObjectDef> GetExternalDefinitions(
      const std::vector<TensorTieDef>& links) {
    std::vector<TensorObjectDef> defs;
    defs.reserve(links.size());
    for (auto& desc : links) {
      defs.push_back(desc.external_def);
    }
    return defs;
  }

  std::unique_ptr<InferenceContext> context_;
#ifdef CL_DELEGATE_ALLOW_GL
  std::unique_ptr<GlInteropFabric> gl_interop_fabric_;
#endif
#ifdef TFLITE_GPU_ENABLE_INVOKE_LOOP
  int gpu_invoke_loop_times_;
#endif
  Environment* environment_;  // Not owned.
  std::vector<TensorTieDef> inputs_;
  std::vector<TensorTieDef> outputs_;
  std::unique_ptr<TensorTieFactory> tie_factory_;
};
// Concrete InferenceEnvironment for the CL backend: owns the CL device,
// context and command queues, seeds the program cache, and vends
// InferenceBuilder instances.
class InferenceEnvironmentImpl : public InferenceEnvironment {
 public:
  explicit InferenceEnvironmentImpl(const InferenceEnvironmentOptions& options)
      : options_(options) {}

  // Loads OpenCL, adopts or creates a device/context/queue as directed by
  // options_, and initializes environment_. Must succeed before any other
  // method is used.
  absl::Status Init() {
    RETURN_IF_ERROR(LoadOpenCL());
    properties_.is_opencl_available = true;
    CLDevice device;
    if (options_.device) {
      // Adopt the externally provided device; its platform is queried here.
      cl_platform_id platform;
      RETURN_IF_ERROR(GetDeviceInfo<cl_platform_id>(
          options_.device, CL_DEVICE_PLATFORM, &platform));
      device = CLDevice(options_.device, platform);
    } else {
      RETURN_IF_ERROR(CreateDefaultGPUDevice(&device));
    }
#ifdef CL_DELEGATE_ALLOW_GL
    properties_.is_gl_sharing_supported = IsGlSharingSupported(device);
    properties_.is_gl_to_cl_fast_sync_supported =
        IsClEventFromEglSyncSupported(device);
    properties_.is_cl_to_gl_fast_sync_supported =
        IsEglSyncFromClEventSupported();
#endif
    CLContext context;
    if (options_.context) {
#ifdef CL_DELEGATE_ALLOW_GL
      // A user-supplied CL context and EGL parameters are mutually exclusive.
      if (options_.IsGlAware()) {
        return absl::InvalidArgumentError(
            "OpenCL context and EGL parameters are set in the same time.");
      }
#endif
      // Adopt the externally provided context (ownership stays with caller).
      context =
          CLContext(options_.context, false, device);
    } else {
#ifdef CL_DELEGATE_ALLOW_GL
      if (options_.IsGlAware() && properties_.is_gl_sharing_supported) {
        RETURN_IF_ERROR(CreateCLGLContext(
            device,
            reinterpret_cast<cl_context_properties>(options_.egl_context),
            reinterpret_cast<cl_context_properties>(options_.egl_display),
            &context));
      } else {
        RETURN_IF_ERROR(CreateCLContext(device, &context));
      }
#else
      RETURN_IF_ERROR(CreateCLContext(device, &context));
#endif
    }
    CLCommandQueue queue;
    if (options_.command_queue) {
      // Adopt the externally provided queue (ownership stays with caller).
      queue =
          CLCommandQueue(options_.command_queue, false);
    } else {
      RETURN_IF_ERROR(CreateCLCommandQueue(device, context, &queue));
    }
    ProfilingCommandQueue profiling_queue;
    RETURN_IF_ERROR(
        CreateProfilingCommandQueue(device, context, &profiling_queue));
    environment_ = Environment(std::move(device), std::move(context),
                               std::move(queue), std::move(profiling_queue));
    return environment_.Init();
  }

  // Compiles `model` and writes the serialized inference context into
  // `serialized_model`.
  absl::Status BuildSerializedModel(
      const InferenceOptions& options, GraphFloat32 model,
      std::vector<uint8_t>* serialized_model) final {
    if (!IsValid(options)) {
      return absl::InvalidArgumentError("InferenceOptions are invalid.");
    }
    // NOTE(review): resolved_options is computed but GetCreateInfo below uses
    // the unresolved `options` — confirm this is intentional.
    InferenceOptions resolved_options = options;
    ResolveAutoPriority(&resolved_options);
    // Seed the program cache from the user-provided blob; failures here are
    // deliberately ignored (best effort).
    if (environment_.program_cache() &&
        !options_.serialized_binary_cache.empty()) {
      environment_.program_cache()
          ->AddSerializedCache(environment_.context(), environment_.device(),
                               options_.serialized_binary_cache)
          .IgnoreError();
    }
    RETURN_IF_ERROR(RunGraphTransformsForGpuModel(&model));
    InferenceContext context;
    CreateGpuModelInfo create_info = GetCreateInfo(environment_, options);
    RETURN_IF_ERROR(context.InitFromGraph(create_info, model, &environment_,
                                          serialized_model));
    return absl::OkStatus();
  }

  // Creates an InferenceBuilder for a freshly built graph model.
  absl::Status NewInferenceBuilder(
      const InferenceOptions& options, GraphFloat32 model,
      std::unique_ptr<InferenceBuilder>* builder) final {
    if (!IsValid(options)) {
      return absl::InvalidArgumentError("InferenceOptions are invalid.");
    }
    InferenceOptions resolved_options = options;
    ResolveAutoPriority(&resolved_options);
    // Best-effort program cache seeding, as above.
    if (environment_.program_cache() &&
        !options_.serialized_binary_cache.empty()) {
      environment_.program_cache()
          ->AddSerializedCache(environment_.context(), environment_.device(),
                               options_.serialized_binary_cache)
          .IgnoreError();
    }
    RETURN_IF_ERROR(RunGraphTransformsForGpuModel(&model));
    auto builder_impl = std::make_unique<InferenceBuilderImpl>(&environment_);
    RETURN_IF_ERROR(
        builder_impl->Initialize(resolved_options, options_, model));
    *builder = std::move(builder_impl);
    return absl::OkStatus();
  }

  // Creates an InferenceBuilder from a previously serialized model.
  absl::Status NewInferenceBuilder(
      const absl::Span<const uint8_t> serialized_model,
      std::unique_ptr<InferenceBuilder>* builder) final {
    // Best-effort program cache seeding, as above.
    if (environment_.program_cache() &&
        !options_.serialized_binary_cache.empty()) {
      environment_.program_cache()
          ->AddSerializedCache(environment_.context(), environment_.device(),
                               options_.serialized_binary_cache)
          .IgnoreError();
    }
    auto builder_impl = std::make_unique<InferenceBuilderImpl>(&environment_);
    RETURN_IF_ERROR(builder_impl->Initialize(options_, serialized_model));
    *builder = std::move(builder_impl);
    return absl::OkStatus();
  }

  // Serializes the current program cache so callers can persist it. Errors
  // are ignored; an empty vector may be returned.
  std::vector<uint8_t> GetSerializedBinaryCache() const final {
    std::vector<uint8_t> data;
    environment_.program_cache()
        ->GetSerializedCache(environment_.device(), &data)
        .IgnoreError();
    return data;
  }

  const InferenceEnvironmentProperties& properties() const {
    return properties_;
  }

 private:
  const InferenceEnvironmentOptions options_;
  Environment environment_;
  InferenceEnvironmentProperties properties_;
};
}
// Creates and initializes a CL InferenceEnvironment. `properties` (when
// non-null) is filled even if initialization fails, so callers can inspect
// e.g. OpenCL availability before handling the error.
absl::Status NewInferenceEnvironment(
    const InferenceEnvironmentOptions& options,
    std::unique_ptr<InferenceEnvironment>* environment,
    InferenceEnvironmentProperties* properties) {
  auto impl = std::make_unique<InferenceEnvironmentImpl>(options);
  const absl::Status init_status = impl->Init();
  if (properties) {
    *properties = impl->properties();
  }
  RETURN_IF_ERROR(init_status);
  *environment = std::move(impl);
  return absl::OkStatus();
}
}
}
} | #include <ctype.h>
#include <algorithm>
#include <string>
#include <unordered_map>
#include <vector>
#include "tensorflow/core/api_def/excluded_ops.h"
#include "tensorflow/core/framework/api_def.pb.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_def.pb.h"
#include "tensorflow/core/framework/op_gen_lib.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/io/path.h"
#include "tensorflow/core/lib/strings/stringprintf.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/protobuf.h"
#include "tensorflow/core/platform/resource_loader.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
namespace tensorflow {
namespace {
constexpr char kApiDefFilePattern[] = "api_def_*.pbtxt";
// Returns the directory containing the golden base ApiDef files.
string DefaultApiDefDir() {
  const string relative_dir =
      io::JoinPath("tensorflow", "core", "api_def", "base_api");
  return GetDataDependencyFilepath(relative_dir);
}
// Returns the directory containing the Python-specific ApiDef files.
string PythonApiDefDir() {
  const string relative_dir =
      io::JoinPath("tensorflow", "core", "api_def", "python_api");
  return GetDataDependencyFilepath(relative_dir);
}
// Reads every api_def_*.pbtxt file under `api_files_dir` and fills
// `name_to_api_def`, keyed by graph op name. Each file must contain exactly
// one ApiDef; any read/parse failure aborts the test binary.
void GetGoldenApiDefs(Env* env, const string& api_files_dir,
                      std::unordered_map<string, ApiDef>* name_to_api_def) {
  std::vector<string> matching_paths;
  TF_CHECK_OK(env->GetMatchingPaths(
      io::JoinPath(api_files_dir, kApiDefFilePattern), &matching_paths));
  for (auto& file_path : matching_paths) {
    string file_contents;
    TF_CHECK_OK(ReadFileToString(env, file_path, &file_contents));
    // Golden files use multiline pbtxt syntax; normalize before parsing.
    file_contents = PBTxtFromMultiline(file_contents);
    ApiDefs api_defs;
    QCHECK(tensorflow::protobuf::TextFormat::ParseFromString(file_contents,
                                                             &api_defs))
        << "Failed to load " << file_path;
    CHECK_EQ(api_defs.op_size(), 1);
    (*name_to_api_def)[api_defs.op(0).graph_op_name()] = api_defs.op(0);
  }
}
// Checks that every loaded ApiDef refers to a registered op — i.e. no stale
// api_def_*.pbtxt files remain after an op was removed or renamed.
void TestAllApiDefsHaveCorrespondingOp(
    const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
  // Collect registered op names for O(1) membership checks.
  std::unordered_set<string> op_names;
  for (const auto& op : ops.op()) {
    op_names.insert(op.name());
  }
  for (const auto& name_and_api_def : api_defs_map) {
    ASSERT_TRUE(op_names.find(name_and_api_def.first) != op_names.end())
        << name_and_api_def.first << " op has ApiDef but missing from ops. "
        << "Does api_def_" << name_and_api_def.first << " need to be deleted?";
  }
}
void TestAllApiDefInputArgsAreValid(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& op : ops.op()) {
const auto api_def_iter = api_defs_map.find(op.name());
if (api_def_iter == api_defs_map.end()) {
continue;
}
const auto& api_def = api_def_iter->second;
for (const auto& api_def_arg : api_def.in_arg()) {
bool found_arg = false;
for (const auto& op_arg : op.input_arg()) {
if (api_def_arg.name() == op_arg.name()) {
found_arg = true;
break;
}
}
ASSERT_TRUE(found_arg)
<< "Input argument " << api_def_arg.name()
<< " (overwritten in api_def_" << op.name()
<< ".pbtxt) is not defined in OpDef for " << op.name();
}
}
}
void TestAllApiDefOutputArgsAreValid(
const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
for (const auto& op : ops.op()) {
const auto api_def_iter = api_defs_map.find(op.name());
if (api_def_iter == api_defs_map.end()) {
continue;
}
const auto& api_def = api_def_iter->second;
for (const auto& api_def_arg : api_def.out_arg()) {
bool found_arg = false;
for (const auto& op_arg : op.output_arg()) {
if (api_def_arg.name() == op_arg.name()) {
found_arg = true;
break;
}
}
ASSERT_TRUE(found_arg)
<< "Output argument " << api_def_arg.name()
<< " (overwritten in api_def_" << op.name()
<< ".pbtxt) is not defined in OpDef for " << op.name();
}
}
}
// Checks that every attribute overridden in an ApiDef actually exists in the
// corresponding OpDef.
void TestAllApiDefAttributeNamesAreValid(
    const OpList& ops, const std::unordered_map<string, ApiDef>& api_defs_map) {
  for (const auto& op : ops.op()) {
    const auto api_def_iter = api_defs_map.find(op.name());
    if (api_def_iter == api_defs_map.end()) {
      continue;
    }
    const auto& api_def = api_def_iter->second;
    for (const auto& api_def_attr : api_def.attr()) {
      bool found_attr = false;
      for (const auto& op_attr : op.attr()) {
        if (api_def_attr.name() == op_attr.name()) {
          found_attr = true;
          // Stop scanning once found, matching the sibling input/output-arg
          // checks (the original loop kept iterating needlessly).
          break;
        }
      }
      ASSERT_TRUE(found_attr)
          << "Attribute " << api_def_attr.name() << " (overwritten in api_def_"
          << op.name() << ".pbtxt) is not defined in OpDef for " << op.name();
    }
  }
}
// Checks endpoint deprecation consistency: a deprecated op (i.e. one with a
// deprecation_message) must not additionally mark endpoints deprecated, and
// an op must not mark *all* of its endpoints deprecated (that case should
// use deprecation_message instead).
void TestDeprecatedAttributesSetCorrectly(
    const std::unordered_map<string, ApiDef>& api_defs_map) {
  for (const auto& name_and_api_def : api_defs_map) {
    int num_deprecated_endpoints = 0;
    const auto& api_def = name_and_api_def.second;
    for (const auto& endpoint : api_def.endpoint()) {
      if (endpoint.deprecated()) {
        ++num_deprecated_endpoints;
      }
    }
    const auto& name = name_and_api_def.first;
    // Either the op is not deprecated, or no endpoint is individually marked.
    ASSERT_TRUE(api_def.deprecation_message().empty() ||
                num_deprecated_endpoints == 0)
        << "Endpoints are set to 'deprecated' for deprecated op " << name
        << ". If an op is deprecated (i.e. deprecation_message is set), "
        << "all the endpoints are deprecated implicitly and 'deprecated' "
        << "field should not be set.";
    if (num_deprecated_endpoints > 0) {
      ASSERT_NE(num_deprecated_endpoints, api_def.endpoint_size())
          << "All " << name << " endpoints are deprecated. Please, set "
          << "deprecation_message in api_def_" << name << ".pbtxt instead. "
          << "to indicate that the op is deprecated.";
    }
  }
}
// Checks deprecation_version consistency: when set, it must be positive and
// must be accompanied by a deprecation_message.
void TestDeprecationVersionSetCorrectly(
    const std::unordered_map<string, ApiDef>& api_defs_map) {
  for (const auto& name_and_api_def : api_defs_map) {
    const auto& name = name_and_api_def.first;
    const auto& api_def = name_and_api_def.second;
    if (api_def.deprecation_version() != 0) {
      ASSERT_TRUE(api_def.deprecation_version() > 0)
          << "Found ApiDef with negative deprecation_version";
      ASSERT_FALSE(api_def.deprecation_message().empty())
          << "ApiDef that includes deprecation_version > 0 must also specify "
          << "a deprecation_message. Op " << name
          << " has deprecation_version > 0 but deprecation_message is not set.";
    }
  }
}
// Fixture that exports all registered ops and loads the golden base ApiDefs
// once per test.
class BaseApiTest : public ::testing::Test {
 protected:
  BaseApiTest() {
    OpRegistry::Global()->Export(false, &ops_);
    // (Removed an unused `multi_line_fields` local that was constructed and
    // never read.)
    Env* env = Env::Default();
    GetGoldenApiDefs(env, DefaultApiDefDir(), &api_defs_map_);
  }
  OpList ops_;                                     // All registered ops.
  std::unordered_map<string, ApiDef> api_defs_map_;  // Golden base ApiDefs.
};
// Every registered op (except the explicitly excluded ones) must have a
// golden base ApiDef file.
TEST_F(BaseApiTest, AllOpsAreInApiDef) {
  auto* excluded_ops = GetExcludedOps();
  for (const auto& op : ops_.op()) {
    if (excluded_ops->find(op.name()) != excluded_ops->end()) {
      continue;
    }
    EXPECT_TRUE(api_defs_map_.find(op.name()) != api_defs_map_.end())
        << op.name() << " op does not have api_def_*.pbtxt file. "
        << "Please add api_def_" << op.name() << ".pbtxt file "
        << "under tensorflow/core/api_def/base_api/ directory.";
  }
}
TEST_F(BaseApiTest, AllApiDefsHaveCorrespondingOp) {
TestAllApiDefsHaveCorrespondingOp(ops_, api_defs_map_);
}
// Formats the error message reported when an OpDef carries documentation
// that should live in its ApiDef instead.
string GetOpDefHasDocStringError(const string& op_name) {
  const char* name = op_name.c_str();
  return strings::Printf(
      "OpDef for %s has a doc string. "
      "Doc strings must be defined in ApiDef instead of OpDef. "
      "Please, add summary and descriptions in api_def_%s"
      ".pbtxt file instead",
      name, name);
}
// OpDefs must not carry any documentation (summary, description, or per
// arg/attr descriptions) — that content belongs in the ApiDef files.
TEST_F(BaseApiTest, OpDefsShouldNotHaveDocs) {
  auto* excluded_ops = GetExcludedOps();
  for (const auto& op : ops_.op()) {
    if (excluded_ops->find(op.name()) != excluded_ops->end()) {
      continue;
    }
    ASSERT_TRUE(op.summary().empty()) << GetOpDefHasDocStringError(op.name());
    ASSERT_TRUE(op.description().empty())
        << GetOpDefHasDocStringError(op.name());
    for (const auto& arg : op.input_arg()) {
      ASSERT_TRUE(arg.description().empty())
          << GetOpDefHasDocStringError(op.name());
    }
    for (const auto& arg : op.output_arg()) {
      ASSERT_TRUE(arg.description().empty())
          << GetOpDefHasDocStringError(op.name());
    }
    for (const auto& attr : op.attr()) {
      ASSERT_TRUE(attr.description().empty())
          << GetOpDefHasDocStringError(op.name());
    }
  }
}
TEST_F(BaseApiTest, AllApiDefInputArgsAreValid) {
TestAllApiDefInputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, AllApiDefOutputArgsAreValid) {
TestAllApiDefOutputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, AllApiDefAttributeNamesAreValid) {
TestAllApiDefAttributeNamesAreValid(ops_, api_defs_map_);
}
TEST_F(BaseApiTest, DeprecationSetCorrectly) {
TestDeprecatedAttributesSetCorrectly(api_defs_map_);
}
TEST_F(BaseApiTest, DeprecationVersionSetCorrectly) {
TestDeprecationVersionSetCorrectly(api_defs_map_);
}
// Fixture that exports all registered ops and loads the Python-specific
// ApiDefs once per test.
class PythonApiTest : public ::testing::Test {
 protected:
  PythonApiTest() {
    OpRegistry::Global()->Export(false, &ops_);
    // (Removed an unused `multi_line_fields` local that was constructed and
    // never read.)
    Env* env = Env::Default();
    GetGoldenApiDefs(env, PythonApiDefDir(), &api_defs_map_);
  }
  OpList ops_;                                     // All registered ops.
  std::unordered_map<string, ApiDef> api_defs_map_;  // Python ApiDefs.
};
TEST_F(PythonApiTest, AllApiDefsHaveCorrespondingOp) {
TestAllApiDefsHaveCorrespondingOp(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefInputArgsAreValid) {
TestAllApiDefInputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefOutputArgsAreValid) {
TestAllApiDefOutputArgsAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, AllApiDefAttributeNamesAreValid) {
TestAllApiDefAttributeNamesAreValid(ops_, api_defs_map_);
}
TEST_F(PythonApiTest, DeprecationSetCorrectly) {
TestDeprecatedAttributesSetCorrectly(api_defs_map_);
}
TEST_F(PythonApiTest, DeprecationVersionSetCorrectly) {
TestDeprecationVersionSetCorrectly(api_defs_map_);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/api.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/api_def/api_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6479cac6-c669-44ce-a8f3-f49af593b676 | cpp | tensorflow/tensorflow | rocm_version_parser | third_party/xla/xla/stream_executor/rocm/rocm_version_parser.cc | third_party/xla/xla/stream_executor/rocm/rocm_version_parser_test.cc | #include "xla/stream_executor/rocm/rocm_version_parser.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/stream_executor/semantic_version.h"
namespace stream_executor {
// Splits a packed ROCm version integer (major * 10^7 + minor * 10^5 + patch)
// into a SemanticVersion. Negative inputs are rejected.
absl::StatusOr<SemanticVersion> ParseRocmVersion(int rocm_version) {
  if (rocm_version < 0) {
    return absl::InvalidArgumentError("Version numbers cannot be negative.");
  }
  constexpr int kMajorDivisor = 10'000'000;
  constexpr int kPatchModulus = 100'000;
  const int major = rocm_version / kMajorDivisor;
  const int minor = (rocm_version % kMajorDivisor) / kPatchModulus;
  const int patch = rocm_version % kPatchModulus;
  return SemanticVersion(major, minor, patch);
}
} | #include "xla/stream_executor/rocm/rocm_version_parser.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "rocm/include/hip/hip_version.h"
#include "xla/stream_executor/semantic_version.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/test.h"
namespace stream_executor {
namespace {
using tsl::testing::IsOkAndHolds;
using tsl::testing::StatusIs;
TEST(ParseRocmVersionTest, Simple) {
EXPECT_THAT(stream_executor::ParseRocmVersion(60'100'002),
IsOkAndHolds(SemanticVersion(6, 1, 2)));
}
TEST(RocmVersionParserTest, NegativeIntegerIsNotAValidVersion) {
EXPECT_THAT(ParseRocmVersion(-42),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(RocmVersionParserTest, AlignsWithHIPVersion) {
EXPECT_THAT(ParseRocmVersion(HIP_VERSION),
IsOkAndHolds(SemanticVersion{HIP_VERSION_MAJOR, HIP_VERSION_MINOR,
HIP_VERSION_PATCH}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_version_parser.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/stream_executor/rocm/rocm_version_parser_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c22eae69-295e-43e4-812d-9bd98c6b6375 | cpp | tensorflow/tensorflow | and | tensorflow/lite/experimental/shlo/ops/and.cc | tensorflow/lite/experimental/shlo/ops/and_test.cc | #include "tensorflow/lite/experimental/shlo/ops/and.h"
#include <functional>
#include "absl/status/status.h"
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/dispatch.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise.h"
#include "tensorflow/lite/experimental/shlo/ops/util.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
namespace shlo_ref {
template <DataType>
struct And : std::bit_and<void> {};
template <>
struct And<DataType::kI1> : std::logical_and<void> {};
AndOp Create(AndOp::Attributes) { return {}; }
// Validates shapes and element types for stablehlo.and: broadcasts/propagates
// the operand shapes into the output shape, requires boolean or integer
// operands, and requires both operands to share the output's baseline type.
absl::Status Prepare(AndOp& op, const Tensor& lhs, const Tensor& rhs,
                     Tensor& output) {
  SHLO_REF_RETURN_ON_ERROR(Propagate(lhs.shape(), rhs.shape(), output.shape()));
  SHLO_REF_RETURN_ON_ERROR(
      CheckSupportedTypes(CheckCtx("and"), lhs, IsBoolTensor, IsIntTensor));
  SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("and"), lhs, output));
  SHLO_REF_RETURN_ON_ERROR(CheckSameBaselineType(CheckCtx("and"), rhs, output));
  return absl::OkStatus();
}
// Element-wise AND: bitwise (std::bit_and) for integer tensors, logical
// (std::logical_and) for boolean tensors.
absl::Status Evaluate(AndOp& op, const Tensor& lhs, const Tensor& rhs,
                      Tensor& output) {
  if (IsIntTensor(lhs)) {
    // DISPATCH_INT selects the concrete integer instantiation of
    // EvaluateNoQuantization; presumably it returns from this function for
    // every supported integer type (see dispatch.h).
    And<DataType::kSI32> and_func;
    DISPATCH_INT(detail::EvaluateNoQuantization, lhs.tensor_element_type(),
                 and_func, lhs, rhs, output);
  } else if (IsBoolTensor(lhs)) {
    And<DataType::kI1> and_func;
    detail::EvaluateNoQuantization<DataType::kI1>(and_func, lhs, rhs, output);
    return absl::OkStatus();
  }
  // Reached only for types Prepare should have rejected.
  return absl::FailedPreconditionError(
      "stablehlo.and: Unsupported tensor type in Evaluate.");
}
} | #include "tensorflow/lite/experimental/shlo/ops/and.h"
#include <functional>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/shlo/data_type.h"
#include "tensorflow/lite/experimental/shlo/ops/binary_elementwise_test_util.h"
#include "tensorflow/lite/experimental/shlo/ops/test_util.h"
#include "tensorflow/lite/experimental/shlo/shape.h"
#include "tensorflow/lite/experimental/shlo/status_matcher.h"
#include "tensorflow/lite/experimental/shlo/tensor.h"
using testing::FloatEq;
using testing::Pointwise;
namespace shlo_ref {
template <>
struct ParamName<AndOp> {
static std::string Get() { return "And"; }
};
template <DataType>
struct And : std::bit_and<void> {};
template <>
struct And<DataType::kI1> : std::logical_and<void> {};
template <>
struct SupportedOpDataType<AndOp> {
static constexpr DataType kStorageType = DataType::kSI32;
};
namespace {
INSTANTIATE_TYPED_TEST_SUITE_P(And, BinaryElementwiseOpShapePropagationTest,
AndOp, TestParamNames);
using MultipyBaselineContraintTypes = BinaryElementwiseBaselineConstraintTypes<
AndOp, ConcatTypes<BoolTestType, BaselineConstraintIntTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(
And, BinaryElementwiseSameBaselineElementTypeConstraintTest,
MultipyBaselineContraintTypes, TestParamNames);
using UnsupportedTypes =
WithOpTypes<AndOp, ConcatTypes<FloatTestTypes, PerTensorQuantizedTestTypes,
PerAxisQuantizedTestTypes>>;
INSTANTIATE_TYPED_TEST_SUITE_P(And, BinaryElementwiseUnsupportedTypeTest,
UnsupportedTypes, TestParamNames);
using SupportedTypes = ConcatTypes<BoolTestType, IntTestTypes>;
template <class T>
struct AndTest : ::testing::Test {};
TYPED_TEST_SUITE(AndTest, SupportedTypes, TestParamNames);
TYPED_TEST(AndTest, ArithmeticTestTypesTensorsWork) {
using StorageT = typename TypeParam::StorageT;
const Shape shape({2, 3, 4});
Vector<StorageT> lhs_data =
RandomBuffer<TypeParam::kStorage>(shape, -50, 50);
Vector<StorageT> rhs_data =
RandomBuffer<TypeParam::kStorage>(shape, 1, 5);
Vector<StorageT> output_data(shape.NumElements());
Tensor lhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = lhs_data.data()};
Tensor rhs_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = rhs_data.data()};
Tensor output_tensor{
.type = TensorType{.shape = shape, .element_type = TypeParam::kStorage},
.data = output_data.data()};
Vector<StorageT> expected_data(shape.NumElements());
absl::c_transform(lhs_data, rhs_data, expected_data.begin(),
And<TypeParam::kStorage>());
auto op = Create(AndOp::Attributes{});
ASSERT_OK(Prepare(op, lhs_tensor, rhs_tensor, output_tensor));
ASSERT_OK(Evaluate(op, lhs_tensor, rhs_tensor, output_tensor));
EXPECT_THAT(output_data, Pointwise(FloatEq(), expected_data));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/and.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/ops/and_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fa06c82b-257e-4064-877d-65c16a46a30e | cpp | tensorflow/tensorflow | deep_conv2d | tensorflow/core/kernels/deep_conv2d.cc | tensorflow/core/kernels/deep_conv2d_test.cc | #define USE_EIGEN_TENSOR
#define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/deep_conv2d.h"
#include <stdlib.h>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/kernels/winograd_transform.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
// Estimates the total multiply count of the Winograd-style deep convolution:
// per output tile, the input transform, the per-depth element-wise products,
// and the output transform, times the number of tiles covering the output.
static int64_t GetDeepConvCost(int input_tile_rows, int input_tile_cols,
                               int out_tile_rows, int out_tile_cols,
                               int in_depth, int out_depth, int out_rows,
                               int out_cols) {
  // Cost of transforming one input tile.
  const int64_t tile_size = input_tile_rows * input_tile_cols;
  const int64_t input_transform_cost = tile_size * tile_size * in_depth;
  // Cost of the depth-wise products for one tile.
  const int64_t product_cost = tile_size * in_depth * out_depth;
  // Cost of transforming one output tile.
  const int64_t out_tile_size = out_tile_rows * out_tile_cols;
  const int64_t output_transform_cost = out_tile_size * tile_size * out_depth;
  // Number of tiles needed to cover the output, rounding up per dimension.
  const int64_t row_tiles = (out_rows + out_tile_rows - 1) / out_tile_rows;
  const int64_t col_tiles = (out_cols + out_tile_cols - 1) / out_tile_cols;
  const int64_t per_tile_cost =
      input_transform_cost + product_cost + output_transform_cost;
  return row_tiles * col_tiles * per_tile_cost;
}
// Multiply count of a direct convolution: one multiply per filter element,
// per input depth, per output element.
static int64_t GetDirectConvCost(int filter_rows, int filter_cols, int in_depth,
                                 int out_depth, int out_rows, int out_cols) {
  const int64_t per_output_cost =
      static_cast<int64_t>(filter_rows) * filter_cols * in_depth;
  const int64_t num_outputs =
      static_cast<int64_t>(out_depth) * out_rows * out_cols;
  return per_output_cost * num_outputs;
}
// Reads a boolean from the named environment variable. A set variable is
// false only when its value is exactly "0"; any other value (including the
// empty string) is true. Returns `default_val` when the variable is unset.
static bool ReadBoolFromEnvVar(const char* env_var_name, bool default_val) {
  const char* value = getenv(env_var_name);
  if (value == nullptr) {
    return default_val;
  }
  // Equivalent to comparing the value against the string "0".
  const bool is_zero = (value[0] == '0' && value[1] == '\0');
  return !is_zero;
}
// Decides whether the deep (Winograd) convolution path should be used:
// only for 3x3 stride-1 filters, only when opted in via TF_USE_DEEP_CONV2D,
// and only when its estimated cost beats direct convolution.
bool CanUseDeepConv2D(int stride_rows, int stride_cols, int filter_rows,
                      int filter_cols, int in_depth, int out_depth,
                      int out_rows, int out_cols) {
  // The Winograd transform used below only supports 3x3 filters, stride 1.
  if (stride_rows > 1 || stride_cols > 1 || filter_rows != 3 ||
      filter_cols != 3) {
    return false;
  }
  // Deep conv is opt-in via environment variable (off by default).
  if (!ReadBoolFromEnvVar("TF_USE_DEEP_CONV2D", false)) {
    return false;
  }
  WinogradTransform<float> t;
  const int64_t deep_conv_cost = GetDeepConvCost(
      t.input_shape().rows, t.input_shape().cols, t.output_shape().rows,
      t.output_shape().cols, in_depth, out_depth, out_rows, out_cols);
  const int64_t direct_conv_cost = GetDirectConvCost(
      filter_rows, filter_cols, in_depth, out_depth, out_rows, out_cols);
  VLOG(2) << "CanUseDeepConv2D"
          << " deep_conv_cost: " << deep_conv_cost
          << " direct_conv_cost: " << direct_conv_cost << " deep_direct_ratio: "
          << (static_cast<float>(deep_conv_cost) /
              static_cast<float>(direct_conv_cost))
          << " use_deep_conv: " << (deep_conv_cost < direct_conv_cost);
  return deep_conv_cost < direct_conv_cost;
}
typedef Eigen::ThreadPoolDevice CPUDevice;
// Copies the input-depth values for one filter position from `filter_in`
// (where consecutive depths are `out_depth` elements apart — assumes an
// [rows, cols, in_depth, out_depth] filter layout; TODO confirm) into the
// densely packed `filter_buf`, using packet gathers for the bulk and a
// scalar loop for the tail.
template <typename T>
struct CopyFilterDepth {
  void operator()(const Conv2DArgs& args, const T* filter_in, T* filter_buf) {
    typedef typename Eigen::internal::packet_traits<T>::type Packet;
    static constexpr int64_t kPacketSize = (sizeof(Packet) / sizeof(T));
    const int64_t vectorized_size = args.in_depth / kPacketSize;
    const int64_t scalar_size = args.in_depth % kPacketSize;
    // One packet's worth of depths spans out_depth * kPacketSize input
    // elements.
    const int64_t input_stride = args.out_depth * kPacketSize;
    // Vectorized portion: gather strided input values into contiguous
    // packets.
    for (int64_t d = 0; d < vectorized_size; ++d) {
      auto v = Eigen::internal::pgather<T, Packet>(filter_in + d * input_stride,
                                                   args.out_depth);
      Eigen::internal::pstoreu<T>(filter_buf + d * kPacketSize, v);
    }
    // Scalar tail for the remaining depths.
    const int64_t in_scalar_base = vectorized_size * input_stride;
    const int64_t buf_scalar_base = vectorized_size * kPacketSize;
    for (int64_t d = 0; d < scalar_size; ++d) {
      filter_buf[buf_scalar_base + d] =
          filter_in[in_scalar_base + d * args.out_depth];
    }
  }
};
// Applies the filter transform to a range of filters: computes
// out_buffer = transform_matrix * filter_in as a matrix product, then
// scatters the result into `filter_out` in the layout expected by the
// product stage ([tile_spatial, out_depth, shard_rows, shard_cols,
// in_depth] as implied by the stride arithmetic below).
template <typename T>
struct ComputeFilterRangeTransform {
  typedef typename Eigen::internal::packet_traits<T>::type Packet;
  static constexpr int64_t kPacketSize = (sizeof(Packet) / sizeof(T));

  typedef Eigen::Map<
      Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
      MatrixMap;
  typedef Eigen::Map<
      const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
      ConstMatrixMap;

  void operator()(const Conv2DArgs& args,
                  const DeepConv2DTransform<T>* transform,
                  const int64_t od_start, const int64_t num_filters,
                  const int64_t shard_rows, const int64_t shard_cols,
                  const T* filter_in, const int64_t in_stride,
                  const int64_t out_stride, const T* transform_matrix,
                  T* out_buffer, T* filter_out) {
    namespace ei = Eigen::internal;
    const int64_t in_depth = args.in_depth;
    const int64_t base_filter_rows = transform->filter_shape().rows;
    const int64_t base_filter_cols = transform->filter_shape().cols;
    const int64_t base_filter_spatial_size =
        base_filter_rows * base_filter_cols;
    const int64_t tile_rows = transform->input_shape().rows;
    const int64_t tile_cols = transform->input_shape().cols;
    const int64_t tile_spatial_size = tile_rows * tile_cols;
    // C = A * B: transform each filter's spatial positions into the tile
    // domain. A is the transform matrix, B holds the packed filters.
    ConstMatrixMap A(transform_matrix, tile_spatial_size,
                     base_filter_spatial_size);
    ConstMatrixMap B(filter_in, base_filter_spatial_size, in_stride);
    MatrixMap C(out_buffer, tile_spatial_size, in_stride);
    C.noalias() = A * B;
    // Scatter the transformed filters from out_buffer into filter_out,
    // vectorized over the in_depth dimension.
    const int64_t scalar_size = in_depth % kPacketSize;
    const int64_t vectorized_size = in_depth / kPacketSize;
    const int64_t shard_stride = args.in_depth;
    const int64_t out_depth_stride = shard_rows * shard_cols * shard_stride;
    for (int64_t od = 0; od < num_filters; ++od) {
      const int64_t out_depth_buf_base = od * out_depth_stride;
      const int64_t out_depth_base = (od_start + od) * out_depth_stride;
      // TODO(andydavis) Shard filters that are multiples of base filter sizes.
      for (int64_t s_r = 0; s_r < shard_rows; ++s_r) {
        for (int64_t s_c = 0; s_c < shard_cols; ++s_c) {
          const int64_t shard_base = shard_stride * (s_r * shard_cols + s_c);
          for (int64_t i = 0; i < tile_spatial_size; ++i) {
            // Copy in_depth values for this (filter, shard, tile position).
            const int64_t in_base =
                i * in_stride + out_depth_buf_base + shard_base;
            const int64_t out_base =
                i * out_stride + out_depth_base + shard_base;
            for (int64_t d = 0; d < vectorized_size; ++d) {
              auto v =
                  ei::ploadu<Packet>(out_buffer + in_base + d * kPacketSize);
              ei::pstoreu<T>(filter_out + out_base + d * kPacketSize, v);
            }
            // Scalar tail for depths not covered by whole packets.
            const int64_t scalar_base = vectorized_size * kPacketSize;
            for (int64_t d = 0; d < scalar_size; ++d) {
              filter_out[out_base + scalar_base + d] =
                  out_buffer[in_base + scalar_base + d];
            }
          }
        }
      }
    }
  }
};
template <typename T>
struct TransformFilterRange {
  // Transforms the filters in output-depth range [od_start, od_limit).
  // Filters larger than the base transform size are split into
  // shard_rows x shard_cols base-sized shards; each shard is gathered into
  // `filter_buf` (zero-padded where the shard extends past the real filter)
  // and then transformed via ComputeFilterRangeTransform.
  void operator()(const Conv2DArgs& args,
                  const DeepConv2DTransform<T>* transform,
                  const int64_t od_start, const int64_t od_limit,
                  const T* filter_in, const T* transform_matrix, T* out_buffer,
                  T* filter_buf, T* filter_out) {
    const int64_t num_filters = od_limit - od_start;
    const int64_t base_filter_rows = transform->filter_shape().rows;
    const int64_t base_filter_cols = transform->filter_shape().cols;
    const int64_t base_filter_spatial_size =
        base_filter_rows * base_filter_cols;
    // Number of shards needed per dimension: one base shard plus one per
    // ceil(residual / 2) extra rows/cols (shards overlap by one row/col,
    // hence the offset bookkeeping below).
    const int64_t residual_row =
        std::max(int64_t{0}, args.filter_rows - base_filter_rows);
    const int64_t shard_rows = 1 + (residual_row + 2 - 1) / 2;
    const int64_t residual_col =
        std::max(int64_t{0}, args.filter_cols - base_filter_cols);
    const int64_t shard_cols = 1 + (residual_col + 2 - 1) / 2;
    const int64_t shard_stride = args.in_depth;
    const int64_t out_depth_stride = shard_rows * shard_cols * shard_stride;
    const int64_t coord_stride = out_depth_stride * args.out_depth;
    const int64_t filter_buf_stride =
        num_filters * shard_rows * shard_cols * args.in_depth;
    const int64_t tile_stride_rows = transform->output_shape().rows;
    const int64_t tile_stride_cols = transform->output_shape().cols;
    // Zero the staging buffer: shards that extend past the filter bounds
    // must contribute zeros to the transform.
    const int64_t filter_buf_size = base_filter_spatial_size * num_filters *
                                    shard_rows * shard_cols * args.in_depth;
    memset(filter_buf, 0, sizeof(T) * filter_buf_size);
    for (int64_t od = 0; od < num_filters; ++od) {
      const int64_t out_depth_base = od * out_depth_stride;
      for (int64_t s_r = 0; s_r < shard_rows; ++s_r) {
        // Non-first shards skip their first row/col (overlap with previous
        // shard).
        const int64_t row_offset = s_r == 0 ? 0 : 1;
        for (int64_t s_c = 0; s_c < shard_cols; ++s_c) {
          const int64_t col_offset = s_c == 0 ? 0 : 1;
          const int64_t f_r_start = s_r * tile_stride_rows;
          const int64_t f_c_start = s_c * tile_stride_cols;
          const int64_t shard_base = shard_stride * (s_r * shard_cols + s_c);
          for (int64_t b_r = row_offset; b_r < base_filter_rows; ++b_r) {
            const int64_t f_r = f_r_start + b_r;
            if (f_r >= args.filter_rows) continue;
            for (int64_t b_c = col_offset; b_c < base_filter_cols; ++b_c) {
              const int64_t f_c = f_c_start + b_c;
              if (f_c >= args.filter_cols) continue;
              const int64_t in_index =
                  args.out_depth *
                      (args.in_depth * (f_r * args.filter_cols + f_c)) +
                  (od_start + od);
              const int64_t buf_index =
                  filter_buf_stride * (b_r * base_filter_cols + b_c) +
                  out_depth_base + shard_base;
              CopyFilterDepth<T>()(args, filter_in + in_index,
                                   filter_buf + buf_index);
            }
          }
        }
      }
    }
    // Apply the Winograd filter transform to the gathered range.
    ComputeFilterRangeTransform<T>()(args, transform, od_start, num_filters,
                                     shard_rows, shard_cols, filter_buf,
                                     filter_buf_stride, coord_stride,
                                     transform_matrix, out_buffer, filter_out);
  }
};
template <typename T>
struct TransformFilters {
  // Transforms all filters: allocates the transform matrix once, then runs a
  // sharded job that processes output depths in cache-sized unrolled groups.
  void operator()(OpKernelContext* ctx, const Conv2DArgs& args,
                  const DeepConv2DTransform<T>* transform,
                  const int64_t filter_shards_row,
                  const int64_t filter_shards_col, const T* filter_in,
                  T* filter_out) {
    const int64_t in_depth = args.in_depth;
    const int64_t out_depth = args.out_depth;
    const int64_t tile_rows = transform->input_shape().rows;
    const int64_t tile_cols = transform->input_shape().cols;
    const int64_t tile_spatial_size = tile_rows * tile_cols;
    const int64_t base_filter_rows = transform->filter_shape().rows;
    const int64_t base_filter_cols = transform->filter_shape().cols;
    const int64_t base_filter_spatial_size =
        base_filter_rows * base_filter_cols;
    const int64_t filter_shards_total = filter_shards_row * filter_shards_col;
    // Budget working-set size against a nominal 256KB L2 cache to pick how
    // many filters to transform per unrolled group.
    const int64_t cache_size = (256LL << 10) / sizeof(T);
    const int64_t filter_transform_matrix_size =
        tile_spatial_size * base_filter_spatial_size;
    const int64_t filter_total_size =
        base_filter_spatial_size * in_depth * filter_shards_total;
    const int64_t filter_transform_buffer_size =
        base_filter_spatial_size * filter_shards_total * in_depth;
    const int64_t filter_out_buf_size =
        tile_spatial_size * filter_shards_total * in_depth;
    const int64_t per_filter_cost =
        filter_total_size + filter_transform_buffer_size + filter_out_buf_size;
    const int64_t num_filters_cache =
        std::max(int64_t{1},
                 (cache_size - filter_transform_matrix_size) / per_filter_cost);
    const int64_t num_filters_transform =
        std::min(out_depth, num_filters_cache);
    Tensor filter_transform_matrix;
    OP_REQUIRES_OK(
        ctx, ctx->allocate_temp(
                 DataTypeToEnum<T>::value,
                 TensorShape({tile_spatial_size, base_filter_spatial_size}),
                 &filter_transform_matrix));
    T* transform_matrix = filter_transform_matrix.template flat<T>().data();
    transform->GetFilterTransformMatrix(
        tile_spatial_size, base_filter_spatial_size, transform_matrix);
    auto shard = [&ctx, &args, &transform, &base_filter_rows, &base_filter_cols,
                  &num_filters_transform, &in_depth, &filter_shards_row,
                  &filter_shards_col, &tile_spatial_size, &filter_in,
                  &transform_matrix,
                  &filter_out](int64_t start, int64_t limit) {
      // Per-shard scratch buffers (allocated inside the shard so each worker
      // has its own).
      Tensor filter_transform_buffer;
      OP_REQUIRES_OK(ctx,
                     ctx->allocate_temp(
                         DataTypeToEnum<T>::value,
                         TensorShape({base_filter_rows, base_filter_cols,
                                      num_filters_transform, filter_shards_row,
                                      filter_shards_col, in_depth}),
                         &filter_transform_buffer));
      T* filter_buf = filter_transform_buffer.template flat<T>().data();
      Tensor filter_output_buffer;
      OP_REQUIRES_OK(
          ctx,
          ctx->allocate_temp(
              DataTypeToEnum<T>::value,
              TensorShape({tile_spatial_size, num_filters_transform,
                           filter_shards_row, filter_shards_col, in_depth}),
              &filter_output_buffer));
      T* out_buffer = filter_output_buffer.template flat<T>().data();
      const int64_t num_filters = limit - start;
      const int64_t od_unroll = num_filters_transform;
      // NOTE(review): od_unroll_limit is computed from num_filters but the
      // loop starts at `start`; this is only correct when start == 0, which
      // holds because Shard below is invoked with max_parallelism == 1
      // (single shard covering [0, out_depth)). TODO: confirm if this ever
      // changes.
      const int64_t od_unroll_limit = (num_filters / od_unroll) * od_unroll;
      for (int64_t od = start; od < od_unroll_limit; od += od_unroll) {
        TransformFilterRange<T>()(args, transform, od, od + od_unroll,
                                  filter_in, transform_matrix, out_buffer,
                                  filter_buf, filter_out);
      }
      // Remainder group (fewer than od_unroll filters).
      if (od_unroll_limit < limit) {
        TransformFilterRange<T>()(args, transform, od_unroll_limit, limit,
                                  filter_in, transform_matrix, out_buffer,
                                  filter_buf, filter_out);
      }
    };
    auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
    const int64_t shard_cost = args.filter_rows * args.filter_cols * in_depth *
                               filter_shards_total * tile_spatial_size;
    // max_parallelism == 1: run the whole range as a single shard.
    Shard(1, worker_threads.workers, out_depth, shard_cost, shard);
  }
};
template <typename T>
class GemmFilterPacker {
 public:
  // Packs a row-major LHS (transformed filters) into Eigen's internal
  // blocked GEBP format so the gebp kernel in GemmState can consume it.
  typedef Eigen::internal::const_blas_data_mapper<T, int64_t, Eigen::RowMajor>
      LhsMapper;
  typedef Eigen::internal::gebp_traits<T, T> Traits;
  Eigen::internal::gemm_pack_lhs<
      T, int64_t, LhsMapper, Traits::mr, Traits::LhsProgress,
      typename Traits::LhsPacket4Packing, Eigen::RowMajor>
      pack_lhs;
  // `lhs_input` is a rows_ x depth_ row-major matrix; `lhs_block` receives
  // the packed output. Both must outlive this object.
  GemmFilterPacker(const int64_t rows, const int64_t depth, const T* lhs_input,
                   T* lhs_block)
      : rows_(rows),
        depth_(depth),
        lhs_block_(lhs_block),
        lhs_mapper_(lhs_input, depth_) {}
  void Run() { pack_lhs(lhs_block_, lhs_mapper_, depth_, rows_); }

 private:
  const int64_t rows_;
  const int64_t depth_;
  T* lhs_block_;
  LhsMapper lhs_mapper_;
};
template <typename T>
struct PackFilters {
  // Packs the transformed filters into GEBP LHS format, one packed tensor
  // per tile-spatial coordinate, sharded across worker threads.
  void operator()(OpKernelContext* ctx, const Conv2DArgs& args,
                  const int64_t tile_spatial_size,
                  const int64_t filter_shards_row,
                  const int64_t filter_shards_col,
                  const T* filter_transform_data,
                  std::vector<Tensor>* packed_filters) {
    const int64_t in_depth = args.in_depth;
    const int64_t out_depth = args.out_depth;
    const int64_t num_filters =
        filter_shards_row * filter_shards_col * out_depth;
    auto shard = [&ctx, &packed_filters, &filter_transform_data, &in_depth,
                  &out_depth, &filter_shards_row, &filter_shards_col,
                  &num_filters](int64_t start, int64_t limit) {
      const int64_t filter_coord_stride = num_filters * in_depth;
      // Each i is one tile-spatial coordinate; allocate and pack its slice.
      for (int64_t i = start; i < limit; ++i) {
        OP_REQUIRES_OK(
            ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                    TensorShape({out_depth, filter_shards_row,
                                                 filter_shards_col, in_depth}),
                                    &(*packed_filters)[i]));
        T* packed_filter = (*packed_filters)[i].template flat<T>().data();
        GemmFilterPacker<T> packer(
            num_filters, in_depth,
            filter_transform_data + i * filter_coord_stride, packed_filter);
        packer.Run();
      }
    };
    auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
    Shard(worker_threads.num_threads, worker_threads.workers, tile_spatial_size,
          num_filters * in_depth, shard);
  }
};
template <typename T>
class GemmState {
 public:
  // Wraps one GEBP multiply: out_buffer (rows_ x cols_, col-major) =
  // lhs_block (pre-packed filters) * rhs_input (depth_ x cols_ input tiles).
  // Call PackRhs() then Compute().
  typedef Eigen::internal::const_blas_data_mapper<T, int64_t, Eigen::ColMajor>
      RhsMapper;
  typedef Eigen::internal::blas_data_mapper<T, int64_t, Eigen::ColMajor>
      OutputMapper;
  typedef Eigen::internal::gebp_traits<T, T> Traits;
  Eigen::internal::gemm_pack_rhs<T, int64_t, RhsMapper, Traits::nr,
                                 Eigen::ColMajor>
      pack_rhs;
  Eigen::internal::gebp_kernel<T, T, int64_t, OutputMapper, Traits::mr,
                               Traits::nr, false, false>
      gebp;
  // All pointers are borrowed and must outlive this object.
  GemmState(const int64_t rows, const int64_t cols, const int64_t depth,
            const int64_t out_buffer_size, const T* lhs_block,
            const T* rhs_input, T* rhs_block, T* out_buffer)
      : rows_(rows),
        cols_(cols),
        depth_(depth),
        out_buffer_size_(out_buffer_size),
        lhs_block_(lhs_block),
        rhs_block_(rhs_block),
        out_buffer_(out_buffer),
        rhs_mapper_(rhs_input, depth_),
        out_mapper_(out_buffer, rows_) {}
  void PackRhs() { pack_rhs(rhs_block_, rhs_mapper_, depth_, cols_); }
  void Compute() {
    // gebp accumulates, so the output must be zeroed first.
    memset(out_buffer_, 0, sizeof(T) * out_buffer_size_);
    gebp(out_mapper_, lhs_block_, rhs_block_, rows_, depth_, cols_, 1.0);
  }

 private:
  const int64_t rows_;
  const int64_t cols_;
  const int64_t depth_;
  const int64_t out_buffer_size_;
  const T* lhs_block_;
  T* rhs_block_;
  T* out_buffer_;
  RhsMapper rhs_mapper_;
  OutputMapper out_mapper_;
};
template <typename T>
struct CopyInputTile {
  // Copies one input tile (tile_rows x tile_cols x in_depth) starting at
  // (in_r_start, in_c_start) into `tile_buffer`, skipping coordinates that
  // fall outside the input (the caller pre-zeroes the buffer, so out-of-range
  // positions stay zero-padded).
  void operator()(const Conv2DArgs& args,
                  const DeepConv2DTransform<T>* transform,
                  const int64_t num_tiles, const int64_t in_r_start,
                  const int64_t in_c_start, const T* input, T* tile_buffer) {
    typedef typename Eigen::internal::packet_traits<T>::type Packet;
    static const int64_t kPacketSize = (sizeof(Packet) / sizeof(T));
    const int64_t tile_rows = transform->input_shape().rows;
    const int64_t tile_cols = transform->input_shape().cols;
    const int64_t coord_stride = num_tiles * args.in_depth;
    const int64_t input_vectorized_size =
        (args.in_depth / kPacketSize) * kPacketSize;
    const int64_t input_scalar_size = args.in_depth % kPacketSize;
    for (int64_t r = 0; r < tile_rows; ++r) {
      const int64_t in_r = in_r_start + r;
      if (in_r < 0 || in_r >= args.in_rows) continue;
      for (int64_t c = 0; c < tile_cols; ++c) {
        const int64_t in_c = in_c_start + c;
        if (in_c < 0 || in_c >= args.in_cols) continue;
        auto* in = input + (in_r * args.in_cols + in_c) * args.in_depth;
        // NOTE(review): index uses `r * tile_rows + c`; this is correct only
        // when tile_rows == tile_cols (true for the square Winograd tiles
        // used here) — confirm if non-square transforms are ever added.
        auto* tile = tile_buffer + coord_stride * (r * tile_rows + c);
        // Vectorized copy of whole packets, then a scalar tail.
        for (int64_t d = 0; d < input_vectorized_size; d += kPacketSize) {
          auto v = Eigen::internal::ploadu<Packet>(in + d);
          Eigen::internal::pstoreu<T>(tile, v);
          tile += kPacketSize;
        }
        for (int64_t d = 0; d < input_scalar_size; ++d) {
          tile[d] = in[input_vectorized_size + d];
        }
      }
    }
  }
};
template <typename T>
struct TransformInputTiles {
  // Copies `num_tiles` horizontally-adjacent input tiles into a zero-padded
  // buffer and applies the Winograd input transform to all of them at once
  // via a single matrix multiply.
  typedef Eigen::Map<
      Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
      MatrixMap;
  typedef Eigen::Map<
      const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
      ConstMatrixMap;
  void operator()(const Conv2DArgs& args,
                  const DeepConv2DTransform<T>* transform,
                  const int64_t num_tiles, const int64_t in_r_start,
                  const int64_t in_c_start, const T* input,
                  const T* transform_matrix, T* tile_buffer,
                  T* tile_transform) {
    const int64_t tile_rows = transform->input_shape().rows;
    const int64_t tile_cols = transform->input_shape().cols;
    const int64_t tile_spatial_size = tile_rows * tile_cols;
    const int64_t tile_stride_cols = transform->output_shape().cols;
    const int64_t coord_stride = num_tiles * args.in_depth;
    const int64_t num_tiles_stride = args.in_depth;
    // Zero first so out-of-bounds tile positions contribute zero padding.
    memset(tile_buffer, 0, sizeof(T) * tile_spatial_size * coord_stride);
    const int64_t in_r = in_r_start;
    for (int64_t t = 0; t < num_tiles; ++t) {
      const int64_t num_tiles_base = t * num_tiles_stride;
      const int64_t in_c = in_c_start + t * tile_stride_cols;
      CopyInputTile<T>()(args, transform, num_tiles, in_r, in_c, input,
                         tile_buffer + num_tiles_base);
    }
    // tile_transform = transform_matrix * tile_buffer.
    ConstMatrixMap A(transform_matrix, tile_spatial_size, tile_spatial_size);
    ConstMatrixMap B(tile_buffer, tile_spatial_size, coord_stride);
    MatrixMap C(tile_transform, tile_spatial_size, coord_stride);
    C.noalias() = A * B;
  }
};
template <typename T>
struct TransformOutputTile {
  // Applies the Winograd output transform to the GEMM results for a row of
  // tiles, then scatters the transformed values into the output tensor,
  // accumulating contributions from overlapping filter shards.
  typedef Eigen::Map<
      Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
      MatrixMap;
  typedef Eigen::Map<
      const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
      ConstMatrixMap;
  void operator()(const Conv2DArgs& args,
                  const DeepConv2DTransform<T>* transform,
                  const int64_t num_tiles, const int64_t in_r,
                  const int64_t in_c, const int64_t filter_shards_row,
                  const int64_t filter_shards_col,
                  const T* out_transform_matrix, const T* out_buffer,
                  T* out_transform_buffer, T* output) {
    const int64_t tile_rows = transform->input_shape().rows;
    const int64_t tile_cols = transform->input_shape().cols;
    const int64_t tile_spatial_size = tile_rows * tile_cols;
    const int64_t out_buf_stride =
        num_tiles * args.out_depth * filter_shards_row * filter_shards_col;
    const int64_t out_tile_rows = transform->output_shape().rows;
    const int64_t out_tile_cols = transform->output_shape().cols;
    const int64_t out_tile_spatial_size = out_tile_rows * out_tile_cols;
    // out_transform_buffer = out_transform_matrix * out_buffer.
    ConstMatrixMap A(out_transform_matrix, out_tile_spatial_size,
                     tile_spatial_size);
    ConstMatrixMap B(out_buffer, tile_spatial_size, out_buf_stride);
    MatrixMap C(out_transform_buffer, out_tile_spatial_size, out_buf_stride);
    C.noalias() = A * B;
    const int64_t tile_stride_rows = transform->output_shape().rows;
    const int64_t tile_stride_cols = transform->output_shape().cols;
    const int64_t out_depth_stride = filter_shards_row * filter_shards_col;
    const int64_t num_tiles_stride = args.out_depth * out_depth_stride;
    for (int64_t t = 0; t < num_tiles; ++t) {
      const int64_t tile_base = t * num_tiles_stride;
      for (int64_t od = 0; od < args.out_depth; ++od) {
        const int64_t out_depth_base = od * out_depth_stride;
        for (int64_t sr = 0; sr < filter_shards_row; ++sr) {
          for (int64_t sc = 0; sc < filter_shards_col; ++sc) {
            const int64_t shard_base = sr * filter_shards_col + sc;
            const int64_t out_buf_base =
                tile_base + out_depth_base + shard_base;
            // Each shard's output is offset by its position within the
            // (larger-than-base) filter.
            const int64_t out_r_start =
                in_r + args.pad_rows - sr * tile_stride_rows;
            const int64_t out_c_start = (in_c + t * tile_stride_cols) +
                                        args.pad_cols - sc * tile_stride_cols;
            if (out_r_start < 0 || out_r_start >= args.out_rows ||
                out_c_start < 0 || out_c_start >= args.out_cols) {
              continue;
            }
            // The first shard overwrites; subsequent shards accumulate into
            // the same output locations.
            const bool inc_output = (sr == 0 && sc == 0) ? false : true;
            for (int64_t ot_row = 0; ot_row < out_tile_rows; ++ot_row) {
              const int64_t out_r = out_r_start + ot_row;
              if (out_r >= args.out_rows) continue;
              for (int64_t ot_col = 0; ot_col < out_tile_cols; ++ot_col) {
                const int64_t out_c = out_c_start + ot_col;
                if (out_c >= args.out_cols) continue;
                const int64_t out_buf_index = ot_row * out_tile_cols + ot_col;
                const T out_val =
                    out_transform_buffer[out_buf_base +
                                         out_buf_index * out_buf_stride];
                const int64_t output_index =
                    args.out_depth * (out_r * args.out_cols + out_c) + od;
                if (inc_output) {
                  output[output_index] += out_val;
                } else {
                  output[output_index] = out_val;
                }
              }
            }
          }
        }
      }
    }
  }
};
template <typename T>
struct Conv2DState {
  // Aggregates the constant parameters and pre-allocated scratch buffers
  // shared across ComputeConv2D calls within one shard. All pointers are
  // borrowed and must outlive the state object.
  Conv2DState(const int64_t tile_spatial_size, const int64_t filter_shards_row,
              const int64_t filter_shards_col, const T* input,
              const T* tile_transform_matrix, const T* output_transform_matrix,
              T* buffer1, T* buffer2, T* packed_tile_buffer,
              T* gemm_output_buffer)
      : tile_spatial_size(tile_spatial_size),
        filter_shards_row(filter_shards_row),
        filter_shards_col(filter_shards_col),
        input(input),
        tile_transform_matrix(tile_transform_matrix),
        output_transform_matrix(output_transform_matrix),
        buffer1(buffer1),
        buffer2(buffer2),
        packed_tile_buffer(packed_tile_buffer),
        gemm_output_buffer(gemm_output_buffer) {}

  const int64_t tile_spatial_size;   // rows * cols of one input tile.
  const int64_t filter_shards_row;   // filter shards per row dimension.
  const int64_t filter_shards_col;   // filter shards per col dimension.
  const T* input;                    // input tensor data (per batch image).
  const T* tile_transform_matrix;    // Winograd input transform matrix.
  const T* output_transform_matrix;  // Winograd output transform matrix.
  T* buffer1;                        // scratch: tiles / gemm outputs.
  T* buffer2;                        // scratch: transformed tiles / outputs.
  T* packed_tile_buffer;             // scratch: packed GEBP RHS block.
  T* gemm_output_buffer;             // scratch: single-coordinate gemm out.
};
template <typename T>
struct ComputeConv2D {
  // Computes the convolution for `num_tiles` horizontally-adjacent tiles at
  // (in_r, in_c): input transform -> one GEMM per tile-spatial coordinate
  // against the pre-packed filters -> output transform + scatter.
  void operator()(const Conv2DArgs& args,
                  const DeepConv2DTransform<T>* transform,
                  const Conv2DState<T>& cs, const int64_t in_r,
                  const int64_t in_c, const int64_t num_tiles,
                  const std::vector<Tensor>& packed_filters, const T* input,
                  T* output) {
    // Input transform writes transformed tiles into cs.buffer2.
    TransformInputTiles<T>()(args, transform, num_tiles, in_r, in_c, input,
                             cs.tile_transform_matrix, cs.buffer1, cs.buffer2);
    const int64_t in_depth = args.in_depth;
    const int64_t out_depth = args.out_depth;
    const int64_t num_filters =
        cs.filter_shards_row * cs.filter_shards_col * out_depth;
    const int64_t tile_coord_stride = num_tiles * in_depth;
    const int64_t gemm_out_buf_size = num_tiles * num_filters;
    const int64_t gemm_out_buf_bytes = gemm_out_buf_size * sizeof(T);
    // One GEMM per tile-spatial coordinate; results are collected back into
    // cs.buffer1 for the output transform.
    for (int64_t i = 0; i < cs.tile_spatial_size; ++i) {
      GemmState<T> gemm(num_filters, num_tiles, in_depth, gemm_out_buf_size,
                        packed_filters[i].template flat<T>().data(),
                        cs.buffer2 + i * tile_coord_stride,
                        cs.packed_tile_buffer, cs.gemm_output_buffer);
      gemm.PackRhs();
      gemm.Compute();
      memcpy(cs.buffer1 + i * gemm_out_buf_size, cs.gemm_output_buffer,
             gemm_out_buf_bytes);
    }
    TransformOutputTile<T>()(args, transform, num_tiles, in_r, in_c,
                             cs.filter_shards_row, cs.filter_shards_col,
                             cs.output_transform_matrix, cs.buffer1, cs.buffer2,
                             output);
  }
};
namespace functor {
template <typename T>
struct DeepConv2D<CPUDevice, T> {
  // CPU implementation of Conv2D via the Winograd transform: transforms the
  // filters once, packs them per tile-spatial coordinate, then shards the
  // per-batch tile computation across worker threads.
  void operator()(OpKernelContext* ctx, const Conv2DArgs& args, const T* input,
                  const T* filter, T* output) {
    std::unique_ptr<DeepConv2DTransform<T>> transform(new WinogradTransform<T>);
    const int64_t in_depth = args.in_depth;
    const int64_t out_depth = args.out_depth;
    const int64_t tile_rows = transform->input_shape().rows;
    const int64_t tile_cols = transform->input_shape().cols;
    const int64_t tile_spatial_size = tile_rows * tile_cols;
    const int64_t out_tile_rows = transform->output_shape().rows;
    const int64_t out_tile_cols = transform->output_shape().cols;
    const int64_t out_tile_spatial_size = out_tile_rows * out_tile_cols;
    const int64_t base_filter_rows = transform->filter_shape().rows;
    // Number of base-sized shards needed to cover an oversized filter.
    // NOTE(review): the column residual is computed against base_filter_rows,
    // not filter_shape().cols — harmless for the square 3x3 Winograd base
    // filter, but confirm before adding non-square transforms.
    const int64_t filter_residual_row =
        std::max(int64_t{0}, args.filter_rows - base_filter_rows);
    const int64_t filter_shards_row = 1 + (filter_residual_row + 2 - 1) / 2;
    const int64_t filter_residual_col =
        std::max(int64_t{0}, args.filter_cols - base_filter_rows);
    const int64_t filter_shards_col = 1 + (filter_residual_col + 2 - 1) / 2;
    // Transform all filters up front; the transformed filters are shared by
    // every shard below.
    Tensor filter_transform;
    OP_REQUIRES_OK(
        ctx, ctx->allocate_temp(
                 DataTypeToEnum<T>::value,
                 TensorShape({tile_rows, tile_cols, out_depth,
                              filter_shards_row, filter_shards_col, in_depth}),
                 &filter_transform));
    T* filter_transform_data = filter_transform.template flat<T>().data();
    TransformFilters<T>()(ctx, args, transform.get(), filter_shards_row,
                          filter_shards_col, filter, filter_transform_data);
    // Pack the transformed filters into GEBP LHS format, one tensor per
    // tile-spatial coordinate.
    std::vector<Tensor> packed_filters(tile_spatial_size);
    PackFilters<T>()(ctx, args, tile_spatial_size, filter_shards_row,
                     filter_shards_col, filter_transform_data, &packed_filters);
    // Materialize the input and output transform matrices once.
    Tensor tile_transform_matrix_tensor;
    OP_REQUIRES_OK(ctx, ctx->allocate_temp(
                            DataTypeToEnum<T>::value,
                            TensorShape({tile_spatial_size, tile_spatial_size}),
                            &tile_transform_matrix_tensor));
    T* tile_transform_matrix =
        tile_transform_matrix_tensor.template flat<T>().data();
    transform->GetInputTransformMatrix(tile_spatial_size, tile_spatial_size,
                                       tile_transform_matrix);
    Tensor output_transform_matrix_tensor;
    OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                           TensorShape({out_tile_spatial_size,
                                                        tile_spatial_size}),
                                           &output_transform_matrix_tensor));
    T* output_transform_matrix =
        output_transform_matrix_tensor.template flat<T>().data();
    transform->GetOutputTransformMatrix(
        out_tile_spatial_size, tile_spatial_size, output_transform_matrix);
    // Each shard processes a range of batch images independently.
    auto shard = [&ctx, &args, &transform, &packed_filters, &in_depth,
                  out_depth, out_tile_rows, out_tile_cols, filter_shards_row,
                  filter_shards_col, tile_spatial_size, &input,
                  &tile_transform_matrix, &output_transform_matrix,
                  &output](int64_t batch_start, int64_t batch_limit) {
      const int64_t row_tiles =
          (args.out_rows + out_tile_rows - 1) / out_tile_rows +
          filter_shards_row - 1;
      const int64_t col_tiles =
          (args.out_cols + out_tile_cols - 1) / out_tile_cols +
          filter_shards_col - 1;
      const int64_t filter_shard_size = filter_shards_row * filter_shards_col;
      const int64_t out_tile_spatial_size = out_tile_rows * out_tile_cols;
      // Budget the per-shard working set against a nominal 256KB cache to
      // choose how many tiles to process per ComputeConv2D call.
      const int64_t cache_size = (256LL << 10) / sizeof(T);
      const int64_t tile_transform_matrix_size =
          tile_spatial_size * tile_spatial_size;
      const int64_t output_transform_matrix_size =
          out_tile_spatial_size * tile_spatial_size;
      const int64_t filter_depth_size =
          in_depth * out_depth * filter_shard_size;
      const bool small_filter = ((filter_depth_size * 100) / cache_size) <= 25;
      const int64_t cache_reserve_size =
          small_filter ? filter_depth_size : 1024;
      const int64_t total_fixed_cost = tile_transform_matrix_size +
                                       output_transform_matrix_size +
                                       cache_reserve_size;
      const int64_t buffer1_per_tile_size =
          tile_spatial_size * std::max(in_depth, out_depth * filter_shard_size);
      const int64_t buffer2_per_tile_size =
          std::max(tile_spatial_size * in_depth,
                   out_tile_spatial_size * out_depth * filter_shard_size);
      const int64_t packed_tile_per_tile_size = in_depth;
      const int64_t gemm_out_per_tile_size = out_depth * filter_shard_size;
      const int64_t total_per_tile_cost =
          buffer1_per_tile_size + buffer2_per_tile_size +
          packed_tile_per_tile_size + gemm_out_per_tile_size;
      // `int64` here is TF's alias for int64_t (used elsewhere as int64_t).
      const int64_t num_tiles_cache = std::max(
          int64{4}, (cache_size - total_fixed_cost) / total_per_tile_cost);
      const int64_t num_tiles = std::min(num_tiles_cache, col_tiles);
      // Scratch buffers sized for the larger of their two uses (input tiles
      // vs. gemm/output-transform results).
      const int64_t buffer1_tile_size =
          tile_spatial_size * num_tiles * in_depth;
      const int64_t buffer1_out_size =
          tile_spatial_size * num_tiles * out_depth * filter_shard_size;
      const int64_t buffer1_size =
          std::max(buffer1_tile_size, buffer1_out_size);
      Tensor buffer1_tensor;
      OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                             TensorShape({buffer1_size}),
                                             &buffer1_tensor));
      T* buffer1 = buffer1_tensor.template flat<T>().data();
      const int64_t buffer2_tile_transform_size =
          tile_spatial_size * num_tiles * in_depth;
      const int64_t buffer2_out_transform_size =
          out_tile_spatial_size * num_tiles * out_depth * filter_shard_size;
      const int64_t buffer2_size =
          std::max(buffer2_tile_transform_size, buffer2_out_transform_size);
      Tensor buffer2_tensor;
      OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                             TensorShape({buffer2_size}),
                                             &buffer2_tensor));
      T* buffer2 = buffer2_tensor.template flat<T>().data();
      Tensor packed_tile_tensor;
      OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                             TensorShape({num_tiles, in_depth}),
                                             &packed_tile_tensor));
      T* packed_tile_buffer = packed_tile_tensor.template flat<T>().data();
      Tensor gemm_output_tensor;
      OP_REQUIRES_OK(ctx, ctx->allocate_temp(DataTypeToEnum<T>::value,
                                             TensorShape({num_tiles, out_depth,
                                                          filter_shards_row,
                                                          filter_shards_col}),
                                             &gemm_output_tensor));
      T* gemm_output_buffer = gemm_output_tensor.template flat<T>().data();
      Conv2DState<T> conv_state(tile_spatial_size, filter_shards_row,
                                filter_shards_col, input, tile_transform_matrix,
                                output_transform_matrix, buffer1, buffer2,
                                packed_tile_buffer, gemm_output_buffer);
      const int64_t row_pad = args.pad_rows;
      const int64_t col_pad = args.pad_cols;
      const int64_t unroll_col_limit = (col_tiles / num_tiles) * num_tiles;
      const int64_t input_image_size = args.in_rows * args.in_cols * in_depth;
      const int64_t output_image_size =
          args.out_rows * args.out_cols * out_depth;
      const int64_t tile_stride_rows = transform->output_shape().rows;
      const int64_t tile_stride_cols = transform->output_shape().cols;
      // Iterate batch images, then tile rows, then groups of num_tiles
      // columns; a final call handles the column remainder.
      for (int64_t b = batch_start; b < batch_limit; ++b) {
        const int64_t in_base = b * input_image_size;
        const int64_t out_base = b * output_image_size;
        for (int64_t tile_r = 0; tile_r < row_tiles; ++tile_r) {
          const int64_t in_r = tile_r * tile_stride_rows - row_pad;
          for (int64_t tile_c = 0; tile_c < unroll_col_limit;
               tile_c += num_tiles) {
            const int64_t in_c = tile_c * tile_stride_cols - col_pad;
            ComputeConv2D<T>()(args, transform.get(), conv_state, in_r, in_c,
                               num_tiles, packed_filters, input + in_base,
                               output + out_base);
          }
          if (unroll_col_limit < col_tiles) {
            const int64_t rem_tiles = col_tiles - unroll_col_limit;
            const int64_t in_c = unroll_col_limit * tile_stride_cols - col_pad;
            ComputeConv2D<T>()(args, transform.get(), conv_state, in_r, in_c,
                               rem_tiles, packed_filters, input + in_base,
                               output + out_base);
          }
        }
      }
    };
    auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
    const int64_t shard_cost = args.out_rows * args.out_cols * args.out_depth *
                               tile_spatial_size * args.in_depth;
    Shard(worker_threads.num_threads, worker_threads.workers, args.batch,
          shard_cost, shard);
  }
};
}
template struct functor::DeepConv2D<CPUDevice, float>;
} | #include "tensorflow/core/kernels/winograd_transform.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
// Computes the Kronecker product of `matrix` (rows x cols) with itself.
// `matrix_out` must have room for (rows * rows) x (cols * cols) floats.
static void ComputeKroneckerProduct(const int rows, const int cols,
                                    const float* matrix, float* matrix_out) {
  for (int i = 0; i < rows; ++i) {
    for (int j = 0; j < cols; ++j) {
      // Element (i, j) scales an entire copy of `matrix` in the output.
      const float scale = matrix[i * cols + j];
      const int out_base = cols * (i * rows * cols + j);
      for (int k = 0; k < rows; ++k) {
        for (int l = 0; l < cols; ++l) {
          matrix_out[out_base + k * cols * cols + l] =
              scale * matrix[k * cols + l];
        }
      }
    }
  }
}
TEST(DeepConv2DTransformTest, Basic) {
  // Sanity check: Kronecker product of a 2x2 matrix with itself matches a
  // hand-computed expectation.
  const int rows = 2;
  const int cols = 2;
  float transform_matrix[] = {1, 2, 3, 4};
  const int kron_rows = rows * rows;
  const int kron_cols = cols * cols;
  float transform_matrix_kron[kron_rows * kron_cols];
  ComputeKroneckerProduct(rows, cols, &transform_matrix[0],
                          &transform_matrix_kron[0]);
  float transform_matrix_test[] = {1, 2, 2, 4,  3, 4,  6,  8,
                                   3, 6, 4, 8,  9, 12, 12, 16};
  for (int i = 0; i < kron_rows * kron_cols; ++i) {
    EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]);
  }
}
// NOTE(review): "Wingrad" in the test name is a typo for "Winograd"; the
// name is kept as-is since gtest filters may reference it.
TEST(DeepConv2DTransformTest, WingradFilterTransformMatrix) {
  // The 2D Winograd filter transform matrix must equal the Kronecker product
  // of the 1D transform matrix with itself.
  const int rows = 4;
  const int cols = 3;
  float transform_matrix[] = {1, 0, 0, 0.5, 0.5, 0.5, 0.5, -0.5, 0.5, 0, 0, 1};
  const int kron_rows = rows * rows;
  const int kron_cols = cols * cols;
  float transform_matrix_kron[kron_rows * kron_cols];
  ComputeKroneckerProduct(rows, cols, &transform_matrix[0],
                          &transform_matrix_kron[0]);
  float transform_matrix_test[kron_rows * kron_cols];
  WinogradTransform<float> t;
  t.GetFilterTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]);
  for (int i = 0; i < kron_rows * kron_cols; ++i) {
    EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]);
  }
}
// NOTE(review): "Wingrad" in the test name is a typo for "Winograd"; kept
// as-is since gtest filters may reference it.
TEST(DeepConv2DTransformTest, WingradInputTransformMatrix) {
  // The 2D Winograd input transform matrix must equal the Kronecker product
  // of the 1D transform matrix with itself.
  const int rows = 4;
  const int cols = 4;
  float transform_matrix[] = {1, 0, -1, 0, 0, 1, 1,  0,
                              0, -1, 1, 0, 0, 1, 0, -1};
  const int kron_rows = rows * rows;
  const int kron_cols = cols * cols;
  float transform_matrix_kron[kron_rows * kron_cols];
  ComputeKroneckerProduct(rows, cols, &transform_matrix[0],
                          &transform_matrix_kron[0]);
  float transform_matrix_test[kron_rows * kron_cols];
  WinogradTransform<float> t;
  t.GetInputTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]);
  for (int i = 0; i < kron_rows * kron_cols; ++i) {
    EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]);
  }
}
// NOTE(review): "Wingrad" in the test name is a typo for "Winograd"; kept
// as-is since gtest filters may reference it.
TEST(DeepConv2DTransformTest, WingradOutputTransformMatrix) {
  // The 2D Winograd output transform matrix must equal the Kronecker product
  // of the 1D transform matrix with itself.
  const int rows = 2;
  const int cols = 4;
  float transform_matrix[] = {1, 1, 1, 0, 0, 1, -1, -1};
  const int kron_rows = rows * rows;
  const int kron_cols = cols * cols;
  float transform_matrix_kron[kron_rows * kron_cols];
  ComputeKroneckerProduct(rows, cols, &transform_matrix[0],
                          &transform_matrix_kron[0]);
  float transform_matrix_test[kron_rows * kron_cols];
  WinogradTransform<float> t;
  t.GetOutputTransformMatrix(kron_rows, kron_cols, &transform_matrix_test[0]);
  for (int i = 0; i < kron_rows * kron_cols; ++i) {
    EXPECT_FLOAT_EQ(transform_matrix_kron[i], transform_matrix_test[i]);
  }
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/deep_conv2d.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/deep_conv2d_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
695cb3a0-9478-4b86-ac63-a285b09c8693 | cpp | google/arolla | expr_node | arolla/expr/expr_node.cc | arolla/expr/expr_node_test.cc | #include "arolla/expr/expr_node.h"
#include <cstddef>
#include <deque>
#include <memory>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/cleanup/cleanup.h"
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/qtype/typed_value.h"
#include "arolla/util/fingerprint.h"
namespace arolla::expr {
// Streams a human-readable name for the node type; unknown values (e.g.
// produced by a cast) are printed as "ExprNodeType(<int>)".
std::ostream& operator<<(std::ostream& os, ExprNodeType t) {
  const char* name = nullptr;
  switch (t) {
    case expr::ExprNodeType::kLiteral:
      name = "kLiteral";
      break;
    case expr::ExprNodeType::kLeaf:
      name = "kLeaf";
      break;
    case expr::ExprNodeType::kOperator:
      name = "kOperator";
      break;
    case expr::ExprNodeType::kPlaceholder:
      name = "kPlaceholder";
      break;
  }
  if (name != nullptr) {
    return os << name;
  }
  return os << "ExprNodeType(" << static_cast<int>(t) << ")";
}
// Creates a literal node holding `qvalue`. The node's fingerprint is derived
// from the value's fingerprint, so equal values produce equal nodes.
ExprNodePtr ExprNode::MakeLiteralNode(TypedValue&& qvalue) {
  // Hash before moving qvalue into the attributes: GetFingerprint() must be
  // called on the still-valid value.
  FingerprintHasher hasher("LiteralNode");
  hasher.Combine(qvalue.GetFingerprint());
  auto self = std::make_unique<ExprNode>(PrivateConstructorTag());
  self->type_ = ExprNodeType::kLiteral;
  self->attr_ = ExprAttributes(std::move(qvalue));
  self->fingerprint_ = std::move(hasher).Finish();
  return ExprNodePtr::Own(std::move(self));
}
// Creates a leaf node for `leaf_key`. A leaf's identity (fingerprint) is
// fully determined by its key.
ExprNodePtr ExprNode::MakeLeafNode(absl::string_view leaf_key) {
  auto node = std::make_unique<ExprNode>(PrivateConstructorTag());
  node->fingerprint_ =
      FingerprintHasher("LeafNode").Combine(leaf_key).Finish();
  node->leaf_key_ = std::string(leaf_key);
  node->type_ = ExprNodeType::kLeaf;
  return ExprNodePtr::Own(std::move(node));
}
// Creates a placeholder node for `placeholder_key`. A placeholder's identity
// (fingerprint) is fully determined by its key.
ExprNodePtr ExprNode::MakePlaceholderNode(absl::string_view placeholder_key) {
  auto node = std::make_unique<ExprNode>(PrivateConstructorTag());
  node->fingerprint_ =
      FingerprintHasher("PlaceholderNode").Combine(placeholder_key).Finish();
  node->placeholder_key_ = std::string(placeholder_key);
  node->type_ = ExprNodeType::kPlaceholder;
  return ExprNodePtr::Own(std::move(node));
}
ExprNodePtr ExprNode::UnsafeMakeOperatorNode(
ExprOperatorPtr&& op, std::vector<ExprNodePtr>&& node_deps,
ExprAttributes&& attr) {
FingerprintHasher hasher("OpNode");
DCHECK(op);
hasher.Combine(op->fingerprint());
for (const auto& node_dep : node_deps) {
DCHECK(node_dep != nullptr);
hasher.Combine(node_dep->fingerprint());
}
hasher.Combine(attr);
auto self = std::make_unique<ExprNode>(PrivateConstructorTag());
self->type_ = ExprNodeType::kOperator;
self->op_ = std::move(op);
self->node_deps_ = std::move(node_deps);
self->attr_ = std::move(attr);
self->fingerprint_ = std::move(hasher).Finish();
return ExprNodePtr::Own(std::move(self));
}
ExprNode::~ExprNode() {
if (node_deps_.empty()) {
return;
}
constexpr size_t kMaxDepth = 32;
thread_local absl::NoDestructor<std::deque<std::vector<ExprNodePtr>>> deps;
thread_local size_t destructor_depth = 0;
if (destructor_depth > kMaxDepth) {
deps->push_back(std::move(node_deps_));
return;
}
destructor_depth++;
absl::Cleanup decrease_depth = [&] { --destructor_depth; };
node_deps_.clear();
if (destructor_depth == 1 && !deps->empty()) {
while (!deps->empty()) {
auto tmp = std::move(deps->back());
deps->pop_back();
}
deps->shrink_to_fit();
}
}
} | #include "arolla/expr/expr_node.h"
#include <memory>
#include <sstream>
#include <vector>
#include "gtest/gtest.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/testing/test_operators.h"
namespace arolla::expr {
namespace {
using ::arolla::expr::testing::DummyOp;
TEST(ExprNodeTest, ExprNodeTypeIsConvertibleToString) {
std::stringstream ss;
ss << ExprNodeType::kLiteral;
EXPECT_EQ(ss.str(), "kLiteral");
ss.str("");
ss << ExprNodeType::kLeaf;
EXPECT_EQ(ss.str(), "kLeaf");
ss.str("");
ss << ExprNodeType::kOperator;
EXPECT_EQ(ss.str(), "kOperator");
ss.str("");
ss << ExprNodeType::kPlaceholder;
EXPECT_EQ(ss.str(), "kPlaceholder");
ss.str("");
ss << static_cast<ExprNodeType>(255);
EXPECT_EQ(ss.str(), "ExprNodeType(255)");
}
TEST(ExprNodeTest, DeepTreeNoStackOverflow) {
#ifndef NDEBUG
constexpr int depth = 50000;
#else
constexpr int depth = 1000000;
#endif
ExprOperatorPtr op = std::make_shared<DummyOp>(
"op.name", ExprOperatorSignature::MakeVariadicArgs());
auto a = ExprNode::MakeLeafNode("a");
auto deep = a;
for (int i = depth; i != 0; --i) {
deep = ExprNode::UnsafeMakeOperatorNode(ExprOperatorPtr(op), {deep, a}, {});
}
}
using ExprNodeMsanTest = ::testing::TestWithParam<ExprNodePtr>;
TEST_P(ExprNodeMsanTest, Msan) {
const auto& expr = GetParam();
ASSERT_NE(expr, nullptr);
}
INSTANTIATE_TEST_SUITE_P(ExprNodeMsanTestSuite, ExprNodeMsanTest,
::testing::ValuesIn([]() -> std::vector<ExprNodePtr> {
constexpr int depth = 64;
ExprOperatorPtr op = std::make_shared<DummyOp>(
"op.name",
ExprOperatorSignature::MakeVariadicArgs());
auto expr = ExprNode::MakeLeafNode("a");
for (int i = depth; i != 0; --i) {
expr = ExprNode::UnsafeMakeOperatorNode(
ExprOperatorPtr(op), {expr}, {});
}
return {{expr}};
}()));
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_node.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/expr_node_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
97498087-bc6d-4463-bd2c-846b5f2c5f4d | cpp | tensorflow/tensorflow | stream_attribute_annotator | third_party/xla/xla/service/gpu/transforms/stream_attribute_annotator.cc | third_party/xla/xla/service/gpu/transforms/stream_attribute_annotator_test.cc | #include "xla/service/gpu/transforms/stream_attribute_annotator.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
bool IsOnlyRootNonDefaultStream(HloComputation* computation) {
HloInstruction* root = computation->root_instruction();
auto root_gpu_config = root->backend_config<GpuBackendConfig>();
if (!root_gpu_config.ok() || root->opcode() == HloOpcode::kTuple) {
return false;
}
int64_t root_stream_id = root_gpu_config->operation_queue_id();
VLOG(2) << "Found fusion computation's root stream id to be "
<< root_stream_id;
if (root_stream_id == Thunk::kDefaultExecutionStreamId.value()) {
return false;
}
for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {
if (instr == root) {
continue;
}
int64_t instr_stream_id =
instr->backend_config<GpuBackendConfig>()->operation_queue_id();
if (instr_stream_id != Thunk::kDefaultExecutionStreamId.value() &&
instr_stream_id != root_stream_id) {
return false;
}
}
return true;
}
absl::StatusOr<bool> AnnotateStreamAttributesForInstruction(
HloInstruction* instr, GpuBackendConfig& instr_gpu_config) {
if (instr->called_computations().size() != 1) {
return false;
}
HloComputation* called_comp = instr->called_computations()[0];
int64_t stream_id = instr_gpu_config.operation_queue_id();
if (!IsOnlyRootNonDefaultStream(called_comp) ||
stream_id != Thunk::kDefaultExecutionStreamId.value()) {
return false;
}
auto comp_root_gpu_config =
called_comp->root_instruction()->backend_config<GpuBackendConfig>();
instr_gpu_config.set_operation_queue_id(
comp_root_gpu_config->operation_queue_id());
*instr_gpu_config.mutable_wait_on_operation_queues() =
comp_root_gpu_config->wait_on_operation_queues();
TF_RETURN_IF_ERROR(instr->set_backend_config(instr_gpu_config));
return true;
}
absl::StatusOr<bool> AnnotateStreamAttributesForCopyStart(
HloInstruction* instr, int64_t channel_id,
GpuBackendConfig& instr_gpu_config) {
if (instr_gpu_config.operation_queue_id() !=
Thunk::kDefaultExecutionStreamId.value()) {
return false;
}
instr_gpu_config.set_operation_queue_id(channel_id);
TF_RETURN_IF_ERROR(instr->set_backend_config(instr_gpu_config));
VLOG(3) << "Add copy-start's backend config: " << channel_id;
return true;
}
absl::StatusOr<bool> WrapIntoFusionAndAnnotateStreamAttributes(
HloInstruction* instruction, int64_t channel_id,
GpuBackendConfig& instr_gpu_config) {
auto* computation = instruction->parent();
auto* module = computation->parent();
auto* fusion_instruction =
computation->AddInstruction(HloInstruction::CreateFusion(
instruction->shape(), ChooseFusionKind(*instruction, *instruction),
instruction));
const absl::string_view wrapped_opcode =
HloOpcodeString(instruction->opcode());
module->SetAndUniquifyInstrName(fusion_instruction,
absl::StrCat("wrapped_", wrapped_opcode));
module->SetAndUniquifyComputationName(
fusion_instruction->fused_instructions_computation(),
absl::StrCat("wrapped_", wrapped_opcode, "_computation"));
if (module->has_schedule()) {
fusion_instruction->set_metadata_scheduling_name(
fusion_instruction->name());
HloInstruction* root = fusion_instruction->fused_expression_root();
root->set_metadata_scheduling_name(root->name());
module->schedule().replace_instruction(computation, instruction,
fusion_instruction);
}
TF_RETURN_IF_ERROR(fusion_instruction->CopyAllControlDepsFrom(instruction));
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(fusion_instruction));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction));
instr_gpu_config.set_operation_queue_id(channel_id);
TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(instr_gpu_config));
VLOG(3) << "Add async stream " << channel_id << " and wrapped instruction "
<< instruction->ToString();
VLOG(3) << " Fusion wrapper: " << fusion_instruction->ToString();
return true;
}
absl::StatusOr<bool> AnnotateStreamAttributesForUsers(
HloInstruction* instr, GpuBackendConfig& instr_gpu_config) {
bool changed = false;
int64_t stream_id = instr_gpu_config.operation_queue_id();
if (stream_id == Thunk::kDefaultExecutionStreamId.value()) {
return changed;
}
std::vector<HloInstruction*> all_consumers;
for (auto user : instr->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
user = user->users()[0];
}
all_consumers.push_back(user);
}
for (auto user : all_consumers) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
user->backend_config<GpuBackendConfig>());
auto it = absl::c_find(gpu_config.wait_on_operation_queues(), stream_id);
if (it == gpu_config.wait_on_operation_queues().end() &&
gpu_config.operation_queue_id() != stream_id) {
gpu_config.mutable_wait_on_operation_queues()->Add(stream_id);
TF_RETURN_IF_ERROR(user->set_backend_config(gpu_config));
changed = true;
}
}
return changed;
}
}
absl::StatusOr<bool> StreamAttributeAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
5, "StreamAttributeAnnotator::Run(), before:\n" + module->ToString());
bool changed = false;
int64_t channel_id = hlo_query::NextChannelId(*module);
for (const HloComputation* comp :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
auto instr_gpu_config = instr->backend_config<GpuBackendConfig>();
if (!instr_gpu_config.ok()) {
continue;
}
if (instr->opcode() == HloOpcode::kFusion) {
TF_ASSIGN_OR_RETURN(bool comp_result,
AnnotateStreamAttributesForInstruction(
instr, instr_gpu_config.value()));
changed |= comp_result;
} else if (instr->opcode() == HloOpcode::kCopyStart) {
TF_ASSIGN_OR_RETURN(bool comp_result,
AnnotateStreamAttributesForCopyStart(
instr, channel_id, instr_gpu_config.value()));
changed |= comp_result;
continue;
} else if (comp->IsAsyncComputation() &&
(instr->opcode() == HloOpcode::kDynamicSlice ||
instr->opcode() == HloOpcode::kDynamicUpdateSlice)) {
TF_ASSIGN_OR_RETURN(bool comp_result,
WrapIntoFusionAndAnnotateStreamAttributes(
instr, channel_id, instr_gpu_config.value()));
changed |= comp_result;
continue;
}
TF_ASSIGN_OR_RETURN(
bool user_result,
AnnotateStreamAttributesForUsers(instr, instr_gpu_config.value()));
changed |= user_result;
}
}
XLA_VLOG_LINES(
5, "StreamAttributeAnnotator::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/gpu/transforms/stream_attribute_annotator.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using StreamAttributeAnnotatorTest = HloTestBase;
TEST_F(StreamAttributeAnnotatorTest, AllUsersAreAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
ENTRY entry {
p1_32 = f32[1] parameter(0)
p2_32 = f32[1] parameter(1)
add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[]}
exp_32 = f32[1] exponential(add_32)
neg32 = f32[1] negate(add_32)
ROOT add_out_32 = f32[1] add(neg32, exp_32)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* add = FindInstruction(module.get(), "add_32");
for (auto user : add->users()) {
EXPECT_TRUE(user->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
user->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.wait_on_operation_queues()[0], 1);
}
}
TEST_F(StreamAttributeAnnotatorTest, MultipleStreamsAreCombined) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
ENTRY entry {
p1_32 = f32[1] parameter(0)
p2_32 = f32[1] parameter(1)
add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[]}
exp_32 = f32[1] exponential(p2_32), backend_config={"operation_queue_id":"2", "wait_on_operation_queues":[]}
ROOT add_out_32 = f32[1] add(add_32, exp_32)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_TRUE(root->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
root->backend_config<GpuBackendConfig>());
std::vector<int64_t> expected_stream_ids = {1, 2};
for (auto id : expected_stream_ids) {
auto it = absl::c_find(gpu_config.wait_on_operation_queues(), id);
EXPECT_NE(it, gpu_config.wait_on_operation_queues().end());
}
}
TEST_F(StreamAttributeAnnotatorTest, GTEUserIsAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
ENTRY entry {
p1_32 = f32[16,32] parameter(0)
p2_32 = f32[32,16] parameter(1)
custom-call.3 = (f32[16,16], s8[1028]{0}) custom-call(p1_32, p2_32), custom_call_target="__cublas$gemm", backend_config={"operation_queue_id":"1","wait_on_operation_queues":[],"gemm_backend_config":{"alpha_real":1,"alpha_imag":0,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":["1"],"rhs_contracting_dimensions":["0"],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT","grad_x":false,"grad_y":false}}
get-tuple-element.24 = f32[16,16] get-tuple-element(custom-call.3), index=0
exp_32 = f32[16,16] exponential(get-tuple-element.24)
ROOT neg32 = f32[16,16] negate(exp_32)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* exp = FindInstruction(module.get(), "exp_32");
EXPECT_TRUE(exp->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
exp->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.wait_on_operation_queues()[0], 1);
}
TEST_F(StreamAttributeAnnotatorTest, FusionIsAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithFusion
fused_computation.1 {
fusion_p0_32 = f32[16,16] parameter(0)
fusion_p2_32 = f32[16,16] parameter(1)
ROOT add = f32[16,16] add(fusion_p0_32, fusion_p2_32), backend_config={"operation_queue_id":"1","wait_on_operation_queues":[]}
}
ENTRY entry {
p1_32 = f32[16,16] parameter(0)
p2_32 = f32[16,16] parameter(1)
ROOT fusion.1 = f32[16,16] fusion(p1_32, p2_32), kind=kLoop, calls=fused_computation.1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* fusion = FindInstruction(module.get(), "fusion.1");
EXPECT_TRUE(fusion->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
fusion->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
}
TEST_F(StreamAttributeAnnotatorTest, CopyStartIsAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule offloading
ENTRY %main (param_0: f32[1024], param_1: f32[1024]) -> f32[1024] {
%param_1 = f32[1024]{0} parameter(1)
%param_0 = f32[1024]{0} parameter(0)
%res_3 = f32[1024]{0} add(f32[1024]{0} %param_0, f32[1024]{0} %param_1)
%copy-start = (f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) copy-start(f32[1024]{0} %res_3)
%res_4 = f32[1024]{0} tanh(f32[1024]{0} %res_3)
%copy-start.2 = (f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) copy-start(f32[1024]{0} %res_4)
%res_5 = f32[1024]{0} tanh(f32[1024]{0} %res_4)
%copy-done = f32[1024]{0:S(5)} copy-done((f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) %copy-start)
%res_6 = f32[1024]{0} tanh(f32[1024]{0} %res_5)
%copy-done.2 = f32[1024]{0:S(5)} copy-done((f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) %copy-start.2)
%copy-start.3 = (f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) copy-start(f32[1024]{0:S(5)} %copy-done.2)
%res_7 = f32[1024]{0} add(f32[1024]{0} %res_6, f32[1024]{0} %res_6)
%copy-start.1 = (f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) copy-start(f32[1024]{0:S(5)} %copy-done)
%res_8 = f32[1024]{0} add(f32[1024]{0} %res_7, f32[1024]{0} %res_5)
%copy-done.3 = f32[1024]{0} copy-done((f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) %copy-start.3)
%res_9 = f32[1024]{0} add(f32[1024]{0} %res_8, f32[1024]{0} %copy-done.3)
%copy-done.1 = f32[1024]{0} copy-done((f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) %copy-start.1)
%res_10 = f32[1024]{0} add(f32[1024]{0} %res_9, f32[1024]{0} %copy-done.1)
ROOT %res_11 = f32[1024]{0} tanh(f32[1024]{0} %res_10)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
for (std::string i : {"", ".1", ".2", ".3"}) {
const HloInstruction* cp_start =
FindInstruction(module.get(), "copy-start" + i);
EXPECT_TRUE(cp_start->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
cp_start->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
}
}
TEST_F(StreamAttributeAnnotatorTest, DynamicUpdateSliceWrappedAndAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsyncDynamicUpdateSlice, is_scheduled=true
ENTRY entry (param_0: f32[256,128,128], param_1: f32[1,128,128]) -> f32[256,128,128] {
param_0 = f32[256,128,128]{2,1,0:S(5)} parameter(0), metadata={scheduling_name="param_0"}
param_1 = f32[1,128,128]{2,1,0} parameter(1), metadata={scheduling_name="param_1"}
izero = s32[] constant(0), metadata={scheduling_name="izero"}
dynamic-update-slice-start.2 = ((f32[256,128,128]{2,1,0:S(5)}, f32[1,128,128]{2,1,0}, s32[], s32[], s32[]), f32[256,128,128]{2,1,0:S(5)}, u32[])
dynamic-update-slice-start(param_0, param_1, izero, izero, izero),
metadata={scheduling_name="dynamic-update-slice-start.2"}
ROOT dynamic-update-slice-done.2 = f32[256,128,128]{2,1,0:S(5)}
dynamic-update-slice-done(dynamic-update-slice-start.2),
metadata={scheduling_name="dynamic-update-slice-done.2"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(bool changed,
StreamAttributeAnnotator().Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* dus =
FindInstruction(module.get(), HloOpcode::kDynamicUpdateSlice);
const HloComputation* computation = dus->parent();
EXPECT_TRUE(computation->IsFusionComputation());
const HloInstruction* fusion = computation->FusionInstruction();
EXPECT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(fusion->parent()->IsAsyncComputation());
EXPECT_TRUE(fusion->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
fusion->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
for (const auto* comp : module->computations()) {
for (const auto* instruction : comp->instructions()) {
if (!instruction->metadata().scheduling_name().empty()) {
EXPECT_EQ(instruction->name(),
instruction->metadata().scheduling_name());
}
}
}
constexpr absl::string_view kExpectedSchedulingName = R"(
)";
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions().set_print_operand_shape(false)),
kExpectedSchedulingName));
EXPECT_TRUE(filecheck_matches);
}
TEST_F(StreamAttributeAnnotatorTest, DynamicSliceWrappedAndAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsyncDynamicSlice, is_scheduled=true
ENTRY entry (param_0: f32[256,128,128]) -> f32[1,128,128] {
param_0 = f32[256,128,128]{2,1,0:S(5)} parameter(0), metadata={scheduling_name="param_0"}
izero = s32[] constant(0), metadata={scheduling_name="izero"}
dynamic-slice-start.2 = ((f32[256,128,128]{2,1,0:S(5)}, s32[], s32[], s32[]), f32[1,128,128]{2,1,0}, u32[])
dynamic-slice-start(param_0, izero, izero, izero), dynamic_slice_sizes={1,128,128},
metadata={scheduling_name="dynamic-slice-start.2"}
ROOT dynamic-slice-done.2 = f32[1,128,128]{2,1,0}
dynamic-slice-done(dynamic-slice-start.2),
metadata={scheduling_name="dynamic-slice-done.2"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
EXPECT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(bool changed,
StreamAttributeAnnotator().Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* ds =
FindInstruction(module.get(), HloOpcode::kDynamicSlice);
const HloComputation* computation = ds->parent();
EXPECT_TRUE(computation->IsFusionComputation());
const HloInstruction* fusion = computation->FusionInstruction();
EXPECT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(fusion->parent()->IsAsyncComputation());
EXPECT_TRUE(fusion->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
fusion->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
for (const auto* comp : module->computations()) {
for (const auto* instruction : comp->instructions()) {
if (!instruction->metadata().scheduling_name().empty()) {
EXPECT_EQ(instruction->name(),
instruction->metadata().scheduling_name());
}
}
}
constexpr absl::string_view kExpectedSchedulingName = R"(
)";
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions().set_print_operand_shape(false)),
kExpectedSchedulingName));
EXPECT_TRUE(filecheck_matches);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/stream_attribute_annotator.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/stream_attribute_annotator_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
cb674ea6-6720-4736-b815-049e514ec55d | cpp | google/quiche | socket | quiche/quic/core/io/socket.cc | quiche/quic/core/io/socket_test.cc | #include "quiche/quic/core/io/socket.h"
#include <cerrno>
#include <climits>
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/core/io/socket_internal.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/common/platform/api/quiche_logging.h"
#if defined(_WIN32)
#include "quiche/quic/core/io/socket_win.inc"
#else
#include "quiche/quic/core/io/socket_posix.inc"
#endif
namespace quic::socket_api {
namespace {
absl::StatusOr<AcceptResult> AcceptInternal(SocketFd fd) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
sockaddr_storage peer_addr;
PlatformSocklen peer_addr_len = sizeof(peer_addr);
SocketFd connection_socket = SyscallAccept(
fd, reinterpret_cast<struct sockaddr*>(&peer_addr), &peer_addr_len);
if (connection_socket == kInvalidSocketFd) {
absl::Status status = LastSocketOperationError("::accept()");
QUICHE_DVLOG(1) << "Failed to accept connection from socket " << fd
<< " with error: " << status;
return status;
}
absl::StatusOr<QuicSocketAddress> peer_address =
ValidateAndConvertAddress(peer_addr, peer_addr_len);
if (peer_address.ok()) {
return AcceptResult{connection_socket, *peer_address};
} else {
return peer_address.status();
}
}
absl::Status SetSockOptInt(SocketFd fd, int level, int option, int value) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
int result = SyscallSetsockopt(fd, level, option, &value, sizeof(value));
if (result >= 0) {
return absl::OkStatus();
} else {
absl::Status status = LastSocketOperationError("::setsockopt()");
QUICHE_DVLOG(1) << "Failed to set socket " << fd << " option " << option
<< " to " << value << " with error: " << status;
return status;
}
}
}
absl::Status SetReceiveBufferSize(SocketFd fd, QuicByteCount size) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK_LE(size, QuicByteCount{INT_MAX});
return SetSockOptInt(fd, SOL_SOCKET, SO_RCVBUF, static_cast<int>(size));
}
absl::Status SetSendBufferSize(SocketFd fd, QuicByteCount size) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK_LE(size, QuicByteCount{INT_MAX});
return SetSockOptInt(fd, SOL_SOCKET, SO_SNDBUF, static_cast<int>(size));
}
absl::Status Connect(SocketFd fd, const QuicSocketAddress& peer_address) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK(peer_address.IsInitialized());
sockaddr_storage addr = peer_address.generic_address();
PlatformSocklen addrlen = GetAddrlen(peer_address.host().address_family());
int connect_result =
SyscallConnect(fd, reinterpret_cast<sockaddr*>(&addr), addrlen);
if (connect_result >= 0) {
return absl::OkStatus();
} else {
absl::Status status =
LastSocketOperationError("::connect()",
{EINPROGRESS});
QUICHE_DVLOG(1) << "Failed to connect socket " << fd
<< " to address: " << peer_address.ToString()
<< " with error: " << status;
return status;
}
}
absl::Status GetSocketError(SocketFd fd) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
int socket_error = 0;
PlatformSocklen len = sizeof(socket_error);
int sockopt_result =
SyscallGetsockopt(fd, SOL_SOCKET, SO_ERROR, &socket_error, &len);
if (sockopt_result >= 0) {
if (socket_error == 0) {
return absl::OkStatus();
} else {
return ToStatus(socket_error, "SO_ERROR");
}
} else {
absl::Status status = LastSocketOperationError("::getsockopt()");
QUICHE_LOG_FIRST_N(ERROR, 100)
<< "Failed to get socket error information from socket " << fd
<< " with error: " << status;
return status;
}
}
absl::Status Bind(SocketFd fd, const QuicSocketAddress& address) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK(address.IsInitialized());
sockaddr_storage addr = address.generic_address();
PlatformSocklen addr_len = GetAddrlen(address.host().address_family());
int result = SyscallBind(fd, reinterpret_cast<sockaddr*>(&addr), addr_len);
if (result >= 0) {
return absl::OkStatus();
} else {
absl::Status status = LastSocketOperationError("::bind()");
QUICHE_DVLOG(1) << "Failed to bind socket " << fd
<< " to address: " << address.ToString()
<< " with error: " << status;
return status;
}
}
absl::StatusOr<QuicSocketAddress> GetSocketAddress(SocketFd fd) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
sockaddr_storage addr;
PlatformSocklen addr_len = sizeof(addr);
int result =
SyscallGetsockname(fd, reinterpret_cast<sockaddr*>(&addr), &addr_len);
if (result >= 0) {
return ValidateAndConvertAddress(addr, addr_len);
} else {
absl::Status status = LastSocketOperationError("::getsockname()");
QUICHE_DVLOG(1) << "Failed to get socket " << fd
<< " name with error: " << status;
return status;
}
}
absl::Status Listen(SocketFd fd, int backlog) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK_GT(backlog, 0);
int result = SyscallListen(fd, backlog);
if (result >= 0) {
return absl::OkStatus();
} else {
absl::Status status = LastSocketOperationError("::listen()");
QUICHE_DVLOG(1) << "Failed to mark socket: " << fd
<< " to listen with error :" << status;
return status;
}
}
absl::StatusOr<AcceptResult> Accept(SocketFd fd, bool blocking) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
#if defined(HAS_ACCEPT4)
if (!blocking) {
return AcceptWithFlags(fd, SOCK_NONBLOCK);
}
#endif
absl::StatusOr<AcceptResult> accept_result = AcceptInternal(fd);
if (!accept_result.ok() || blocking) {
return accept_result;
}
#if !defined(__linux__) || !defined(SOCK_NONBLOCK)
absl::Status set_non_blocking_result =
SetSocketBlocking(accept_result->fd, false);
if (!set_non_blocking_result.ok()) {
QUICHE_LOG_FIRST_N(ERROR, 100)
<< "Failed to set socket " << fd << " as non-blocking on acceptance.";
if (!Close(accept_result->fd).ok()) {
QUICHE_LOG_FIRST_N(ERROR, 100)
<< "Failed to close socket " << accept_result->fd
<< " after error setting non-blocking on acceptance.";
}
return set_non_blocking_result;
}
#endif
return accept_result;
}
absl::StatusOr<absl::Span<char>> Receive(SocketFd fd, absl::Span<char> buffer,
bool peek) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK(!buffer.empty());
PlatformSsizeT num_read = SyscallRecv(fd, buffer.data(), buffer.size(),
peek ? MSG_PEEK : 0);
if (num_read > 0 && static_cast<size_t>(num_read) > buffer.size()) {
QUICHE_LOG_FIRST_N(WARNING, 100)
<< "Received more bytes (" << num_read << ") from socket " << fd
<< " than buffer size (" << buffer.size() << ").";
return absl::OutOfRangeError(
"::recv(): Received more bytes than buffer size.");
} else if (num_read >= 0) {
return buffer.subspan(0, num_read);
} else {
absl::Status status = LastSocketOperationError("::recv()");
QUICHE_DVLOG(1) << "Failed to receive from socket: " << fd
<< " with error: " << status;
return status;
}
}
absl::StatusOr<absl::string_view> Send(SocketFd fd, absl::string_view buffer) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK(!buffer.empty());
PlatformSsizeT num_sent =
SyscallSend(fd, buffer.data(), buffer.size(), 0);
if (num_sent > 0 && static_cast<size_t>(num_sent) > buffer.size()) {
QUICHE_LOG_FIRST_N(WARNING, 100)
<< "Sent more bytes (" << num_sent << ") to socket " << fd
<< " than buffer size (" << buffer.size() << ").";
return absl::OutOfRangeError("::send(): Sent more bytes than buffer size.");
} else if (num_sent >= 0) {
return buffer.substr(num_sent);
} else {
absl::Status status = LastSocketOperationError("::send()");
QUICHE_DVLOG(1) << "Failed to send to socket: " << fd
<< " with error: " << status;
return status;
}
}
absl::StatusOr<absl::string_view> SendTo(SocketFd fd,
const QuicSocketAddress& peer_address,
absl::string_view buffer) {
QUICHE_DCHECK_NE(fd, kInvalidSocketFd);
QUICHE_DCHECK(peer_address.IsInitialized());
QUICHE_DCHECK(!buffer.empty());
sockaddr_storage addr = peer_address.generic_address();
PlatformSocklen addrlen = GetAddrlen(peer_address.host().address_family());
PlatformSsizeT num_sent =
SyscallSendTo(fd, buffer.data(), buffer.size(),
0, reinterpret_cast<sockaddr*>(&addr), addrlen);
if (num_sent > 0 && static_cast<size_t>(num_sent) > buffer.size()) {
QUICHE_LOG_FIRST_N(WARNING, 100)
<< "Sent more bytes (" << num_sent << ") to socket " << fd
<< " to address: " << peer_address.ToString() << " than buffer size ("
<< buffer.size() << ").";
return absl::OutOfRangeError(
"::sendto(): Sent more bytes than buffer size.");
} else if (num_sent >= 0) {
return buffer.substr(num_sent);
} else {
absl::Status status = LastSocketOperationError("::sendto()");
QUICHE_DVLOG(1) << "Failed to send to socket: " << fd
<< " to address: " << peer_address.ToString()
<< " with error: " << status;
return status;
}
}
} | #include "quiche/quic/core/io/socket.h"
#include <string>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_ip_address_family.h"
#include "quiche/quic/platform/api/quic_socket_address.h"
#include "quiche/quic/test_tools/test_ip_packets.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
#include "quiche/common/platform/api/quiche_test_loopback.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace quic::test {
namespace {
using quiche::test::QuicheTest;
using quiche::test::StatusIs;
using testing::Lt;
using testing::SizeIs;
SocketFd CreateTestSocket(socket_api::SocketProtocol protocol,
bool blocking = true) {
absl::StatusOr<SocketFd> socket = socket_api::CreateSocket(
quiche::TestLoopback().address_family(), protocol, blocking);
if (socket.ok()) {
return socket.value();
} else {
QUICHE_CHECK(false);
return kInvalidSocketFd;
}
}
SocketFd CreateTestRawSocket(
bool blocking = true,
IpAddressFamily address_family = IpAddressFamily::IP_UNSPEC) {
absl::StatusOr<SocketFd> socket;
switch (address_family) {
case IpAddressFamily::IP_V4:
socket = socket_api::CreateSocket(
quiche::TestLoopback4().address_family(),
socket_api::SocketProtocol::kRawIp, blocking);
break;
case IpAddressFamily::IP_V6:
socket = socket_api::CreateSocket(
quiche::TestLoopback6().address_family(),
socket_api::SocketProtocol::kRawIp, blocking);
break;
case IpAddressFamily::IP_UNSPEC:
socket = socket_api::CreateSocket(quiche::TestLoopback().address_family(),
socket_api::SocketProtocol::kRawIp,
blocking);
break;
}
if (socket.ok()) {
return socket.value();
} else {
QUICHE_CHECK(absl::IsPermissionDenied(socket.status()) ||
absl::IsNotFound(socket.status()));
return kInvalidSocketFd;
}
}
TEST(SocketTest, CreateAndCloseSocket) {
QuicIpAddress localhost_address = quiche::TestLoopback();
absl::StatusOr<SocketFd> created_socket = socket_api::CreateSocket(
localhost_address.address_family(), socket_api::SocketProtocol::kUdp);
QUICHE_EXPECT_OK(created_socket.status());
QUICHE_EXPECT_OK(socket_api::Close(created_socket.value()));
}
TEST(SocketTest, CreateAndCloseRawSocket) {
QuicIpAddress localhost_address = quiche::TestLoopback();
absl::StatusOr<SocketFd> created_socket = socket_api::CreateSocket(
localhost_address.address_family(), socket_api::SocketProtocol::kRawIp);
if (!created_socket.ok()) {
EXPECT_THAT(created_socket.status(),
StatusIs(absl::StatusCode::kPermissionDenied));
return;
}
QUICHE_EXPECT_OK(socket_api::Close(created_socket.value()));
}
TEST(SocketTest, SetSocketBlocking) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
true);
QUICHE_EXPECT_OK(socket_api::SetSocketBlocking(socket, false));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SetReceiveBufferSize) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
true);
QUICHE_EXPECT_OK(socket_api::SetReceiveBufferSize(socket, 100));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SetSendBufferSize) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
true);
QUICHE_EXPECT_OK(socket_api::SetSendBufferSize(socket, 100));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SetIpHeaderIncludedForRaw) {
SocketFd socket =
CreateTestRawSocket(true, IpAddressFamily::IP_V4);
if (socket == kInvalidSocketFd) {
GTEST_SKIP();
}
QUICHE_EXPECT_OK(socket_api::SetIpHeaderIncluded(
socket, IpAddressFamily::IP_V4, true));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SetIpHeaderIncludedForRawV6) {
SocketFd socket =
CreateTestRawSocket(true, IpAddressFamily::IP_V6);
if (socket == kInvalidSocketFd) {
GTEST_SKIP();
}
QUICHE_EXPECT_OK(socket_api::SetIpHeaderIncluded(
socket, IpAddressFamily::IP_V6, true));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SetIpHeaderIncludedForUdp) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
true);
EXPECT_THAT(socket_api::SetIpHeaderIncluded(socket, IpAddressFamily::IP_V4,
true),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(socket_api::SetIpHeaderIncluded(socket, IpAddressFamily::IP_V6,
true),
StatusIs(absl::StatusCode::kInvalidArgument));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, Connect) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
QUICHE_EXPECT_OK(socket_api::Connect(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, GetSocketError) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
true);
absl::Status error = socket_api::GetSocketError(socket);
QUICHE_EXPECT_OK(error);
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, Bind) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
QUICHE_EXPECT_OK(socket_api::Bind(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, GetSocketAddress) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
QUICHE_ASSERT_OK(socket_api::Bind(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
absl::StatusOr<QuicSocketAddress> address =
socket_api::GetSocketAddress(socket);
QUICHE_EXPECT_OK(address);
EXPECT_TRUE(address.value().IsInitialized());
EXPECT_EQ(address.value().host(), quiche::TestLoopback());
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, Listen) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kTcp);
QUICHE_ASSERT_OK(socket_api::Bind(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
QUICHE_EXPECT_OK(socket_api::Listen(socket, 5));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, Accept) {
SocketFd socket =
CreateTestSocket(socket_api::SocketProtocol::kTcp, false);
QUICHE_ASSERT_OK(socket_api::Bind(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
QUICHE_ASSERT_OK(socket_api::Listen(socket, 5));
absl::StatusOr<socket_api::AcceptResult> result = socket_api::Accept(socket);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kUnavailable));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, Receive) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
false);
QUICHE_ASSERT_OK(socket_api::Bind(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
std::string buffer(100, 0);
absl::StatusOr<absl::Span<char>> result =
socket_api::Receive(socket, absl::MakeSpan(buffer));
EXPECT_THAT(result, StatusIs(absl::StatusCode::kUnavailable));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, Peek) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp,
false);
QUICHE_ASSERT_OK(socket_api::Bind(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
std::string buffer(100, 0);
absl::StatusOr<absl::Span<char>> result =
socket_api::Receive(socket, absl::MakeSpan(buffer), true);
EXPECT_THAT(result, StatusIs(absl::StatusCode::kUnavailable));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, Send) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
QUICHE_ASSERT_OK(socket_api::Connect(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
char buffer[] = {12, 34, 56, 78};
absl::StatusOr<absl::string_view> result =
socket_api::Send(socket, absl::string_view(buffer, sizeof(buffer)));
QUICHE_ASSERT_OK(result.status());
EXPECT_THAT(result.value(), SizeIs(Lt(4)));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SendTo) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
char buffer[] = {12, 34, 56, 78};
absl::StatusOr<absl::string_view> result = socket_api::SendTo(
socket, QuicSocketAddress(quiche::TestLoopback(), 57290),
absl::string_view(buffer, sizeof(buffer)));
QUICHE_ASSERT_OK(result.status());
EXPECT_THAT(result.value(), SizeIs(Lt(4)));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SendToWithConnection) {
SocketFd socket = CreateTestSocket(socket_api::SocketProtocol::kUdp);
QUICHE_ASSERT_OK(socket_api::Connect(
socket, QuicSocketAddress(quiche::TestLoopback(), 0)));
char buffer[] = {12, 34, 56, 78};
absl::StatusOr<absl::string_view> result = socket_api::SendTo(
socket, QuicSocketAddress(quiche::TestLoopback(), 50495),
absl::string_view(buffer, sizeof(buffer)));
QUICHE_ASSERT_OK(result.status());
EXPECT_THAT(result.value(), SizeIs(Lt(4)));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SendToForRaw) {
SocketFd socket = CreateTestRawSocket(true);
if (socket == kInvalidSocketFd) {
GTEST_SKIP();
}
QuicIpAddress localhost_address = quiche::TestLoopback();
QUICHE_EXPECT_OK(socket_api::SetIpHeaderIncluded(
socket, localhost_address.address_family(),
false));
QuicSocketAddress client_address(localhost_address, 53368);
QuicSocketAddress server_address(localhost_address, 56362);
std::string packet = CreateUdpPacket(client_address, server_address, "foo");
absl::StatusOr<absl::string_view> result = socket_api::SendTo(
socket, QuicSocketAddress(localhost_address, 56362), packet);
QUICHE_ASSERT_OK(result.status());
EXPECT_THAT(result.value(), SizeIs(Lt(packet.size())));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
TEST(SocketTest, SendToForRawWithIpHeader) {
SocketFd socket = CreateTestRawSocket(true);
if (socket == kInvalidSocketFd) {
GTEST_SKIP();
}
QuicIpAddress localhost_address = quiche::TestLoopback();
QUICHE_EXPECT_OK(socket_api::SetIpHeaderIncluded(
socket, localhost_address.address_family(), true));
QuicSocketAddress client_address(localhost_address, 53368);
QuicSocketAddress server_address(localhost_address, 56362);
std::string packet =
CreateIpPacket(client_address.host(), server_address.host(),
CreateUdpPacket(client_address, server_address, "foo"));
absl::StatusOr<absl::string_view> result = socket_api::SendTo(
socket, QuicSocketAddress(localhost_address, 56362), packet);
QUICHE_ASSERT_OK(result.status());
EXPECT_THAT(result.value(), SizeIs(Lt(packet.size())));
QUICHE_EXPECT_OK(socket_api::Close(socket));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/io/socket.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/io/socket_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
c52c1cca-1aa8-4857-935d-25c61d856f28 | cpp | google/quiche | quic_connection_context | quiche/quic/core/quic_connection_context.cc | quiche/quic/core/quic_connection_context_test.cc | #include "quiche/quic/core/quic_connection_context.h"
#include "absl/base/attributes.h"
namespace quic {
namespace {
ABSL_CONST_INIT thread_local QuicConnectionContext* current_context = nullptr;
}
QuicConnectionContext* QuicConnectionContext::Current() {
return current_context;
}
QuicConnectionContextSwitcher::QuicConnectionContextSwitcher(
QuicConnectionContext* new_context)
: old_context_(QuicConnectionContext::Current()) {
current_context = new_context;
if (new_context && new_context->listener) {
new_context->listener->Activate();
}
}
QuicConnectionContextSwitcher::~QuicConnectionContextSwitcher() {
QuicConnectionContext* current = QuicConnectionContext::Current();
if (current && current->listener) {
current->listener->Deactivate();
}
current_context = old_context_;
}
} | #include "quiche/quic/core/quic_connection_context.h"
#include <memory>
#include <string>
#include <vector>
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/platform/api/quic_thread.h"
using testing::ElementsAre;
namespace quic::test {
namespace {
class TraceCollector : public QuicConnectionTracer {
public:
~TraceCollector() override = default;
void PrintLiteral(const char* literal) override { trace_.push_back(literal); }
void PrintString(absl::string_view s) override {
trace_.push_back(std::string(s));
}
const std::vector<std::string>& trace() const { return trace_; }
private:
std::vector<std::string> trace_;
};
struct FakeConnection {
FakeConnection() { context.tracer = std::make_unique<TraceCollector>(); }
const std::vector<std::string>& trace() const {
return static_cast<const TraceCollector*>(context.tracer.get())->trace();
}
QuicConnectionContext context;
};
void SimpleSwitch() {
FakeConnection connection;
EXPECT_EQ(QuicConnectionContext::Current(), nullptr);
QUIC_TRACELITERAL("before switch: literal");
QUIC_TRACESTRING(std::string("before switch: string"));
QUIC_TRACEPRINTF("%s: %s", "before switch", "printf");
{
QuicConnectionContextSwitcher switcher(&connection.context);
QUIC_TRACELITERAL("literal");
QUIC_TRACESTRING(std::string("string"));
QUIC_TRACEPRINTF("%s", "printf");
}
EXPECT_EQ(QuicConnectionContext::Current(), nullptr);
QUIC_TRACELITERAL("after switch: literal");
QUIC_TRACESTRING(std::string("after switch: string"));
QUIC_TRACEPRINTF("%s: %s", "after switch", "printf");
EXPECT_THAT(connection.trace(), ElementsAre("literal", "string", "printf"));
}
void NestedSwitch() {
FakeConnection outer, inner;
{
QuicConnectionContextSwitcher switcher(&outer.context);
QUIC_TRACELITERAL("outer literal 0");
QUIC_TRACESTRING(std::string("outer string 0"));
QUIC_TRACEPRINTF("%s %s %d", "outer", "printf", 0);
{
QuicConnectionContextSwitcher nested_switcher(&inner.context);
QUIC_TRACELITERAL("inner literal");
QUIC_TRACESTRING(std::string("inner string"));
QUIC_TRACEPRINTF("%s %s", "inner", "printf");
}
QUIC_TRACELITERAL("outer literal 1");
QUIC_TRACESTRING(std::string("outer string 1"));
QUIC_TRACEPRINTF("%s %s %d", "outer", "printf", 1);
}
EXPECT_THAT(outer.trace(), ElementsAre("outer literal 0", "outer string 0",
"outer printf 0", "outer literal 1",
"outer string 1", "outer printf 1"));
EXPECT_THAT(inner.trace(),
ElementsAre("inner literal", "inner string", "inner printf"));
}
void AlternatingSwitch() {
FakeConnection zero, one, two;
for (int i = 0; i < 15; ++i) {
FakeConnection* connection =
((i % 3) == 0) ? &zero : (((i % 3) == 1) ? &one : &two);
QuicConnectionContextSwitcher switcher(&connection->context);
QUIC_TRACEPRINTF("%d", i);
}
EXPECT_THAT(zero.trace(), ElementsAre("0", "3", "6", "9", "12"));
EXPECT_THAT(one.trace(), ElementsAre("1", "4", "7", "10", "13"));
EXPECT_THAT(two.trace(), ElementsAre("2", "5", "8", "11", "14"));
}
typedef void (*ThreadFunction)();
template <ThreadFunction func>
class TestThread : public QuicThread {
public:
TestThread() : QuicThread("TestThread") {}
~TestThread() override = default;
protected:
void Run() override { func(); }
};
template <ThreadFunction func>
void RunInThreads(size_t n_threads) {
using ThreadType = TestThread<func>;
std::vector<ThreadType> threads(n_threads);
for (ThreadType& t : threads) {
t.Start();
}
for (ThreadType& t : threads) {
t.Join();
}
}
class QuicConnectionContextTest : public QuicTest {
protected:
};
TEST_F(QuicConnectionContextTest, NullTracerOK) {
FakeConnection connection;
std::unique_ptr<QuicConnectionTracer> tracer;
{
QuicConnectionContextSwitcher switcher(&connection.context);
QUIC_TRACELITERAL("msg 1 recorded");
}
connection.context.tracer.swap(tracer);
{
QuicConnectionContextSwitcher switcher(&connection.context);
QUIC_TRACELITERAL("msg 2 ignored");
}
EXPECT_THAT(static_cast<TraceCollector*>(tracer.get())->trace(),
ElementsAre("msg 1 recorded"));
}
TEST_F(QuicConnectionContextTest, TestSimpleSwitch) {
RunInThreads<SimpleSwitch>(10);
}
TEST_F(QuicConnectionContextTest, TestNestedSwitch) {
RunInThreads<NestedSwitch>(10);
}
TEST_F(QuicConnectionContextTest, TestAlternatingSwitch) {
RunInThreads<AlternatingSwitch>(10);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_connection_context.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_connection_context_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
bbbc2413-3f9b-4c5e-b2d4-176041d08ebe | cpp | tensorflow/tensorflow | parameterized_truncated_normal_op | tensorflow/core/kernels/parameterized_truncated_normal_op.cc | tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc | #define EIGEN_USE_THREADS
#include "tensorflow/core/kernels/parameterized_truncated_normal_op.h"
#include <algorithm>
#include <cmath>
#include <memory>
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/framework/tensor_util.h"
#include "tensorflow/core/kernels/stateless_random_ops.h"
#include "tensorflow/core/lib/random/random_distributions.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/util/guarded_philox_random.h"
#include "tensorflow/core/util/work_sharder.h"
namespace tensorflow {
// Device type aliases used to specialize the sampling functors below.
typedef Eigen::ThreadPoolDevice CPUDevice;
typedef Eigen::GpuDevice GPUDevice;
namespace functor {
using random::PhiloxRandom;

// Upper bound on consecutive rejection-sampling attempts per output value.
// Exceeding it fails the op with an internal error instead of looping
// forever; it also bounds the number of Philox values a sample can consume,
// which the shard-skip arithmetic below relies on.
static constexpr int kMaxIterations = 1000;
// CPU specialization of the truncated normal sampler used by the stateful
// ParameterizedTruncatedNormal kernel.  V1 output layout: the samples for
// batch b occupy the contiguous flat range
// [b * samples_per_batch, min((b + 1) * samples_per_batch, num_elements)).
template <typename T>
struct TruncatedNormalFunctor<CPUDevice, T> {
  // Fills `output` with draws from N(mean, stddev) restricted to
  // [minval, maxval].  Each parameter tensor is either length-1 (broadcast to
  // every batch) or length `num_batches`.  Batches are sharded across the CPU
  // thread pool; Philox is counter-based, so each shard deterministically
  // skips `gen` ahead to its own substream.
  void operator()(OpKernelContext* ctx, const CPUDevice& d, int64_t num_batches,
                  int64_t samples_per_batch, int64_t num_elements,
                  typename TTypes<T>::ConstFlat means,
                  typename TTypes<T>::ConstFlat stddevs,
                  typename TTypes<T>::ConstFlat minvals,
                  typename TTypes<T>::ConstFlat maxvals,
                  const random::PhiloxRandom& gen,
                  typename TTypes<T>::Flat output) {
    // If the truncation window extends at least this many standard deviations
    // past the mean on one side while covering the mean on the other, plain
    // normal draws are accepted often enough to beat the specialized
    // rejection samplers below.
    const T kStdDevsInsideBoundsToUseRandnSampler = T(1.3);
    auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());

    // Samples every batch in [start_batch, limit_batch).
    auto do_work = [samples_per_batch, num_elements, &ctx, &means, &stddevs,
                    &minvals, &maxvals, &gen, &output,
                    kStdDevsInsideBoundsToUseRandnSampler](
                       int64_t start_batch, int64_t limit_batch) {
      // Skip to this shard's private substream.  A sample consumes at most
      // 2 * kMaxIterations random values; Philox yields 4 values per counter
      // increment, hence the (samples_per_batch + 3) / 4 rounding.
      random::PhiloxRandom gen_copy = gen;
      gen_copy.Skip(start_batch * 2 * kMaxIterations * (samples_per_batch + 3) /
                    4);
      using Uniform = random::UniformDistribution<random::PhiloxRandom, T>;
      Uniform dist;
      using Normal = random::NormalDistribution<random::PhiloxRandom, T>;
      Normal normal_dist;

      // Scratch buffers for the vectorized uniform rejection sampler
      // (4 candidates per uniform draw).
      Eigen::array<T, 4> z;
      Eigen::array<T, 4> g;

      for (int64_t b = start_batch; b < limit_batch; ++b) {
        // Length-1 parameter tensors broadcast from index 0.
        T mean = means((means.dimension(0) == 1) ? 0 : b);
        T stddev = stddevs((stddevs.dimension(0) == 1) ? 0 : b);
        T minval = minvals((minvals.dimension(0) == 1) ? 0 : b);
        T maxval = maxvals((maxvals.dimension(0) == 1) ? 0 : b);

        // The last batch may be partial, so clamp to the element count.
        const int64_t limit_sample =
            std::min((b + 1) * samples_per_batch, num_elements);
        int64_t sample = b * samples_per_batch;

        // Require a positive stddev, a non-empty interval, and at least one
        // finite bound.
        OP_REQUIRES(ctx,
                    stddev > T(0) && minval < maxval &&
                        (Eigen::numext::isfinite(minval) ||
                         Eigen::numext::isfinite(maxval)),
                    errors::InvalidArgument("Invalid parameters"));

        int num_iterations = 0;

        // Reflect left-tail-style intervals about the mean (by negating
        // stddev) so the samplers below only handle the right-tail
        // orientation.
        if ((Eigen::numext::isinf(minval) && minval < T(0)) || maxval < mean) {
          std::swap(minval, maxval);
          stddev = -stddev;
        }

        // Standardize the truncation bounds.
        const T normMin = (minval - mean) / stddev;
        const T normMax = (maxval - mean) / stddev;

        // Width threshold separating the uniform-proposal sampler (narrow
        // windows) from the exponential-proposal sampler (wide, one-sided
        // windows) — presumably following Robert's (1995) truncated-normal
        // scheme; TODO confirm against the reference.
        const T sqrtFactor = Eigen::numext::sqrt((normMin * normMin) + T(4));
        const T cutoff =
            T(2) *
            Eigen::numext::exp(T(0.5) +
                               (normMin * (normMin - sqrtFactor)) / T(4)) /
            (normMin + sqrtFactor);
        const T diff = normMax - normMin;

        if (((normMin < -kStdDevsInsideBoundsToUseRandnSampler) &&
             (normMax >= T(0.))) ||
            ((normMax > kStdDevsInsideBoundsToUseRandnSampler) &&
             (normMin <= T(0.)))) {
          // Wide window straddling the mean: draw ordinary normals and keep
          // the ones that land inside [normMin, normMax].
          while (sample < limit_sample) {
            const auto randn_sample = normal_dist(&gen_copy);
            const int size = randn_sample.size();
            for (int i = 0; i < size; i++) {
              if ((randn_sample[i] >= normMin) &&
                  (randn_sample[i] <= normMax)) {
                output(sample) = randn_sample[i] * stddev + mean;
                sample++;
                if (sample >= limit_sample) {
                  break;
                }
                num_iterations = 0;
              } else {
                num_iterations++;
                if (num_iterations > kMaxIterations) {
                  // Should not happen for windows this wide; bail out rather
                  // than spin forever.
                  LOG(ERROR) << "TruncatedNormal randn rejection sampler "
                             << "exceeded maximum iterations for "
                             << "normMin=" << normMin << " normMax=" << normMax
                             << " kMaxIterations=" << kMaxIterations;
                  ctx->SetStatus(errors::Internal(
                      "TruncatedNormal randn rejection sampler failed to accept"
                      " a sample."));
                  return;
                }
              }
            }
          }
        } else if (diff < cutoff) {
          // Narrow window: uniform proposal over [normMin, normMax], accepted
          // with probability exp((plusFactor - z^2) / 2).
          const T plusFactor = (normMin < T(0)) ? T(0) : normMin * normMin;

          while (sample < limit_sample) {
            const auto rand = dist(&gen_copy);
            const int size = rand.size();
            // Map uniforms into the window, then compute the log acceptance
            // exponent for each candidate.
            for (int i = 0; i < size; i++) {
              z[i] = rand[i] * diff + normMin;
            }
            for (int i = 0; i < size; i++) {
              g[i] = (plusFactor - z[i] * z[i]) / T(2.0);
            }

            const auto u = dist(&gen_copy);
            for (int i = 0; i < size; i++) {
              auto accept = u[i] <= Eigen::numext::exp(g[i]);
              if (accept || num_iterations + 1 >= kMaxIterations) {
                if (!accept) {
                  LOG(ERROR) << "TruncatedNormal uniform rejection sampler "
                             << "exceeded max iterations. Sample may contain "
                             << "outliers.";
                  ctx->SetStatus(errors::Internal(
                      "TruncatedNormal uniform rejection sampler failed to "
                      " accept a sample."));
                  return;
                }
                output(sample) = z[i] * stddev + mean;
                sample++;
                if (sample >= limit_sample) {
                  break;
                }
                num_iterations = 0;
              } else {
                num_iterations++;
              }
            }
          }
        } else {
          // Wide one-sided window: shifted-exponential proposal with rate
          // `alpha`, accepted with probability exp(-x^2 / 2).
          const T alpha =
              (normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) /
              T(2);

          while (sample < limit_sample) {
            auto rand = dist(&gen_copy);
            const int size = rand.size();
            int i = 0;
            // Consumes two uniforms per candidate: one for the exponential
            // draw, one for the accept test.
            while (i < size) {
              const T z = -Eigen::numext::log(rand[i]) / alpha + normMin;
              i++;
              const T x = normMin < alpha ? alpha - z : normMin - alpha;
              const T g = Eigen::numext::exp(-x * x / T(2.0));
              const T u = rand[i];
              i++;
              auto accept = (u <= g && z < normMax);
              if (accept || num_iterations + 1 >= kMaxIterations) {
                if (!accept) {
                  LOG(ERROR) << "TruncatedNormal exponential distribution "
                             << "rejection sampler exceeds max iterations. "
                             << "Sample may contain outliers.";
                  ctx->SetStatus(errors::Internal(
                      "TruncatedNormal exponential distribution rejection"
                      " sampler failed to accept a sample."));
                  return;
                }
                output(sample) = z * stddev + mean;
                sample++;
                if (sample >= limit_sample) {
                  break;
                }
                num_iterations = 0;
              } else {
                num_iterations++;
              }
            }
          }
        }
      }
    };
    // Rough per-batch cost model (in Eigen cost units) used to pick the shard
    // size for the thread pool.
    const int64_t batchInitCost =
        // normMin, normMax
        (Eigen::TensorOpCost::AddCost<T>() +
         Eigen::TensorOpCost::MulCost<T>()) *
            2
        // sqrtFactor
        + Eigen::TensorOpCost::AddCost<T>() +
        Eigen::TensorOpCost::MulCost<T>() +
        Eigen::internal::functor_traits<
            Eigen::internal::scalar_sqrt_op<T>>::Cost
        // cutoff
        + Eigen::TensorOpCost::MulCost<T>() * 4 +
        Eigen::internal::functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost
        // diff
        + Eigen::TensorOpCost::AddCost<T>();
    const int64_t uniformSampleCost =
        random::PhiloxRandom::kElementCost +
        random::UniformDistribution<random::PhiloxRandom, T>::kElementCost;
    // One rejection round: candidate draw + transform + accept test.
    const int64_t uniformRejectionSamplingCost =
        uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() +
        Eigen::TensorOpCost::AddCost<T>() +
        Eigen::TensorOpCost::MulCost<T>() * 2 +
        Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost +
        Eigen::internal::functor_traits<
            Eigen::internal::scalar_exp_op<T>>::Cost +
        Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>();
    // Assume an average of two rejection rounds per sample.
    const int64_t batchCost =
        batchInitCost + uniformRejectionSamplingCost * 2 * samples_per_batch;
    Shard(worker_threads.num_threads, worker_threads.workers, num_batches,
          batchCost, do_work);
  }
};
// CPU specialization of the truncated normal sampler used by the stateless
// op.  Unlike V1, parameters broadcast via `bcast`, work is sharded by output
// element rather than by batch, and the output layout is transposed: the
// element for (batch_idx, sample_idx) is written at flat index
// batch_idx + sample_idx * num_batches (samples are the leading dimension).
template <typename T>
struct TruncatedNormalFunctorV2<CPUDevice, T> {
  // Fills `output` with draws from N(mean, stddev) restricted to
  // [minval, maxval].  `bcast` maps each batch index to an index in each of
  // the four (possibly broadcast) parameter tensors.
  void operator()(OpKernelContext* ctx, const CPUDevice& d, int64_t num_batches,
                  int64_t samples_per_batch, int64_t num_elements,
                  const BCastList<4>& bcast,
                  typename TTypes<T>::ConstFlat means,
                  typename TTypes<T>::ConstFlat stddevs,
                  typename TTypes<T>::ConstFlat minvals,
                  typename TTypes<T>::ConstFlat maxvals,
                  const random::PhiloxRandom& gen,
                  typename TTypes<T>::Flat output) {
    // If the truncation window extends at least this many standard deviations
    // past the mean on one side while covering the mean on the other, plain
    // normal draws are accepted often enough to beat the specialized
    // rejection samplers below.
    const T kStdDevsInsideBoundsToUseRandnSampler = T(1.3);
    auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads());
    // Samples every output element in [start_output, limit_output).
    auto do_work = [num_batches, samples_per_batch, &ctx, &bcast, &means,
                    &stddevs, &minvals, &maxvals, &gen, &output,
                    kStdDevsInsideBoundsToUseRandnSampler](
                       int64_t start_output, int64_t limit_output) {
      random::PhiloxRandom gen_copy = gen;
      using Uniform = random::UniformDistribution<random::PhiloxRandom, T>;
      Uniform dist;
      using Normal = random::NormalDistribution<random::PhiloxRandom, T>;
      Normal normal_dist;
      // Skip to this shard's private substream: each output element consumes
      // at most 2 * kMaxIterations random values, produced
      // Uniform::kResultElementCount at a time (rounded up).
      gen_copy.Skip((start_output * 2 * kMaxIterations +
                     Uniform::kResultElementCount - 1) /
                    Uniform::kResultElementCount);

      // Scratch buffers for the vectorized uniform rejection sampler.
      Eigen::array<T, Uniform::kResultElementCount> z;
      Eigen::array<T, Uniform::kResultElementCount> g;

      const bool should_bcast = bcast.IsBroadcastingRequired();
      const auto& means_batch_indices = bcast.batch_indices(0);
      const auto& stddevs_batch_indices = bcast.batch_indices(1);
      const auto& minvals_batch_indices = bcast.batch_indices(2);
      const auto& maxvals_batch_indices = bcast.batch_indices(3);
      auto output_flat = output.data();

      // The index space enumerated here is batch-major: output_idx maps to
      // batch output_idx / samples_per_batch, sample output_idx %
      // samples_per_batch.  Writes go to the transposed location described in
      // the struct comment.
      for (int64_t output_idx = start_output; output_idx < limit_output;
           // output_idx is incremented with sample_idx in the inner loops.
      ) {
        int64_t batch_idx = output_idx / samples_per_batch;
        // Base of this batch's (strided) output column.
        T* const output_batch_offset = output_flat + batch_idx;
        // Look up this batch's parameters, through the broadcast map if any
        // input needs broadcasting.
        T mean, stddev, minval, maxval;
        if (should_bcast) {
          mean = means(means_batch_indices[batch_idx]);
          stddev = stddevs(stddevs_batch_indices[batch_idx]);
          minval = minvals(minvals_batch_indices[batch_idx]);
          maxval = maxvals(maxvals_batch_indices[batch_idx]);
        } else {
          mean = means(batch_idx);
          stddev = stddevs(batch_idx);
          minval = minvals(batch_idx);
          maxval = maxvals(batch_idx);
        }

        // Require a positive stddev, a non-empty interval, and at least one
        // finite bound.
        OP_REQUIRES(ctx,
                    stddev > T(0) && minval < maxval &&
                        (Eigen::numext::isfinite(minval) ||
                         Eigen::numext::isfinite(maxval)),
                    errors::InvalidArgument("Invalid parameters"));

        int num_iterations = 0;

        // Reflect left-tail-style intervals about the mean (by negating
        // stddev) so the samplers below only handle the right-tail
        // orientation.
        if ((Eigen::numext::isinf(minval) && minval < T(0)) || maxval < mean) {
          std::swap(minval, maxval);
          stddev = -stddev;
        }

        // Standardize the truncation bounds.
        const T normMin = (minval - mean) / stddev;
        const T normMax = (maxval - mean) / stddev;

        // Width threshold separating the uniform-proposal sampler (narrow
        // windows) from the exponential-proposal sampler (wide, one-sided
        // windows).
        const T sqrtFactor = Eigen::numext::sqrt((normMin * normMin) + T(4));
        const T cutoff =
            T(2) *
            Eigen::numext::exp(T(0.5) +
                               (normMin * (normMin - sqrtFactor)) / T(4)) /
            (normMin + sqrtFactor);
        const T diff = normMax - normMin;

        if (((normMin < -kStdDevsInsideBoundsToUseRandnSampler) &&
             (normMax >= T(0.))) ||
            ((normMax > kStdDevsInsideBoundsToUseRandnSampler) &&
             (normMin <= T(0.)))) {
          // Wide window straddling the mean: draw ordinary normals and keep
          // the ones that land inside [normMin, normMax].
          for (int64_t sample_idx = output_idx % samples_per_batch;
               sample_idx < samples_per_batch && output_idx < limit_output;) {
            const auto randn_sample = normal_dist(&gen_copy);
            const int size = randn_sample.size();
            for (int i = 0; i < size; ++i) {
              if ((randn_sample[i] >= normMin) &&
                  (randn_sample[i] <= normMax)) {
                output_batch_offset[sample_idx * num_batches] =
                    randn_sample[i] * stddev + mean;
                ++sample_idx;
                ++output_idx;
                if (sample_idx >= samples_per_batch ||
                    output_idx >= limit_output) {
                  break;
                }
                num_iterations = 0;
              } else {
                ++num_iterations;
                if (num_iterations > kMaxIterations) {
                  // Should not happen for windows this wide; bail out rather
                  // than spin forever.
                  LOG(ERROR) << "TruncatedNormal randn rejection sampler "
                             << "exceeded maximum iterations for "
                             << "normMin=" << normMin << " normMax=" << normMax
                             << " kMaxIterations=" << kMaxIterations;
                  ctx->SetStatus(errors::Internal(
                      "TruncatedNormal randn rejection sampler failed to accept"
                      " a sample."));
                  return;
                }
              }
            }
          }
        } else if (diff < cutoff) {
          // Narrow window: uniform proposal over [normMin, normMax], accepted
          // with probability exp((plusFactor - z^2) / 2).
          const T plusFactor = (normMin < T(0)) ? T(0) : normMin * normMin;
          for (int64_t sample_idx = output_idx % samples_per_batch;
               sample_idx < samples_per_batch && output_idx < limit_output;) {
            const auto rand = dist(&gen_copy);
            const int size = rand.size();
            // Map uniforms into the window and compute each candidate's log
            // acceptance exponent.
            for (int i = 0; i < size; i++) {
              z[i] = rand[i] * diff + normMin;
              g[i] = (plusFactor - z[i] * z[i]) / T(2.0);
            }
            const auto u = dist(&gen_copy);
            for (int i = 0; i < size; i++) {
              auto accept = u[i] <= Eigen::numext::exp(g[i]);
              if (accept || num_iterations + 1 >= kMaxIterations) {
                if (!accept) {
                  LOG(ERROR) << "TruncatedNormal uniform rejection sampler "
                             << "exceeded max iterations. Sample may contain "
                             << "outliers.";
                  ctx->SetStatus(errors::Internal(
                      "TruncatedNormal uniform rejection sampler failed to "
                      " accept a sample."));
                  return;
                }
                output_batch_offset[sample_idx * num_batches] =
                    z[i] * stddev + mean;
                ++sample_idx;
                ++output_idx;
                if (sample_idx >= samples_per_batch ||
                    output_idx >= limit_output) {
                  break;
                }
                num_iterations = 0;
              } else {
                num_iterations++;
              }
            }
          }
        } else {
          // Wide one-sided window: shifted-exponential proposal with rate
          // `alpha`, accepted with probability exp(-x^2 / 2).
          const T alpha =
              (normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) /
              T(2);
          for (int64_t sample_idx = output_idx % samples_per_batch;
               sample_idx < samples_per_batch && output_idx < limit_output;) {
            auto rand = dist(&gen_copy);
            const int size = rand.size();
            int i = 0;
            // Consumes two uniforms per candidate: one for the exponential
            // draw, one for the accept test.
            while (i < size) {
              const T z = -Eigen::numext::log(rand[i]) / alpha + normMin;
              i++;
              const T x = normMin < alpha ? alpha - z : normMin - alpha;
              const T g = Eigen::numext::exp(-x * x / T(2.0));
              const T u = rand[i];
              i++;
              auto accept = (u <= g && z < normMax);
              if (accept || num_iterations + 1 >= kMaxIterations) {
                if (!accept) {
                  LOG(ERROR) << "TruncatedNormal exponential distribution "
                             << "rejection sampler exceeds max iterations. "
                             << "Sample may contain outliers.";
                  ctx->SetStatus(errors::Internal(
                      "TruncatedNormal exponential distribution rejection"
                      " sampler failed to accept a sample."));
                  return;
                }
                output_batch_offset[sample_idx * num_batches] =
                    z * stddev + mean;
                ++sample_idx;
                ++output_idx;
                if (sample_idx >= samples_per_batch ||
                    output_idx >= limit_output) {
                  break;
                }
                num_iterations = 0;
              } else {
                num_iterations++;
              }
            }
          }
        }
      }
    };
    // Rough per-element cost model (in Eigen cost units) used to pick the
    // shard size for the thread pool.
    const int64_t batchInitCost =
        // normMin, normMax
        (Eigen::TensorOpCost::AddCost<T>() +
         Eigen::TensorOpCost::MulCost<T>()) *
            2
        // sqrtFactor
        + Eigen::TensorOpCost::AddCost<T>() +
        Eigen::TensorOpCost::MulCost<T>() +
        Eigen::internal::functor_traits<
            Eigen::internal::scalar_sqrt_op<T>>::Cost
        // cutoff
        + Eigen::TensorOpCost::MulCost<T>() * 4 +
        Eigen::internal::functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost
        // diff
        + Eigen::TensorOpCost::AddCost<T>();
    const int64_t uniformSampleCost =
        random::PhiloxRandom::kElementCost +
        random::UniformDistribution<random::PhiloxRandom, T>::kElementCost;
    // One rejection round: candidate draw + transform + accept test.
    const int64_t uniformRejectionSamplingCost =
        uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() +
        Eigen::TensorOpCost::AddCost<T>() +
        Eigen::TensorOpCost::MulCost<T>() * 2 +
        Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost +
        Eigen::internal::functor_traits<
            Eigen::internal::scalar_exp_op<T>>::Cost +
        Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>();
    // Assume an average of two rejection rounds per output element.
    const int64_t batchCost = batchInitCost + uniformRejectionSamplingCost * 2;
    Shard(worker_threads.num_threads, worker_threads.workers, num_elements,
          batchCost, do_work);
  }
};
}
namespace {
template <typename Device, typename T>
class ParameterizedTruncatedNormalOp : public OpKernel {
static constexpr int32_t kDesiredBatchSize = 100;
public:
explicit ParameterizedTruncatedNormalOp(OpKernelConstruction* context)
: OpKernel(context) {
OP_REQUIRES_OK(context, generator_.Init(context));
}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape_tensor = ctx->input(0);
const Tensor& means_tensor = ctx->input(1);
const Tensor& stddevs_tensor = ctx->input(2);
const Tensor& minvals_tensor = ctx->input(3);
const Tensor& maxvals_tensor = ctx->input(4);
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_tensor.shape()),
errors::InvalidArgument("Input shape should be a vector, got shape: ",
shape_tensor.shape().DebugString()));
OP_REQUIRES(ctx, shape_tensor.NumElements() > 0,
errors::InvalidArgument("Shape tensor must not be empty, got ",
shape_tensor.DebugString()));
TensorShape tensor_shape;
OP_REQUIRES_OK(ctx, tensor::MakeShape(shape_tensor, &tensor_shape));
int32_t num_batches = tensor_shape.dim_size(0);
int32_t samples_per_batch = 1;
const int32_t num_dims = tensor_shape.dims();
for (int32_t i = 1; i < num_dims; i++) {
samples_per_batch *= tensor_shape.dim_size(i);
}
const int32_t num_elements = num_batches * samples_per_batch;
Tensor* samples_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, tensor_shape, &samples_tensor));
OP_REQUIRES(ctx, means_tensor.dims() <= 1,
errors::InvalidArgument(
"Input means should be a scalar or vector, got shape: ",
means_tensor.shape().DebugString()));
OP_REQUIRES(ctx, stddevs_tensor.dims() <= 1,
errors::InvalidArgument(
"Input stddevs should be a scalar or vector, got shape: ",
stddevs_tensor.shape().DebugString()));
OP_REQUIRES(ctx, minvals_tensor.dims() <= 1,
errors::InvalidArgument(
"Input minvals should be a scalar or vector, got shape: ",
minvals_tensor.shape().DebugString()));
OP_REQUIRES(ctx, maxvals_tensor.dims() <= 1,
errors::InvalidArgument(
"Input maxvals should be a scalar or vector, got shape: ",
maxvals_tensor.shape().DebugString()));
if ((means_tensor.dims() == 0 || means_tensor.dim_size(0) == 1) &&
(stddevs_tensor.dims() == 0 || stddevs_tensor.dim_size(0) == 1) &&
minvals_tensor.dims() == 0 && maxvals_tensor.dims() == 0) {
int32_t size = num_batches * samples_per_batch;
int32_t adjusted_samples = kDesiredBatchSize;
int32_t adjusted_batches = Eigen::divup(size, adjusted_samples);
num_batches = adjusted_batches;
samples_per_batch = adjusted_samples;
} else {
OP_REQUIRES(
ctx,
TensorShapeUtils::IsScalar(means_tensor.shape()) ||
means_tensor.dim_size(0) == 1 ||
means_tensor.dim_size(0) == num_batches,
errors::InvalidArgument(
"Input means should have length 1 or shape[0], got shape: ",
means_tensor.shape().DebugString()));
OP_REQUIRES(
ctx,
TensorShapeUtils::IsScalar(stddevs_tensor.shape()) ||
stddevs_tensor.dim_size(0) == 1 ||
stddevs_tensor.dim_size(0) == num_batches,
errors::InvalidArgument(
"Input stddevs should have length 1 or shape[0], got shape: ",
stddevs_tensor.shape().DebugString()));
OP_REQUIRES(
ctx,
TensorShapeUtils::IsScalar(minvals_tensor.shape()) ||
minvals_tensor.dim_size(0) == 1 ||
minvals_tensor.dim_size(0) == num_batches,
errors::InvalidArgument(
"Input minvals should have length 1 or shape[0], got shape: ",
minvals_tensor.shape().DebugString()));
OP_REQUIRES(
ctx,
TensorShapeUtils::IsScalar(maxvals_tensor.shape()) ||
maxvals_tensor.dim_size(0) == 1 ||
maxvals_tensor.dim_size(0) == num_batches,
errors::InvalidArgument(
"Input maxvals should have length 1 or shape[0], got shape: ",
maxvals_tensor.shape().DebugString()));
}
auto truncFunctor = functor::TruncatedNormalFunctor<Device, T>();
random::PhiloxRandom rng =
generator_.ReserveSamples128(num_batches * 2 * functor::kMaxIterations *
(samples_per_batch + 3) / 4);
truncFunctor(ctx, ctx->eigen_device<Device>(), num_batches,
samples_per_batch, num_elements, means_tensor.flat<T>(),
stddevs_tensor.flat<T>(), minvals_tensor.flat<T>(),
maxvals_tensor.flat<T>(), rng, samples_tensor->flat<T>());
}
private:
GuardedPhiloxRandom generator_;
ParameterizedTruncatedNormalOp(const ParameterizedTruncatedNormalOp&) =
delete;
void operator=(const ParameterizedTruncatedNormalOp&) = delete;
};
template <typename Device, typename T>
class StatelessParameterizedTruncatedNormal : public OpKernel {
static const int32_t kDesiredBatchSize = 100;
public:
explicit StatelessParameterizedTruncatedNormal(OpKernelConstruction* context)
: OpKernel(context) {}
void Compute(OpKernelContext* ctx) override {
const Tensor& shape_tensor = ctx->input(0);
const Tensor& seed_tensor = ctx->input(1);
const Tensor& means_tensor = ctx->input(2);
const Tensor& stddevs_tensor = ctx->input(3);
const Tensor& minvals_tensor = ctx->input(4);
const Tensor& maxvals_tensor = ctx->input(5);
OP_REQUIRES(ctx, seed_tensor.dims() == 1 && seed_tensor.dim_size(0) == 2,
errors::InvalidArgument("seed must have shape [2], not ",
seed_tensor.shape().DebugString()));
tensorflow::BCastList<4> bcast(
{means_tensor.shape().dim_sizes(), stddevs_tensor.shape().dim_sizes(),
minvals_tensor.shape().dim_sizes(),
maxvals_tensor.shape().dim_sizes()},
false,
true);
OP_REQUIRES(ctx, bcast.IsValid(),
errors::InvalidArgument(
"means, stddevs, minvals, maxvals must have compatible "
"batch dimensions: ",
means_tensor.shape().DebugString(), " vs. ",
stddevs_tensor.shape().DebugString(), " vs. ",
minvals_tensor.shape().DebugString(), " vs. ",
maxvals_tensor.shape().DebugString()));
TensorShape bcast_shape = BCast::ToShape(bcast.output_shape());
OP_REQUIRES(
ctx, TensorShapeUtils::IsVector(shape_tensor.shape()),
errors::InvalidArgument("Input shape should be a vector, got shape: ",
shape_tensor.shape().DebugString()));
TensorShape output_shape;
if (shape_tensor.dtype() == DataType::DT_INT32) {
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(shape_tensor.vec<int32>(),
&output_shape));
} else {
OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(
shape_tensor.vec<int64_t>(), &output_shape));
}
OP_REQUIRES(ctx, TensorShapeUtils::EndsWith(output_shape, bcast_shape),
errors::InvalidArgument(
"Shape passed in must end with broadcasted shape."));
int64_t samples_per_batch = 1;
const int64_t num_sample_dims =
(shape_tensor.dim_size(0) - bcast.output_shape().size());
for (int64_t i = 0; i < num_sample_dims; ++i) {
samples_per_batch *= output_shape.dim_size(i);
}
int64_t num_batches = 1;
for (int64_t i = num_sample_dims; i < shape_tensor.dim_size(0); ++i) {
num_batches *= output_shape.dim_size(i);
}
const int64_t num_elements = num_batches * samples_per_batch;
Tensor* samples_tensor;
OP_REQUIRES_OK(ctx, ctx->allocate_output(0, output_shape, &samples_tensor));
auto truncFunctor = functor::TruncatedNormalFunctorV2<Device, T>();
random::PhiloxRandom::Key key;
random::PhiloxRandom::ResultType counter;
OP_REQUIRES_OK(ctx, GenerateKey(seed_tensor, &key, &counter));
auto philox = random::PhiloxRandom(counter, key);
truncFunctor(ctx, ctx->eigen_device<Device>(), num_batches,
samples_per_batch, num_elements, bcast, means_tensor.flat<T>(),
stddevs_tensor.flat<T>(), minvals_tensor.flat<T>(),
maxvals_tensor.flat<T>(), philox, samples_tensor->flat<T>());
}
private:
StatelessParameterizedTruncatedNormal(
const StatelessParameterizedTruncatedNormal&) = delete;
void operator=(const StatelessParameterizedTruncatedNormal&) = delete;
};
}
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("ParameterizedTruncatedNormal") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("dtype"), \
ParameterizedTruncatedNormalOp<CPUDevice, TYPE>) \
REGISTER_KERNEL_BUILDER( \
Name("StatelessParameterizedTruncatedNormal") \
.HostMemory("shape") \
.HostMemory("seed") \
.HostMemory("means") \
.HostMemory("stddevs") \
.HostMemory("minvals") \
.HostMemory("maxvals") \
.Device(DEVICE_CPU) \
.TypeConstraint<TYPE>("dtype"), \
StatelessParameterizedTruncatedNormal<CPUDevice, TYPE>)
TF_CALL_half(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#define REGISTER(TYPE) \
REGISTER_KERNEL_BUILDER(Name("ParameterizedTruncatedNormal") \
.Device(DEVICE_GPU) \
.HostMemory("shape") \
.TypeConstraint<TYPE>("dtype"), \
ParameterizedTruncatedNormalOp<GPUDevice, TYPE>)
TF_CALL_half(REGISTER);
TF_CALL_float(REGISTER);
TF_CALL_double(REGISTER);
#undef REGISTER
#endif
} | #include <functional>
#include <memory>
#include <vector>
#include "tensorflow/core/common_runtime/kernel_benchmark_testlib.h"
#include "tensorflow/core/graph/node_builder.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/platform/test_benchmark.h"
namespace tensorflow {
static Graph* PTruncatedNormal(int num_batches, int samples_per_batch) {
Graph* g = new Graph(OpRegistry::Global());
Tensor shape_t(DT_INT32, TensorShape({2}));
shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
Tensor means_t(DT_FLOAT, TensorShape({num_batches}));
means_t.flat<float>().setConstant(0.0);
Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches}));
stdevs_t.flat<float>().setConstant(1.0);
Tensor minvals_t(DT_FLOAT, TensorShape({num_batches}));
minvals_t.flat<float>().setRandom();
Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches}));
maxvals_t.flat<float>().setConstant(5.0);
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal")
.Input(test::graph::Constant(g, shape_t))
.Input(test::graph::Constant(g, means_t))
.Input(test::graph::Constant(g, stdevs_t))
.Input(test::graph::Constant(g, minvals_t))
.Input(test::graph::Constant(g, maxvals_t))
.Attr("dtype", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
static Graph* PTruncatedNormal2SD(int num_batches, int samples_per_batch) {
Graph* g = new Graph(OpRegistry::Global());
Tensor shape_t(DT_INT32, TensorShape({2}));
shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
Tensor means_t(DT_FLOAT, TensorShape({num_batches}));
means_t.flat<float>().setConstant(0.0);
Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches}));
stdevs_t.flat<float>().setConstant(1.0);
Tensor minvals_t(DT_FLOAT, TensorShape({num_batches}));
minvals_t.flat<float>().setConstant(-2.0);
Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches}));
maxvals_t.flat<float>().setConstant(2.0);
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal")
.Input(test::graph::Constant(g, shape_t))
.Input(test::graph::Constant(g, means_t))
.Input(test::graph::Constant(g, stdevs_t))
.Input(test::graph::Constant(g, minvals_t))
.Input(test::graph::Constant(g, maxvals_t))
.Attr("dtype", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
static Graph* PTruncatedNormalOneTail(int num_batches, int samples_per_batch) {
Graph* g = new Graph(OpRegistry::Global());
Tensor shape_t(DT_INT32, TensorShape({2}));
shape_t.flat<int32>().setValues({num_batches, samples_per_batch});
Tensor means_t(DT_FLOAT, TensorShape({num_batches}));
means_t.flat<float>().setConstant(0.0);
Tensor stdevs_t(DT_FLOAT, TensorShape({num_batches}));
stdevs_t.flat<float>().setConstant(1.0);
Tensor minvals_t(DT_FLOAT, TensorShape({num_batches}));
minvals_t.flat<float>().setConstant(2.0);
Tensor maxvals_t(DT_FLOAT, TensorShape({num_batches}));
maxvals_t.flat<float>().setConstant(std::numeric_limits<float>::infinity());
Node* ret;
TF_CHECK_OK(
NodeBuilder(g->NewName("truncatednormal"), "ParameterizedTruncatedNormal")
.Input(test::graph::Constant(g, shape_t))
.Input(test::graph::Constant(g, means_t))
.Input(test::graph::Constant(g, stdevs_t))
.Input(test::graph::Constant(g, minvals_t))
.Input(test::graph::Constant(g, maxvals_t))
.Attr("dtype", DT_FLOAT)
.Finalize(g, &ret));
return g;
}
#define BM_PTruncatedNormalDev(DEVICE, B, S) \
static void BM_PTruncatedNormal_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, PTruncatedNormal(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_PTruncatedNormal_##DEVICE##_##B##_##S);
#define BM_PTruncatedNormalDev_2SD(DEVICE, B, S) \
static void BM_PTruncatedNormal_2SD_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, PTruncatedNormal2SD(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_PTruncatedNormal_2SD_##DEVICE##_##B##_##S);
#define BM_PTruncatedNormalDev_OneTail(DEVICE, B, S) \
static void BM_PTruncatedNormal_OneTail_##DEVICE##_##B##_##S( \
::testing::benchmark::State& state) { \
test::Benchmark(#DEVICE, PTruncatedNormalOneTail(B, S), \
false) \
.Run(state); \
state.SetItemsProcessed(static_cast<int64_t>(B) * S * state.iterations()); \
} \
BENCHMARK(BM_PTruncatedNormal_OneTail_##DEVICE##_##B##_##S);
BM_PTruncatedNormalDev(cpu, 1000, 1000);
BM_PTruncatedNormalDev_2SD(cpu, 10000, 100);
BM_PTruncatedNormalDev_OneTail(cpu, 10000, 100);
BM_PTruncatedNormalDev(gpu, 1000, 1000);
BM_PTruncatedNormalDev_2SD(gpu, 10000, 100);
BM_PTruncatedNormalDev_OneTail(gpu, 10000, 100);
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/parameterized_truncated_normal_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/parameterized_truncated_normal_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
124ce3af-8573-40e2-9097-e788f3cff240 | cpp | abseil/abseil-cpp | usage_config | absl/flags/usage_config.cc | absl/flags/usage_config_test.cc | #include "absl/flags/usage_config.h"
#include <functional>
#include <iostream>
#include <string>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/flags/internal/path_util.h"
#include "absl/flags/internal/program_name.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
#include "absl/synchronization/mutex.h"
extern "C" {
ABSL_ATTRIBUTE_WEAK void ABSL_INTERNAL_C_SYMBOL(
AbslInternalReportFatalUsageError)(absl::string_view) {}
}
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace flags_internal {
namespace {
bool ContainsHelpshortFlags(absl::string_view filename) {
auto suffix = flags_internal::Basename(filename);
auto program_name = flags_internal::ShortProgramInvocationName();
absl::string_view program_name_ref = program_name;
#if defined(_WIN32)
absl::ConsumeSuffix(&program_name_ref, ".exe");
#endif
if (!absl::ConsumePrefix(&suffix, program_name_ref))
return false;
return absl::StartsWith(suffix, ".") || absl::StartsWith(suffix, "-main.") ||
absl::StartsWith(suffix, "_main.");
}
bool ContainsHelppackageFlags(absl::string_view filename) {
return ContainsHelpshortFlags(filename);
}
std::string VersionString() {
std::string version_str(flags_internal::ShortProgramInvocationName());
version_str += "\n";
#if !defined(NDEBUG)
version_str += "Debug build (NDEBUG not #defined)\n";
#endif
return version_str;
}
std::string NormalizeFilename(absl::string_view filename) {
auto pos = filename.find_first_not_of("\\/");
if (pos == absl::string_view::npos) return "";
filename.remove_prefix(pos);
return std::string(filename);
}
ABSL_CONST_INIT absl::Mutex custom_usage_config_guard(absl::kConstInit);
ABSL_CONST_INIT FlagsUsageConfig* custom_usage_config
ABSL_GUARDED_BY(custom_usage_config_guard) = nullptr;
}
FlagsUsageConfig GetUsageConfig() {
absl::MutexLock l(&custom_usage_config_guard);
if (custom_usage_config) return *custom_usage_config;
FlagsUsageConfig default_config;
default_config.contains_helpshort_flags = &ContainsHelpshortFlags;
default_config.contains_help_flags = &ContainsHelppackageFlags;
default_config.contains_helppackage_flags = &ContainsHelppackageFlags;
default_config.version_string = &VersionString;
default_config.normalize_filename = &NormalizeFilename;
return default_config;
}
void ReportUsageError(absl::string_view msg, bool is_fatal) {
std::cerr << "ERROR: " << msg << std::endl;
if (is_fatal) {
ABSL_INTERNAL_C_SYMBOL(AbslInternalReportFatalUsageError)(msg);
}
}
}
void SetFlagsUsageConfig(FlagsUsageConfig usage_config) {
absl::MutexLock l(&flags_internal::custom_usage_config_guard);
if (!usage_config.contains_helpshort_flags)
usage_config.contains_helpshort_flags =
flags_internal::ContainsHelpshortFlags;
if (!usage_config.contains_help_flags)
usage_config.contains_help_flags = flags_internal::ContainsHelppackageFlags;
if (!usage_config.contains_helppackage_flags)
usage_config.contains_helppackage_flags =
flags_internal::ContainsHelppackageFlags;
if (!usage_config.version_string)
usage_config.version_string = flags_internal::VersionString;
if (!usage_config.normalize_filename)
usage_config.normalize_filename = flags_internal::NormalizeFilename;
if (flags_internal::custom_usage_config)
*flags_internal::custom_usage_config = usage_config;
else
flags_internal::custom_usage_config = new FlagsUsageConfig(usage_config);
}
ABSL_NAMESPACE_END
} | #include "absl/flags/usage_config.h"
#include <string>
#include "gtest/gtest.h"
#include "absl/flags/internal/path_util.h"
#include "absl/flags/internal/program_name.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
namespace {
class FlagsUsageConfigTest : public testing::Test {
protected:
void SetUp() override {
absl::FlagsUsageConfig default_config;
absl::SetFlagsUsageConfig(default_config);
}
};
namespace flags = absl::flags_internal;
bool TstContainsHelpshortFlags(absl::string_view f) {
return absl::StartsWith(flags::Basename(f), "progname.");
}
bool TstContainsHelppackageFlags(absl::string_view f) {
return absl::EndsWith(flags::Package(f), "aaa/");
}
bool TstContainsHelpFlags(absl::string_view f) {
return absl::EndsWith(flags::Package(f), "zzz/");
}
std::string TstVersionString() { return "program 1.0.0"; }
std::string TstNormalizeFilename(absl::string_view filename) {
return std::string(filename.substr(2));
}
void TstReportUsageMessage(absl::string_view msg) {}
TEST_F(FlagsUsageConfigTest, TestGetSetFlagsUsageConfig) {
EXPECT_TRUE(flags::GetUsageConfig().contains_helpshort_flags);
EXPECT_TRUE(flags::GetUsageConfig().contains_help_flags);
EXPECT_TRUE(flags::GetUsageConfig().contains_helppackage_flags);
EXPECT_TRUE(flags::GetUsageConfig().version_string);
EXPECT_TRUE(flags::GetUsageConfig().normalize_filename);
absl::FlagsUsageConfig empty_config;
empty_config.contains_helpshort_flags = &TstContainsHelpshortFlags;
empty_config.contains_help_flags = &TstContainsHelpFlags;
empty_config.contains_helppackage_flags = &TstContainsHelppackageFlags;
empty_config.version_string = &TstVersionString;
empty_config.normalize_filename = &TstNormalizeFilename;
absl::SetFlagsUsageConfig(empty_config);
EXPECT_TRUE(flags::GetUsageConfig().contains_helpshort_flags);
EXPECT_TRUE(flags::GetUsageConfig().contains_help_flags);
EXPECT_TRUE(flags::GetUsageConfig().contains_helppackage_flags);
EXPECT_TRUE(flags::GetUsageConfig().version_string);
EXPECT_TRUE(flags::GetUsageConfig().normalize_filename);
}
TEST_F(FlagsUsageConfigTest, TestContainsHelpshortFlags) {
#if defined(_WIN32)
flags::SetProgramInvocationName("usage_config_test.exe");
#else
flags::SetProgramInvocationName("usage_config_test");
#endif
auto config = flags::GetUsageConfig();
EXPECT_TRUE(config.contains_helpshort_flags("adir/cd/usage_config_test.cc"));
EXPECT_TRUE(
config.contains_helpshort_flags("aaaa/usage_config_test-main.cc"));
EXPECT_TRUE(config.contains_helpshort_flags("abc/usage_config_test_main.cc"));
EXPECT_FALSE(config.contains_helpshort_flags("usage_config_main.cc"));
absl::FlagsUsageConfig empty_config;
empty_config.contains_helpshort_flags = &TstContainsHelpshortFlags;
absl::SetFlagsUsageConfig(empty_config);
EXPECT_TRUE(
flags::GetUsageConfig().contains_helpshort_flags("aaa/progname.cpp"));
EXPECT_FALSE(
flags::GetUsageConfig().contains_helpshort_flags("aaa/progmane.cpp"));
}
TEST_F(FlagsUsageConfigTest, TestContainsHelpFlags) {
flags::SetProgramInvocationName("usage_config_test");
auto config = flags::GetUsageConfig();
EXPECT_TRUE(config.contains_help_flags("zzz/usage_config_test.cc"));
EXPECT_TRUE(
config.contains_help_flags("bdir/a/zzz/usage_config_test-main.cc"));
EXPECT_TRUE(
config.contains_help_flags("
EXPECT_FALSE(config.contains_help_flags("zzz/aa/usage_config_main.cc"));
absl::FlagsUsageConfig empty_config;
empty_config.contains_help_flags = &TstContainsHelpFlags;
absl::SetFlagsUsageConfig(empty_config);
EXPECT_TRUE(flags::GetUsageConfig().contains_help_flags("zzz/main-body.c"));
EXPECT_FALSE(
flags::GetUsageConfig().contains_help_flags("zzz/dir/main-body.c"));
}
TEST_F(FlagsUsageConfigTest, TestContainsHelppackageFlags) {
flags::SetProgramInvocationName("usage_config_test");
auto config = flags::GetUsageConfig();
EXPECT_TRUE(config.contains_helppackage_flags("aaa/usage_config_test.cc"));
EXPECT_TRUE(
config.contains_helppackage_flags("bbdir/aaa/usage_config_test-main.cc"));
EXPECT_TRUE(config.contains_helppackage_flags(
"
EXPECT_FALSE(config.contains_helppackage_flags("aadir/usage_config_main.cc"));
absl::FlagsUsageConfig empty_config;
empty_config.contains_helppackage_flags = &TstContainsHelppackageFlags;
absl::SetFlagsUsageConfig(empty_config);
EXPECT_TRUE(
flags::GetUsageConfig().contains_helppackage_flags("aaa/main-body.c"));
EXPECT_FALSE(
flags::GetUsageConfig().contains_helppackage_flags("aadir/main-body.c"));
}
TEST_F(FlagsUsageConfigTest, TestVersionString) {
flags::SetProgramInvocationName("usage_config_test");
#ifdef NDEBUG
std::string expected_output = "usage_config_test\n";
#else
std::string expected_output =
"usage_config_test\nDebug build (NDEBUG not #defined)\n";
#endif
EXPECT_EQ(flags::GetUsageConfig().version_string(), expected_output);
absl::FlagsUsageConfig empty_config;
empty_config.version_string = &TstVersionString;
absl::SetFlagsUsageConfig(empty_config);
EXPECT_EQ(flags::GetUsageConfig().version_string(), "program 1.0.0");
}
TEST_F(FlagsUsageConfigTest, TestNormalizeFilename) {
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("a/a.cc"), "a/a.cc");
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("/a/a.cc"), "a/a.cc");
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("/"), "");
absl::FlagsUsageConfig empty_config;
empty_config.normalize_filename = &TstNormalizeFilename;
absl::SetFlagsUsageConfig(empty_config);
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("a/a.cc"), "a.cc");
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("aaa/a.cc"), "a/a.cc");
empty_config.normalize_filename = nullptr;
absl::SetFlagsUsageConfig(empty_config);
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("a/a.cc"), "a/a.cc");
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("/a/a.cc"), "a/a.cc");
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("\\a\\a.cc"), "a\\a.cc");
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("
EXPECT_EQ(flags::GetUsageConfig().normalize_filename("\\\\"), "");
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/usage_config.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/flags/usage_config_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
cfb1aa9b-3c0d-4dc7-a715-a854d74496a6 | cpp | google/tensorstore | chunk_layout | tensorstore/chunk_layout.cc | tensorstore/chunk_layout_test.cc | #include "tensorstore/chunk_layout.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <memory>
#include <ostream>
#include <string_view>
#include <type_traits>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/index_space/dimension_permutation.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/json.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/integer_overflow.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/dimension_indexed.h"
#include "tensorstore/internal/json_binding/enum.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/json_bindable.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/maybe_hard_constraint.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/small_bit_set.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace {
// Convenience aliases for names used throughout this translation unit.
using Usage = ChunkLayout::Usage;
using Storage = ChunkLayout::Storage;
using StoragePtr = ChunkLayout::StoragePtr;
constexpr auto kNumUsages = ChunkLayout::kNumUsages;
namespace jb = tensorstore::internal_json_binding;
// Identifies the scalar (non-per-dimension) constraints for which a single
// hard-constraint bit is tracked in `ChunkLayoutData::hard_constraint_`.
//
// The relative order of the `*_chunk_elements` enumerators is load-bearing:
// `GetChunkElementsHardConstraintBit` computes the bit for a given `Usage` by
// adding the usage index to `write_chunk_elements`.  Do not reorder.
enum class HardConstraintBit {
  inner_order,
  write_chunk_elements,
  read_chunk_elements,
  codec_chunk_elements,
};
// Value traits for per-dimension grid origin constraints.
struct OriginValueTraits {
  using Element = Index;
  constexpr static Index kDefaultValue = kImplicit;
  constexpr static bool IsSoftConstraintValue(Index value) { return false; }
  constexpr static bool IsValid(Index x) {
    return x == kImplicit || IsFiniteIndex(x);
  }
  // Maps an input-domain origin through an output index map of the form
  // `output = offset + stride * input`, checking for overflow.
  static Result<Index> TransformInputValue(Index value, Index offset,
                                           Index stride) {
    // For a negative stride the grid cell boundary shifts by one.
    const Index adjusted = (stride < 0) ? value - 1 : value;
    Index result;
    const bool overflowed =
        internal::MulOverflow(stride, adjusted, &result) ||
        internal::AddOverflow(result, offset, &result);
    if (overflowed || !IsFiniteIndex(result)) {
      return absl::OutOfRangeError(tensorstore::StrCat(
          "Integer overflow transforming input origin ", adjusted,
          " by offset ", offset, " and stride ", stride));
    }
    return result;
  }
  // Maps an output-domain origin back to the input domain, rounding toward
  // the appropriate side depending on the sign of `stride`.
  static Result<Index> TransformOutputValue(Index value, Index offset,
                                            Index stride) {
    Index shifted;
    if (internal::SubOverflow(value, offset, &shifted) ||
        !IsFiniteIndex(shifted)) {
      return absl::OutOfRangeError(tensorstore::StrCat(
          "Integer overflow transforming output origin ", value, " by offset ",
          offset, " and stride ", stride));
    }
    return CeilOfRatio((stride > 0) ? shifted : shifted - 1, stride);
  }
};
// Value traits for per-dimension chunk shape constraints.
struct ShapeValueTraits {
  using Element = Index;
  constexpr static Index kDefaultValue = ChunkLayout::kDefaultShapeValue;
  // A shape value of -1 (presumably "use the full extent" -- confirm against
  // the `ChunkLayout` API documentation) is always demoted to a soft
  // constraint before merging.
  constexpr static bool IsSoftConstraintValue(Index value) {
    return value == -1;
  }
  constexpr static bool IsValid(Index x) { return x == -1 || x >= 0; }
  // Maps an input-domain shape constraint through an output index map with
  // the given stride: the result is `abs(stride) * value`.
  static Result<Index> TransformInputValue(Index value, Index offset,
                                           Index stride) {
    Index new_value;
    // `std::abs(stride)` itself overflows for INT64_MIN, so that stride is
    // rejected up front.
    if (stride == std::numeric_limits<Index>::min() ||
        internal::MulOverflow(std::abs(stride), value, &new_value)) {
      return absl::OutOfRangeError(tensorstore::StrCat(
          "Integer overflow computing abs(", stride, ") * ", value));
    }
    return new_value;
  }
  // Maps an output-domain shape constraint back to the input domain by
  // dividing out the greatest common divisor with the stride.
  static Result<Index> TransformOutputValue(Index value, Index offset,
                                            Index stride) {
    assert(stride != 0);
    const Index gcd = tensorstore::GreatestCommonDivisor(stride, value);
    return value / gcd;
  }
};
// Value traits for per-dimension chunk aspect-ratio constraints.
struct AspectRatioValueTraits {
  using Element = double;
  constexpr static double kDefaultValue = ChunkLayout::kDefaultAspectRatioValue;
  constexpr static bool IsSoftConstraintValue(double value) { return false; }
  constexpr static bool IsValid(double x) { return x >= 0; }
  // Aspect ratios scale with the magnitude of the stride; `offset` has no
  // effect on them.
  static Result<double> TransformInputValue(double value, Index offset,
                                            Index stride) {
    const double stride_magnitude = std::abs(static_cast<double>(stride));
    return value * stride_magnitude;
  }
  static Result<double> TransformOutputValue(double value, Index offset,
                                             Index stride) {
    const double stride_magnitude = std::abs(static_cast<double>(stride));
    return value / stride_magnitude;
  }
};
// Fixed-size header holding the rank and all hard-constraint bit sets.  The
// variable-length per-dimension arrays live in trailing storage managed by
// `ChunkLayout::Storage`, which derives from this struct.
struct ChunkLayoutData {
  int8_t rank_;
  // One bit per `HardConstraintBit` value (inner_order plus the per-usage
  // chunk_elements constraints).
  SmallBitSet<8> hard_constraint_ = false;
  // Per-dimension hard-constraint bits for the grid origin.
  DimensionSet grid_origin_hard_constraint_;
  // Per-dimension hard-constraint bits for each usage's chunk shape.
  DimensionSet chunk_shape_hard_constraint_[kNumUsages];
  // Per-dimension hard-constraint bits for each usage's aspect ratio.
  DimensionSet chunk_aspect_ratio_hard_constraint_[kNumUsages];
  // Target elements-per-chunk for each usage; `kImplicit` means unset.
  Index chunk_elements_[kNumUsages] = {kImplicit, kImplicit, kImplicit};
};
// Returns whether the scalar constraint identified by `bit` is currently a
// hard constraint on `impl`.
bool IsHardConstraint(const ChunkLayoutData& impl, HardConstraintBit bit) {
  const int bit_index = static_cast<int>(bit);
  return impl.hard_constraint_[bit_index];
}
// Marks the scalar constraint identified by `bit` as a hard constraint on
// `impl`.
void SetHardConstraintBit(ChunkLayoutData& impl, HardConstraintBit bit) {
  const int bit_index = static_cast<int>(bit);
  impl.hard_constraint_[bit_index] = true;
}
}
// Reference-counted, variable-length storage for `ChunkLayout` constraints.
//
// The fixed header (`ChunkLayoutData`) is followed, in the same single
// allocation, by four trailing arrays in this order:
//   Index          grid_origin[rank]
//   Index          chunk_shapes[kNumUsages * rank]
//   double         chunk_aspect_ratios[kNumUsages * rank]
//   DimensionIndex inner_order[rank]
// The allocation is obtained with `std::malloc` in `Allocate` and released
// with `std::free` in `intrusive_ptr_decrement`.
struct ChunkLayout::Storage : public ChunkLayoutData {
  // Constructs storage of the given rank with all constraint fields set to
  // their default (unconstrained) values.
  explicit Storage(DimensionIndex rank)
      : ChunkLayoutData{static_cast<int8_t>(rank)} {
    Initialize();
  }
  // Copy constructor: copies the header via the base copy and the trailing
  // arrays with a single memcpy (their element types are trivially
  // copyable).  Note `ref_count_` is re-initialized to 1 by its member
  // initializer, not copied.
  Storage(const Storage& other) : ChunkLayoutData(other) {
    std::memcpy(static_cast<void*>(this + 1),
                static_cast<const void*>(&other + 1),
                TotalBytesAfterHeader(
                    std::max(DimensionIndex(0), DimensionIndex(rank_))));
  }
  // Copies the header fields from `other` but overrides the rank and
  // re-initializes the trailing arrays (used by `EnsureRank` when assigning a
  // concrete rank to storage that previously had `dynamic_rank`).
  Storage(const Storage& other, DimensionIndex new_rank)
      : ChunkLayoutData(other) {
    rank_ = new_rank;
    Initialize();
  }
  // Fills the trailing arrays with their default (unconstrained) values.
  void Initialize() {
    if (DimensionIndex rank = rank_; rank > 0) {
      std::fill_n(this->grid_origin(), NumOriginElements(rank),
                  OriginValueTraits::kDefaultValue);
      std::fill_n(this->chunk_shapes(), NumShapeElements(rank),
                  ShapeValueTraits::kDefaultValue);
      std::fill_n(this->chunk_aspect_ratios(), NumAspectRatioElements(rank),
                  AspectRatioValueTraits::kDefaultValue);
      // A leading -1 marks the inner order as unset; see
      // `SetInnerOrderInternal`, which tests `inner_order[0] != -1`.
      std::fill_n(this->inner_order(), NumInnerOrderElements(rank),
                  static_cast<DimensionIndex>(-1));
    }
  }
  // Element counts of the trailing arrays for a given rank.
  constexpr static size_t NumOriginElements(DimensionIndex rank) {
    return rank;
  }
  constexpr static size_t NumShapeElements(DimensionIndex rank) {
    return kNumUsages * rank;
  }
  constexpr static size_t NumAspectRatioElements(DimensionIndex rank) {
    return kNumUsages * rank;
  }
  constexpr static size_t NumInnerOrderElements(DimensionIndex rank) {
    return rank;
  }
  // Total size in bytes of the trailing arrays for a given rank.
  constexpr static size_t TotalBytesAfterHeader(DimensionIndex rank) {
    return sizeof(Index) * NumOriginElements(rank) +
           sizeof(Index) * NumShapeElements(rank) +
           sizeof(double) * NumAspectRatioElements(rank) +
           sizeof(DimensionIndex) * NumInnerOrderElements(rank);
  }
  // Accessors into the trailing arrays.  `this + 1` is the first byte past
  // the header; the layout relies on none of the trailing element types
  // requiring stricter alignment than `Storage` itself.
  Index* grid_origin() { return reinterpret_cast<Index*>(this + 1); }
  Index* chunk_shapes() { return grid_origin() + rank_; }
  tensorstore::span<Index> chunk_shape(size_t usage_index) {
    return {chunk_shapes() + rank_ * usage_index, rank_};
  }
  double* chunk_aspect_ratios() {
    return reinterpret_cast<double*>(chunk_shapes() + NumShapeElements(rank_));
  }
  tensorstore::span<double> chunk_aspect_ratio(size_t usage_index) {
    return {chunk_aspect_ratios() + rank_ * usage_index, rank_};
  }
  DimensionIndex* inner_order() {
    return reinterpret_cast<DimensionIndex*>(chunk_aspect_ratios() +
                                             NumAspectRatioElements(rank_));
  }
  // Allocates raw (uninitialized) storage sized for `rank`; the caller must
  // placement-new a `Storage` into the result before use.
  static StoragePtr Allocate(DimensionIndex rank) {
    rank = std::max(rank, DimensionIndex(0));
    assert(rank < kMaxRank);
    const size_t total_bytes =
        sizeof(Storage) +
        TotalBytesAfterHeader(rank);
    StoragePtr ptr(static_cast<Storage*>(std::malloc(total_bytes)),
                   internal::adopt_object_ref);
    return ptr;
  }
  // Copy-on-write helper: guarantees `ptr` is non-null and uniquely owned,
  // allocating a fresh default-initialized `Storage` or copying the shared
  // one as needed.  Displaced storage is moved into
  // `storage_to_be_destroyed` rather than destroyed immediately -- presumably
  // so constraint views the caller still holds into the old storage remain
  // valid for the duration of the call (TODO: confirm against callers).
  static Storage& EnsureUnique(StoragePtr& ptr, DimensionIndex rank,
                               StoragePtr& storage_to_be_destroyed) {
    if (!ptr) {
      ptr = Allocate(rank);
      new (ptr.get()) Storage(rank);
    } else if (ptr->ref_count_.load(std::memory_order_acquire) != 1) {
      auto new_ptr = Allocate(ptr->rank_);
      new (new_ptr.get()) Storage(*ptr);
      storage_to_be_destroyed = std::move(ptr);
      ptr = std::move(new_ptr);
    }
    return *ptr;
  }
  // Intrusive reference count manipulated by `intrusive_ptr_increment` /
  // `intrusive_ptr_decrement`.
  std::atomic<size_t> ref_count_{1};
};
// Intrusive-pointer hook: adds a reference to `Storage`.
void intrusive_ptr_increment(Storage* p) {
  p->ref_count_.fetch_add(1, std::memory_order_acq_rel);
}
// Intrusive-pointer hook: drops a reference and frees the storage when the
// last reference is released.  `std::free` matches the `std::malloc` in
// `Storage::Allocate`; no destructor is invoked, which is only correct while
// `Storage`'s members remain trivially destructible (atomics/bit sets).
void intrusive_ptr_decrement(Storage* p) {
  if (p->ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
    std::free(p);
  }
}
namespace {
// Resets every hard-constraint indicator on `impl` (scalar bits, grid-origin
// bits, and the per-usage shape/aspect-ratio bit sets).
void ClearHardConstraintBits(Storage& impl) {
  impl.hard_constraint_ = false;
  impl.grid_origin_hard_constraint_ = false;
  for (auto& usage_dims : impl.chunk_shape_hard_constraint_) {
    usage_dims = false;
  }
  for (auto& usage_dims : impl.chunk_aspect_ratio_hard_constraint_) {
    usage_dims = false;
  }
}
// Returns whether `impl` carries any hard constraints.  Note this inspects
// only the inner-order bit, the grid-origin bits, and the per-usage chunk
// shape bits; aspect-ratio and chunk-elements hard constraints are not
// consulted (matches the original behavior -- verify intent with callers).
bool HasAnyHardConstraints(const Storage& impl) {
  if (IsHardConstraint(impl, HardConstraintBit::inner_order) ||
      impl.grid_origin_hard_constraint_) {
    return true;
  }
  for (const auto& usage_dims : impl.chunk_shape_hard_constraint_) {
    if (usage_dims) return true;
  }
  return false;
}
// Constructs the error returned when a constraint's rank conflicts with the
// rank already recorded in the layout.
absl::Status RankMismatchError(DimensionIndex new_rank,
                               DimensionIndex existing_rank) {
  auto message =
      tensorstore::StrCat("Rank ", new_rank, " does not match existing rank ",
                          existing_rank);
  return absl::InvalidArgumentError(message);
}
// Ensures `ptr` refers to uniquely-owned storage with the given rank.
//
// If `ptr` is null or already has `rank`, this is a copy-on-write
// `EnsureUnique`.  If `ptr` has `dynamic_rank` (unspecified), the storage is
// re-allocated at `rank`, preserving the scalar header fields but resetting
// the per-dimension arrays.  Any other existing rank is an error.
//
// `storage_to_be_destroyed` receives displaced storage so its destruction is
// deferred past the caller's use -- presumably so views the caller holds into
// the old storage stay valid (TODO: confirm against callers).
absl::Status EnsureRank(StoragePtr& ptr, DimensionIndex rank,
                        StoragePtr& storage_to_be_destroyed) {
  TENSORSTORE_RETURN_IF_ERROR(tensorstore::ValidateRank(rank));
  if (!ptr || ptr->rank_ == rank) {
    Storage::EnsureUnique(ptr, rank, storage_to_be_destroyed);
    return absl::OkStatus();
  }
  if (ptr->rank_ == dynamic_rank) {
    auto new_ptr = Storage::Allocate(rank);
    new (new_ptr.get()) Storage(*ptr, rank);
    storage_to_be_destroyed = std::move(ptr);
    ptr = std::move(new_ptr);
    return absl::OkStatus();
  }
  return RankMismatchError(rank, ptr->rank_);
}
// Constructs the error returned when two hard constraints disagree.
template <typename T, typename U>
absl::Status MismatchError(const T& existing_value, const U& new_value) {
  auto message =
      tensorstore::StrCat("New hard constraint (", new_value,
                          ") does not match existing hard constraint (",
                          existing_value, ")");
  return absl::InvalidArgumentError(message);
}
// Merges the constraint vector `in_vector` into `out_vector` /
// `out_hard_constraint`.
//
// Two passes: the first validates that any dimension constrained hard on
// both sides agrees (returning an error without modifying the output if
// not); the second copies each non-default input value into the output when
// it is a hard constraint or the output dimension is still at its default.
template <typename Traits>
absl::Status MergeVectorInto(
    MaybeHardConstraintSpan<typename Traits::Element> in_vector,
    typename Traits::Element* out_vector, DimensionSet& out_hard_constraint) {
  using Element = typename Traits::Element;
  DimensionIndex rank = in_vector.size();
  // Pass 1: consistency check restricted to dimensions hard on both sides.
  if (DimensionSet dims_to_check =
          in_vector.hard_constraint & out_hard_constraint;
      dims_to_check) {
    for (DimensionIndex i = 0; i < rank; ++i) {
      if (!dims_to_check[i]) continue;
      Element x = in_vector[i];
      // A default-valued input imposes nothing, even as a hard constraint.
      if (x != Traits::kDefaultValue && out_vector[i] != x) {
        return absl::InvalidArgumentError(tensorstore::StrCat(
            "New hard constraint (", x, ") for dimension ", i,
            " does not match existing hard constraint (", out_vector[i], ")"));
      }
    }
  }
  // Pass 2: merge.  A hard input always wins; a soft input fills only unset
  // output dimensions.  The hard bit is sticky (never cleared here).
  for (DimensionIndex i = 0; i < rank; ++i) {
    Element x = in_vector[i];
    if (x == Traits::kDefaultValue) continue;
    const bool in_hard_constraint_value = in_vector.hard_constraint[i];
    if (in_hard_constraint_value || out_vector[i] == Traits::kDefaultValue) {
      out_vector[i] = x;
      out_hard_constraint[i] =
          out_hard_constraint[i] || in_hard_constraint_value;
    }
  }
  return absl::OkStatus();
}
// Validates each element of `in_vector` with `Traits::IsValid`, then merges
// it into the output via `MergeVectorInto`.
//
// Elements for which `Traits::IsSoftConstraintValue` returns true are
// demoted to soft constraints first.  `in_vector` is taken by value, so the
// demotion mutates only this function's local copy of the hard-constraint
// bit set, not the caller's.
template <typename Traits>
absl::Status ValidateAndMergeVectorInto(
    MaybeHardConstraintSpan<typename Traits::Element> in_vector,
    typename Traits::Element* out_vector, DimensionSet& out_hard_constraint) {
  using Element = typename Traits::Element;
  DimensionIndex rank = in_vector.size();
  if (rank == 0) return absl::OkStatus();
  for (DimensionIndex i = 0; i < in_vector.size(); ++i) {
    const Element value = in_vector[i];
    if (!Traits::IsValid(value)) {
      // The message prints the whole vector (not just the bad element) for
      // context.
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Invalid value for dimension ", i, ": ", in_vector));
    }
    if (Traits::IsSoftConstraintValue(value)) {
      in_vector.hard_constraint[i] = false;
    }
  }
  return MergeVectorInto<Traits>(in_vector, out_vector, out_hard_constraint);
}
// Returns the chunk-shape constraint view for the given usage, or an empty
// view if no storage is present or the rank is not yet positive.
ChunkLayout::ChunkShapeBase GetChunkShape(const ChunkLayout& self,
                                          Usage usage) {
  Storage* const impl = self.storage_.get();
  if (impl == nullptr || impl->rank_ <= 0) {
    return ChunkLayout::ChunkShapeBase();
  }
  const size_t usage_index = static_cast<size_t>(usage);
  return ChunkLayout::ChunkShapeBase(
      impl->chunk_shape(usage_index),
      impl->chunk_shape_hard_constraint_[usage_index]);
}
// Returns the aspect-ratio constraint view for the given usage, or an empty
// view if no storage is present or the rank is not yet positive.
ChunkLayout::ChunkAspectRatioBase GetChunkAspectRatio(const ChunkLayout& self,
                                                      Usage usage) {
  Storage* const impl = self.storage_.get();
  if (impl == nullptr || impl->rank_ <= 0) {
    return ChunkLayout::ChunkAspectRatioBase();
  }
  const size_t usage_index = static_cast<size_t>(usage);
  return ChunkLayout::ChunkAspectRatioBase(
      impl->chunk_aspect_ratio(usage_index),
      impl->chunk_aspect_ratio_hard_constraint_[usage_index]);
}
// Maps a chunk grid `usage` to its `chunk_elements` hard-constraint bit.
// Relies on the per-usage elements bits being laid out consecutively,
// starting at `HardConstraintBit::write_chunk_elements`, in the same order
// as the `Usage` enumerators.
constexpr inline HardConstraintBit GetChunkElementsHardConstraintBit(
    Usage usage) {
  return static_cast<HardConstraintBit>(
      static_cast<int>(HardConstraintBit::write_chunk_elements) +
      static_cast<int>(usage));
}
// Returns the chunk `elements` constraint recorded for `usage`, or an
// invalid value when no constraint storage has been allocated.
ChunkLayout::ChunkElementsBase GetChunkElements(const ChunkLayout& self,
                                                Usage usage) {
  auto* const impl = self.storage_.get();
  if (impl == nullptr) return ChunkLayout::ChunkElementsBase();
  const auto idx = static_cast<size_t>(usage);
  const bool is_hard =
      IsHardConstraint(*impl, GetChunkElementsHardConstraintBit(usage));
  return ChunkLayout::ChunkElementsBase(impl->chunk_elements_[idx], is_hard);
}
// Bundles the shape, aspect_ratio, and elements constraints for `usage`
// into a single `GridView`.
ChunkLayout::GridView GetGridConstraints(const ChunkLayout& self, Usage usage) {
  auto shape = GetChunkShape(self, usage);
  auto aspect_ratio = GetChunkAspectRatio(self, usage);
  auto elements = GetChunkElements(self, usage);
  return ChunkLayout::GridView(shape, aspect_ratio, elements);
}
// Merges the inner_order permutation `value` into `self`.
//
// Merge semantics:
// - An existing order (first element != -1 marks "set") is kept when the new
//   value is only a soft constraint.
// - Two differing hard-constraint orders yield a mismatch error.
// - Otherwise the new order is copied in, and the inner_order
//   hard-constraint bit is set when `value` is a hard constraint.
absl::Status SetInnerOrderInternal(ChunkLayout& self,
                                   ChunkLayout::InnerOrder value,
                                   StoragePtr& storage_to_be_destroyed) {
  if (!IsValidPermutation(value)) {
    return absl::InvalidArgumentError(
        tensorstore::StrCat("Invalid permutation: ", value));
  }
  const DimensionIndex rank = value.size();
  TENSORSTORE_RETURN_IF_ERROR(
      EnsureRank(self.storage_, rank, storage_to_be_destroyed));
  auto& impl = *self.storage_;
  DimensionIndex* inner_order = impl.inner_order();
  if (inner_order[0] != -1) {  // An order is already set.
    if (!value.hard_constraint) return absl::OkStatus();
    if (IsHardConstraint(impl, HardConstraintBit::inner_order)) {
      if (!std::equal(value.data(), value.data() + rank, inner_order)) {
        return MismatchError(
            tensorstore::span<const DimensionIndex>(inner_order, rank),
            tensorstore::span<const DimensionIndex>(value));
      }
      return absl::OkStatus();
    }
  }
  std::copy_n(value.begin(), rank, inner_order);
  if (value.hard_constraint) {
    SetHardConstraintBit(impl, HardConstraintBit::inner_order);
  }
  return absl::OkStatus();
}
// Merges `value` into the stored grid_origin constraint, first ensuring the
// storage has rank `value.size()`.
absl::Status SetGridOriginInternal(ChunkLayout& self,
                                   MaybeHardConstraintSpan<Index> value,
                                   StoragePtr& storage_to_be_destroyed) {
  TENSORSTORE_RETURN_IF_ERROR(
      EnsureRank(self.storage_, value.size(), storage_to_be_destroyed));
  auto& impl = *self.storage_;
  return ValidateAndMergeVectorInto<OriginValueTraits>(
      value, impl.grid_origin(), impl.grid_origin_hard_constraint_);
}
// Merges `value` into the stored chunk shape constraint for `usage`, first
// ensuring the storage has rank `value.size()`.
absl::Status SetChunkShapeInternal(ChunkLayout& self,
                                   MaybeHardConstraintSpan<Index> value,
                                   Usage usage,
                                   StoragePtr& storage_to_be_destroyed) {
  TENSORSTORE_RETURN_IF_ERROR(
      EnsureRank(self.storage_, value.size(), storage_to_be_destroyed));
  const auto idx = static_cast<size_t>(usage);
  auto& impl = *self.storage_;
  return ValidateAndMergeVectorInto<ShapeValueTraits>(
      value, impl.chunk_shape(idx).data(),
      impl.chunk_shape_hard_constraint_[idx]);
}
// Like `SetChunkShapeInternal`, but annotates any error with the usage kind
// (e.g. "Error setting write_chunk shape").
absl::Status SetChunkShape(ChunkLayout& self,
                           MaybeHardConstraintSpan<Index> value, Usage usage,
                           StoragePtr& storage_to_be_destroyed) {
  TENSORSTORE_RETURN_IF_ERROR(
      SetChunkShapeInternal(self, value, usage, storage_to_be_destroyed),
      tensorstore::MaybeAnnotateStatus(
          _, tensorstore::StrCat("Error setting ", usage, "_chunk shape")));
  return absl::OkStatus();
}
// Merges `value` into the stored chunk aspect_ratio constraint for `usage`,
// first ensuring the storage has rank `value.size()`.
absl::Status SetChunkAspectRatioInternal(ChunkLayout& self,
                                         MaybeHardConstraintSpan<double> value,
                                         Usage usage,
                                         StoragePtr& storage_to_be_destroyed) {
  TENSORSTORE_RETURN_IF_ERROR(
      EnsureRank(self.storage_, value.size(), storage_to_be_destroyed));
  const auto idx = static_cast<size_t>(usage);
  auto& impl = *self.storage_;
  return ValidateAndMergeVectorInto<AspectRatioValueTraits>(
      value, impl.chunk_aspect_ratio(idx).data(),
      impl.chunk_aspect_ratio_hard_constraint_[idx]);
}
// Like `SetChunkAspectRatioInternal`, but annotates any error with the usage
// kind (e.g. "Error setting read_chunk aspect_ratio").
absl::Status SetChunkAspectRatio(ChunkLayout& self,
                                 MaybeHardConstraintSpan<double> value,
                                 Usage usage,
                                 StoragePtr& storage_to_be_destroyed) {
  TENSORSTORE_RETURN_IF_ERROR(
      SetChunkAspectRatioInternal(self, value, usage, storage_to_be_destroyed),
      tensorstore::MaybeAnnotateStatus(
          _,
          tensorstore::StrCat("Error setting ", usage, "_chunk aspect_ratio")));
  return absl::OkStatus();
}
// Merges the `elements` constraint `value` into `elements` /
// `is_hard_constraint`.  `HardConstraintRef` may be a plain `bool&` or a
// bitset reference proxy, hence the template parameter.
//
// Merge rules:
// - An invalid (unset) `value` is a no-op.
// - A negative element count is rejected.
// - An existing value (!= kImplicit) is only overridden by a hard
//   constraint; two differing hard constraints are a mismatch error.
template <typename HardConstraintRef>
absl::Status SetChunkElementsInternal(Index& elements,
                                      HardConstraintRef is_hard_constraint,
                                      ChunkLayout::ChunkElementsBase value) {
  if (value.valid()) {
    if (value < 0) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat("Invalid value: ", value.value));
    }
    if (elements != kImplicit) {
      if (!value.hard_constraint) return absl::OkStatus();
      if (is_hard_constraint && elements != value.value) {
        return MismatchError(elements, value.value);
      }
    }
    elements = value.value;
    if (value.hard_constraint) {
      is_hard_constraint = true;
    }
  }
  return absl::OkStatus();
}
// Applies an `elements` constraint to the storage slot for `usage`, first
// ensuring `self` has its own (unshared) storage via `Storage::EnsureUnique`.
absl::Status SetChunkElementsInternal(ChunkLayout& self,
                                      ChunkLayout::ChunkElementsBase value,
                                      Usage usage,
                                      StoragePtr& storage_to_be_destroyed) {
  if (!value.valid()) return absl::OkStatus();
  auto& impl = Storage::EnsureUnique(self.storage_, dynamic_rank,
                                     storage_to_be_destroyed);
  return SetChunkElementsInternal(
      impl.chunk_elements_[static_cast<size_t>(usage)],
      impl.hard_constraint_[static_cast<size_t>(
          GetChunkElementsHardConstraintBit(usage))],
      value);
}
// Like `SetChunkElementsInternal`, but annotates any error with the usage
// kind (e.g. "Error setting write_chunk elements").
absl::Status SetChunkElements(ChunkLayout& self,
                              ChunkLayout::ChunkElementsBase value, Usage usage,
                              StoragePtr& storage_to_be_destroyed) {
  TENSORSTORE_RETURN_IF_ERROR(
      SetChunkElementsInternal(self, value, usage, storage_to_be_destroyed),
      tensorstore::MaybeAnnotateStatus(
          _, tensorstore::StrCat("Error setting ", usage, "_chunk elements")));
  return absl::OkStatus();
}
// Applies each valid component (shape, aspect_ratio, elements) of `value`
// to the constraints for `usage`, stopping at the first error.
absl::Status SetGridConstraints(ChunkLayout& self,
                                const ChunkLayout::GridView& value, Usage usage,
                                StoragePtr& storage_to_be_destroyed) {
  const auto shape = value.shape();
  if (shape.valid()) {
    TENSORSTORE_RETURN_IF_ERROR(
        SetChunkShape(self, shape, usage, storage_to_be_destroyed));
  }
  const auto aspect_ratio = value.aspect_ratio();
  if (aspect_ratio.valid()) {
    TENSORSTORE_RETURN_IF_ERROR(SetChunkAspectRatio(self, aspect_ratio, usage,
                                                    storage_to_be_destroyed));
  }
  const auto elements = value.elements();
  if (elements.valid()) {
    TENSORSTORE_RETURN_IF_ERROR(
        SetChunkElements(self, elements, usage, storage_to_be_destroyed));
  }
  return absl::OkStatus();
}
absl::Status SetChunkLayout(ChunkLayout& self, ChunkLayout other,
bool hard_constraint) {
if (!other.storage_) return absl::OkStatus();
if (!self.storage_) {
self.storage_ = std::move(other.storage_);
if (!hard_constraint) {
StoragePtr storage_to_be_destroyed;
ClearHardConstraintBits(Storage::EnsureUnique(
self.storage_, self.storage_->rank_, storage_to_be_destroyed));
}
return absl::OkStatus();
}
{
auto inner_order = other.inner_order();
if (!hard_constraint) inner_order.hard_constraint = false;
TENSORSTORE_RETURN_IF_ERROR(self.Set(inner_order));
}
{
auto grid_origin = other.grid_origin();
if (!hard_constraint) grid_origin.hard_constraint = false;
TENSORSTORE_RETURN_IF_ERROR(self.Set(grid_origin));
}
StoragePtr storage_to_be_destroyed;
for (Usage usage : ChunkLayout::kUsages) {
TENSORSTORE_RETURN_IF_ERROR(SetGridConstraints(
self,
ChunkLayout::GridView(GetGridConstraints(other, usage),
hard_constraint),
usage, storage_to_be_destroyed));
}
return absl::OkStatus();
}
}
// Returns the rank constraint, or `dynamic_rank` if no rank has been set.
DimensionIndex ChunkLayout::rank() const {
  return storage_ ? storage_->rank_ : dynamic_rank;
}
// Returns whether any hard constraint of any kind has been recorded.
bool ChunkLayout::HasHardConstraints() const {
  return storage_ && HasAnyHardConstraints(*storage_);
}
// Constrains the layout to rank `value.rank`; a `dynamic_rank` value is a
// no-op.
absl::Status ChunkLayout::Set(RankConstraint value) {
  if (value.rank != dynamic_rank) {
    StoragePtr storage_to_be_destroyed;
    return EnsureRank(storage_, value.rank, storage_to_be_destroyed);
  }
  return absl::OkStatus();
}
// Returns the inner_order constraint, or an invalid value if unset.  A
// leading element of -1 in the stored array marks an unset permutation.
ChunkLayout::InnerOrder ChunkLayout::inner_order() const {
  if (!storage_) return InnerOrder();
  const DimensionIndex rank = storage_->rank_;
  if (rank <= 0) return InnerOrder();
  const DimensionIndex* order = storage_->inner_order();
  if (order[0] == -1) return InnerOrder();
  return InnerOrder(
      tensorstore::span(order, rank),
      IsHardConstraint(*storage_, HardConstraintBit::inner_order));
}
// Merges the inner dimension order constraint `value`, annotating any error.
absl::Status ChunkLayout::Set(InnerOrder value) {
  if (!value.valid()) return absl::OkStatus();
  StoragePtr storage_to_be_destroyed;
  TENSORSTORE_RETURN_IF_ERROR(
      SetInnerOrderInternal(*this, value, storage_to_be_destroyed),
      tensorstore::MaybeAnnotateStatus(_, "Error setting inner_order"));
  return absl::OkStatus();
}
// Returns the grid_origin constraint, or an invalid value when the rank is
// unknown or non-positive.
ChunkLayout::GridOrigin ChunkLayout::grid_origin() const {
  if (!storage_) return GridOrigin();
  const DimensionIndex rank = storage_->rank_;
  if (rank <= 0) return GridOrigin();
  return GridOrigin(
      tensorstore::span<const Index>(storage_->grid_origin(), rank),
      storage_->grid_origin_hard_constraint_);
}
// Merges the grid origin constraint `value`, annotating any error.
absl::Status ChunkLayout::Set(GridOrigin value) {
  if (!value.valid()) return absl::OkStatus();
  StoragePtr storage_to_be_destroyed;
  TENSORSTORE_RETURN_IF_ERROR(
      SetGridOriginInternal(*this, value, storage_to_be_destroyed),
      tensorstore::MaybeAnnotateStatus(_, "Error setting grid_origin"));
  return absl::OkStatus();
}
// Defines the per-usage setter and accessors (`Set`, `NAME_chunk`,
// `NAME_chunk_shape`, `NAME_chunk_aspect_ratio`, `NAME_chunk_elements`) for
// each concrete chunk grid usage.
//
// BUG FIX: the last line of the macro body previously ended with a line
// continuation (`\`), which spliced the first invocation below into the
// macro definition itself — so the `write` accessors were never emitted and
// the unexpanded macro name leaked into subsequent expansions.  The macro
// body must end WITHOUT a trailing backslash.
#define TENSORSTORE_INTERNAL_DO_DEFINE_FOR_USAGE(NAME, USAGE)                 \
  template <>                                                                 \
  absl::Status ChunkLayout::Set<USAGE>(const GridViewFor<USAGE>& value) {     \
    StoragePtr storage_to_be_destroyed;                                       \
    return SetGridConstraints(*this, value, USAGE, storage_to_be_destroyed);  \
  }                                                                           \
  ChunkLayout::GridViewFor<USAGE> ChunkLayout::NAME##_chunk() const {         \
    return ChunkLayout::GridViewFor<USAGE>(GetGridConstraints(*this, USAGE)); \
  }                                                                           \
  ChunkLayout::ChunkShapeFor<USAGE> ChunkLayout::NAME##_chunk_shape() const { \
    return ChunkLayout::ChunkShapeFor<USAGE>(GetChunkShape(*this, USAGE));    \
  }                                                                           \
  ChunkLayout::ChunkAspectRatioFor<USAGE>                                     \
  ChunkLayout::NAME##_chunk_aspect_ratio() const {                            \
    return ChunkLayout::ChunkAspectRatioFor<USAGE>(                           \
        GetChunkAspectRatio(*this, USAGE));                                   \
  }                                                                           \
  ChunkLayout::ChunkElementsFor<USAGE> ChunkLayout::NAME##_chunk_elements()   \
      const {                                                                 \
    return ChunkLayout::ChunkElementsFor<USAGE>(                              \
        GetChunkElements(*this, USAGE));                                      \
  }
TENSORSTORE_INTERNAL_DO_DEFINE_FOR_USAGE(write, Usage::kWrite)
TENSORSTORE_INTERNAL_DO_DEFINE_FOR_USAGE(read, Usage::kRead)
TENSORSTORE_INTERNAL_DO_DEFINE_FOR_USAGE(codec, Usage::kCodec)
#undef TENSORSTORE_INTERNAL_DO_DEFINE_FOR_USAGE
// Setter for a grid view whose usage is not fixed at compile time.  When the
// runtime usage is also unspecified, the constraints are applied to both the
// write and read grids, and only the aspect_ratio is forwarded to the codec
// grid.  Otherwise the constraints are applied to just the runtime usage.
template <>
absl::Status ChunkLayout::Set<ChunkLayout::kUnspecifiedUsage>(
    const GridViewFor<ChunkLayout::kUnspecifiedUsage>& value) {
  StoragePtr storage_to_be_destroyed;
  if (value.usage() == kUnspecifiedUsage) {
    TENSORSTORE_RETURN_IF_ERROR(SetGridConstraints(*this, value, Usage::kWrite,
                                                   storage_to_be_destroyed));
    TENSORSTORE_RETURN_IF_ERROR(SetGridConstraints(*this, value, Usage::kRead,
                                                   storage_to_be_destroyed));
    TENSORSTORE_RETURN_IF_ERROR(
        SetGridConstraints(*this, CodecChunk(value.aspect_ratio()),
                           Usage::kCodec, storage_to_be_destroyed));
    return absl::OkStatus();
  }
  return SetGridConstraints(*this, value, value.usage(),
                            storage_to_be_destroyed);
}
// Returns the constraints for a concrete usage; `usage` must not be
// `kUnspecifiedUsage`.
ChunkLayout::GridView ChunkLayout::operator[](Usage usage) const {
  assert(usage != kUnspecifiedUsage);
  return GetGridConstraints(*this, usage);
}
// Takes over `layout`'s constraints, demoting them all to soft constraints
// when `hard_constraint` is false.
ChunkLayout::ChunkLayout(ChunkLayout layout, bool hard_constraint) {
  storage_ = std::move(layout.storage_);
  if (!hard_constraint && storage_) {
    StoragePtr storage_to_be_destroyed;
    ClearHardConstraintBits(Storage::EnsureUnique(storage_, storage_->rank_,
                                                  storage_to_be_destroyed));
  }
}
// Merges all constraints of `value` into `*this` as hard constraints.
absl::Status ChunkLayout::Set(ChunkLayout value) {
  // `value` was taken by value, so its storage can be donated instead of
  // copied; this avoids a reference-count bump and a later copy-on-write of
  // the shared storage inside SetChunkLayout.
  return SetChunkLayout(*this, std::move(value), /*hard_constraint=*/true);
}
namespace {
template <typename BaseBinder>
constexpr auto HardSoftMemberPairJsonBinder(const char* name,
const char* soft_constraint_name,
BaseBinder base_binder) {
return jb::Sequence(
jb::Member(name, base_binder(true)),
jb::Member(soft_constraint_name, base_binder(false)));
}
// JSON binder for a fixed-capacity C array whose logical length is tracked
// separately in `rank`.  The size callback is marked unreachable —
// presumably this binder is only ever used for loading; confirm before
// reusing it for saving.
template <typename ElementBinder>
constexpr auto DimensionIndexedFixedArrayJsonBinder(
    DimensionIndex& rank, ElementBinder element_binder) {
  return jb::DimensionIndexedVector(
      &rank,
      [](auto& x) -> size_t { ABSL_UNREACHABLE(); },
      [](auto& x, size_t n) { return absl::OkStatus(); },
      [](auto& x, size_t i) -> decltype(auto) { return (&x)[i]; },
      element_binder);
}
// Returns whether every element of `vec` equals the traits' default value,
// i.e. the vector imposes no constraint.
template <typename Traits>
bool VectorIsDefault(tensorstore::span<const typename Traits::Element> vec) {
  for (const auto& element : vec) {
    if (element != Traits::kDefaultValue) return false;
  }
  return true;
}
// Returns whether no shape, aspect_ratio, or elements constraint has been
// set for `usage`.
bool GridConstraintsUnset(const ChunkLayout& self, Usage usage) {
  if (!VectorIsDefault<ShapeValueTraits>(GetChunkShape(self, usage))) {
    return false;
  }
  if (!VectorIsDefault<AspectRatioValueTraits>(
          GetChunkAspectRatio(self, usage))) {
    return false;
  }
  return !GetChunkElements(self, usage).valid();
}
// Returns whether every rank-dependent constraint (inner_order, grid_origin,
// chunk shapes, chunk aspect_ratios) still holds its default value.
bool AllRankDependentConstraintsUnset(Storage& storage) {
  const DimensionIndex rank = storage.rank_;
  if (rank <= 0) return true;
  // inner_order[0] == -1 marks an unset permutation.
  if (storage.inner_order()[0] != -1) return false;
  if (auto* origin = storage.grid_origin();
      std::any_of(origin, origin + rank, [](auto x) {
        return x != OriginValueTraits::kDefaultValue;
      })) {
    return false;
  }
  // Shapes/aspect ratios are stored contiguously across all usages.
  if (auto* shapes = storage.chunk_shapes();
      std::any_of(shapes, shapes + Storage::NumShapeElements(rank), [](auto x) {
        return x != ShapeValueTraits::kDefaultValue;
      })) {
    return false;
  }
  if (auto* aspect_ratios = storage.chunk_aspect_ratios(); std::any_of(
          aspect_ratios, aspect_ratios + Storage::NumAspectRatioElements(rank),
          [](auto x) { return x != AspectRatioValueTraits::kDefaultValue; })) {
    return false;
  }
  return true;
}
// Returns whether `self` holds no constraints at all (i.e. is equivalent to
// a default-constructed ChunkLayout).
bool AllConstraintsUnset(const ChunkLayout& self) {
  if (!self.storage_) return true;
  auto& storage = *self.storage_;
  if (storage.rank_ != dynamic_rank) return false;
  if (std::any_of(storage.chunk_elements_, storage.chunk_elements_ + kNumUsages,
                  [](Index x) { return x != kImplicit; })) {
    return false;
  }
  // NOTE(review): at this point rank_ == dynamic_rank, so `rank <= 0` always
  // holds and the final call appears unreachable — confirm intent.
  const DimensionIndex rank = storage.rank_;
  if (rank <= 0) return true;
  return AllRankDependentConstraintsUnset(storage);
}
// Builds a hard/soft JSON binder for a per-dimension constraint vector
// (grid_origin, chunk shape, aspect_ratio).  The returned factory takes a
// `hard_constraint` flag; on load it parses the array and forwards it to
// `setter`, on save it emits only the elements whose hard-constraint status
// matches the flag (the rest are written as the traits' default so they map
// to `null`).
template <typename Wrapper, typename Traits, typename Getter, typename Setter>
constexpr auto VectorJsonBinder(Getter getter, Setter setter) {
  using ElementType = typename Wrapper::value_type;
  return [=](bool hard_constraint) {
    return [=](auto is_loading, const auto& options, auto* obj, auto* j) {
      // Maps the traits' default value to JSON null and back.
      constexpr auto element_binder = jb::MapValue(
          jb::DefaultBinder<>, std::pair(Traits::kDefaultValue, nullptr));
      if constexpr (is_loading) {
        if (j->is_discarded()) return absl::OkStatus();
        ElementType value[kMaxRank];
        DimensionIndex rank = dynamic_rank;
        TENSORSTORE_RETURN_IF_ERROR(DimensionIndexedFixedArrayJsonBinder(
            rank, element_binder)(is_loading, options, &value[0], j));
        return setter(*obj,
                      Wrapper(tensorstore::span<const ElementType>(value, rank),
                              hard_constraint));
      } else {
        auto vec = getter(*obj);
        if (!vec.valid()) {
          return absl::OkStatus();
        }
        // Keep only elements matching this binder's hard/soft flavor.
        ElementType new_vec[kMaxRank];
        bool has_value = false;
        for (DimensionIndex i = 0; i < vec.size(); ++i) {
          if (vec.hard_constraint[i] == hard_constraint &&
              vec[i] != Traits::kDefaultValue) {
            new_vec[i] = vec[i];
            has_value = true;
          } else {
            new_vec[i] = Traits::kDefaultValue;
          }
        }
        if (!has_value) return absl::OkStatus();
        tensorstore::span<const ElementType> new_span(new_vec, vec.size());
        return jb::Array(element_binder)(is_loading, options, &new_span, j);
      }
    };
  };
}
// JSON binder for the inner_order permutation.  On load, parses an array of
// dimension indices and merges it with the given hard/soft flavor; on save,
// emits the order only when its hard-constraint status matches
// `hard_constraint`.
constexpr auto InnerOrderJsonBinder(bool hard_constraint) {
  return [=](auto is_loading, const auto& options, auto* obj, auto* j) {
    if constexpr (is_loading) {
      if (j->is_discarded() || j->is_null()) {
        return absl::OkStatus();
      }
      DimensionIndex value[kMaxRank];
      DimensionIndex rank = dynamic_rank;
      TENSORSTORE_RETURN_IF_ERROR(DimensionIndexedFixedArrayJsonBinder(
          rank, jb::Integer<DimensionIndex>(0, kMaxRank - 1))(
          is_loading, options, &value[0], j));
      StoragePtr storage_to_be_destroyed;
      return SetInnerOrderInternal(
          *obj,
          ChunkLayout::InnerOrder(
              tensorstore::span<const DimensionIndex>(value, rank),
              hard_constraint),
          storage_to_be_destroyed);
    } else {
      auto vec = obj->inner_order();
      if (vec.valid() && vec.hard_constraint == hard_constraint) {
        *j = static_cast<::nlohmann::json>(vec);
      }
      return absl::OkStatus();
    }
  };
}
// JSON binder for a standalone `ChunkLayout::Grid` (shape / aspect_ratio /
// elements, each as a hard + soft member pair), independent of any usage.
constexpr auto StandaloneGridJsonBinder() {
  return jb::Object(
      HardSoftMemberPairJsonBinder(
          "shape", "shape_soft_constraint",
          VectorJsonBinder<ChunkLayout::ChunkShapeBase, ShapeValueTraits>(
              [](auto& self) { return self.shape(); },
              [](auto& self, ChunkLayout::ChunkShapeBase value) {
                return self.Set(value);
              })),
      HardSoftMemberPairJsonBinder(
          "aspect_ratio", "aspect_ratio_soft_constraint",
          VectorJsonBinder<ChunkLayout::ChunkAspectRatioBase,
                           AspectRatioValueTraits>(
              [](auto& self) { return self.aspect_ratio(); },
              [](auto& self, ChunkLayout::ChunkAspectRatioBase value) {
                return self.Set(value);
              })),
      HardSoftMemberPairJsonBinder(
          "elements", "elements_soft_constraint", [](bool hard_constraint) {
            return jb::GetterSetter(
                [=](auto& self) -> Index {
                  auto value = self.elements();
                  if (value.hard_constraint != hard_constraint)
                    return kImplicit;
                  return value.value;
                },
                [=](auto& self, Index value) {
                  return self.Set(
                      ChunkLayout::ChunkElementsBase(value, hard_constraint));
                },
                // kImplicit marks "unset" and is never serialized.
                jb::DefaultPredicate<jb::kNeverIncludeDefaults>(
                    [](auto* obj) { *obj = kImplicit; },
                    [](auto*
                           obj) { return *obj == kImplicit; }));
          }));
}
// JSON binder for the grid constraints of one usage inside a ChunkLayout.
// For `kUnspecifiedUsage` ("chunk" member), shape and elements fan out to
// the write and read grids, and aspect_ratio additionally to the codec grid.
constexpr auto GridConstraintsJsonBinder(Usage usage) {
  return jb::Object(
      HardSoftMemberPairJsonBinder(
          "shape", "shape_soft_constraint",
          VectorJsonBinder<ChunkLayout::ChunkShapeBase, ShapeValueTraits>(
              [=](auto& self) { return GetChunkShape(self, usage); },
              [=](auto& self, ChunkLayout::ChunkShapeBase value) {
                StoragePtr storage_to_be_destroyed;
                if (usage != ChunkLayout::kUnspecifiedUsage) {
                  return SetChunkShapeInternal(self, value, usage,
                                               storage_to_be_destroyed);
                }
                // Unspecified usage: applies to write and read grids.
                TENSORSTORE_RETURN_IF_ERROR(SetChunkShapeInternal(
                    self, value, Usage::kWrite, storage_to_be_destroyed));
                TENSORSTORE_RETURN_IF_ERROR(SetChunkShapeInternal(
                    self, value, Usage::kRead, storage_to_be_destroyed));
                return absl::OkStatus();
              })),
      HardSoftMemberPairJsonBinder(
          "aspect_ratio", "aspect_ratio_soft_constraint",
          VectorJsonBinder<ChunkLayout::ChunkAspectRatioBase,
                           AspectRatioValueTraits>(
              [=](auto& self) { return GetChunkAspectRatio(self, usage); },
              [=](auto& self, ChunkLayout::ChunkAspectRatioBase value) {
                StoragePtr storage_to_be_destroyed;
                if (usage != ChunkLayout::kUnspecifiedUsage) {
                  return SetChunkAspectRatioInternal(self, value, usage,
                                                     storage_to_be_destroyed);
                }
                // Unspecified usage: aspect_ratio also applies to codec.
                TENSORSTORE_RETURN_IF_ERROR(SetChunkAspectRatioInternal(
                    self, value, Usage::kWrite, storage_to_be_destroyed));
                TENSORSTORE_RETURN_IF_ERROR(SetChunkAspectRatioInternal(
                    self, value, Usage::kRead, storage_to_be_destroyed));
                TENSORSTORE_RETURN_IF_ERROR(SetChunkAspectRatioInternal(
                    self, value, Usage::kCodec, storage_to_be_destroyed));
                return absl::OkStatus();
              })),
      HardSoftMemberPairJsonBinder(
          "elements", "elements_soft_constraint", [=](bool hard_constraint) {
            return jb::GetterSetter(
                [=](auto& self) -> Index {
                  auto value = GetChunkElements(self, usage);
                  if (value.hard_constraint != hard_constraint)
                    return kImplicit;
                  return value.value;
                },
                [=](auto& self, Index value) {
                  ChunkLayout::ChunkElementsBase elements(value,
                                                          hard_constraint);
                  StoragePtr storage_to_be_destroyed;
                  if (usage != ChunkLayout::kUnspecifiedUsage) {
                    return SetChunkElementsInternal(self, elements, usage,
                                                    storage_to_be_destroyed);
                  }
                  TENSORSTORE_RETURN_IF_ERROR(SetChunkElementsInternal(
                      self, elements, Usage::kWrite, storage_to_be_destroyed));
                  TENSORSTORE_RETURN_IF_ERROR(SetChunkElementsInternal(
                      self, elements, Usage::kRead, storage_to_be_destroyed));
                  return absl::OkStatus();
                },
                // kImplicit marks "unset" and is never serialized.
                jb::DefaultPredicate<jb::kNeverIncludeDefaults>(
                    [](auto* obj) { *obj = kImplicit; },
                    [](auto*
                           obj) { return *obj == kImplicit; }));
          }));
}
// Wraps `GridConstraintsJsonBinder` so that the member is omitted entirely
// when no constraint for `usage` has been set.
constexpr auto DefaultableGridConstraintsJsonBinder(Usage usage) {
  return jb::DefaultPredicate<jb::kNeverIncludeDefaults>(
      [](auto* obj) {},
      [=](auto* obj) {
        if (!obj->storage_) return true;
        return GridConstraintsUnset(*obj, usage);
      },
      GridConstraintsJsonBinder(usage));
}
}
// Default JSON binder for ChunkLayout.  Each constraint category appears as
// a pair of members (hard plus "_soft_constraint"); "chunk" is load-only
// (jb::LoadSave) and fans out to the per-usage grids.  "rank" is emitted
// only when no other constraint already implies it.
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
    ChunkLayout,
    jb::Object(jb::Member("rank",
                          jb::Compose<DimensionIndex>(
                              [](auto is_loading, const auto& options,
                                 auto* obj, auto* rank) {
                                if constexpr (is_loading) {
                                  return obj->Set(RankConstraint{*rank});
                                } else {
                                  const DimensionIndex rank_value = obj->rank();
                                  *rank = (rank_value == dynamic_rank ||
                                           !AllRankDependentConstraintsUnset(
                                               *obj->storage_))
                                              ? dynamic_rank
                                              : rank_value;
                                  return absl::OkStatus();
                                }
                              },
                              jb::ConstrainedRankJsonBinder)),
               HardSoftMemberPairJsonBinder("inner_order",
                                            "inner_order_soft_constraint",
                                            InnerOrderJsonBinder),
               HardSoftMemberPairJsonBinder(
                   "grid_origin", "grid_origin_soft_constraint",
                   VectorJsonBinder<ChunkLayout::GridOrigin, OriginValueTraits>(
                       [](auto& self) { return self.grid_origin(); },
                       [](auto& self, ChunkLayout::GridOrigin value) {
                         StoragePtr storage_to_be_destroyed;
                         return SetGridOriginInternal(self, value,
                                                      storage_to_be_destroyed);
                       })),
               jb::LoadSave(jb::Member("chunk",
                                       DefaultableGridConstraintsJsonBinder(
                                           ChunkLayout::kUnspecifiedUsage))),
               jb::Member("write_chunk",
                          DefaultableGridConstraintsJsonBinder(Usage::kWrite)),
               jb::Member("read_chunk",
                          DefaultableGridConstraintsJsonBinder(Usage::kRead)),
               jb::Member("codec_chunk",
                          DefaultableGridConstraintsJsonBinder(Usage::kCodec))))
// Default JSON binder for a standalone ChunkLayout::Grid.
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(ChunkLayout::Grid,
                                       StandaloneGridJsonBinder())
namespace {
// Maps a per-input-dimension constraint vector through `transform` to the
// output space.  Values propagate only through single_input_dimension output
// maps with nonzero stride, via `Traits::TransformInputValue`.  A hard
// constraint on an input dimension with no corresponding output dimension is
// an error; a soft one is silently dropped.
template <typename Traits>
static absl::Status TransformInputVector(
    IndexTransformView<> transform,
    tensorstore::span<const typename Traits::Element> in_vec,
    DimensionSet in_hard_constraint,
    tensorstore::span<typename Traits::Element> out_vec,
    DimensionSet& out_hard_constraint) {
  using Element = typename Traits::Element;
  const DimensionIndex output_rank = transform.output_rank();
  const DimensionIndex input_rank = transform.input_rank();
  assert(input_rank == in_vec.size());
  assert(output_rank == out_vec.size());
  // Copy the input first: `in_vec` and `out_vec` may alias.
  Element in_vec_copy[kMaxRank];
  std::copy_n(in_vec.begin(), input_rank, in_vec_copy);
  std::fill_n(out_vec.begin(), output_rank, Traits::kDefaultValue);
  out_hard_constraint = false;
  // Tracks hard-constrained input dims not yet consumed by an output dim.
  DimensionSet remaining_in_hard_constraint = in_hard_constraint;
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension ||
        map.stride() == 0) {
      continue;
    }
    const DimensionIndex input_dim = map.input_dimension();
    Element value = in_vec_copy[input_dim];
    remaining_in_hard_constraint[input_dim] = false;
    if (value == Traits::kDefaultValue) continue;
    TENSORSTORE_ASSIGN_OR_RETURN(
        value, Traits::TransformInputValue(value, map.offset(), map.stride()),
        MaybeAnnotateStatus(
            _, tensorstore::StrCat("Error transforming input dimension ",
                                   input_dim, " -> output dimension ",
                                   output_dim)));
    out_vec[output_dim] = value;
    if (in_hard_constraint[input_dim] && value != Traits::kDefaultValue) {
      out_hard_constraint[output_dim] = true;
    }
  }
  for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
    if (in_vec[input_dim] == Traits::kDefaultValue) continue;
    if (!remaining_in_hard_constraint[input_dim]) continue;
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "No output dimension corresponds to input dimension ", input_dim));
  }
  return absl::OkStatus();
}
// Maps a per-output-dimension constraint vector through `transform` back to
// the input space.  Values propagate only through single_input_dimension
// maps with nonzero stride whose input dimension is one-to-one
// (`one_to_one_input_dims`); all other input dimensions receive the default.
template <typename Traits>
static absl::Status TransformOutputVector(
    IndexTransformView<> transform, DimensionSet one_to_one_input_dims,
    tensorstore::span<const typename Traits::Element> out_vec,
    DimensionSet out_hard_constraint,
    tensorstore::span<typename Traits::Element> in_vec,
    DimensionSet& in_hard_constraint) {
  using Element = typename Traits::Element;
  const DimensionIndex input_rank = transform.input_rank();
  const DimensionIndex output_rank = transform.output_rank();
  assert(output_rank == out_vec.size());
  assert(input_rank == in_vec.size());
  // Copy the output first: `out_vec` and `in_vec` may alias.
  Element out_vec_copy[kMaxRank];
  std::copy_n(out_vec.begin(), output_rank, out_vec_copy);
  std::fill_n(in_vec.begin(), input_rank, Traits::kDefaultValue);
  in_hard_constraint = false;
  for (DimensionIndex output_dim = 0; output_dim < output_rank; ++output_dim) {
    const auto map = transform.output_index_maps()[output_dim];
    if (map.method() != OutputIndexMethod::single_input_dimension ||
        map.stride() == 0) {
      continue;
    }
    const DimensionIndex input_dim = map.input_dimension();
    if (!one_to_one_input_dims[input_dim]) continue;
    Element value = out_vec_copy[output_dim];
    if (value == Traits::kDefaultValue) continue;
    TENSORSTORE_ASSIGN_OR_RETURN(
        value, Traits::TransformOutputValue(value, map.offset(), map.stride()),
        MaybeAnnotateStatus(
            _, tensorstore::StrCat("Error transforming output dimension ",
                                   output_dim, " -> input dimension ",
                                   input_dim)));
    in_vec[input_dim] = value;
    if (value != Traits::kDefaultValue) {
      in_hard_constraint[input_dim] = out_hard_constraint[output_dim];
    }
  }
  return absl::OkStatus();
}
// Transforms the grid constraints (elements, shape, aspect_ratio) for one
// usage from the output space (`output_storage`) to the input space
// (`input_storage`).  `elements` is rank-independent and copied directly.
absl::Status TransformOutputGridConstraints(Storage& output_storage,
                                            Storage& input_storage,
                                            DimensionSet one_to_one_input_dims,
                                            IndexTransformView<> transform,
                                            size_t usage_index) {
  input_storage.chunk_elements_[usage_index] =
      output_storage.chunk_elements_[usage_index];
  TENSORSTORE_RETURN_IF_ERROR(
      TransformOutputVector<ShapeValueTraits>(
          transform, one_to_one_input_dims,
          output_storage.chunk_shape(usage_index),
          output_storage.chunk_shape_hard_constraint_[usage_index],
          input_storage.chunk_shape(usage_index),
          input_storage.chunk_shape_hard_constraint_[usage_index]),
      tensorstore::MaybeAnnotateStatus(_, "Error transforming shape"));
  TENSORSTORE_RETURN_IF_ERROR(
      TransformOutputVector<AspectRatioValueTraits>(
          transform, one_to_one_input_dims,
          output_storage.chunk_aspect_ratio(usage_index),
          output_storage.chunk_aspect_ratio_hard_constraint_[usage_index],
          input_storage.chunk_aspect_ratio(usage_index),
          input_storage.chunk_aspect_ratio_hard_constraint_[usage_index]),
      tensorstore::MaybeAnnotateStatus(_, "Error transforming aspect_ratio"));
  return absl::OkStatus();
}
// Transforms the grid constraints (elements, shape, aspect_ratio) for one
// usage from the input space (`input_storage`) to the output space
// (`output_storage`).  `elements` is rank-independent and copied directly.
absl::Status TransformInputGridConstraints(Storage& input_storage,
                                           Storage& output_storage,
                                           IndexTransformView<> transform,
                                           size_t usage_index) {
  output_storage.chunk_elements_[usage_index] =
      input_storage.chunk_elements_[usage_index];
  TENSORSTORE_RETURN_IF_ERROR(
      TransformInputVector<ShapeValueTraits>(
          transform, input_storage.chunk_shape(usage_index),
          input_storage.chunk_shape_hard_constraint_[usage_index],
          output_storage.chunk_shape(usage_index),
          output_storage.chunk_shape_hard_constraint_[usage_index]),
      tensorstore::MaybeAnnotateStatus(_, "Error transforming shape"));
  TENSORSTORE_RETURN_IF_ERROR(
      TransformInputVector<AspectRatioValueTraits>(
          transform, input_storage.chunk_aspect_ratio(usage_index),
          input_storage.chunk_aspect_ratio_hard_constraint_[usage_index],
          output_storage.chunk_aspect_ratio(usage_index),
          output_storage.chunk_aspect_ratio_hard_constraint_[usage_index]),
      tensorstore::MaybeAnnotateStatus(_, "Error transforming aspect_ratio"));
  return absl::OkStatus();
}
}
// Converts constraints on the output space of `transform` into equivalent
// constraints on its input space.
Result<ChunkLayout> ApplyIndexTransform(IndexTransformView<> transform,
                                        ChunkLayout output_constraints) {
  if (!transform.valid() || !output_constraints.storage_) {
    return output_constraints;
  }
  const DimensionIndex output_rank = transform.output_rank();
  const DimensionIndex input_rank = transform.input_rank();
  const DimensionIndex output_constraints_rank = output_constraints.rank();
  if (!RankConstraint::EqualOrUnspecified(output_constraints_rank,
                                          transform.output_rank())) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Cannot transform constraints of rank ", output_constraints_rank,
        " by index transform of rank ", input_rank, " -> ", output_rank));
  }
  if (output_constraints_rank <= 0) return output_constraints;
  ChunkLayout input_constraints;
  Storage* output_storage;
  if (output_rank == input_rank) {
    // Same rank: transform in place (unique storage), since the per-vector
    // helpers tolerate aliasing.
    StoragePtr storage_to_be_destroyed;
    input_constraints = std::move(output_constraints);
    output_storage = &Storage::EnsureUnique(
        input_constraints.storage_, input_rank, storage_to_be_destroyed);
  } else {
    // Different rank: allocate fresh input-rank storage.
    input_constraints.storage_ = Storage::Allocate(input_rank);
    new (input_constraints.storage_.get()) Storage(input_rank);
    output_storage = output_constraints.storage_.get();
  }
  input_constraints.storage_->hard_constraint_ =
      output_storage->hard_constraint_;
  if (auto* inner_order = output_storage->inner_order(); inner_order[0] != -1) {
    TransformOutputDimensionOrder(
        transform, {inner_order, output_rank},
        {input_constraints.storage_->inner_order(), input_rank});
  }
  DimensionSet one_to_one_input_dims =
      internal::GetOneToOneInputDimensions(transform).one_to_one;
  TENSORSTORE_RETURN_IF_ERROR(
      TransformOutputVector<OriginValueTraits>(
          transform, one_to_one_input_dims,
          tensorstore::span<const Index>(output_storage->grid_origin(),
                                         output_rank),
          output_storage->grid_origin_hard_constraint_,
          tensorstore::span<Index>(input_constraints.storage_->grid_origin(),
                                   input_rank),
          input_constraints.storage_->grid_origin_hard_constraint_),
      tensorstore::MaybeAnnotateStatus(_, "Error transforming grid_origin"));
  for (size_t usage_index = 0; usage_index < kNumUsages; ++usage_index) {
    TENSORSTORE_RETURN_IF_ERROR(
        TransformOutputGridConstraints(
            *output_storage, *input_constraints.storage_, one_to_one_input_dims,
            transform, usage_index),
        tensorstore::MaybeAnnotateStatus(
            _, tensorstore::StrCat("Error transforming ",
                                   static_cast<Usage>(usage_index), "_chunk")));
  }
  return input_constraints;
}
// Converts constraints on the input space of `transform` into equivalent
// constraints on its output space (the inverse direction of
// `ApplyIndexTransform`).
Result<ChunkLayout> ApplyInverseIndexTransform(IndexTransformView<> transform,
                                               ChunkLayout input_constraints) {
  if (!transform.valid() || !input_constraints.storage_) {
    return input_constraints;
  }
  const DimensionIndex output_rank = transform.output_rank();
  const DimensionIndex input_rank = transform.input_rank();
  const DimensionIndex input_constraints_rank = input_constraints.rank();
  if (!RankConstraint::EqualOrUnspecified(input_constraints_rank,
                                          transform.input_rank())) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Cannot transform constraints of rank ", input_constraints_rank,
        " by index transform of rank ", input_rank, " -> ", output_rank));
  }
  if (input_constraints_rank <= 0) return input_constraints;
  ChunkLayout output_constraints;
  Storage* input_storage;
  if (output_rank == input_rank) {
    // Same rank: transform in place (unique storage), since the per-vector
    // helpers tolerate aliasing.
    StoragePtr storage_to_be_destroyed;
    output_constraints = std::move(input_constraints);
    input_storage = &Storage::EnsureUnique(
        output_constraints.storage_, output_rank, storage_to_be_destroyed);
  } else {
    // Different rank: allocate fresh output-rank storage.
    output_constraints.storage_ = Storage::Allocate(output_rank);
    new (output_constraints.storage_.get()) Storage(output_rank);
    input_storage = input_constraints.storage_.get();
  }
  output_constraints.storage_->hard_constraint_ =
      input_storage->hard_constraint_;
  if (auto* inner_order = input_storage->inner_order(); inner_order[0] != -1) {
    TransformInputDimensionOrder(
        transform, {inner_order, input_rank},
        {output_constraints.storage_->inner_order(), output_rank});
  }
  TENSORSTORE_RETURN_IF_ERROR(
      TransformInputVector<OriginValueTraits>(
          transform,
          tensorstore::span<const Index>(input_storage->grid_origin(),
                                         input_rank),
          input_storage->grid_origin_hard_constraint_,
          tensorstore::span<Index>(output_constraints.storage_->grid_origin(),
                                   output_rank),
          output_constraints.storage_->grid_origin_hard_constraint_),
      tensorstore::MaybeAnnotateStatus(_, "Error transforming grid_origin"));
  for (size_t usage_index = 0; usage_index < kNumUsages; ++usage_index) {
    TENSORSTORE_RETURN_IF_ERROR(
        TransformInputGridConstraints(*input_storage,
                                      *output_constraints.storage_, transform,
                                      usage_index),
        tensorstore::MaybeAnnotateStatus(
            _, tensorstore::StrCat("Error transforming ",
                                   static_cast<Usage>(usage_index), "_chunk")));
  }
  return output_constraints;
}
// Equality compares the represented constraints, not the storage: a null
// storage pointer is equivalent to allocated storage with no constraints
// set.
bool operator==(const ChunkLayout& a, const ChunkLayout& b) {
  if (!a.storage_) {
    if (!b.storage_) return true;
    return AllConstraintsUnset(b);
  }
  if (!b.storage_) {
    return AllConstraintsUnset(a);
  }
  auto& a_storage = *a.storage_;
  auto& b_storage = *b.storage_;
  // Compare all rank-independent state first.
  if (a_storage.hard_constraint_ != b_storage.hard_constraint_ ||
      a_storage.grid_origin_hard_constraint_ !=
          b_storage.grid_origin_hard_constraint_ ||
      !internal::RangesEqual(
          tensorstore::span(a_storage.chunk_shape_hard_constraint_),
          tensorstore::span(b_storage.chunk_shape_hard_constraint_)) ||
      !internal::RangesEqual(
          tensorstore::span(a_storage.chunk_aspect_ratio_hard_constraint_),
          tensorstore::span(b_storage.chunk_aspect_ratio_hard_constraint_)) ||
      !std::equal(a_storage.chunk_elements_,
                  a_storage.chunk_elements_ + kNumUsages,
                  b_storage.chunk_elements_)) {
    return false;
  }
  const DimensionIndex rank = a_storage.rank_;
  if (rank <= 0 || rank != b_storage.rank_) {
    // Differing/unknown ranks can still be equal if neither side has any
    // rank-dependent constraint set.
    return AllRankDependentConstraintsUnset(a_storage) &&
           AllRankDependentConstraintsUnset(b_storage);
  }
  if (auto* a_inner_order = a_storage.inner_order(); !std::equal(
          a_inner_order, a_inner_order + rank, b_storage.inner_order())) {
    return false;
  }
  if (auto* a_origin = a_storage.grid_origin();
      !std::equal(a_origin, a_origin + rank, b_storage.grid_origin())) {
    return false;
  }
  if (auto* a_shapes = a_storage.chunk_shapes();
      !std::equal(a_shapes, a_shapes + Storage::NumShapeElements(rank),
                  b_storage.chunk_shapes())) {
    return false;
  }
  if (auto* a_aspect_ratios = a_storage.chunk_aspect_ratios();
      !std::equal(a_aspect_ratios,
                  a_aspect_ratios + Storage::NumAspectRatioElements(rank),
                  b_storage.chunk_aspect_ratios())) {
    return false;
  }
  return true;
}
// Prints the layout as its compact JSON representation.
std::ostream& operator<<(std::ostream& os, const ChunkLayout& x) {
  const std::string json_text = ::nlohmann::json(x).dump();
  os << json_text;
  return os;
}
namespace internal {
// Default target number of elements per chunk (2^20 elements) used when no
// explicit `elements` constraint is specified.
constexpr Index kDefaultChunkElements = 1024 * 1024;
namespace {
// Fills in the dimensions of `chunk_shape` that are currently 0
// (unconstrained), guided by `aspect_ratio`, so that the chunk's total
// element count approximates `target_chunk_elements`.
//
// For an unconstrained dimension `i`, the candidate size is
// `aspect_ratio[i] * factor`, clamped to `max_chunk_shape[i]` and then
// post-processed by `map_size`.  A binary search over `factor` locates the
// largest scale whose chunk does not exceed the target element count (or an
// exact match, if one exists).  Already-set dimensions are left unchanged.
void ChooseChunkSizeFromAspectRatio(
    tensorstore::span<const double> aspect_ratio,
    tensorstore::span<Index> chunk_shape, Index target_chunk_elements,
    BoxView<> domain,
    absl::FunctionRef<Index(DimensionIndex dim, Index value)> map_size) {
  const DimensionIndex rank = chunk_shape.size();
  assert(aspect_ratio.size() == rank);
  assert(domain.rank() == rank);
  // Per-dimension upper bound: the finite domain extent (at least 1) if
  // available, otherwise the element target itself.
  double max_chunk_shape[kMaxRank];
  for (DimensionIndex i = 0; i < rank; ++i) {
    double max_size = target_chunk_elements;
    if (IndexInterval bounds = domain[i]; IsFinite(bounds)) {
      max_size =
          std::min(max_size, std::max(1.0, static_cast<double>(bounds.size())));
    }
    // Clamp well below 2^63 so the later cast to `Index` cannot overflow.
    max_size = std::min(max_size, 0x1.0p62);
    max_chunk_shape[i] = max_size;
  }
  // Size of dimension `i` at scale `factor`; already-constrained dimensions
  // keep their existing size.
  const auto get_chunk_size = [&](DimensionIndex i, double factor) -> Index {
    if (const Index size = chunk_shape[i]; size != 0) return size;
    Index size =
        std::max(Index(1), static_cast<Index>(std::min(aspect_ratio[i] * factor,
                                                       max_chunk_shape[i])));
    size = map_size(i, size);
    return size;
  };
  // Total elements per chunk at scale `factor`, saturating on overflow.
  const auto get_total_elements = [&](double factor) -> Index {
    Index total = 1;
#ifdef TENSORSTORE_INTERNAL_CHUNK_LAYOUT_DEBUG
    Index cur_chunk_shape[kMaxRank];
#endif
    for (DimensionIndex i = 0; i < rank; ++i) {
      const Index size = get_chunk_size(i, factor);
#ifdef TENSORSTORE_INTERNAL_CHUNK_LAYOUT_DEBUG
      cur_chunk_shape[i] = size;
#endif
      if (internal::MulOverflow(size, total, &total)) {
        total = std::numeric_limits<Index>::max();
        break;
      }
    }
#ifdef TENSORSTORE_INTERNAL_CHUNK_LAYOUT_DEBUG
    ABSL_LOG(INFO) << "factor=" << factor << ", chunk_shape="
                   << tensorstore::span<const Index>(&cur_chunk_shape[0], rank)
                   << ", total=" << total;
#endif
    return total;
  };
  // Establish search bounds: `min_factor_increment` is half the smallest
  // factor step that can change any unconstrained dimension by 1, and
  // `max_factor` is (twice) the factor at which every unconstrained
  // dimension saturates at its maximum.
  double min_factor_increment = std::numeric_limits<double>::infinity();
  double max_factor = 0;
  for (DimensionIndex i = 0; i < rank; ++i) {
    if (chunk_shape[i] != 0) continue;
    const double factor = aspect_ratio[i];
    min_factor_increment = std::min(min_factor_increment, 1.0 / factor);
    max_factor = std::max(max_factor, max_chunk_shape[i] / factor);
  }
  min_factor_increment /= 2;
  max_factor *= 2;
  // Binary search for the boundary between "too few" and "enough" elements.
  double min_factor = min_factor_increment;
  Index max_factor_elements = get_total_elements(max_factor);
  while (min_factor + min_factor_increment < max_factor) {
    double mid_factor = min_factor + (max_factor - min_factor) / 2.0;
    Index mid_factor_elements = get_total_elements(mid_factor);
    if (mid_factor_elements >= target_chunk_elements) {
      max_factor = mid_factor;
      max_factor_elements = mid_factor_elements;
    }
    if (mid_factor_elements <= target_chunk_elements) {
      min_factor = mid_factor;
    }
  }
  // Prefer the upper bound on an exact hit; otherwise stay at or below the
  // target element count.
  const double factor =
      max_factor_elements == target_chunk_elements ? max_factor : min_factor;
  for (DimensionIndex i = 0; i < rank; ++i) {
    chunk_shape[i] = get_chunk_size(i, factor);
  }
}
// Chooses a grid origin for each dimension, writing the result to
// `grid_origin`.
//
// Explicit (non-`kImplicit`) values from `origin_constraints` are used
// as-is.  Implicit dimensions default to `domain_origin[i]` reduced modulo
// the chunk size (or 0 for an unbounded-below domain), so that chunk
// boundaries align with the domain origin.  Fails if a chosen
// origin/chunk-size pair would produce an invalid interval.
absl::Status ChooseChunkGridOrigin(
    tensorstore::span<const Index> origin_constraints,
    tensorstore::span<const Index> domain_origin,
    tensorstore::span<const Index> chunk_shape,
    tensorstore::span<Index> grid_origin) {
  const DimensionIndex rank = grid_origin.size();
  if (!origin_constraints.empty()) {
    if (origin_constraints.size() != rank) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Rank of constraints (", origin_constraints.size(),
          ") does not match rank of domain (", rank, ")"));
    }
    std::copy_n(origin_constraints.begin(), rank, grid_origin.begin());
  } else {
    std::fill_n(grid_origin.begin(), rank, kImplicit);
  }
  for (DimensionIndex i = 0; i < rank; ++i) {
    Index& origin_value = grid_origin[i];
    if (origin_value == kImplicit) {
      const Index domain_origin_value = domain_origin[i];
      if (domain_origin_value == -kInfIndex) {
        origin_value = 0;
      } else {
        // Align the grid with the domain origin.
        origin_value = NonnegativeMod(domain_origin_value, chunk_shape[i]);
      }
    }
    // Validate that origin + chunk size forms a representable interval.
    TENSORSTORE_ASSIGN_OR_RETURN(
        auto interval, IndexInterval::Sized(origin_value, chunk_shape[i]),
        tensorstore::MaybeAnnotateStatus(
            _, tensorstore::StrCat("Invalid chunk constraints for dimension ",
                                   i)));
    grid_origin[i] = interval.inclusive_min();
  }
  return absl::OkStatus();
}
// Copies `shape_constraints` into `chunk_shape`, resolving special values.
//
// A size of 0 means "unconstrained" (to be chosen later) and clears the
// corresponding hard-constraint bit.  A size of -1 means "match the domain
// extent", which requires a finite bound for that dimension.  On success,
// `shape_hard_constraint` holds the hard-constraint bit for each dimension.
absl::Status InitializeChunkShape(ChunkLayout::ChunkShapeBase shape_constraints,
                                  BoxView<> domain,
                                  tensorstore::span<Index> chunk_shape,
                                  DimensionSet& shape_hard_constraint) {
  const DimensionIndex rank = chunk_shape.size();
  DimensionSet hard_constraint = false;
  if (shape_constraints.valid()) {
    if (shape_constraints.size() != rank) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat("Rank of constraints (", shape_constraints.size(),
                              ") does not match rank of domain (", rank, ")"));
    }
    std::copy_n(shape_constraints.begin(), rank, chunk_shape.begin());
    hard_constraint = shape_constraints.hard_constraint;
  } else {
    // No constraints: every dimension starts unconstrained.
    std::fill_n(chunk_shape.begin(), rank, 0);
  }
  for (DimensionIndex i = 0; i < rank; ++i) {
    Index& chunk_size = chunk_shape[i];
    if (chunk_size == 0) {
      // Unconstrained dimensions cannot carry a hard-constraint bit.
      hard_constraint[i] = false;
      continue;
    }
    if (chunk_size == -1) {
      // -1 requests "use the full domain extent" for this dimension.
      IndexInterval bounds = domain[i];
      if (!IsFinite(bounds)) {
        return absl::InvalidArgumentError(
            tensorstore::StrCat("Cannot match chunk size for dimension ", i,
                                " to unbounded domain ", bounds));
      }
      chunk_size = std::max(Index(1), bounds.size());
    }
  }
  shape_hard_constraint = hard_constraint;
  return absl::OkStatus();
}
// Fills any remaining zero (unconstrained) entries of `chunk_shape` using
// the aspect-ratio and target-element constraints.
//
// Aspect-ratio entries of 0 default to 1; an absent elements constraint
// defaults to `kDefaultChunkElements`.  `map_size` lets the caller adjust
// each candidate size (e.g. to a divisor/multiple of another grid's size).
// No-op if every dimension of `chunk_shape` is already set.
absl::Status CompleteChunkShapeFromAspectRatio(
    BoxView<> domain,
    ChunkLayout::ChunkAspectRatioBase aspect_ratio_constraints,
    ChunkLayout::ChunkElementsBase elements_constraint,
    absl::FunctionRef<Index(DimensionIndex dim, Index value)> map_size,
    tensorstore::span<Index> chunk_shape) {
  const DimensionIndex rank = chunk_shape.size();
  if (std::any_of(chunk_shape.begin(), chunk_shape.end(),
                  [](Index x) { return x == 0; })) {
    double aspect_ratio[kMaxRank];
    if (aspect_ratio_constraints.valid()) {
      if (aspect_ratio_constraints.size() != rank) {
        return absl::InvalidArgumentError(tensorstore::StrCat(
            "Rank of constraints (", aspect_ratio_constraints.size(),
            ") does not match rank of domain (", rank, ")"));
      }
      std::copy_n(aspect_ratio_constraints.begin(), rank, aspect_ratio);
      for (DimensionIndex i = 0; i < rank; ++i) {
        if (aspect_ratio[i] == 0) {
          aspect_ratio[i] = 1;
        }
      }
    } else {
      std::fill_n(aspect_ratio, rank, 1);
    }
    Index target_chunk_elements = kDefaultChunkElements;
    if (elements_constraint.valid()) {
      target_chunk_elements = elements_constraint;
    }
    ChooseChunkSizeFromAspectRatio(
        tensorstore::span<const double>(aspect_ratio, rank), chunk_shape,
        target_chunk_elements, domain, map_size);
  }
  return absl::OkStatus();
}
}
// Chooses a chunk shape for `domain` subject to `shape_constraints`.
//
// Explicit shape constraints are applied first; any dimensions left
// unconstrained are then filled in from the aspect-ratio and target-element
// constraints.  The identity size mapping indicates that candidate sizes
// require no further adjustment.
absl::Status ChooseChunkShape(ChunkLayout::GridView shape_constraints,
                              BoxView<> domain,
                              tensorstore::span<Index> chunk_shape) {
  assert(domain.rank() == chunk_shape.size());
  constexpr auto identity_size = [](DimensionIndex dim, Index size) {
    return size;
  };
  DimensionSet hard_constraint_dims;
  TENSORSTORE_RETURN_IF_ERROR(
      InitializeChunkShape(shape_constraints.shape(), domain, chunk_shape,
                           hard_constraint_dims));
  return CompleteChunkShapeFromAspectRatio(
      domain, shape_constraints.aspect_ratio(), shape_constraints.elements(),
      identity_size, chunk_shape);
}
// Chooses a complete chunk grid (shape and origin) for `domain`, storing it
// in `chunk_template`.  The shape is chosen first; the origin is then picked
// consistent with that shape and any explicit origin constraints.
absl::Status ChooseChunkGrid(tensorstore::span<const Index> origin_constraints,
                             ChunkLayout::GridView shape_constraints,
                             BoxView<> domain,
                             MutableBoxView<> chunk_template) {
  auto shape_status =
      ChooseChunkShape(shape_constraints, domain, chunk_template.shape());
  if (!shape_status.ok()) return shape_status;
  return ChooseChunkGridOrigin(origin_constraints, domain.origin(),
                               chunk_template.shape(),
                               chunk_template.origin());
}
// Chooses a single chunk grid that satisfies both the read-chunk and
// write-chunk constraints of `constraints`.
//
// The write-chunk constraints are merged into the read-chunk constraints of
// a copy of `constraints`; the merged read-chunk grid then drives
// `ChooseChunkGrid`.  Fails if the read and write constraints conflict.
absl::Status ChooseReadWriteChunkGrid(const ChunkLayout& constraints,
                                      BoxView<> domain,
                                      MutableBoxView<> chunk_template) {
  ChunkLayout combined_constraints = constraints;
  TENSORSTORE_RETURN_IF_ERROR(
      combined_constraints.Set(
          ChunkLayout::ReadChunk(constraints.write_chunk())),
      tensorstore::MaybeAnnotateStatus(_,
                                       "write_chunk constraints not compatible "
                                       "with existing read_chunk constraints"));
  return ChooseChunkGrid(combined_constraints.grid_origin(),
                         combined_constraints.read_chunk(), domain,
                         chunk_template);
}
Index FindNearestDivisor(Index dividend, Index target,
Index max_search_distance = 1000000) {
if (target >= dividend) {
return dividend;
}
if ((dividend % target) == 0) {
return target;
}
for (Index offset = 1; offset < max_search_distance; ++offset) {
if (target > offset && (dividend % (target - offset)) == 0) {
return target - offset;
}
if ((dividend % (target + offset)) == 0) {
return target + offset;
}
}
return dividend;
}
// Returns the positive multiple of `divisor` closest to `target`.  Ties
// prefer the smaller multiple; a `target` below `divisor` yields `divisor`.
Index FindNearestMultiple(Index divisor, Index target) {
  if (target < divisor) {
    return divisor;
  }
  const Index below = target / divisor * divisor;
  const Index above = below + divisor;
  return (target - below <= above - target) ? below : above;
}
// Chooses read and write chunk shapes such that, per dimension, the write
// chunk size is a multiple of the read chunk size.
//
// Explicitly-constrained sizes are reconciled first: a soft-constrained
// size is nudged to the nearest multiple/divisor of a hard-constrained one,
// and two conflicting hard constraints are an error.  Remaining
// unconstrained sizes are then completed from aspect-ratio/element
// constraints, with the read sizes snapped to divisors of fixed write sizes
// and the write sizes snapped to multiples of the chosen read sizes.
absl::Status ChooseReadWriteChunkShapes(
    ChunkLayout::GridView read_constraints,
    ChunkLayout::GridView write_constraints, BoxView<> domain,
    tensorstore::span<Index> read_chunk_shape,
    tensorstore::span<Index> write_chunk_shape) {
  DimensionIndex rank = write_chunk_shape.size();
  assert(read_chunk_shape.size() == rank);
  assert(domain.rank() == rank);
  DimensionSet write_shape_hard_constraint;
  DimensionSet read_shape_hard_constraint;
  TENSORSTORE_RETURN_IF_ERROR(
      InitializeChunkShape(write_constraints.shape(), domain, write_chunk_shape,
                           write_shape_hard_constraint));
  TENSORSTORE_RETURN_IF_ERROR(InitializeChunkShape(read_constraints.shape(),
                                                   domain, read_chunk_shape,
                                                   read_shape_hard_constraint));
  // Reconcile dimensions where both sizes are already set but incompatible.
  for (DimensionIndex i = 0; i < rank; ++i) {
    Index& read_size = read_chunk_shape[i];
    Index& write_size = write_chunk_shape[i];
    if (read_size == 0 || write_size == 0 || ((write_size % read_size) == 0)) {
      continue;
    }
    const bool read_hard_constraint = read_shape_hard_constraint[i];
    const bool write_hard_constraint = write_shape_hard_constraint[i];
    if (read_hard_constraint && write_hard_constraint) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "Incompatible chunk size constraints for dimension ", i,
          ": read size of ", read_size, ", write size of ", write_size));
    }
    if (read_hard_constraint && !write_hard_constraint) {
      write_size = FindNearestMultiple(read_size, write_size);
      continue;
    }
    if (!read_hard_constraint) {
      read_size = FindNearestDivisor(write_size, read_size,
                                     1000000);
      continue;
    }
  }
  // Complete read sizes, snapping each candidate to a divisor of any
  // already-fixed write size for that dimension.
  const auto map_read_size = [&](DimensionIndex i, Index size) {
    const Index write_size = write_chunk_shape[i];
    if (write_size != 0) {
      size = FindNearestDivisor(write_size, size,
                                1000000);
    }
    return size;
  };
  TENSORSTORE_RETURN_IF_ERROR(CompleteChunkShapeFromAspectRatio(
      domain, read_constraints.aspect_ratio(), read_constraints.elements(),
      map_read_size, read_chunk_shape));
  // Complete write sizes, snapping each candidate to a multiple of the
  // (now-final) read size.
  const auto map_write_size = [&](DimensionIndex i, Index size) {
    const Index read_size = read_chunk_shape[i];
    return FindNearestMultiple(read_size, size);
  };
  TENSORSTORE_RETURN_IF_ERROR(CompleteChunkShapeFromAspectRatio(
      domain, write_constraints.aspect_ratio(), write_constraints.elements(),
      map_write_size, write_chunk_shape));
  return absl::OkStatus();
}
}
// Converts this set of constraints into a concrete, validated layout.
//
// Requires a specified rank and a finite, hard-constrained grid origin for
// every dimension.  Soft chunk-shape constraints are discarded; read-chunk
// sizes left unset fall back to the corresponding write-chunk sizes (so the
// write grid must be processed before the read grid in `kUsages` order).
// Aspect-ratio and elements constraints are cleared, since the shapes are
// now final.  Fails if any origin/shape pair is invalid or the write chunk
// shape is not a per-dimension multiple of the read chunk shape.
absl::Status ChunkLayout::Finalize() {
  const DimensionIndex rank = this->rank();
  if (rank == dynamic_rank) {
    return absl::InvalidArgumentError("rank must be specified");
  }
  {
    // Copy-on-write: ensure we own the storage before mutating it.
    StoragePtr storage_to_be_destroyed;
    Storage::EnsureUnique(storage_, rank, storage_to_be_destroyed);
  }
  auto& impl = *storage_;
  auto origin = impl.grid_origin();
  for (DimensionIndex dim = 0; dim < rank; ++dim) {
    if (!impl.grid_origin_hard_constraint_[dim]) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "No grid_origin hard constraint for dimension ", dim));
    }
    if (!IsFiniteIndex(origin[dim])) {
      return absl::InvalidArgumentError(
          tensorstore::StrCat("Invalid grid_origin: ", origin));
    }
  }
  for (Usage usage : ChunkLayout::kUsages) {
    const size_t usage_index = static_cast<size_t>(usage);
    // Validate and normalize this usage's grid inside an immediately-invoked
    // lambda so errors can be uniformly annotated below.
    auto status = [&]() -> absl::Status {
      auto shape = impl.chunk_shape(usage_index);
      auto& shape_hard_constraint =
          impl.chunk_shape_hard_constraint_[usage_index];
      for (DimensionIndex dim = 0; dim < rank; ++dim) {
        const Index origin_value = origin[dim];
        Index& size_value = shape[dim];
        if (!shape_hard_constraint[dim]) {
          // Drop soft constraints: only hard sizes survive finalization.
          size_value = 0;
        }
        if (!IndexInterval::ValidSized(origin_value, size_value) ||
            !IsFiniteIndex(origin_value + size_value)) {
          return absl::InvalidArgumentError(tensorstore::StrCat(
              "Invalid origin/shape: origin=", origin, ", shape=", shape));
        }
        if (size_value == 0 && usage == Usage::kRead) {
          // Unset read-chunk sizes inherit from the write chunk.
          auto write_shape =
              impl.chunk_shape(static_cast<size_t>(Usage::kWrite));
          size_value = write_shape[dim];
          shape_hard_constraint[dim] =
              impl.chunk_shape_hard_constraint_[static_cast<size_t>(
                  Usage::kWrite)][dim];
        }
      }
      // Shapes are final; aspect-ratio/elements constraints no longer apply.
      impl.chunk_aspect_ratio_hard_constraint_[usage_index] = false;
      impl.hard_constraint_[static_cast<size_t>(
          GetChunkElementsHardConstraintBit(usage))] = false;
      impl.chunk_elements_[usage_index] = kImplicit;
      std::fill_n(impl.chunk_aspect_ratio(usage_index).begin(), rank, 0);
      return absl::OkStatus();
    }();
    if (!status.ok()) {
      return tensorstore::MaybeAnnotateStatus(
          status, tensorstore::StrCat("Invalid ", usage, " chunk grid"));
    }
  }
  // Final cross-check: each write chunk must be an exact tiling of read
  // chunks.
  auto write_chunk_shape = impl.chunk_shape(static_cast<size_t>(Usage::kWrite));
  auto read_chunk_shape = impl.chunk_shape(static_cast<size_t>(Usage::kRead));
  for (DimensionIndex dim = 0; dim < rank; ++dim) {
    const Index read_size = read_chunk_shape[dim];
    const Index write_size = write_chunk_shape[dim];
    if (read_size == 0) continue;
    if ((write_size % read_size) != 0) {
      return absl::InvalidArgumentError(tensorstore::StrCat(
          "write chunk shape ", write_chunk_shape,
          " is not a multiple of read chunk shape ", read_chunk_shape));
    }
  }
  return absl::OkStatus();
}
namespace {
// JSON binder mapping `ChunkLayout::Usage` enum values to their canonical
// string labels, used for both serialization and parsing.
constexpr auto UsageJsonBinder() {
  return jb::Enum<ChunkLayout::Usage, std::string_view>({
      {ChunkLayout::Usage::kWrite, "write"},
      {ChunkLayout::Usage::kRead, "read"},
      {ChunkLayout::Usage::kCodec, "codec"},
  });
}
}
// Prints the canonical string label of `usage`.  The binder is invoked in
// "save" direction (`std::false_type`); it cannot fail for a valid enum
// value, so any error is ignored.
std::ostream& operator<<(std::ostream& os, ChunkLayout::Usage usage) {
  std::string_view label;
  UsageJsonBinder()(std::false_type{}, jb::NoOptions{}, &usage, &label)
      .IgnoreError();
  os << label;
  return os;
}
// Parses a usage label ("write", "read", or "codec") into the corresponding
// enum value.  The binder runs in "load" direction (`std::true_type`) and
// reports an error for unrecognized strings.
Result<ChunkLayout::Usage> ChunkLayout::ParseUsage(std::string_view s) {
  Usage parsed;
  TENSORSTORE_RETURN_IF_ERROR(
      UsageJsonBinder()(std::true_type{}, jb::NoOptions{}, &parsed, &s));
  return parsed;
}
// Copy constructor: deep-copies the per-dimension `shape_` and
// `aspect_ratio_` arrays, which are only allocated for positive rank.
ChunkLayout::Grid::Grid(const Grid& other)
    : rank_(other.rank_),
      elements_hard_constraint_(other.elements_hard_constraint_),
      shape_hard_constraint_(other.shape_hard_constraint_),
      aspect_ratio_hard_constraint_(other.aspect_ratio_hard_constraint_),
      elements_(other.elements_) {
  const DimensionIndex rank = other.rank_;
  if (rank > 0) {
    shape_.reset(new Index[rank]);
    std::copy_n(other.shape_.get(), rank, shape_.get());
    aspect_ratio_.reset(new double[rank]);
    std::copy_n(other.aspect_ratio_.get(), rank, aspect_ratio_.get());
  }
}
// Copy assignment: reuses the existing per-dimension arrays when the rank is
// unchanged, reallocates them otherwise, and releases them for non-positive
// rank.  Safe for self-assignment (same-rank path copies in place).
ChunkLayout::Grid& ChunkLayout::Grid::operator=(const Grid& other) {
  const DimensionIndex new_rank = other.rank_;
  if (new_rank <= 0) {
    shape_.reset();
    aspect_ratio_.reset();
  } else {
    if (new_rank != rank_) {
      shape_.reset(new Index[new_rank]);
      aspect_ratio_.reset(new double[new_rank]);
    }
    std::copy_n(other.shape_.get(), new_rank, shape_.get());
    std::copy_n(other.aspect_ratio_.get(), new_rank, aspect_ratio_.get());
  }
  rank_ = new_rank;
  elements_hard_constraint_ = other.elements_hard_constraint_;
  shape_hard_constraint_ = other.shape_hard_constraint_;
  aspect_ratio_hard_constraint_ = other.aspect_ratio_hard_constraint_;
  elements_ = other.elements_;
  return *this;
}
// Defaulted out-of-line so the `unique_ptr` members are destroyed where
// their element types are complete.
ChunkLayout::Grid::~Grid() = default;
// Sets (or confirms) the rank of this grid.
//
// A dynamic or already-matching rank is a no-op.  Otherwise the rank is
// validated, checked for compatibility with any previously set rank, and on
// first assignment the per-dimension arrays are allocated and filled with
// their trait-defined defaults.
absl::Status ChunkLayout::Grid::Set(RankConstraint value) {
  const DimensionIndex rank = value.rank;
  if (rank == dynamic_rank || rank == rank_) {
    return absl::OkStatus();
  }
  TENSORSTORE_RETURN_IF_ERROR(ValidateRank(rank));
  if (!RankConstraint::EqualOrUnspecified(rank_, rank)) {
    return RankMismatchError(rank, rank_);
  }
  rank_ = rank;
  if (rank > 0) {
    shape_.reset(new Index[rank]);
    std::fill_n(shape_.get(), rank, ShapeValueTraits::kDefaultValue);
    aspect_ratio_.reset(new double[rank]);
    std::fill_n(aspect_ratio_.get(), rank,
                AspectRatioValueTraits::kDefaultValue);
  }
  return absl::OkStatus();
}
namespace {
// Shared implementation for `Grid::Set(Shape)` and `Grid::Set(AspectRatio)`:
// establishes the rank from `value` (allocating `vec` as a side effect of
// `Grid::Set(RankConstraint)` if needed), then merges `value` into `vec`
// and `hard_constraint` under the rules of `Traits`.
template <typename Traits>
absl::Status SetVectorProperty(
    ChunkLayout::Grid& self, std::unique_ptr<typename Traits::Element[]>& vec,
    DimensionSet& hard_constraint,
    MaybeHardConstraintSpan<typename Traits::Element> value) {
  if (!value.valid()) return absl::OkStatus();
  const DimensionIndex rank = value.size();
  TENSORSTORE_RETURN_IF_ERROR(self.Set(RankConstraint(rank)));
  return ValidateAndMergeVectorInto<Traits>(value, vec.get(), hard_constraint);
}
}
// Merges a chunk-shape constraint into this grid.
absl::Status ChunkLayout::Grid::Set(Shape value) {
  return SetVectorProperty<ShapeValueTraits>(*this, shape_,
                                             shape_hard_constraint_, value);
}
// Merges an aspect-ratio constraint into this grid.
absl::Status ChunkLayout::Grid::Set(AspectRatio value) {
  return SetVectorProperty<AspectRatioValueTraits>(
      *this, aspect_ratio_, aspect_ratio_hard_constraint_, value);
}
// Merges a target-elements constraint into this grid.
absl::Status ChunkLayout::Grid::Set(Elements value) {
  return SetChunkElementsInternal<bool&>(elements_, elements_hard_constraint_,
                                         value);
}
// Merges all constraints of `value` (shape, aspect ratio, elements) into
// this grid; stops at the first constraint that fails to merge.
absl::Status ChunkLayout::Grid::Set(const GridView& value) {
  TENSORSTORE_RETURN_IF_ERROR(Set(value.shape()));
  TENSORSTORE_RETURN_IF_ERROR(Set(value.aspect_ratio()));
  TENSORSTORE_RETURN_IF_ERROR(Set(value.elements()));
  return absl::OkStatus();
}
// Compares two grids: rank, hard-constraint bits, and element target first,
// then the per-dimension arrays (which exist only for positive rank).
bool operator==(const ChunkLayout::Grid& a, const ChunkLayout::Grid& b) {
  const DimensionIndex rank = a.rank_;
  if (rank != b.rank_ ||
      a.elements_hard_constraint_ != b.elements_hard_constraint_ ||
      a.shape_hard_constraint_ != b.shape_hard_constraint_ ||
      a.aspect_ratio_hard_constraint_ != b.aspect_ratio_hard_constraint_ ||
      a.elements_ != b.elements_) {
    return false;
  }
  return rank <= 0 ||
         (std::equal(a.shape_.get(), a.shape_.get() + rank, b.shape_.get()) &&
          std::equal(a.aspect_ratio_.get(), a.aspect_ratio_.get() + rank,
                     b.aspect_ratio_.get()));
}
// Computes the chunk template box (origin + shape per dimension) for the
// given usage (read or write only).
//
// An unspecified rank yields an all-infinite box.  A dimension whose origin
// or size is implicit, soft-constrained, or zero also yields an infinite
// interval for that dimension.
absl::Status ChunkLayout::GetChunkTemplate(Usage usage,
                                           MutableBoxView<> box) const {
  assert(usage == kRead || usage == kWrite);
  const DimensionIndex rank = this->rank();
  if (rank == dynamic_rank) {
    box.Fill();
    return absl::OkStatus();
  }
  if (rank != box.rank()) {
    return absl::InvalidArgumentError(tensorstore::StrCat(
        "Rank of chunk layout (", rank, ") does not match expected rank (",
        box.rank(), ")"));
  }
  auto grid_origin = this->grid_origin();
  auto shape = (*this)[usage].shape();
  for (DimensionIndex i = 0; i < rank; ++i) {
    if (grid_origin[i] == kImplicit || !grid_origin.hard_constraint[i] ||
        shape[i] == 0 || !shape.hard_constraint[i]) {
      box[i] = IndexInterval::Infinite();
      continue;
    }
    TENSORSTORE_ASSIGN_OR_RETURN(
        box[i], IndexInterval::Sized(grid_origin[i], shape[i]),
        tensorstore::MaybeAnnotateStatus(
            _, tensorstore::StrCat(
                   "Incompatible grid origin/chunk shape for dimension ", i)));
  }
  return absl::OkStatus();
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::ChunkLayout,
tensorstore::serialization::JsonBindableSerializer<
tensorstore::ChunkLayout>())
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::ChunkLayout::Grid,
tensorstore::serialization::JsonBindableSerializer<
tensorstore::ChunkLayout::Grid>()) | #include "tensorstore/chunk_layout.h"
#include <stddef.h>
#include <algorithm>
#include <array>
#include <cstdlib>
#include <random>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/random.h"
#include "absl/status/status.h"
#include "tensorstore/box.h"
#include "tensorstore/index.h"
#include "tensorstore/index_space/dim_expression.h"
#include "tensorstore/index_space/index_domain_builder.h"
#include "tensorstore/index_space/index_transform.h"
#include "tensorstore/index_space/index_transform_builder.h"
#include "tensorstore/index_space/index_transform_testutil.h"
#include "tensorstore/index_space/output_index_method.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/gtest.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/testing/random_seed.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/rank.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/dimension_set.h"
#include "tensorstore/util/division.h"
#include "tensorstore/util/span.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
#include "tensorstore/util/str_cat.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::ChunkLayout;
using ::tensorstore::DimensionIndex;
using ::tensorstore::DimensionSet;
using ::tensorstore::Dims;
using ::tensorstore::dynamic_rank;
using ::tensorstore::Index;
using ::tensorstore::IndexDomainBuilder;
using ::tensorstore::IndexTransformBuilder;
using ::tensorstore::IndexTransformView;
using ::tensorstore::kImplicit;
using ::tensorstore::kInfIndex;
using ::tensorstore::kMaxRank;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::internal::ChooseChunkGrid;
using ::tensorstore::internal::ChooseChunkShape;
using ::tensorstore::internal::ChooseReadWriteChunkShapes;
using ::tensorstore::internal::MakeRandomDimensionOrder;
using ::testing::Optional;
using Usage = ChunkLayout::Usage;
// A rank-0 layout finalizes successfully, has empty per-dimension
// properties, and is unchanged by the rank-0 identity transform.
TEST(ChunkLayoutTest, SingleLevelRank0) {
  ChunkLayout layout;
  TENSORSTORE_ASSERT_OK(layout.Set(tensorstore::RankConstraint(0)));
  TENSORSTORE_ASSERT_OK(layout.Finalize());
  ASSERT_EQ(0, layout.rank());
  EXPECT_THAT(layout.inner_order(), ::testing::ElementsAre());
  EXPECT_THAT(layout | tensorstore::IdentityTransform(0), Optional(layout));
  EXPECT_THAT(layout.read_chunk().shape(), ::testing::ElementsAre());
}
// A rank-1 layout with only a write chunk shape: finalization propagates the
// write chunk shape to the read chunk shape.
TEST(ChunkLayoutTest, SingleLevelRank1) {
  ChunkLayout layout;
  TENSORSTORE_ASSERT_OK(layout.Set(ChunkLayout::GridOrigin({0})));
  TENSORSTORE_ASSERT_OK(layout.Set(ChunkLayout::WriteChunkShape({5})));
  TENSORSTORE_ASSERT_OK(layout.Finalize());
  ASSERT_EQ(1, layout.rank());
  EXPECT_THAT(layout.inner_order(), ::testing::ElementsAre());
  EXPECT_THAT(layout.grid_origin(), ::testing::ElementsAre(0));
  EXPECT_THAT(layout.read_chunk_shape(), ::testing::ElementsAre(5));
  EXPECT_THAT(layout.write_chunk_shape(), ::testing::ElementsAre(5));
  EXPECT_THAT(layout | tensorstore::IdentityTransform(1), Optional(layout));
}
// Grid-cell coordinates per usage (write/read/codec), indexed by usage.
using HierarchicalGridCell = std::array<std::vector<Index>, 3>;
// Computes, for each chunk usage of `layout`, the grid cell containing
// `position`.  Dimensions with a zero chunk size map to cell 0.
HierarchicalGridCell GetHierarchicalGridCell(
    const ChunkLayout& layout, tensorstore::span<const Index> position) {
  const DimensionIndex rank = layout.rank();
  auto origin = layout.grid_origin();
  HierarchicalGridCell hier_grid_cell;
  for (Usage usage : ChunkLayout::kUsages) {
    auto& grid_cell = hier_grid_cell[static_cast<int>(usage)];
    grid_cell.resize(rank);
    auto grid = layout[usage];
    for (DimensionIndex i = 0; i < rank; ++i) {
      const Index size = grid.shape()[i];
      if (size == 0) {
        grid_cell[i] = 0;
        continue;
      }
      const Index x = position[i] - origin[i];
      grid_cell[i] = tensorstore::FloorOfRatio(x, size);
    }
  }
  return hier_grid_cell;
}
// Verifies that `transform` maps the chunk grid of `input_layout`
// consistently onto the chunk grid of `output_layout`: sampled input
// positions in the same input grid cell must map to the same (divisor-
// adjusted) output grid cell, and vice versa.
void TestGridCorrespondence(absl::BitGenRef gen,
                            const ChunkLayout& output_layout,
                            const ChunkLayout& input_layout,
                            IndexTransformView<> transform) {
  const DimensionIndex output_rank = transform.output_rank();
  const DimensionIndex input_rank = transform.input_rank();
  ASSERT_EQ(output_layout.rank(), output_rank);
  ASSERT_EQ(input_layout.rank(), input_rank);
  // For a strided single-input-dimension map, the output grid effectively
  // coarsens by stride/gcd(stride, chunk_size); record that divisor per
  // usage/dimension so output cells can be normalized before comparison.
  HierarchicalGridCell output_chunk_divisors;
  for (Usage usage : ChunkLayout::kUsages) {
    auto& divisors = output_chunk_divisors[static_cast<size_t>(usage)];
    divisors.resize(output_rank, 1);
    for (DimensionIndex output_dim = 0; output_dim < output_rank;
         ++output_dim) {
      const auto map = transform.output_index_maps()[output_dim];
      if (map.method() !=
          tensorstore::OutputIndexMethod::single_input_dimension) {
        continue;
      }
      auto size = output_layout[usage].shape()[output_dim];
      if (size == 0) continue;
      divisors[output_dim] =
          std::abs(map.stride()) /
          tensorstore::GreatestCommonDivisor(map.stride(), size);
    }
  }
  SCOPED_TRACE(tensorstore::StrCat("output_layout=", output_layout));
  SCOPED_TRACE(tensorstore::StrCat("input_layout=", input_layout));
  SCOPED_TRACE(
      tensorstore::StrCat("output_chunk_divisors=",
                          ::testing::PrintToString(output_chunk_divisors)));
  // Bidirectional maps built up while sampling; each cell must always pair
  // with the same cell on the other side.
  absl::flat_hash_map<HierarchicalGridCell, HierarchicalGridCell>
      output_to_input_cell_map;
  absl::flat_hash_map<HierarchicalGridCell, HierarchicalGridCell>
      input_to_output_cell_map;
  std::vector<Index> input_pos(input_rank);
  std::vector<Index> output_pos(output_rank);
  const auto test_point = [&] {
    TENSORSTORE_ASSERT_OK(transform.TransformIndices(input_pos, output_pos));
    auto input_cell = GetHierarchicalGridCell(input_layout, input_pos);
    auto output_cell = GetHierarchicalGridCell(output_layout, output_pos);
    SCOPED_TRACE(tensorstore::StrCat("orig_output_cell=",
                                     ::testing::PrintToString(output_cell)));
    // Normalize output cells by the stride-induced divisors computed above.
    for (Usage usage : ChunkLayout::kUsages) {
      const size_t usage_index = static_cast<size_t>(usage);
      for (DimensionIndex output_dim = 0; output_dim < output_rank;
           ++output_dim) {
        auto& out_cell = output_cell[usage_index][output_dim];
        out_cell = tensorstore::FloorOfRatio(
            out_cell, output_chunk_divisors[usage_index][output_dim]);
      }
    }
    SCOPED_TRACE(
        tensorstore::StrCat("input_pos=", tensorstore::span(input_pos)));
    SCOPED_TRACE(
        tensorstore::StrCat("output_pos=", tensorstore::span(output_pos)));
    SCOPED_TRACE(tensorstore::StrCat("input_cell=",
                                     ::testing::PrintToString(input_cell)));
    SCOPED_TRACE(tensorstore::StrCat("output_cell=",
                                     ::testing::PrintToString(output_cell)));
    auto input_it =
        output_to_input_cell_map.emplace(output_cell, input_cell).first;
    auto output_it =
        input_to_output_cell_map.emplace(input_cell, output_cell).first;
    EXPECT_EQ(input_it->second, input_cell);
    EXPECT_EQ(output_it->second, output_cell);
  };
  // Sample random base points, then sweep each input dimension around each
  // base point to exercise cell-boundary crossings.
  constexpr size_t kNumSamplePoints = 10;
  for (size_t sample_i = 0; sample_i < kNumSamplePoints; ++sample_i) {
    for (DimensionIndex input_dim = 0; input_dim < input_rank; ++input_dim) {
      input_pos[input_dim] =
          absl::Uniform<Index>(absl::IntervalClosedClosed, gen, -40, 40);
    }
    for (DimensionIndex dir_input_dim = 0; dir_input_dim < input_rank;
         ++dir_input_dim) {
      const Index initial_pos = input_pos[dir_input_dim];
      for (Index i = -20; i <= 20; ++i) {
        input_pos[dir_input_dim] = initial_pos + i;
        test_point();
      }
      input_pos[dir_input_dim] = initial_pos;
    }
  }
}
// Asserts that applying `expr` (composed with an identity transform of
// matching rank) to the layout described by JSON `a` yields the layout
// described by JSON `b`.
template <typename Expr>
void TestApplyIndexTransform(::nlohmann::json a, const Expr& expr,
                             ::nlohmann::json b) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto a_layout, ChunkLayout::FromJson(a));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto b_layout, ChunkLayout::FromJson(b));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform, tensorstore::IdentityTransform(a_layout.rank()) | expr);
  EXPECT_THAT(a_layout | transform, ::testing::Optional(b_layout));
}
// Parameters controlling `MakeRandomChunkLayout`.
struct MakeRandomChunkLayoutParameters {
  // Inclusive bounds on the rank of the generated layout.
  DimensionIndex min_rank = 1;
  DimensionIndex max_rank = 3;
};
// Generates a random finalized `ChunkLayout`: random rank within `p`'s
// bounds, optional random inner order, random grid origin, and randomly
// present per-usage chunk shapes (write sizes are chosen as multiples of any
// read size so finalization succeeds).
ChunkLayout MakeRandomChunkLayout(
    absl::BitGenRef gen, const MakeRandomChunkLayoutParameters& p = {}) {
  const DimensionIndex rank = absl::Uniform<DimensionIndex>(
      absl::IntervalClosedClosed, gen, p.min_rank, p.max_rank);
  ChunkLayout layout;
  TENSORSTORE_CHECK_OK(layout.Set(tensorstore::RankConstraint(rank)));
  if (absl::Bernoulli(gen, 0.5)) {
    // Optionally constrain a random inner (storage) dimension order.
    DimensionIndex inner_order[kMaxRank];
    MakeRandomDimensionOrder(gen, tensorstore::span(inner_order, rank));
    TENSORSTORE_CHECK_OK(layout.Set(
        ChunkLayout::InnerOrder(tensorstore::span(inner_order, rank))));
  } else {
  }
  Index grid_origin[kMaxRank];
  for (DimensionIndex dim = 0; dim < rank; ++dim) {
    grid_origin[dim] =
        absl::Uniform<Index>(absl::IntervalClosedClosed, gen, -5, 5);
  }
  TENSORSTORE_CHECK_OK(layout.Set(
      ChunkLayout::GridOrigin(tensorstore::span(grid_origin, rank))));
  const auto set_grid = [&](Usage usage) {
    if (absl::Bernoulli(gen, 0.3)) {
      // Leave this usage's grid unconstrained.
      return;
    }
    Index shape[kMaxRank];
    std::fill_n(shape, rank, 0);
    for (DimensionIndex dim = 0; dim < rank; ++dim) {
      if (absl::Bernoulli(gen, 0.3)) {
        continue;
      }
      Index size;
      if (usage == Usage::kWrite && layout.read_chunk_shape()[dim] != 0) {
        // Keep the write size a multiple of the read size so that
        // `Finalize` accepts the layout.
        const Index read_size = layout.read_chunk_shape()[dim];
        size = absl::Uniform<Index>(absl::IntervalClosedClosed, gen, 1, 5) *
               read_size;
      } else {
        size = absl::Uniform<Index>(absl::IntervalClosedClosed, gen, 1,
                                    usage == Usage::kCodec ? 5 : 10);
      }
      shape[dim] = size;
    }
    TENSORSTORE_CHECK_OK(layout.Set(
        ChunkLayout::Chunk(ChunkLayout::ChunkShapeBase(
                               tensorstore::span<const Index>(shape, rank)),
                           usage)));
  };
  // Read must be set before write (write sizes depend on read sizes).
  set_grid(Usage::kCodec);
  set_grid(Usage::kRead);
  set_grid(Usage::kWrite);
  TENSORSTORE_CHECK_OK(layout.Finalize());
  return layout;
}
// JSON round-trip of representative layouts with defaults excluded.
TEST(ChunkLayoutTest, Json) {
  tensorstore::TestJsonBinderRoundTripJsonOnly<ChunkLayout>(
      {
          {
              {"rank", 0},
          },
          {
              {"rank", 2},
          },
          {
              {"grid_origin", {1, 2}},
              {"write_chunk",
               {
                   {"shape", {10, 11}},
               }},
              {"inner_order", {1, 0}},
          },
      },
      tensorstore::internal_json_binding::DefaultBinder<>,
      tensorstore::IncludeDefaults{false});
}
// JSON round-trip of a fully-specified layout with defaults excluded.
TEST(ChunkLayoutTest, JsonExcludeDefaults) {
  tensorstore::TestJsonBinderRoundTripJsonOnly<ChunkLayout>(
      {{
          {"grid_origin", {1, 2}},
          {"write_chunk",
           {
               {"shape", {10, 11}},
           }},
          {"inner_order", {1, 0}},
      }},
      tensorstore::internal_json_binding::DefaultBinder<>,
      tensorstore::IncludeDefaults{false});
}
// Translation shifts the grid origin; shapes and inner order are unchanged.
TEST(ChunkLayoutTest, Rank2Translate) {
  TestApplyIndexTransform(
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {10, 20}},
           }},
          {"inner_order", {1, 0}},
      },
      Dims(0, 1).TranslateBy(5),
      {
          {"grid_origin", {5, 6}},
          {"write_chunk",
           {
               {"shape", {10, 20}},
           }},
          {"inner_order", {1, 0}},
      });
}
// Transposition permutes origin, shape, and inner order consistently.
TEST(ChunkLayoutTest, Rank2Transpose) {
  TestApplyIndexTransform(
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {10, 20}},
           }},
          {"inner_order", {1, 0}},
      },
      Dims(1, 0).Transpose(),
      {
          {"grid_origin", {1, 0}},
          {"write_chunk",
           {
               {"shape", {20, 10}},
           }},
          {"inner_order", {0, 1}},
      });
}
// NOTE(review): this test body is currently identical to Rank2Transpose;
// presumably it was intended to additionally exercise a grid-order
// constraint — verify against the test's history.
TEST(ChunkLayoutTest, Rank2TransposeWithGridOrder) {
  TestApplyIndexTransform(
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {10, 20}},
           }},
          {"inner_order", {1, 0}},
      },
      Dims(1, 0).Transpose(),
      {
          {"grid_origin", {1, 0}},
          {"write_chunk",
           {
               {"shape", {20, 10}},
           }},
          {"inner_order", {0, 1}},
      });
}
// A stride that evenly divides the chunk sizes scales them down exactly.
TEST(ChunkLayoutTest, Rank2Stride) {
  TestApplyIndexTransform(
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {10, 20}},
           }},
          {"inner_order", {0, 1}},
      },
      Dims(0, 1).Stride(2),
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {5, 10}},
           }},
          {"inner_order", {0, 1}},
      });
}
// A stride that does not evenly divide the chunk sizes still produces the
// gcd-reduced chunk shape.
// NOTE(review): test name misspells "Divisible"; renaming would change the
// reported test id, so it is flagged rather than fixed here.
TEST(ChunkLayoutTest, Rank2StrideNotEvenlyDisibile) {
  TestApplyIndexTransform(
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {10, 20}},
           }},
          {"inner_order", {0, 1}},
      },
      Dims(0, 1).Stride(6),
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {5, 10}},
           }},
          {"inner_order", {0, 1}},
      });
}
// A negative stride reflects the grid origin as well as scaling the shape.
TEST(ChunkLayoutTest, Rank2StrideNegative) {
  TestApplyIndexTransform(
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {10, 20}},
           }},
          {"inner_order", {0, 1}},
      },
      Dims(0, 1).Stride(-2),
      {
          {"grid_origin", {1, 0}},
          {"write_chunk",
           {
               {"shape", {5, 10}},
           }},
          {"inner_order", {0, 1}},
      });
}
// Combined translation and negative stride applied to a layout with both
// write and read chunk grids.
TEST(ChunkLayoutTest, Rank2TwoLevelStrideNegative) {
  TestApplyIndexTransform(
      {
          {"grid_origin", {0, 1}},
          {"write_chunk",
           {
               {"shape", {10, 20}},
           }},
          {"read_chunk",
           {
               {"shape", {5, 5}},
           }},
          {"inner_order", {0, 1}},
      },
      Dims(0, 1).TranslateBy({2, 3}).Stride(-2),
      {
          {"grid_origin", {0, -1}},
          {"write_chunk",
           {
               {"shape", {5, 10}},
           }},
          {"read_chunk",
           {
               {"shape", {5, 5}},
           }},
          {"inner_order", {0, 1}},
      });
}
// Applies a random invertible transform to a random layout and checks that
// the inverse transform recovers the original layout, and that input/output
// chunk grids correspond.
TEST(ApplyIndexTransformTest, RandomInvertible) {
  constexpr size_t kNumIterations = 10;
  // Seed the generator once, outside the loop: constructing it inside the
  // loop re-seeds every iteration with the same value, making all
  // `kNumIterations` iterations exercise an identical layout/transform pair
  // and defeating the purpose of repeated random testing.
  std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
      "TENSORSTORE_INTERNAL_LAYOUT_TEST_SEED")};
  for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
    MakeRandomChunkLayoutParameters layout_p;
    auto output_layout = MakeRandomChunkLayout(gen, layout_p);
    tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
        transform_p;
    transform_p.new_dims_are_singleton = false;
    auto transform =
        tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
            gen, tensorstore::IdentityTransform(output_layout.rank()).domain(),
            transform_p);
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_layout,
                                     output_layout | transform);
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto new_output_layout,
        ApplyInverseIndexTransform(transform, input_layout));
    SCOPED_TRACE(tensorstore::StrCat("transform=", transform));
    EXPECT_EQ(output_layout, new_output_layout)
        << "input_layout=" << input_layout;
    TestGridCorrespondence(gen, output_layout, input_layout, transform);
  }
}
// Property test: even for transforms that are not invertible (small strides
// allowed, so chunk boundaries may not align), the transformed layout's grid
// must still correspond to the original layout's grid.
TEST(ApplyIndexTransformTest, RandomNonInvertibleUnaligned) {
  constexpr size_t kNumIterations = 10;
  // Seed once outside the loop; re-seeding each iteration with the same seed
  // would make every iteration identical.
  std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
      "TENSORSTORE_INTERNAL_LAYOUT_TEST_SEED")};
  for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
    MakeRandomChunkLayoutParameters layout_p;
    auto output_layout = MakeRandomChunkLayout(gen, layout_p);
    tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
        transform_p;
    transform_p.new_dims_are_singleton = false;
    // Non-unit strides (up to 3) make the transform non-invertible.
    transform_p.max_stride = 3;
    auto transform =
        tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
            gen, tensorstore::IdentityTransform(output_layout.rank()).domain(),
            transform_p);
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_layout,
                                     output_layout | transform);
    SCOPED_TRACE(tensorstore::StrCat("transform=", transform));
    TestGridCorrespondence(gen, output_layout, input_layout, transform);
  }
}
// Property test in the opposite direction: start from a random *input*
// layout, derive the output layout via ApplyInverseIndexTransform, and verify
// that applying the forward transform recovers the input layout and that the
// grids correspond.
TEST(ApplyIndexTransformTest, RandomNonInvertibleAligned) {
  constexpr size_t kNumIterations = 10;
  // Seed once outside the loop; re-seeding each iteration with the same seed
  // would make every iteration identical.
  std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
      "TENSORSTORE_INTERNAL_LAYOUT_TEST_SEED")};
  for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
    MakeRandomChunkLayoutParameters layout_p;
    auto input_layout = MakeRandomChunkLayout(gen, layout_p);
    tensorstore::internal::MakeStridedIndexTransformForInputSpaceParameters
        transform_p;
    transform_p.max_stride = 3;
    auto transform =
        tensorstore::internal::MakeRandomStridedIndexTransformForInputSpace(
            gen, tensorstore::IdentityTransform(input_layout.rank()).domain(),
            transform_p);
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto output_layout,
        ApplyInverseIndexTransform(transform, input_layout));
    SCOPED_TRACE(tensorstore::StrCat("transform=", transform));
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto new_input_layout, ApplyIndexTransform(transform, output_layout));
    EXPECT_EQ(input_layout, new_input_layout)
        << "output_layout=" << output_layout;
    TestGridCorrespondence(gen, output_layout, input_layout, transform);
  }
}
// A default-constructed ChunkLayout has unknown rank and no constraints set.
TEST(ChunkLayoutTest, DefaultConstruct) {
  ChunkLayout x;
  EXPECT_EQ(dynamic_rank, x.rank());
  EXPECT_FALSE(x.inner_order().valid());
  EXPECT_FALSE(x.grid_origin().valid());
  EXPECT_FALSE(x.read_chunk().aspect_ratio().valid());
}
// Round-trips representative constraint combinations (hard and soft, per
// chunk-grid usage) through the JSON binder without loss.
TEST(ChunkLayoutTest, ConstraintsJson) {
  tensorstore::TestJsonBinderRoundTripJsonOnly<ChunkLayout>({
      {
          {"write_chunk",
           {
               {"elements_soft_constraint", 5},
           }},
      },
      {
          {"grid_origin", {1, 2}},
          {"write_chunk",
           {
               {"shape", {10, 11}},
           }},
          {"inner_order", {1, 0}},
      },
      {
          {"grid_origin", {1, 2}},
          {"write_chunk",
           {
               {"shape", {10, 11}},
           }},
          {"inner_order_soft_constraint", {1, 0}},
      },
      {
          {"grid_origin", {nullptr, nullptr, 3}},
          {"grid_origin_soft_constraint", {4, nullptr, nullptr}},
          {"write_chunk",
           {{"elements_soft_constraint", 1000}, {"shape", {5, nullptr, 6}}}},
          {"read_chunk",
           {{"elements", 100},
            {"shape_soft_constraint", {nullptr, 10, nullptr}},
            {"aspect_ratio", {nullptr, 1, 2}}}},
          {"codec_chunk", {{"aspect_ratio_soft_constraint", {nullptr, 2, 1}}}},
          {"inner_order", {2, 1, 0}},
      },
  });
}
// JSON inputs that do not round-trip exactly: the "chunk" key is shorthand
// that fans out to read_chunk/write_chunk (and codec_chunk for aspect_ratio),
// and hard constraints take precedence over soft ones.  Each pair is
// (input JSON, expected normalized JSON).
TEST(ChunkLayoutTest, JsonRoundTripInexact) {
  tensorstore::TestJsonBinderRoundTripJsonOnlyInexact<ChunkLayout>({
      {{
           {"chunk", {{"elements", 50}}},
       },
       {
           {"read_chunk", {{"elements", 50}}},
           {"write_chunk", {{"elements", 50}}},
       }},
      {{
           {"chunk", {{"elements_soft_constraint", 50}}},
       },
       {
           {"read_chunk", {{"elements_soft_constraint", 50}}},
           {"write_chunk", {{"elements_soft_constraint", 50}}},
       }},
      {{
           {"read_chunk", {{"shape", {-1, 2, 3}}}},
       },
       {
           {"read_chunk",
            {{"shape", {nullptr, 2, 3}},
             {"shape_soft_constraint", {-1, nullptr, nullptr}}}},
       }},
      {{
           {"chunk", {{"elements_soft_constraint", 50}}},
           {"read_chunk", {{"elements_soft_constraint", 60}}},
       },
       {
           {"read_chunk", {{"elements_soft_constraint", 50}}},
           {"write_chunk", {{"elements_soft_constraint", 50}}},
       }},
      {{
           {"chunk", {{"elements_soft_constraint", 50}}},
           {"read_chunk", {{"elements", 60}}},
       },
       {
           {"read_chunk", {{"elements", 60}}},
           {"write_chunk", {{"elements_soft_constraint", 50}}},
       }},
      {{
           {"chunk", {{"aspect_ratio", {2, 3}}}},
       },
       {
           {"codec_chunk", {{"aspect_ratio", {2, 3}}}},
           {"read_chunk", {{"aspect_ratio", {2, 3}}}},
           {"write_chunk", {{"aspect_ratio", {2, 3}}}},
       }},
      {{
           {"chunk", {{"aspect_ratio_soft_constraint", {2, 3}}}},
       },
       {
           {"codec_chunk", {{"aspect_ratio_soft_constraint", {2, 3}}}},
           {"read_chunk", {{"aspect_ratio_soft_constraint", {2, 3}}}},
           {"write_chunk", {{"aspect_ratio_soft_constraint", {2, 3}}}},
       }},
      {{
           {"chunk", {{"shape", {2, 3}}}},
       },
       {
           {"read_chunk", {{"shape", {2, 3}}}},
           {"write_chunk", {{"shape", {2, 3}}}},
       }},
      {{
           {"chunk", {{"shape_soft_constraint", {2, 3}}}},
       },
       {
           {"read_chunk", {{"shape_soft_constraint", {2, 3}}}},
           {"write_chunk", {{"shape_soft_constraint", {2, 3}}}},
       }},
      {{
           {"chunk", {{"shape_soft_constraint", {2, 3}}}},
           {"read_chunk", {{"shape", {4, nullptr}}}},
       },
       {
           {"read_chunk",
            {
                {"shape_soft_constraint", {nullptr, 3}},
                {"shape", {4, nullptr}},
            }},
           {"write_chunk", {{"shape_soft_constraint", {2, 3}}}},
       }},
  });
}
// A failed Set (invalid permutation {2, 3, 4}) must leave the layout
// unmodified, so both layouts still compare equal to a default one.
TEST(ChunkLayoutTest, CompareAllUnset) {
  ChunkLayout a;
  ChunkLayout b;
  EXPECT_FALSE(b.Set(ChunkLayout::InnerOrder({2, 3, 4})).ok());
  EXPECT_EQ(a, b);
  EXPECT_EQ(b, a);
}
// Layouts differing only in inner_order (value, rank, or hard/soft) must
// compare pairwise distinct.
TEST(ChunkLayoutTest, CompareInnerOrder) {
  tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
      ::nlohmann::json::object_t(),
      {{"inner_order", {0, 1}}},
      {{"inner_order", {0, 1, 2}}},
      {{"inner_order", {0, 2, 1}}},
      {{"inner_order_soft_constraint", {0, 2, 1}}},
  });
}
// For each chunk-grid usage, differing "elements" values (and hard vs. soft)
// must make layouts compare distinct.
TEST(ChunkLayoutTest, CompareChunkElements) {
  for (std::string prefix : {"codec", "read", "write"}) {
    tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
        ::nlohmann::json::object_t(),
        {{prefix + "_chunk", {{"elements", 42}}}},
        {{prefix + "_chunk", {{"elements", 43}}}},
        {{prefix + "_chunk", {{"elements_soft_constraint", 42}}}},
    });
  }
}
// For each chunk-grid usage, differing aspect_ratio values (including mixed
// hard/soft per-dimension) must make layouts compare distinct.
TEST(ChunkLayoutTest, CompareChunkAspectRatio) {
  for (std::string prefix : {"codec", "read", "write"}) {
    tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
        ::nlohmann::json::object_t(),
        {{prefix + "_chunk", {{"aspect_ratio", {1, 2, nullptr}}}}},
        {{prefix + "_chunk", {{"aspect_ratio", {1, 1, nullptr}}}}},
        {{prefix + "_chunk",
          {
              {"aspect_ratio", {1, 1, nullptr}},
              {"aspect_ratio_soft_constraint", {nullptr, nullptr, 4}},
          }}},
        {{prefix + "_chunk",
          {{"aspect_ratio_soft_constraint", {1, 2, nullptr}}}}},
    });
  }
}
// Differing grid_origin values (including mixed hard/soft per-dimension)
// must make layouts compare distinct.
TEST(ChunkLayoutTest, CompareGridOrigin) {
  tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
      ::nlohmann::json::object_t(),
      {{"grid_origin", {1, 2, nullptr}}},
      {{"grid_origin", {1, 1, nullptr}}},
      {
          {"grid_origin", {1, 1, nullptr}},
          {"grid_origin_soft_constraint", {nullptr, nullptr, 4}},
      },
      {{"grid_origin_soft_constraint", {1, 2, nullptr}}},
  });
}
// For each chunk-grid usage, differing shape values (including mixed
// hard/soft per-dimension) must make layouts compare distinct.
TEST(ChunkLayoutTest, CompareChunkShape) {
  for (std::string prefix : {"codec", "read", "write"}) {
    tensorstore::TestCompareDistinctFromJson<ChunkLayout>({
        ::nlohmann::json::object_t(),
        {{prefix + "_chunk", {{"shape", {1, 2, nullptr}}}}},
        {{prefix + "_chunk", {{"shape", {1, 1, nullptr}}}}},
        {{prefix + "_chunk",
          {
              {"shape", {1, 1, nullptr}},
              {"shape_soft_constraint", {nullptr, nullptr, 4}},
          }}},
        {{prefix + "_chunk", {{"shape_soft_constraint", {1, 2, nullptr}}}}},
    });
  }
}
// Setting constraints via ChunkLayout::Chunk (no specific usage) fans out:
// shape/elements apply to write_chunk and read_chunk, aspect_ratio applies to
// all three usages including codec_chunk.  Zero entries mean "unconstrained"
// and serialize as null.
TEST(ChunkLayoutTest, SetUnspecifiedUsage) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::Chunk(ChunkLayout::ChunkShape({5, 6, 0}),
                         ChunkLayout::ChunkAspectRatio({2, 1, 0}),
                         ChunkLayout::ChunkElements(42))));
  EXPECT_THAT(constraints.ToJson(),
              ::testing::Optional(MatchesJson({
                  {"write_chunk",
                   {{"shape", {5, 6, nullptr}},
                    {"aspect_ratio", {2, 1, nullptr}},
                    {"elements", 42}}},
                  {"read_chunk",
                   {{"shape", {5, 6, nullptr}},
                    {"aspect_ratio", {2, 1, nullptr}},
                    {"elements", 42}}},
                  {"codec_chunk", {{"aspect_ratio", {2, 1, nullptr}}}},
              })));
}
// Property test on a mixed hard/soft constraints object: for random
// invertible transforms, forward application, application of the inverse
// transform, and ApplyInverseIndexTransform must all be mutually consistent.
TEST(ChunkLayoutConstraintsTest, ApplyIndexTransformRandomInvertible) {
  constexpr size_t kNumIterations = 10;
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto output_constraints,
      ChunkLayout::FromJson({
          {"codec_chunk",
           {{"elements_soft_constraint", 20},
            {"aspect_ratio", {1, 2, 3}},
            {"shape", {nullptr, 4, 5}}}},
          {"read_chunk",
           {{"elements", 30},
            {"aspect_ratio", {4, 5, 6}},
            {"shape_soft_constraint", {6, nullptr, 7}}}},
          {"write_chunk",
           {{"elements", 40},
            {"aspect_ratio_soft_constraint", {7, 8, 9}},
            {"shape", {8, 9, nullptr}}}},
          {"grid_origin", {nullptr, nullptr, 11}},
          {"inner_order_soft_constraint", {2, 0, 1}},
      }));
  // Seed once outside the loop; re-seeding each iteration with the same seed
  // would make every iteration test the identical transform.
  std::minstd_rand gen{tensorstore::internal_testing::GetRandomSeedForTest(
      "TENSORSTORE_INTERNAL_LAYOUT_CONSTRAINTS_TEST_SEED")};
  for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
    tensorstore::internal::MakeStridedIndexTransformForOutputSpaceParameters
        transform_p;
    transform_p.new_dims_are_singleton = true;
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto domain, IndexDomainBuilder(output_constraints.rank()).Finalize());
    auto transform =
        tensorstore::internal::MakeRandomStridedIndexTransformForOutputSpace(
            gen, domain, transform_p);
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto inverse_transform,
                                     InverseTransform(transform));
    SCOPED_TRACE(tensorstore::StrCat("transform=", transform));
    SCOPED_TRACE(tensorstore::StrCat("inverse_transform=", inverse_transform));
    // Forward application and inverse-of-inverse must agree.
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto input_constraints,
                                     output_constraints | transform);
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto input_constraints2,
        ApplyInverseIndexTransform(inverse_transform, output_constraints));
    EXPECT_EQ(input_constraints, input_constraints2)
        << "output_constraints=" << output_constraints;
    // Both ways of mapping back must recover the original constraints.
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(
        auto output_constraints2,
        ApplyInverseIndexTransform(transform, input_constraints));
    EXPECT_EQ(output_constraints, output_constraints2)
        << "input_constraints=" << input_constraints;
    TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_output_constraints,
                                     input_constraints | inverse_transform);
    EXPECT_EQ(output_constraints, new_output_constraints)
        << "input_constraints=" << input_constraints;
  }
}
// Applying a transform to a rank-unconstrained layout is a no-op.
TEST(ChunkLayoutTest, ApplyIndexTransformNoRank) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto new_constraints,
      constraints | tensorstore::Dims(0, 1).TranslateBy(5));
  EXPECT_EQ(constraints, new_constraints);
}
// Forward and inverse application of a translate+transpose transform:
// inner_order and chunk shape are permuted in both directions, while
// grid_origin is shifted by +5 (forward) or -5 (inverse) before permuting.
TEST(ChunkLayoutTest, ApplyIndexTransform) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto constraints,
                                   ChunkLayout::FromJson({
                                       {"inner_order", {0, 1, 2}},
                                       {"grid_origin", {1, 2, 3}},
                                       {"read_chunk", {{"shape", {4, 5, 6}}}},
                                   }));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_new_constraints,
                                   ChunkLayout::FromJson({
                                       {"inner_order", {2, 1, 0}},
                                       {"grid_origin", {8, 7, 6}},
                                       {"read_chunk", {{"shape", {6, 5, 4}}}},
                                   }));
  EXPECT_THAT(
      constraints | tensorstore::Dims(2, 1, 0).TranslateBy(5).Transpose(),
      ::testing::Optional(expected_new_constraints));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto expected_new_inverse_constraints,
                                   ChunkLayout::FromJson({
                                       {"inner_order", {2, 1, 0}},
                                       {"grid_origin", {-2, -3, -4}},
                                       {"read_chunk", {{"shape", {6, 5, 4}}}},
                                   }));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform,
      tensorstore::IdentityTransform(3) |
          tensorstore::Dims(2, 1, 0).TranslateBy(5).Transpose());
  EXPECT_THAT(ApplyInverseIndexTransform(transform, constraints),
              ::testing::Optional(expected_new_inverse_constraints));
}
// Translating grid_origin by kInfIndex overflows: both forward and inverse
// application must fail with kOutOfRange rather than silently wrapping.
TEST(ChunkLayoutTest, ApplyIndexTransformOverflow) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto constraints,
                                   ChunkLayout::FromJson({
                                       {"grid_origin", {0, 0, 0}},
                                   }));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto transform, tensorstore::IdentityTransform(3) |
                          tensorstore::Dims(0).TranslateBy(kInfIndex));
  EXPECT_THAT(constraints | transform,
              MatchesStatus(
                  absl::StatusCode::kOutOfRange,
                  "Error transforming grid_origin: "
                  "Error transforming output dimension 0 -> input dimension 0: "
                  "Integer overflow transforming output origin 0 by offset .* "
                  "and stride 1"));
  EXPECT_THAT(ApplyInverseIndexTransform(transform, constraints),
              MatchesStatus(
                  absl::StatusCode::kOutOfRange,
                  "Error transforming grid_origin: "
                  "Error transforming input dimension 0 -> output dimension 0: "
                  "Integer overflow transforming input origin 0 by offset .* "
                  "and stride 1"));
}
// Inverse application fails when a hard grid_origin constraint exists for an
// input dimension that no output dimension maps to.
TEST(ChunkLayoutTest, ApplyInverseIndexTransformMissingInputDimensionRequired) {
  ChunkLayout input_constraints;
  TENSORSTORE_ASSERT_OK(input_constraints.Set(ChunkLayout::GridOrigin({5, 6})));
  // Maps output dimension 0 from input dimension 1; input dimension 0 is
  // unreferenced.
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
                                   IndexTransformBuilder(2, 1)
                                       .output_single_input_dimension(0, 1)
                                       .Finalize());
  EXPECT_THAT(
      ApplyInverseIndexTransform(transform, input_constraints),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Error transforming grid_origin: "
                    "No output dimension corresponds to input dimension 0"));
}
// With only a *soft* grid_origin constraint, an unreferenced input dimension
// is simply dropped instead of causing an error.
TEST(ChunkLayoutTest,
     ApplyInverseIndexTransformMissingInputDimensionNotRequired) {
  ChunkLayout input_constraints;
  TENSORSTORE_ASSERT_OK(input_constraints.Set(
      ChunkLayout::GridOrigin({5, 6}, false)));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto transform,
                                   IndexTransformBuilder(2, 1)
                                       .output_single_input_dimension(0, 1)
                                       .Finalize());
  ChunkLayout output_constraints;
  TENSORSTORE_ASSERT_OK(output_constraints.Set(
      ChunkLayout::GridOrigin({6}, false)));
  EXPECT_THAT(ApplyInverseIndexTransform(transform, input_constraints),
              ::testing::Optional(output_constraints));
}
// A null (default-constructed) transform leaves a rank-3 layout unchanged in
// both forward and inverse application.
TEST(ChunkLayoutTest, ApplyIndexTransformKnownRankNullTransform) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto constraints,
                                   ChunkLayout::FromJson({
                                       {"inner_order", {2, 1, 0}},
                                   }));
  EXPECT_THAT(constraints | tensorstore::IndexTransform<>(),
              ::testing::Optional(constraints));
  EXPECT_THAT(
      ApplyInverseIndexTransform(tensorstore::IndexTransform<>(), constraints),
      ::testing::Optional(constraints));
}
// Applying a rank-2 transform to rank-3 constraints must fail with
// kInvalidArgument in both directions.
TEST(ChunkLayoutTest, ApplyIndexTransformRankMismatch) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto constraints,
                                   ChunkLayout::FromJson({
                                       {"inner_order", {2, 1, 0}},
                                   }));
  EXPECT_THAT(constraints | tensorstore::IdentityTransform(2),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Cannot transform constraints of rank 3 by index "
                            "transform of rank 2 -> 2"));
  EXPECT_THAT(ApplyInverseIndexTransform(tensorstore::IdentityTransform(2),
                                         constraints),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Cannot transform constraints of rank 3 by index "
                            "transform of rank 2 -> 2"));
}
// A null transform also leaves rank-independent constraints (elements only)
// unchanged in both directions.
TEST(ChunkLayoutTest, ApplyIndexTransformUnknownRankNullTransform) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto constraints,
                                   ChunkLayout::FromJson({
                                       {"read_chunk", {{"elements", 42}}},
                                   }));
  EXPECT_THAT(constraints | tensorstore::IndexTransform<>(),
              ::testing::Optional(constraints));
  EXPECT_THAT(
      ApplyInverseIndexTransform(tensorstore::IndexTransform<>(), constraints),
      ::testing::Optional(constraints));
}
// Exercises ChunkLayout::InnerOrder: soft vs. hard constraints, rejection of
// invalid permutations, soft constraints not overriding existing values, and
// conflicts between differing hard constraints.
TEST(ChunkLayoutTest, InnerOrder) {
  ChunkLayout constraints;
  // Unset: invalid, not hard, and empty.  (A duplicated valid() check was
  // removed here.)
  EXPECT_FALSE(constraints.inner_order().valid());
  EXPECT_FALSE(constraints.inner_order().hard_constraint);
  EXPECT_THAT(constraints.inner_order(), ::testing::ElementsAre());
  // Setting a soft constraint also establishes the rank.
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::InnerOrder({0, 2, 1}, /*hard_constraint=*/false)));
  EXPECT_EQ(3, constraints.rank());
  EXPECT_FALSE(constraints.inner_order().hard_constraint);
  EXPECT_THAT(constraints.inner_order(), ::testing::ElementsAre(0, 2, 1));
  // Non-permutations are rejected regardless of hard/soft.
  EXPECT_THAT(
      constraints.Set(ChunkLayout::InnerOrder({0, 2, 2})),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          "Error setting inner_order: Invalid permutation: \\{0, 2, 2\\}"));
  EXPECT_THAT(
      constraints.Set(
          ChunkLayout::InnerOrder({0, 2, 2}, /*hard_constraint=*/false)),
      MatchesStatus(
          absl::StatusCode::kInvalidArgument,
          "Error setting inner_order: Invalid permutation: \\{0, 2, 2\\}"));
  // A second soft constraint does not replace the existing soft value.
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::InnerOrder({1, 2, 0}, /*hard_constraint=*/false)));
  EXPECT_FALSE(constraints.inner_order().hard_constraint);
  EXPECT_THAT(constraints.inner_order(), ::testing::ElementsAre(0, 2, 1));
  // A hard constraint replaces the soft one.
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::InnerOrder({2, 1, 0})));
  EXPECT_TRUE(constraints.inner_order().hard_constraint);
  EXPECT_THAT(constraints.inner_order(), ::testing::ElementsAre(2, 1, 0));
  // Re-setting the same hard constraint is fine; a soft constraint does not
  // weaken an existing hard one.
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::InnerOrder({2, 1, 0})));
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::InnerOrder({0, 2, 1}, /*hard_constraint=*/false)));
  EXPECT_TRUE(constraints.inner_order().hard_constraint);
  EXPECT_THAT(constraints.inner_order(), ::testing::ElementsAre(2, 1, 0));
  // An unset InnerOrder is a no-op, and a conflicting hard constraint fails
  // while leaving the existing value intact.
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::InnerOrder()));
  EXPECT_THAT(
      constraints.Set(ChunkLayout::InnerOrder({0, 1, 2})),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Error setting inner_order: "
                    "New hard constraint \\(\\{0, 1, 2\\}\\) does not match "
                    "existing hard constraint \\(\\{2, 1, 0\\}\\)"));
  EXPECT_TRUE(constraints.inner_order().hard_constraint);
  EXPECT_THAT(constraints.inner_order(), ::testing::ElementsAre(2, 1, 0));
}
// Exercises ChunkLayout::GridOrigin: per-dimension soft/hard constraint
// bitmasks, kImplicit entries, invalid values, rank mismatches, and
// conflicting hard constraints.
TEST(ChunkLayoutTest, GridOrigin) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::GridOrigin(
      {1, kImplicit, kImplicit}, false)));
  EXPECT_EQ(3, constraints.rank());
  EXPECT_THAT(constraints.grid_origin(),
              ::testing::ElementsAre(1, kImplicit, kImplicit))
  ;
  EXPECT_EQ(0, constraints.grid_origin().hard_constraint.to_uint());
  // A later soft constraint fills in only the still-implicit dimensions.
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::GridOrigin({2, 3, kImplicit}, false)));
  EXPECT_THAT(constraints.grid_origin(),
              ::testing::ElementsAre(1, 3, kImplicit));
  EXPECT_EQ(0, constraints.grid_origin().hard_constraint.to_uint());
  EXPECT_THAT(constraints.Set(ChunkLayout::GridOrigin({kInfIndex, 2, 3})),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting grid_origin: "
                            "Invalid value for dimension 0: .*"));
  EXPECT_THAT(constraints.Set(
                  ChunkLayout::GridOrigin({2, 3}, false)),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting grid_origin: "
                            "Rank 2 does not match existing rank 3"));
  // Hard constraint on dimension 1 only -> bitmask 0b010.
  TENSORSTORE_ASSERT_OK(
      constraints.Set(ChunkLayout::GridOrigin({kImplicit, 4, kImplicit})));
  EXPECT_THAT(constraints.grid_origin(),
              ::testing::ElementsAre(1, 4, kImplicit));
  EXPECT_EQ(0b10, constraints.grid_origin().hard_constraint.to_uint());
  // Conflicting hard value for dimension 1 fails and leaves state intact.
  EXPECT_THAT(constraints.Set(ChunkLayout::GridOrigin({3, 5, kImplicit})),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting grid_origin: "
                            "New hard constraint \\(5\\) for dimension 1 "
                            "does not match existing hard constraint \\(4\\)"));
  EXPECT_THAT(constraints.grid_origin(),
              ::testing::ElementsAre(1, 4, kImplicit));
  EXPECT_EQ(0b10, constraints.grid_origin().hard_constraint.to_uint());
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::GridOrigin({1, 4, 5})));
  EXPECT_THAT(constraints.grid_origin(), ::testing::ElementsAre(1, 4, 5));
  EXPECT_EQ(0b111, constraints.grid_origin().hard_constraint.to_uint());
}
// Exercises ChunkLayout::ReadChunkShape: 0 entries mean "unconstrained",
// soft constraints do not overwrite existing values, invalid values and rank
// mismatches are rejected, and hard constraints conflict per-dimension.
TEST(ChunkLayoutTest, ReadChunkShape) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::ReadChunkShape({100, 0, 0}, false)));
  EXPECT_EQ(3, constraints.rank());
  EXPECT_THAT(constraints.read_chunk_shape(),
              ::testing::ElementsAre(100, 0, 0));
  EXPECT_THAT(constraints.read_chunk().shape(),
              ::testing::ElementsAre(100, 0, 0));
  EXPECT_EQ(0, constraints.read_chunk_shape().hard_constraint.to_uint());
  EXPECT_EQ(0, constraints.read_chunk().shape().hard_constraint.to_uint());
  // Soft constraint fills in only unconstrained (0) dimensions.
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::ReadChunkShape({2, 300, 0}, false)));
  EXPECT_THAT(constraints.read_chunk().shape(),
              ::testing::ElementsAre(100, 300, 0));
  EXPECT_EQ(0, constraints.read_chunk_shape().hard_constraint.to_uint());
  EXPECT_THAT(constraints.Set(ChunkLayout::ReadChunkShape({-5, 300, 3})),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting read_chunk shape: "
                            "Invalid value for dimension 0: .*"));
  EXPECT_THAT(constraints.Set(ChunkLayout::ReadChunkShape(
                  {2, 3}, false)),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting read_chunk shape: "
                            "Rank 2 does not match existing rank 3"));
  // Hard constraint on dimension 1 only -> bitmask 0b010.
  TENSORSTORE_ASSERT_OK(
      constraints.Set(ChunkLayout::ReadChunkShape({0, 4, 0})));
  EXPECT_THAT(constraints.read_chunk_shape(),
              ::testing::ElementsAre(100, 4, 0));
  EXPECT_EQ(0b10, constraints.read_chunk_shape().hard_constraint.to_uint());
  EXPECT_EQ(0b10, constraints.read_chunk().shape().hard_constraint.to_uint());
  EXPECT_THAT(constraints.Set(ChunkLayout::ReadChunkShape({100, 5, 0})),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting read_chunk shape: "
                            "New hard constraint \\(5\\) for dimension 1 "
                            "does not match existing hard constraint \\(4\\)"));
  EXPECT_THAT(constraints.read_chunk_shape(),
              ::testing::ElementsAre(100, 4, 0));
  EXPECT_EQ(0b10, constraints.read_chunk_shape().hard_constraint.to_uint());
  TENSORSTORE_ASSERT_OK(
      constraints.Set(ChunkLayout::ReadChunkShape({100, 4, 5})));
  EXPECT_THAT(constraints.read_chunk_shape(),
              ::testing::ElementsAre(100, 4, 5));
  EXPECT_EQ(0b111, constraints.read_chunk_shape().hard_constraint.to_uint());
}
// Basic WriteChunkShape soft-constraint behavior; mirrors the read-chunk
// case, which is tested exhaustively above.
TEST(ChunkLayoutTest, WriteChunkShape) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::WriteChunkShape({100, 0, 0}, false)));
  EXPECT_EQ(3, constraints.rank());
  EXPECT_THAT(constraints.write_chunk_shape(),
              ::testing::ElementsAre(100, 0, 0));
  EXPECT_THAT(constraints.write_chunk().shape(),
              ::testing::ElementsAre(100, 0, 0));
  EXPECT_EQ(0, constraints.write_chunk_shape().hard_constraint.to_uint());
  EXPECT_EQ(0, constraints.write_chunk().shape().hard_constraint.to_uint());
}
// Exercises ChunkLayout::ReadChunkAspectRatio: same soft/hard per-dimension
// semantics as shapes, with 0 meaning "unconstrained" and fractional values
// allowed.
TEST(ChunkLayoutTest, ReadChunkAspectRatio) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::ReadChunkAspectRatio({2, 0, 0}, false)));
  EXPECT_EQ(3, constraints.rank());
  EXPECT_THAT(constraints.read_chunk_aspect_ratio(),
              ::testing::ElementsAre(2, 0, 0));
  EXPECT_THAT(constraints.read_chunk().aspect_ratio(),
              ::testing::ElementsAre(2, 0, 0));
  EXPECT_EQ(0, constraints.read_chunk_aspect_ratio().hard_constraint.to_uint());
  EXPECT_EQ(0,
            constraints.read_chunk().aspect_ratio().hard_constraint.to_uint());
  // Soft constraint fills in only unconstrained (0) dimensions.
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::ReadChunkAspectRatio(
      {3, 1.5, 0}, false)));
  EXPECT_THAT(constraints.read_chunk().aspect_ratio(),
              ::testing::ElementsAre(2, 1.5, 0));
  EXPECT_EQ(0, constraints.read_chunk_aspect_ratio().hard_constraint.to_uint());
  EXPECT_THAT(constraints.Set(ChunkLayout::ReadChunkAspectRatio({-5, 1.5, 3})),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting read_chunk aspect_ratio: "
                            "Invalid value for dimension 0: .*"));
  EXPECT_THAT(constraints.Set(ChunkLayout::ReadChunkAspectRatio(
                  {2, 3}, false)),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting read_chunk aspect_ratio: "
                            "Rank 2 does not match existing rank 3"));
  // Hard constraint on dimension 1 only -> bitmask 0b010.
  TENSORSTORE_ASSERT_OK(
      constraints.Set(ChunkLayout::ReadChunkAspectRatio({0, 4, 0})));
  EXPECT_THAT(constraints.read_chunk_aspect_ratio(),
              ::testing::ElementsAre(2, 4, 0));
  EXPECT_EQ(0b10,
            constraints.read_chunk_aspect_ratio().hard_constraint.to_uint());
  EXPECT_EQ(0b10,
            constraints.read_chunk().aspect_ratio().hard_constraint.to_uint());
  EXPECT_THAT(constraints.Set(ChunkLayout::ReadChunkAspectRatio({2, 5, 0})),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting read_chunk aspect_ratio: "
                            "New hard constraint \\(5\\) for dimension 1 "
                            "does not match existing hard constraint \\(4\\)"));
  EXPECT_THAT(constraints.read_chunk_aspect_ratio(),
              ::testing::ElementsAre(2, 4, 0));
  EXPECT_EQ(0b10,
            constraints.read_chunk_aspect_ratio().hard_constraint.to_uint());
  TENSORSTORE_ASSERT_OK(
      constraints.Set(ChunkLayout::ReadChunkAspectRatio({2, 4, 5})));
  EXPECT_THAT(constraints.read_chunk_aspect_ratio(),
              ::testing::ElementsAre(2, 4, 5));
  EXPECT_EQ(0b111,
            constraints.read_chunk_aspect_ratio().hard_constraint.to_uint());
}
// Basic WriteChunkAspectRatio soft-constraint behavior; mirrors the
// read-chunk case, which is tested exhaustively above.
TEST(ChunkLayoutTest, WriteChunkAspectRatio) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::WriteChunkAspectRatio(
      {2, 0, 0}, false)));
  EXPECT_EQ(3, constraints.rank());
  EXPECT_THAT(constraints.write_chunk_aspect_ratio(),
              ::testing::ElementsAre(2, 0, 0));
  EXPECT_THAT(constraints.write_chunk().aspect_ratio(),
              ::testing::ElementsAre(2, 0, 0));
  EXPECT_EQ(0,
            constraints.write_chunk_aspect_ratio().hard_constraint.to_uint());
  EXPECT_EQ(0,
            constraints.write_chunk().aspect_ratio().hard_constraint.to_uint());
  // Soft constraint fills in only unconstrained (0) dimensions.
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::WriteChunkAspectRatio(
      {3, 1.5, 0}, false)));
  EXPECT_THAT(constraints.write_chunk().aspect_ratio(),
              ::testing::ElementsAre(2, 1.5, 0));
  EXPECT_EQ(0,
            constraints.write_chunk_aspect_ratio().hard_constraint.to_uint());
}
// Basic CodecChunkAspectRatio soft-constraint behavior; mirrors the
// read-chunk case, which is tested exhaustively above.
TEST(ChunkLayoutTest, CodecChunkAspectRatio) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::CodecChunkAspectRatio(
      {2, 0, 0}, false)));
  EXPECT_EQ(3, constraints.rank());
  EXPECT_THAT(constraints.codec_chunk_aspect_ratio(),
              ::testing::ElementsAre(2, 0, 0));
  EXPECT_THAT(constraints.codec_chunk().aspect_ratio(),
              ::testing::ElementsAre(2, 0, 0));
  EXPECT_EQ(0,
            constraints.codec_chunk_aspect_ratio().hard_constraint.to_uint());
  EXPECT_EQ(0,
            constraints.codec_chunk().aspect_ratio().hard_constraint.to_uint());
  // Soft constraint fills in only unconstrained (0) dimensions.
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::CodecChunkAspectRatio(
      {3, 1.5, 0}, false)));
  EXPECT_THAT(constraints.codec_chunk().aspect_ratio(),
              ::testing::ElementsAre(2, 1.5, 0));
  EXPECT_EQ(0,
            constraints.codec_chunk_aspect_ratio().hard_constraint.to_uint());
}
// Exercises ChunkLayout::ReadChunkElements (a scalar constraint): kImplicit
// is a no-op, soft values don't overwrite, negative values are rejected, and
// differing hard values conflict.
TEST(ChunkLayoutTest, ReadChunkElements) {
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::ReadChunkElements(kImplicit, false)));
  EXPECT_EQ(kImplicit, constraints.read_chunk_elements());
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::ReadChunkElements(42, false)));
  EXPECT_EQ(42, constraints.read_chunk_elements());
  EXPECT_EQ(42, constraints.read_chunk().elements());
  EXPECT_EQ(false, constraints.read_chunk_elements().hard_constraint);
  EXPECT_EQ(false, constraints.read_chunk().elements().hard_constraint);
  // A second soft value does not replace the existing one.
  TENSORSTORE_ASSERT_OK(constraints.Set(
      ChunkLayout::ReadChunkElements(43, false)));
  EXPECT_EQ(42, constraints.read_chunk().elements());
  EXPECT_EQ(false, constraints.read_chunk_elements().hard_constraint);
  EXPECT_THAT(constraints.Set(ChunkLayout::ReadChunkElements(-5)),
              MatchesStatus(absl::StatusCode::kInvalidArgument,
                            "Error setting read_chunk elements: "
                            "Invalid value: -5"));
  // A hard value replaces the soft one.
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::ReadChunkElements(45)));
  EXPECT_EQ(45, constraints.read_chunk_elements());
  EXPECT_EQ(true, constraints.read_chunk_elements().hard_constraint);
  EXPECT_EQ(true, constraints.read_chunk().elements().hard_constraint);
  // A different hard value conflicts; re-setting the same one is fine.
  EXPECT_THAT(
      constraints.Set(ChunkLayout::ReadChunkElements(46)),
      MatchesStatus(absl::StatusCode::kInvalidArgument,
                    "Error setting read_chunk elements: "
                    "New hard constraint \\(46\\) "
                    "does not match existing hard constraint \\(45\\)"));
  EXPECT_EQ(45, constraints.read_chunk_elements());
  EXPECT_EQ(true, constraints.read_chunk_elements().hard_constraint);
  TENSORSTORE_ASSERT_OK(constraints.Set(ChunkLayout::ReadChunkElements(45)));
  EXPECT_EQ(45, constraints.read_chunk_elements());
  EXPECT_EQ(true, constraints.read_chunk_elements().hard_constraint);
}
// Setting one fully-specified ChunkLayout onto empty constraints copies it
// exactly (hard constraints), and equals direct construction from the layout.
TEST(ChunkLayoutTest, SetPreciseChunkLayout) {
  ::nlohmann::json layout_json{
      {"inner_order", {0, 1, 2}},
      {"grid_origin", {1, 2, 3}},
      {"write_chunk", {{"shape", {100, 200, 300}}}},
      {"read_chunk", {{"shape", {10, 20, 30}}}},
      {"codec_chunk", {{"shape", {4, 5, 6}}}},
  };
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto layout,
                                   ChunkLayout::FromJson(layout_json));
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(layout));
  EXPECT_THAT(constraints.ToJson(),
              ::testing::Optional(MatchesJson(layout_json)));
  EXPECT_EQ(ChunkLayout(layout), constraints);
}
// Wrapping a layout with hard_constraint=false demotes every constraint to a
// soft constraint; the operation is idempotent across construction paths.
TEST(ChunkLayoutTest, SetPreciseChunkLayoutAsSoftConstraints) {
  ::nlohmann::json layout_json{
      {"inner_order", {0, 1, 2}},
      {"grid_origin", {1, 2, 3}},
      {"write_chunk", {{"shape", {100, 200, 300}}}},
      {"read_chunk", {{"shape", {10, 20, 30}}}},
      {"codec_chunk", {{"shape", {4, 5, 6}}}},
  };
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto layout,
                                   ChunkLayout::FromJson(layout_json));
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(
      constraints.Set(ChunkLayout(layout, false)));
  EXPECT_THAT(constraints.ToJson(),
              ::testing::Optional(MatchesJson({
                  {"inner_order_soft_constraint", {0, 1, 2}},
                  {"grid_origin_soft_constraint", {1, 2, 3}},
                  {"write_chunk", {{"shape_soft_constraint", {100, 200, 300}}}},
                  {"read_chunk", {{"shape_soft_constraint", {10, 20, 30}}}},
                  {"codec_chunk", {{"shape_soft_constraint", {4, 5, 6}}}},
              })));
  EXPECT_EQ(constraints, ChunkLayout(ChunkLayout(layout),
                                     false));
  ChunkLayout constraints2;
  TENSORSTORE_ASSERT_OK(
      constraints2.Set(ChunkLayout(layout, false)));
  EXPECT_EQ(constraints, constraints2);
}
// Merging two constraint objects: hard constraints from either side win over
// soft ones, compatible hard constraints merge per-dimension, and when the
// second layout is demoted to soft constraints it cannot override the first.
TEST(ChunkLayoutTest, SetChunkLayout) {
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto layout_a,
      ChunkLayout::FromJson({
          {"inner_order", {0, 1, 2}},
          {"grid_origin_soft_constraint", {1, 2, 3}},
          {"write_chunk",
           {
               {"shape_soft_constraint", {100, 200, 300}},
               {"elements", 42},
           }},
          {"read_chunk",
           {
               {"shape", {nullptr, 20, 30}},
               {"shape_soft_constraint", {100, nullptr, nullptr}},
               {"elements_soft_constraint", 50},
           }},
          {"codec_chunk", {{"aspect_ratio", {4, 5, 6}}}},
      }));
  TENSORSTORE_ASSERT_OK_AND_ASSIGN(
      auto layout_b, ChunkLayout::FromJson({
                         {"inner_order_soft_constraint", {2, 0, 1}},
                         {"grid_origin", {4, 5, 6}},
                         {"write_chunk",
                          {
                              {"shape", {200, 400, 900}},
                              {"elements", 42},
                          }},
                         {"read_chunk",
                          {
                              {"shape", {10, nullptr, 30}},
                              {"elements", 50},
                          }},
                     }));
  // Merge with layout_b's hard constraints intact: they dominate layout_a's
  // soft constraints.
  ChunkLayout constraints;
  TENSORSTORE_ASSERT_OK(constraints.Set(layout_a));
  TENSORSTORE_ASSERT_OK(constraints.Set(layout_b));
  EXPECT_THAT(constraints.ToJson(), ::testing::Optional(MatchesJson({
                                        {"inner_order", {0, 1, 2}},
                                        {"grid_origin", {4, 5, 6}},
                                        {"write_chunk",
                                         {
                                             {"shape", {200, 400, 900}},
                                             {"elements", 42},
                                         }},
                                        {"read_chunk",
                                         {
                                             {"shape", {10, 20, 30}},
                                             {"elements", 50},
                                         }},
                                        {"codec_chunk",
                                         {
                                             {"aspect_ratio", {4, 5, 6}},
                                         }},
                                    })));
  // Merge layout_b demoted to soft constraints: layout_a is unchanged.
  constraints = layout_a;
  TENSORSTORE_ASSERT_OK(
      constraints.Set(ChunkLayout(layout_b, false)));
  EXPECT_THAT(constraints.ToJson(),
              ::testing::Optional(MatchesJson({
                  {"inner_order", {0, 1, 2}},
                  {"grid_origin_soft_constraint", {1, 2, 3}},
                  {"write_chunk",
                   {
                       {"shape_soft_constraint", {100, 200, 300}},
                       {"elements", 42},
                   }},
                  {"read_chunk",
                   {
                       {"shape", {nullptr, 20, 30}},
                       {"shape_soft_constraint", {100, nullptr, nullptr}},
                       {"elements_soft_constraint", 50},
                   }},
                  {"codec_chunk",
                   {
                       {"aspect_ratio", {4, 5, 6}},
                   }},
              })));
}
// ChunkLayout uses a shared copy-on-write representation: mutating a copy
// (via Set) must not affect the layout it was copied from.  This case covers
// a source layout whose rank is already fixed by InnerOrder.
TEST(ChunkLayoutTest, CopyOnWriteWithRankSet) {
  ChunkLayout a;
  TENSORSTORE_ASSERT_OK(a.Set(ChunkLayout::InnerOrder({0, 1, 2})));
  EXPECT_THAT(a.ToJson(),
              ::testing::Optional(MatchesJson({{"inner_order", {0, 1, 2}}})));
  ChunkLayout b = a;
  TENSORSTORE_ASSERT_OK(b.Set(ChunkLayout::GridOrigin({1, 2, 3})));
  // `a` must be unchanged by the mutation of `b`.
  EXPECT_THAT(a.ToJson(),
              ::testing::Optional(MatchesJson({{"inner_order", {0, 1, 2}}})));
  EXPECT_THAT(b.ToJson(), ::testing::Optional(MatchesJson({
                              {"inner_order", {0, 1, 2}},
                              {"grid_origin", {1, 2, 3}},
                          })));
}
// Same copy-on-write check as above, but starting from a layout whose rank
// is not yet determined (ReadChunkElements is rank-independent).
TEST(ChunkLayoutTest, CopyOnWriteWithRankNotSet) {
  ChunkLayout a;
  TENSORSTORE_ASSERT_OK(a.Set(ChunkLayout::ReadChunkElements(5)));
  EXPECT_THAT(
      a.ToJson(),
      ::testing::Optional(MatchesJson({{"read_chunk", {{"elements", 5}}}})));
  ChunkLayout b = a;
  TENSORSTORE_ASSERT_OK(b.Set(ChunkLayout::GridOrigin({1, 2, 3})));
  // `a` must be unchanged by the mutation of `b`.
  EXPECT_THAT(
      a.ToJson(),
      ::testing::Optional(MatchesJson({{"read_chunk", {{"elements", 5}}}})));
  EXPECT_THAT(b.ToJson(), ::testing::Optional(MatchesJson({
                              {"read_chunk", {{"elements", 5}}},
                              {"grid_origin", {1, 2, 3}},
                          })));
}
// A default-constructed (fully unconstrained) ChunkLayout prints as "{}".
TEST(ChunkLayoutTest, Ostream) {
  ChunkLayout a;
  EXPECT_EQ("{}", tensorstore::StrCat(a));
}
TEST(ChooseChunkGridTest, Rank0) {
Box box(0);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{}, ChunkLayout::GridView(), BoxView(0), box));
}
TEST(ChooseChunkGridTest, Rank1Unconstrained) {
Box box(1);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{}, ChunkLayout::GridView(), BoxView(1), box));
EXPECT_EQ(Box<1>({1024 * 1024}), box);
}
TEST(ChooseChunkGridTest, Rank2Unconstrained) {
Box box(2);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{}, ChunkLayout::GridView(), BoxView(2), box));
EXPECT_EQ(Box({1024, 1024}), box);
}
TEST(ChooseChunkGridTest, Rank3Unconstrained) {
Box box(3);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{}, ChunkLayout::GridView(), BoxView(3), box));
EXPECT_EQ(Box({101, 101, 101}), box);
}
TEST(ChooseChunkGridTest, Rank4Unconstrained) {
Box box(4);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{}, ChunkLayout::GridView(), BoxView(4), box));
EXPECT_EQ(Box({32, 32, 32, 32}), box);
}
TEST(ChooseChunkGridTest, Rank1ElementsConstrained) {
Box box(1);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
tensorstore::span<const Index>({42}),
ChunkLayout::GridView(ChunkLayout::ChunkElementsBase(9000)), BoxView(1),
box));
EXPECT_EQ(Box<1>({42}, {9000}), box);
}
TEST(ChooseChunkGridTest, Rank1ShapeConstrained) {
Box box(1);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
tensorstore::span<const Index>({42}),
ChunkLayout::GridView(ChunkLayout::ChunkShape({55})), BoxView(1), box));
EXPECT_EQ(Box<1>({42}, {55}), box);
}
TEST(ChooseChunkGridTest, Rank1ShapeFullExtent) {
Box box(1);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
tensorstore::span<const Index>({42}),
ChunkLayout::GridView(
ChunkLayout::ChunkShape({-1}), ChunkLayout::ChunkAspectRatio(),
ChunkLayout::ChunkElements(10)),
BoxView<1>({100}), box));
EXPECT_EQ(Box<1>({42}, {100}), box);
EXPECT_THAT(
ChooseChunkGrid(
tensorstore::span<const Index>({42}),
ChunkLayout::GridView(
ChunkLayout::ChunkShape({-1}), ChunkLayout::ChunkAspectRatio(),
ChunkLayout::ChunkElements(10)),
BoxView(1), box),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Cannot match chunk size for dimension 0 to "
"unbounded domain \\(-inf, \\+inf\\)"));
}
TEST(ChooseChunkGridTest, Rank1BoundedDomain) {
Box box(1);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkElementsBase(9000)),
BoxView<1>({42}, {1000}), box));
EXPECT_EQ(Box<1>({42}, {1000}), box);
}
TEST(ChunkLayoutTest, ChooseChunkGridRank1BoundedDomainOriginConstrained) {
Box box(1);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
tensorstore::span<const Index>({45}),
ChunkLayout::GridView(ChunkLayout::ChunkElementsBase(9000)),
BoxView<1>({42}, {1000}), box));
EXPECT_EQ(Box<1>({45}, {1000}), box);
}
TEST(ChooseChunkGridTest, Rank2AspectRatio) {
Box box(2);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkShape(),
ChunkLayout::ChunkAspectRatio({1.0, 2.0}),
ChunkLayout::ChunkElements(200)),
BoxView(2), box));
EXPECT_EQ(Box({10, 20}), box);
}
TEST(ChooseChunkGridTest, Rank3AspectRatio) {
Box box(3);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkShape(),
ChunkLayout::ChunkAspectRatio({1.0, 0, 2.0}),
ChunkLayout::ChunkElements(2000)),
BoxView(3), box));
EXPECT_EQ(Box({10, 10, 20}), box);
}
TEST(ChooseChunkGridTest, Rank3AspectRatioWithChunkShapeConstraint) {
Box box(3);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkShape({0, 1, 0}),
ChunkLayout::ChunkAspectRatio({1.0, 0, 2.0}),
ChunkLayout::ChunkElements(200)),
BoxView(3), box));
EXPECT_EQ(Box({10, 1, 20}), box);
}
TEST(ChooseChunkGridTest, Rank3AspectRatioLarge1) {
Box box(3);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkShape(),
ChunkLayout::ChunkAspectRatio({1.0, 1.0, 1e30}),
ChunkLayout::ChunkElements(200)),
BoxView(3), box));
EXPECT_EQ(Box({1, 1, 200}), box);
}
TEST(ChooseChunkGridTest, Rank3AspectRatioLarge2) {
Box box(3);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkShape(),
ChunkLayout::ChunkAspectRatio({1.0, 1e30, 1e30}),
ChunkLayout::ChunkElements(100)),
BoxView(3), box));
EXPECT_EQ(Box({1, 10, 10}), box);
}
TEST(ChooseChunkGridTest, Rank3AspectRatioLarge3) {
Box box(3);
TENSORSTORE_ASSERT_OK(ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkShape(),
ChunkLayout::ChunkAspectRatio({1.0, 1e30, 1e30}),
ChunkLayout::ChunkElements(Index(1) << 40)),
BoxView(3), box));
EXPECT_EQ(Box({1, Index(1) << 20, Index(1) << 20}), box);
}
TEST(ChooseChunkGridTest, GridOriginRankMismatch) {
Box box(3);
EXPECT_THAT(
ChooseChunkGrid(
tensorstore::span<const Index>({1, 2}),
ChunkLayout::GridView(), BoxView(3), box),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Rank of constraints \\(2\\) does not match rank of domain \\(3\\)"));
}
TEST(ChooseChunkGridTest, ShapeConstraintRankMismatch) {
Box box(3);
EXPECT_THAT(
ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkShape({1, 2})), BoxView(3),
box),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Rank of constraints \\(2\\) does not match rank of domain \\(3\\)"));
}
TEST(ChooseChunkGridTest, AspectRatioConstraintRankMismatch) {
Box box(3);
EXPECT_THAT(
ChooseChunkGrid(
{},
ChunkLayout::GridView(ChunkLayout::ChunkAspectRatio({1, 2})),
BoxView(3), box),
MatchesStatus(
absl::StatusCode::kInvalidArgument,
"Rank of constraints \\(2\\) does not match rank of domain \\(3\\)"));
}
TEST(ChunkLayoutGridTest, Basic) {
ChunkLayout::Grid grid;
TENSORSTORE_EXPECT_OK(grid.Set(ChunkLayout::ChunkShape({10, 11})));
EXPECT_EQ(2, grid.rank());
EXPECT_THAT(grid.shape(), ::testing::ElementsAre(10, 11));
}
TEST(ChunkLayoutGridTest, Json) {
tensorstore::TestJsonBinderRoundTripJsonOnly<ChunkLayout::Grid>(
{
{
{"shape", {10, 11}},
{"aspect_ratio", {2, nullptr}},
{"aspect_ratio_soft_constraint", {nullptr, 3}},
{"elements_soft_constraint", 10000},
},
},
tensorstore::internal_json_binding::DefaultBinder<>,
tensorstore::IncludeDefaults{false});
}
TEST(ChunkLayoutSerializationTest, SerializationRoundTrip) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto chunk_layout,
ChunkLayout::FromJson({
{"grid_origin", {nullptr, nullptr, 3}},
{"grid_origin_soft_constraint", {4, nullptr, nullptr}},
{"write_chunk",
{{"elements_soft_constraint", 1000}, {"shape", {5, nullptr, 6}}}},
{"read_chunk",
{{"elements", 100},
{"shape_soft_constraint", {nullptr, 10, nullptr}},
{"aspect_ratio", {nullptr, 1, 2}}}},
{"codec_chunk", {{"aspect_ratio_soft_constraint", {nullptr, 2, 1}}}},
{"inner_order", {2, 1, 0}},
}));
tensorstore::serialization::TestSerializationRoundTrip(chunk_layout);
}
TEST(ChooseChunkShapeTest, Elements) {
Index chunk_shape[kMaxRank] = {0};
TENSORSTORE_ASSERT_OK(ChooseChunkShape(
ChunkLayout::GridView(
ChunkLayout::ChunkElementsBase(1000, false)),
BoxView({0, 0, 0}, {2000, 2000, 2000}),
tensorstore::span<Index>(chunk_shape, 3)));
EXPECT_THAT(tensorstore::span<Index>(chunk_shape, 3),
testing::ElementsAre(10, 10, 10));
TENSORSTORE_ASSERT_OK(ChooseChunkShape(
ChunkLayout::GridView(
ChunkLayout::ChunkElementsBase(1000, true)),
BoxView({0, 0, 0}, {2000, 2000, 1}),
tensorstore::span<Index>(chunk_shape, 3)));
EXPECT_THAT(tensorstore::span<Index>(chunk_shape, 3),
testing::ElementsAre(31, 31, 1));
}
TEST(ChooseChunkShapeTest, AspectRatio) {
Index chunk_shape[kMaxRank] = {0};
TENSORSTORE_ASSERT_OK(ChooseChunkShape(
ChunkLayout::GridView(
ChunkLayout::ChunkAspectRatioBase({3, 2, 1}, true)),
BoxView({0, 0, 0}, {2000, 2000, 2000}),
tensorstore::span<Index>(chunk_shape, 3)));
EXPECT_THAT(tensorstore::span<Index>(chunk_shape, 3),
testing::ElementsAre(167, 111, 55));
TENSORSTORE_ASSERT_OK(ChooseChunkShape(
ChunkLayout::GridView(
ChunkLayout::ChunkAspectRatioBase({3, 2, 1}, false)),
BoxView({0, 0, 0}, {2000, 2000, 1}),
tensorstore::span<Index>(chunk_shape, 3)));
EXPECT_THAT(tensorstore::span<Index>(chunk_shape, 3),
testing::ElementsAre(1254, 836, 1));
}
TEST(ChooseChunkShapeTest, Shape) {
Index chunk_shape[kMaxRank] = {0};
TENSORSTORE_ASSERT_OK(ChooseChunkShape(
ChunkLayout::GridView(
ChunkLayout::ChunkShapeBase({30, 20, 10}, false)),
BoxView({0, 0, 0}, {2000, 2000, 2000}),
tensorstore::span<Index>(chunk_shape, 3)));
EXPECT_THAT(tensorstore::span<Index>(chunk_shape, 3),
testing::ElementsAre(30, 20, 10));
TENSORSTORE_ASSERT_OK(ChooseChunkShape(
ChunkLayout::GridView(
ChunkLayout::ChunkShapeBase({30, 20, 10}, false)),
BoxView({0, 0, 0}, {2000, 2000, 1}),
tensorstore::span<Index>(chunk_shape, 3)));
EXPECT_THAT(tensorstore::span<Index>(chunk_shape, 3),
testing::ElementsAre(30, 20, 10));
}
TEST(ChooseReadWriteChunkShapesTest, Unconstrained) {
Index read_chunk_shape[3];
Index write_chunk_shape[3];
TENSORSTORE_ASSERT_OK(ChooseReadWriteChunkShapes(
ChunkLayout::GridView(),
ChunkLayout::GridView(),
BoxView(3), read_chunk_shape, write_chunk_shape));
EXPECT_THAT(read_chunk_shape, ::testing::ElementsAre(101, 101, 101));
EXPECT_THAT(write_chunk_shape, ::testing::ElementsAre(101, 101, 101));
}
TEST(ChooseReadWriteChunkShapesTest, ShapeNonMultiple) {
Index read_chunk_shape[4];
Index write_chunk_shape[4];
TENSORSTORE_ASSERT_OK(ChooseReadWriteChunkShapes(
ChunkLayout::GridView(ChunkLayout::ChunkShapeBase(
{5, 11, 20, 8}, DimensionSet::FromBools({false, false, true, true}))),
ChunkLayout::GridView(ChunkLayout::ChunkShapeBase(
{6, 30, 41, 16},
DimensionSet::FromBools({false, true, false, true}))),
BoxView(4), read_chunk_shape, write_chunk_shape));
EXPECT_THAT(read_chunk_shape, ::testing::ElementsAre(6, 10, 20, 8));
EXPECT_THAT(write_chunk_shape, ::testing::ElementsAre(6, 30, 40, 16));
}
TEST(ChooseReadWriteChunkShapesTest, ShapeIncompatible) {
Index read_chunk_shape[1];
Index write_chunk_shape[1];
EXPECT_THAT(ChooseReadWriteChunkShapes(
ChunkLayout::GridView(
ChunkLayout::ChunkShapeBase({5}, true)),
ChunkLayout::GridView(ChunkLayout::ChunkShapeBase({6}, true)),
BoxView(1), read_chunk_shape, write_chunk_shape),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Incompatible chunk size constraints for dimension "
"0: read size of 5, write size of 6"));
}
TEST(ChooseReadWriteChunkShapesTest, ReadShapeConstrained) {
Index read_chunk_shape[2];
Index write_chunk_shape[2];
TENSORSTORE_ASSERT_OK(ChooseReadWriteChunkShapes(
ChunkLayout::GridView(
ChunkLayout::ChunkShapeBase({5, 7})),
ChunkLayout::GridView(ChunkLayout::ChunkElementsBase(100)),
BoxView(2), read_chunk_shape, write_chunk_shape));
EXPECT_THAT(read_chunk_shape, ::testing::ElementsAre(5, 7));
EXPECT_THAT(write_chunk_shape, ::testing::ElementsAre(10, 7));
}
TEST(ChooseReadWriteChunkShapesTest, WriteShapeConstrained) {
Index read_chunk_shape[2];
Index write_chunk_shape[2];
TENSORSTORE_ASSERT_OK(ChooseReadWriteChunkShapes(
ChunkLayout::GridView(
ChunkLayout::ChunkElementsBase(36)),
ChunkLayout::GridView(ChunkLayout::ChunkShapeBase({10, 14})),
BoxView(2), read_chunk_shape, write_chunk_shape));
EXPECT_THAT(read_chunk_shape, ::testing::ElementsAre(5, 7));
EXPECT_THAT(write_chunk_shape, ::testing::ElementsAre(10, 14));
}
TEST(HasHardConstraints, Basic) {
ChunkLayout layout;
EXPECT_FALSE(layout.HasHardConstraints());
TENSORSTORE_ASSERT_OK(layout.Set(tensorstore::RankConstraint{2}));
EXPECT_FALSE(layout.HasHardConstraints());
{
auto layout1 = layout;
TENSORSTORE_ASSERT_OK(layout1.Set(
tensorstore::ChunkLayout::InnerOrder({0, 1},
false)));
EXPECT_FALSE(layout1.HasHardConstraints());
}
{
auto layout1 = layout;
TENSORSTORE_ASSERT_OK(layout1.Set(
tensorstore::ChunkLayout::InnerOrder({0, 1},
true)));
EXPECT_TRUE(layout1.HasHardConstraints());
}
{
auto layout1 = layout;
TENSORSTORE_ASSERT_OK(layout1.Set(
tensorstore::ChunkLayout::GridOrigin({100, 200},
false)));
EXPECT_FALSE(layout1.HasHardConstraints());
}
{
auto layout1 = layout;
TENSORSTORE_ASSERT_OK(
layout1.Set(tensorstore::ChunkLayout::GridOrigin({100, 200})));
EXPECT_TRUE(layout1.HasHardConstraints());
}
{
auto layout1 = layout;
TENSORSTORE_ASSERT_OK(layout1.Set(
tensorstore::ChunkLayout::ReadChunkShape({100, 200},
false)));
EXPECT_FALSE(layout1.HasHardConstraints());
}
{
auto layout1 = layout;
TENSORSTORE_ASSERT_OK(
layout1.Set(tensorstore::ChunkLayout::ReadChunkShape({100, 200})));
EXPECT_TRUE(layout1.HasHardConstraints());
}
{
auto layout1 = layout;
TENSORSTORE_ASSERT_OK(
layout1.Set(tensorstore::ChunkLayout::ReadChunkAspectRatio({1, 1})));
EXPECT_FALSE(layout1.HasHardConstraints());
}
{
auto layout1 = layout;
TENSORSTORE_ASSERT_OK(
layout1.Set(tensorstore::ChunkLayout::ReadChunkElements(200)));
EXPECT_FALSE(layout1.HasHardConstraints());
}
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/chunk_layout.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/chunk_layout_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
00a55dbb-ab9f-464d-a180-889b68767656 | cpp | google/quiche | aes_128_gcm_12_encrypter | quiche/quic/core/crypto/aes_128_gcm_12_encrypter.cc | quiche/quic/core/crypto/aes_128_gcm_12_encrypter_test.cc | #include "quiche/quic/core/crypto/aes_128_gcm_12_encrypter.h"
#include "openssl/evp.h"
namespace quic {
namespace {
const size_t kKeySize = 16;
const size_t kNonceSize = 12;
}
// Constructs an AES-128-GCM encrypter with a 16-byte key and a 12-byte
// (96-bit) nonce; kAuthTagSize comes from the class header.
// NOTE(review): the trailing `false` is forwarded to AesBaseEncrypter --
// presumably selecting the legacy (non-IETF) nonce construction; confirm
// against the AesBaseEncrypter constructor.
Aes128Gcm12Encrypter::Aes128Gcm12Encrypter()
    : AesBaseEncrypter(EVP_aead_aes_128_gcm, kKeySize, kAuthTagSize, kNonceSize,
                       false) {
  // Guard against the local sizes exceeding the limits declared by the base.
  static_assert(kKeySize <= kMaxKeySize, "key size too big");
  static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
Aes128Gcm12Encrypter::~Aes128Gcm12Encrypter() {}
} | #include "quiche/quic/core/crypto/aes_128_gcm_12_encrypter.h"
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
struct TestGroupInfo {
size_t key_len;
size_t iv_len;
size_t pt_len;
size_t aad_len;
size_t tag_len;
};
struct TestVector {
const char* key;
const char* iv;
const char* pt;
const char* aad;
const char* ct;
const char* tag;
};
const TestGroupInfo test_group_info[] = {
{128, 96, 0, 0, 128}, {128, 96, 0, 128, 128}, {128, 96, 128, 0, 128},
{128, 96, 408, 160, 128}, {128, 96, 408, 720, 128}, {128, 96, 104, 0, 128},
};
const TestVector test_group_0[] = {
{"11754cd72aec309bf52f7687212e8957", "3c819d9a9bed087615030b65", "", "", "",
"250327c674aaf477aef2675748cf6971"},
{"ca47248ac0b6f8372a97ac43508308ed", "ffd2b598feabc9019262d2be", "", "", "",
"60d20404af527d248d893ae495707d1a"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_1[] = {
{"77be63708971c4e240d1cb79e8d77feb", "e0e00f19fed7ba0136a797f3", "",
"7a43ec1d9c0a5a78a0b16533a6213cab", "",
"209fcc8d3675ed938e9c7166709dd946"},
{"7680c5d3ca6154758e510f4d25b98820", "f8f105f9c3df4965780321f8", "",
"c94c410194c765e3dcc7964379758ed3", "",
"94dca8edfcf90bb74b153c8d48a17930"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_2[] = {
{"7fddb57453c241d03efbed3ac44e371c", "ee283a3fc75575e33efd4887",
"d5de42b461646c255c87bd2962d3b9a2", "", "2ccda4a5415cb91e135c2a0f78c9b2fd",
"b36d1df9b9d5e596f83e8b7f52971cb3"},
{"ab72c77b97cb5fe9a382d9fe81ffdbed", "54cc7dc2c37ec006bcc6d1da",
"007c5e5b3e59df24a7c355584fc1518d", "", "0e1bde206a07a9c2c1b65300f8c64997",
"2b4401346697138c7a4891ee59867d0c"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_3[] = {
{"fe47fcce5fc32665d2ae399e4eec72ba", "5adb9609dbaeb58cbd6e7275",
"7c0e88c88899a779228465074797cd4c2e1498d259b54390b85e3eef1c02df60e743f1"
"b840382c4bccaf3bafb4ca8429bea063",
"88319d6e1d3ffa5f987199166c8a9b56c2aeba5a",
"98f4826f05a265e6dd2be82db241c0fbbbf9ffb1c173aa83964b7cf539304373636525"
"3ddbc5db8778371495da76d269e5db3e",
"291ef1982e4defedaa2249f898556b47"},
{"ec0c2ba17aa95cd6afffe949da9cc3a8", "296bce5b50b7d66096d627ef",
"b85b3753535b825cbe5f632c0b843c741351f18aa484281aebec2f45bb9eea2d79d987"
"b764b9611f6c0f8641843d5d58f3a242",
"f8d00f05d22bf68599bcdeb131292ad6e2df5d14",
"a7443d31c26bdf2a1c945e29ee4bd344a99cfaf3aa71f8b3f191f83c2adfc7a0716299"
"5506fde6309ffc19e716eddf1a828c5a",
"890147971946b627c40016da1ecf3e77"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_4[] = {
{"2c1f21cf0f6fb3661943155c3e3d8492", "23cb5ff362e22426984d1907",
"42f758836986954db44bf37c6ef5e4ac0adaf38f27252a1b82d02ea949c8a1a2dbc0d6"
"8b5615ba7c1220ff6510e259f06655d8",
"5d3624879d35e46849953e45a32a624d6a6c536ed9857c613b572b0333e701557a713e"
"3f010ecdf9a6bd6c9e3e44b065208645aff4aabee611b391528514170084ccf587177f"
"4488f33cfb5e979e42b6e1cfc0a60238982a7aec",
"81824f0e0d523db30d3da369fdc0d60894c7a0a20646dd015073ad2732bd989b14a222"
"b6ad57af43e1895df9dca2a5344a62cc",
"57a3ee28136e94c74838997ae9823f3a"},
{"d9f7d2411091f947b4d6f1e2d1f0fb2e", "e1934f5db57cc983e6b180e7",
"73ed042327f70fe9c572a61545eda8b2a0c6e1d6c291ef19248e973aee6c312012f490"
"c2c6f6166f4a59431e182663fcaea05a",
"0a8a18a7150e940c3d87b38e73baee9a5c049ee21795663e264b694a949822b639092d"
"0e67015e86363583fcf0ca645af9f43375f05fdb4ce84f411dcbca73c2220dea03a201"
"15d2e51398344b16bee1ed7c499b353d6c597af8",
"aaadbd5c92e9151ce3db7210b8714126b73e43436d242677afa50384f2149b831f1d57"
"3c7891c2a91fbc48db29967ec9542b23",
"21b51ca862cb637cdd03b99a0f93b134"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_5[] = {
{"fe9bb47deb3a61e423c2231841cfd1fb", "4d328eb776f500a2f7fb47aa",
"f1cc3818e421876bb6b8bbd6c9", "", "b88c5c1977b35b517b0aeae967",
"43fd4727fe5cdb4b5b42818dea7ef8c9"},
{"6703df3701a7f54911ca72e24dca046a", "12823ab601c350ea4bc2488c",
"793cd125b0b84a043e3ac67717", "", "b2051c80014f42f08735a7b0cd",
"38e6bcd29962e5f2c13626b85a877101"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector* const test_group_array[] = {
test_group_0, test_group_1, test_group_2,
test_group_3, test_group_4, test_group_5,
};
}
namespace quic {
namespace test {
QuicData* EncryptWithNonce(Aes128Gcm12Encrypter* encrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view plaintext) {
size_t ciphertext_size = encrypter->GetCiphertextSize(plaintext.length());
std::unique_ptr<char[]> ciphertext(new char[ciphertext_size]);
if (!encrypter->Encrypt(nonce, associated_data, plaintext,
reinterpret_cast<unsigned char*>(ciphertext.get()))) {
return nullptr;
}
return new QuicData(ciphertext.release(), ciphertext_size, true);
}
class Aes128Gcm12EncrypterTest : public QuicTest {};
TEST_F(Aes128Gcm12EncrypterTest, Encrypt) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(test_group_array); i++) {
SCOPED_TRACE(i);
const TestVector* test_vectors = test_group_array[i];
const TestGroupInfo& test_info = test_group_info[i];
for (size_t j = 0; test_vectors[j].key != nullptr; j++) {
std::string key;
std::string iv;
std::string pt;
std::string aad;
std::string ct;
std::string tag;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].pt, &pt));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].ct, &ct));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].tag, &tag));
EXPECT_EQ(test_info.key_len, key.length() * 8);
EXPECT_EQ(test_info.iv_len, iv.length() * 8);
EXPECT_EQ(test_info.pt_len, pt.length() * 8);
EXPECT_EQ(test_info.aad_len, aad.length() * 8);
EXPECT_EQ(test_info.pt_len, ct.length() * 8);
EXPECT_EQ(test_info.tag_len, tag.length() * 8);
Aes128Gcm12Encrypter encrypter;
ASSERT_TRUE(encrypter.SetKey(key));
std::unique_ptr<QuicData> encrypted(
EncryptWithNonce(&encrypter, iv,
aad.length() ? aad : absl::string_view(), pt));
ASSERT_TRUE(encrypted.get());
ASSERT_LE(static_cast<size_t>(Aes128Gcm12Encrypter::kAuthTagSize),
tag.length());
tag.resize(Aes128Gcm12Encrypter::kAuthTagSize);
ASSERT_EQ(ct.length() + tag.length(), encrypted->length());
quiche::test::CompareCharArraysWithHexError(
"ciphertext", encrypted->data(), ct.length(), ct.data(), ct.length());
quiche::test::CompareCharArraysWithHexError(
"authentication tag", encrypted->data() + ct.length(), tag.length(),
tag.data(), tag.length());
}
}
}
// GetMaxPlaintextSize must subtract exactly the 12-byte truncated auth-tag
// overhead from the ciphertext size.
TEST_F(Aes128Gcm12EncrypterTest, GetMaxPlaintextSize) {
  Aes128Gcm12Encrypter encrypter;
  EXPECT_EQ(1000u, encrypter.GetMaxPlaintextSize(1012));
  EXPECT_EQ(100u, encrypter.GetMaxPlaintextSize(112));
  EXPECT_EQ(10u, encrypter.GetMaxPlaintextSize(22));
  // A ciphertext shorter than the tag cannot carry any plaintext.
  EXPECT_EQ(0u, encrypter.GetMaxPlaintextSize(11));
}
// GetCiphertextSize must add exactly 12 bytes of auth-tag overhead; inverse
// of GetMaxPlaintextSize above.
TEST_F(Aes128Gcm12EncrypterTest, GetCiphertextSize) {
  Aes128Gcm12Encrypter encrypter;
  EXPECT_EQ(1012u, encrypter.GetCiphertextSize(1000));
  EXPECT_EQ(112u, encrypter.GetCiphertextSize(100));
  EXPECT_EQ(22u, encrypter.GetCiphertextSize(10));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_128_gcm_12_encrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_128_gcm_12_encrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
b741e8e8-2745-4bf2-ba11-cb07d8f3a0b0 | cpp | abseil/abseil-cpp | charconv_parse | absl/strings/internal/charconv_parse.cc | absl/strings/internal/charconv_parse_test.cc | #include "absl/strings/internal/charconv_parse.h"
#include "absl/strings/charconv.h"
#include <cassert>
#include <cstdint>
#include <limits>
#include "absl/strings/internal/memutil.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace {
constexpr int kDecimalMantissaDigitsMax = 19;
static_assert(std::numeric_limits<uint64_t>::digits10 ==
kDecimalMantissaDigitsMax,
"(a) above");
static_assert(std::numeric_limits<double>::is_iec559, "IEEE double assumed");
static_assert(std::numeric_limits<double>::radix == 2, "IEEE double fact");
static_assert(std::numeric_limits<double>::digits == 53, "IEEE double fact");
static_assert(1000000000000000000u > (uint64_t{1} << (53 + 3)), "(b) above");
constexpr int kHexadecimalMantissaDigitsMax = 15;
constexpr int kGuaranteedHexadecimalMantissaBitPrecision =
4 * kHexadecimalMantissaDigitsMax - 3;
static_assert(kGuaranteedHexadecimalMantissaBitPrecision >
std::numeric_limits<double>::digits + 2,
"kHexadecimalMantissaDigitsMax too small");
constexpr int kDecimalExponentDigitsMax = 9;
static_assert(std::numeric_limits<int>::digits10 >= kDecimalExponentDigitsMax,
"int type too small");
constexpr int kDecimalDigitLimit = 50000000;
constexpr int kHexadecimalDigitLimit = kDecimalDigitLimit / 4;
static_assert(999999999 + 2 * kDecimalDigitLimit <
std::numeric_limits<int>::max(),
"int type too small");
static_assert(999999999 + 2 * (4 * kHexadecimalDigitLimit) <
std::numeric_limits<int>::max(),
"int type too small");
// Returns true if `flags` permits an exponent part in the input.  Only the
// fixed-without-scientific combination forbids one.
bool AllowExponent(chars_format flags) {
  const bool has_fixed = (flags & chars_format::fixed) == chars_format::fixed;
  const bool has_scientific =
      (flags & chars_format::scientific) == chars_format::scientific;
  return !has_fixed || has_scientific;
}
// Returns true if `flags` requires an exponent part to be present: the
// scientific bit is set and the fixed bit is not.
bool RequireExponent(chars_format flags) {
  const bool has_fixed = (flags & chars_format::fixed) == chars_format::fixed;
  const bool has_scientific =
      (flags & chars_format::scientific) == chars_format::scientific;
  return has_scientific && !has_fixed;
}
// Maps an ASCII byte to its value as a hexadecimal digit, or -1 if the byte
// is not a hex digit.  '0'..'9' map to 0-9; both 'a'..'f' and 'A'..'F' map
// to 10-15.  Indexed by unsigned char, so all 256 entries are present
// (19 values per row below).
const int8_t kAsciiToInt[256] = {
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0,  1,  2,  3,  4,  5,  6,  7,  8,
    9,  -1, -1, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1};
// Per-base trait functions used by ParseFloat<base>.  Only bases 10 and 16
// are supported; each has an explicit specialization below.
template <int base>
bool IsDigit(char ch);
template <int base>
unsigned ToDigit(char ch);
template <int base>
bool IsExponentCharacter(char ch);
template <int base>
constexpr int MantissaDigitsMax();
template <int base>
constexpr int DigitLimit();
template <int base>
constexpr int DigitMagnitude();
// True iff `ch` is a digit of the given base.
template <>
bool IsDigit<10>(char ch) {
  return ch >= '0' && ch <= '9';
}
template <>
bool IsDigit<16>(char ch) {
  // kAsciiToInt is -1 for non-hex-digit bytes.
  return kAsciiToInt[static_cast<unsigned char>(ch)] >= 0;
}
// Converts a digit character to its numeric value (no validation; the
// caller must have checked IsDigit first).
template <>
unsigned ToDigit<10>(char ch) {
  return static_cast<unsigned>(ch - '0');
}
template <>
unsigned ToDigit<16>(char ch) {
  return static_cast<unsigned>(kAsciiToInt[static_cast<unsigned char>(ch)]);
}
// Exponent markers: 'e'/'E' for decimal, 'p'/'P' for hexadecimal floats.
template <>
bool IsExponentCharacter<10>(char ch) {
  return ch == 'e' || ch == 'E';
}
template <>
bool IsExponentCharacter<16>(char ch) {
  return ch == 'p' || ch == 'P';
}
// Maximum number of significant mantissa digits kept exactly (see the
// static_asserts near the top of this file).
template <>
constexpr int MantissaDigitsMax<10>() {
  return kDecimalMantissaDigitsMax;
}
template <>
constexpr int MantissaDigitsMax<16>() {
  return kHexadecimalMantissaDigitsMax;
}
// Hard cap on the total digit count, to bound parsing work and avoid int
// overflow in exponent adjustment.
template <>
constexpr int DigitLimit<10>() {
  return kDecimalDigitLimit;
}
template <>
constexpr int DigitLimit<16>() {
  return kHexadecimalDigitLimit;
}
// Power-of-radix magnitude of one digit: 10^1 per decimal digit, 2^4 per
// hex digit.
template <>
constexpr int DigitMagnitude<10>() {
  return 1;
}
template <>
constexpr int DigitMagnitude<16>() {
  return 4;
}
// Consumes up to `max_digits` base-`base` digits from [begin, end),
// accumulating their value into *out (which may already hold high-order
// digits).  Digits beyond `max_digits` are consumed but discarded; if any
// discarded digit was nonzero, *dropped_nonzero_digit is set to true (when
// non-null), so the caller knows the mantissa is inexact.  Returns the total
// number of characters consumed, including skipped leading zeros.
template <int base, typename T>
int ConsumeDigits(const char* begin, const char* end, int max_digits, T* out,
                  bool* dropped_nonzero_digit) {
  // Caller contract: max_digits digits must always fit in T without overflow.
  if (base == 10) {
    assert(max_digits <= std::numeric_limits<T>::digits10);
  } else if (base == 16) {
    assert(max_digits * 4 <= std::numeric_limits<T>::digits);
  }
  const char* const original_begin = begin;
  // Skip leading zeros, but only while the accumulator is still zero: they
  // contribute nothing and must not count against max_digits.
  while (!*out && end != begin && *begin == '0') ++begin;
  T accumulator = *out;
  const char* significant_digits_end =
      (end - begin > max_digits) ? begin + max_digits : end;
  while (begin < significant_digits_end && IsDigit<base>(*begin)) {
    auto digit = static_cast<T>(ToDigit<base>(*begin));
    // These asserts cannot fire given the max_digits bound checked on entry.
    assert(accumulator * base >= accumulator);
    accumulator *= base;
    assert(accumulator + digit >= accumulator);
    accumulator += digit;
    ++begin;
  }
  // Consume (and drop) any remaining digits, remembering whether precision
  // was actually lost.
  bool dropped_nonzero = false;
  while (begin < end && IsDigit<base>(*begin)) {
    dropped_nonzero = dropped_nonzero || (*begin != '0');
    ++begin;
  }
  if (dropped_nonzero && dropped_nonzero_digit != nullptr) {
    *dropped_nonzero_digit = true;
  }
  *out = accumulator;
  return static_cast<int>(begin - original_begin);
}
// Returns true for characters allowed inside the parenthesized payload of a
// NaN literal, i.e. [0-9a-zA-Z_].
bool IsNanChar(char v) {
  if (v == '_') return true;
  if (v >= '0' && v <= '9') return true;
  if (v >= 'a' && v <= 'z') return true;
  return v >= 'A' && v <= 'Z';
}
// Attempts to parse "inf"/"infinity" or "nan"/"nan(payload)" (all matched
// case-insensitively) from [begin, end).  On success fills in out->type,
// out->end, and -- for a NaN payload -- out->subrange_begin/end, then
// returns true.  Returns false (leaving *out untouched) if the input does
// not start with either literal.
bool ParseInfinityOrNan(const char* begin, const char* end,
                        strings_internal::ParsedFloat* out) {
  // Both literals need at least 3 characters.
  if (end - begin < 3) {
    return false;
  }
  switch (*begin) {
    case 'i':
    case 'I': {
      if (strings_internal::memcasecmp(begin + 1, "nf", 2) != 0) {
        return false;
      }
      out->type = strings_internal::FloatType::kInfinity;
      // Prefer the long form "infinity" when fully present.
      if (end - begin >= 8 &&
          strings_internal::memcasecmp(begin + 3, "inity", 5) == 0) {
        out->end = begin + 8;
      } else {
        out->end = begin + 3;
      }
      return true;
    }
    case 'n':
    case 'N': {
      if (strings_internal::memcasecmp(begin + 1, "an", 2) != 0) {
        return false;
      }
      out->type = strings_internal::FloatType::kNan;
      out->end = begin + 3;
      begin += 3;
      // Optionally consume a "(payload)" suffix; it only counts if the
      // closing ')' is found with nothing but NaN-payload characters inside.
      if (begin < end && *begin == '(') {
        const char* nan_begin = begin + 1;
        while (nan_begin < end && IsNanChar(*nan_begin)) {
          ++nan_begin;
        }
        if (nan_begin < end && *nan_begin == ')') {
          out->subrange_begin = begin + 1;
          out->subrange_end = nan_begin;
          out->end = nan_begin + 1;
        }
      }
      return true;
    }
    default:
      return false;
  }
}
}
namespace strings_internal {
// Parses a decimal (base == 10) or hexadecimal (base == 16) floating point
// literal from [begin, end) according to `format_flags`.  On failure the
// returned ParsedFloat has its default (kError) type; on success it carries
// the mantissa, the radix-2 or radix-10 exponent, and `end` pointing one
// past the consumed text.  For inexact decimal mantissas the full digit
// span is recorded in subrange_begin/end so a slow-path reparse is possible;
// for inexact hex mantissas the low bit is set as a "sticky" rounding bit.
template <int base>
strings_internal::ParsedFloat ParseFloat(const char* begin, const char* end,
                                         chars_format format_flags) {
  strings_internal::ParsedFloat result;
  if (begin == end) return result;
  // "inf"/"nan" literals short-circuit the numeric path.
  if (ParseInfinityOrNan(begin, end, &result)) {
    return result;
  }
  const char* const mantissa_begin = begin;
  while (begin < end && *begin == '0') {
    ++begin;
  }
  uint64_t mantissa = 0;
  // Tracks how far the implied decimal/hex point moved relative to the
  // digits actually kept in `mantissa` (in digit counts, not bits).
  int exponent_adjustment = 0;
  bool mantissa_is_inexact = false;
  int pre_decimal_digits = ConsumeDigits<base>(
      begin, end, MantissaDigitsMax<base>(), &mantissa, &mantissa_is_inexact);
  begin += pre_decimal_digits;
  int digits_left;
  if (pre_decimal_digits >= DigitLimit<base>()) {
    // Absurdly long input: reject rather than risk overflow.
    return result;
  } else if (pre_decimal_digits > MantissaDigitsMax<base>()) {
    // Extra integer-part digits were dropped; compensate in the exponent.
    exponent_adjustment =
        static_cast<int>(pre_decimal_digits - MantissaDigitsMax<base>());
    digits_left = 0;
  } else {
    digits_left =
        static_cast<int>(MantissaDigitsMax<base>() - pre_decimal_digits);
  }
  if (begin < end && *begin == '.') {
    ++begin;
    if (mantissa == 0) {
      // Entire integer part was zero: skip fractional leading zeros too,
      // folding them into the exponent instead of the mantissa budget.
      const char* begin_zeros = begin;
      while (begin < end && *begin == '0') {
        ++begin;
      }
      int zeros_skipped = static_cast<int>(begin - begin_zeros);
      if (zeros_skipped >= DigitLimit<base>()) {
        return result;
      }
      exponent_adjustment -= static_cast<int>(zeros_skipped);
    }
    int post_decimal_digits = ConsumeDigits<base>(
        begin, end, digits_left, &mantissa, &mantissa_is_inexact);
    begin += post_decimal_digits;
    if (post_decimal_digits >= DigitLimit<base>()) {
      return result;
    } else if (post_decimal_digits > digits_left) {
      exponent_adjustment -= digits_left;
    } else {
      exponent_adjustment -= post_decimal_digits;
    }
  }
  // A mantissa must contain at least one digit; a lone "." is not a number.
  if (mantissa_begin == begin) {
    return result;
  }
  if (begin - mantissa_begin == 1 && *mantissa_begin == '.') {
    return result;
  }
  if (mantissa_is_inexact) {
    if (base == 10) {
      // Record the full digit span for a possible exact reparse later.
      result.subrange_begin = mantissa_begin;
      result.subrange_end = begin;
    } else if (base == 16) {
      // Sticky bit: dropped nonzero hex digits only matter for rounding.
      mantissa |= 1;
    }
  }
  result.mantissa = mantissa;
  const char* const exponent_begin = begin;
  result.literal_exponent = 0;
  bool found_exponent = false;
  if (AllowExponent(format_flags) && begin < end &&
      IsExponentCharacter<base>(*begin)) {
    bool negative_exponent = false;
    ++begin;
    if (begin < end && *begin == '-') {
      negative_exponent = true;
      ++begin;
    } else if (begin < end && *begin == '+') {
      ++begin;
    }
    const char* const exponent_digits_begin = begin;
    // Exponent digits are always decimal, even for hex floats.
    begin += ConsumeDigits<10>(begin, end, kDecimalExponentDigitsMax,
                               &result.literal_exponent, nullptr);
    if (begin == exponent_digits_begin) {
      // "1e" with no digits: the exponent marker is not part of the number.
      found_exponent = false;
      begin = exponent_begin;
    } else {
      found_exponent = true;
      if (negative_exponent) {
        result.literal_exponent = -result.literal_exponent;
      }
    }
  }
  if (!found_exponent && RequireExponent(format_flags)) {
    return result;
  }
  result.type = strings_internal::FloatType::kNumber;
  if (result.mantissa > 0) {
    result.exponent = result.literal_exponent +
                      (DigitMagnitude<base>() * exponent_adjustment);
  } else {
    // Zero mantissa: normalize the exponent to 0.
    result.exponent = 0;
  }
  result.end = begin;
  return result;
}
template ParsedFloat ParseFloat<10>(const char* begin, const char* end,
chars_format format_flags);
template ParsedFloat ParseFloat<16>(const char* begin, const char* end,
chars_format format_flags);
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/charconv_parse.h"
#include <string>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
using absl::chars_format;
using absl::strings_internal::FloatType;
using absl::strings_internal::ParsedFloat;
using absl::strings_internal::ParseFloat;
namespace {
template <int base>
void ExpectParsedFloat(std::string s, absl::chars_format format_flags,
FloatType expected_type, uint64_t expected_mantissa,
int expected_exponent,
int expected_literal_exponent = -999) {
SCOPED_TRACE(s);
int begin_subrange = -1;
int end_subrange = -1;
std::string::size_type open_bracket_pos = s.find('[');
if (open_bracket_pos != std::string::npos) {
begin_subrange = static_cast<int>(open_bracket_pos);
s.replace(open_bracket_pos, 1, "");
std::string::size_type close_bracket_pos = s.find(']');
CHECK_NE(close_bracket_pos, absl::string_view::npos)
<< "Test input contains [ without matching ]";
end_subrange = static_cast<int>(close_bracket_pos);
s.replace(close_bracket_pos, 1, "");
}
const std::string::size_type expected_characters_matched = s.find('$');
CHECK_NE(expected_characters_matched, std::string::npos)
<< "Input string must contain $";
s.replace(expected_characters_matched, 1, "");
ParsedFloat parsed =
ParseFloat<base>(s.data(), s.data() + s.size(), format_flags);
EXPECT_NE(parsed.end, nullptr);
if (parsed.end == nullptr) {
return;
}
EXPECT_EQ(parsed.type, expected_type);
if (begin_subrange == -1) {
EXPECT_EQ(parsed.subrange_begin, nullptr);
EXPECT_EQ(parsed.subrange_end, nullptr);
} else {
EXPECT_EQ(parsed.subrange_begin, s.data() + begin_subrange);
EXPECT_EQ(parsed.subrange_end, s.data() + end_subrange);
}
if (parsed.type == FloatType::kNumber) {
EXPECT_EQ(parsed.mantissa, expected_mantissa);
EXPECT_EQ(parsed.exponent, expected_exponent);
if (expected_literal_exponent != -999) {
EXPECT_EQ(parsed.literal_exponent, expected_literal_exponent);
}
}
auto characters_matched = static_cast<int>(parsed.end - s.data());
EXPECT_EQ(characters_matched, expected_characters_matched);
}
template <int base>
void ExpectNumber(std::string s, absl::chars_format format_flags,
uint64_t expected_mantissa, int expected_exponent,
int expected_literal_exponent = -999) {
ExpectParsedFloat<base>(std::move(s), format_flags, FloatType::kNumber,
expected_mantissa, expected_exponent,
expected_literal_exponent);
}
void ExpectSpecial(const std::string& s, absl::chars_format format_flags,
FloatType type) {
ExpectParsedFloat<10>(s, format_flags, type, 0, 0);
ExpectParsedFloat<16>(s, format_flags, type, 0, 0);
}
template <int base>
void ExpectFailedParse(absl::string_view s, absl::chars_format format_flags) {
ParsedFloat parsed =
ParseFloat<base>(s.data(), s.data() + s.size(), format_flags);
EXPECT_EQ(parsed.end, nullptr);
}
TEST(ParseFloat, SimpleValue) {
ExpectNumber<10>("1.23456789e5$", chars_format::general, 123456789, -3);
ExpectNumber<10>("1.23456789e+5$", chars_format::general, 123456789, -3);
ExpectNumber<10>("1.23456789E5$", chars_format::general, 123456789, -3);
ExpectNumber<10>("1.23456789e05$", chars_format::general, 123456789, -3);
ExpectNumber<10>("123.456789e3$", chars_format::general, 123456789, -3);
ExpectNumber<10>("0.000123456789e9$", chars_format::general, 123456789, -3);
ExpectNumber<10>("123456.789$", chars_format::general, 123456789, -3);
ExpectNumber<10>("123456789e-3$", chars_format::general, 123456789, -3);
ExpectNumber<16>("1.234abcdefp28$", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<16>("1.234abcdefp+28$", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<16>("1.234ABCDEFp28$", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<16>("1.234AbCdEfP0028$", chars_format::general, 0x1234abcdef,
-8);
ExpectNumber<16>("123.4abcdefp20$", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<16>("0.0001234abcdefp44$", chars_format::general, 0x1234abcdef,
-8);
ExpectNumber<16>("1234abcd.ef$", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<16>("1234abcdefp-8$", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<10>("0001.2345678900e005$", chars_format::general, 12345678900,
-5);
ExpectNumber<16>("0001.234abcdef000p28$", chars_format::general,
0x1234abcdef000, -20);
ExpectNumber<10>("1.23456789e5$ ", chars_format::general, 123456789, -3);
ExpectNumber<10>("1.23456789e5$e5e5", chars_format::general, 123456789, -3);
ExpectNumber<10>("1.23456789e5$.25", chars_format::general, 123456789, -3);
ExpectNumber<10>("1.23456789e5$-", chars_format::general, 123456789, -3);
ExpectNumber<10>("1.23456789e5$PUPPERS!!!", chars_format::general, 123456789,
-3);
ExpectNumber<10>("123456.789$efghij", chars_format::general, 123456789, -3);
ExpectNumber<10>("123456.789$e", chars_format::general, 123456789, -3);
ExpectNumber<10>("123456.789$p5", chars_format::general, 123456789, -3);
ExpectNumber<10>("123456.789$.10", chars_format::general, 123456789, -3);
ExpectNumber<16>("1.234abcdefp28$ ", chars_format::general, 0x1234abcdef,
-8);
ExpectNumber<16>("1.234abcdefp28$p28", chars_format::general, 0x1234abcdef,
-8);
ExpectNumber<16>("1.234abcdefp28$.125", chars_format::general, 0x1234abcdef,
-8);
ExpectNumber<16>("1.234abcdefp28$-", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<16>("1.234abcdefp28$KITTEHS!!!", chars_format::general,
0x1234abcdef, -8);
ExpectNumber<16>("1234abcd.ef$ghijk", chars_format::general, 0x1234abcdef,
-8);
ExpectNumber<16>("1234abcd.ef$p", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<16>("1234abcd.ef$.10", chars_format::general, 0x1234abcdef, -8);
ExpectNumber<10>("9999999999999999999$", chars_format::general,
9999999999999999999u, 0);
ExpectNumber<16>("fffffffffffffff$", chars_format::general,
0xfffffffffffffffu, 0);
ExpectNumber<10>("0$", chars_format::general, 0, 0);
ExpectNumber<16>("0$", chars_format::general, 0, 0);
ExpectNumber<10>("000000000000000000000000000000000000000$",
chars_format::general, 0, 0);
ExpectNumber<16>("000000000000000000000000000000000000000$",
chars_format::general, 0, 0);
ExpectNumber<10>("0000000000000000000000.000000000000000000$",
chars_format::general, 0, 0);
ExpectNumber<16>("0000000000000000000000.000000000000000000$",
chars_format::general, 0, 0);
ExpectNumber<10>("0.00000000000000000000000000000000e123456$",
chars_format::general, 0, 0);
ExpectNumber<16>("0.00000000000000000000000000000000p123456$",
chars_format::general, 0, 0);
}
TEST(ParseFloat, LargeDecimalMantissa) {
ExpectNumber<10>("100000000000000000000000000$", chars_format::general,
1000000000000000000,
8);
ExpectNumber<10>("123456789123456789100000000$", chars_format::general,
1234567891234567891,
8);
ExpectNumber<10>("[123456789123456789123456789]$", chars_format::general,
1234567891234567891,
8,
0);
ExpectNumber<10>("[123456789123456789100000009]$", chars_format::general,
1234567891234567891,
8,
0);
ExpectNumber<10>("[123456789123456789120000000]$", chars_format::general,
1234567891234567891,
8,
0);
ExpectNumber<10>("[00000000123456789123456789123456789]$",
chars_format::general, 1234567891234567891,
8,
0);
ExpectNumber<10>("00000000123456789123456789100000000$",
chars_format::general, 1234567891234567891,
8);
ExpectNumber<10>("1.234567891234567891e123$", chars_format::general,
1234567891234567891, 105);
ExpectNumber<10>("[1.23456789123456789123456789]e123$", chars_format::general,
1234567891234567891,
105,
123);
ExpectNumber<10>("[1999999999999999999999]$", chars_format::general,
1999999999999999999,
3,
0);
}
TEST(ParseFloat, LargeHexadecimalMantissa) {
ExpectNumber<16>("123456789abcdef123456789abcdef$", chars_format::general,
0x123456789abcdef, 60);
ExpectNumber<16>("000000123456789abcdef123456789abcdef$",
chars_format::general, 0x123456789abcdef, 60);
ExpectNumber<16>("1.23456789abcdefp100$", chars_format::general,
0x123456789abcdef, 44);
ExpectNumber<16>("1.23456789abcdef123456789abcdefp100$",
chars_format::general, 0x123456789abcdef, 44);
ExpectNumber<16>("123456789abcdee123456789abcdee$", chars_format::general,
0x123456789abcdef, 60);
ExpectNumber<16>("123456789abcdee000000000000001$", chars_format::general,
0x123456789abcdef, 60);
ExpectNumber<16>("123456789abcdee000000000000000$", chars_format::general,
0x123456789abcdee, 60);
}
TEST(ParseFloat, ScientificVsFixed) {
ExpectNumber<10>("1.23456789$e5", chars_format::fixed, 123456789, -8);
ExpectNumber<10>("123456.789$", chars_format::fixed, 123456789, -3);
ExpectNumber<16>("1.234abcdef$p28", chars_format::fixed, 0x1234abcdef, -36);
ExpectNumber<16>("1234abcd.ef$", chars_format::fixed, 0x1234abcdef, -8);
ExpectNumber<10>("1.23456789e5$", chars_format::scientific, 123456789, -3);
ExpectFailedParse<10>("-123456.789$", chars_format::scientific);
ExpectNumber<16>("1.234abcdefp28$", chars_format::scientific, 0x1234abcdef,
-8);
ExpectFailedParse<16>("1234abcd.ef$", chars_format::scientific);
}
TEST(ParseFloat, Infinity) {
ExpectFailedParse<10>("in", chars_format::general);
ExpectFailedParse<16>("in", chars_format::general);
ExpectFailedParse<10>("inx", chars_format::general);
ExpectFailedParse<16>("inx", chars_format::general);
ExpectSpecial("inf$", chars_format::general, FloatType::kInfinity);
ExpectSpecial("Inf$", chars_format::general, FloatType::kInfinity);
ExpectSpecial("INF$", chars_format::general, FloatType::kInfinity);
ExpectSpecial("inf$inite", chars_format::general, FloatType::kInfinity);
ExpectSpecial("iNfInItY$", chars_format::general, FloatType::kInfinity);
ExpectSpecial("infinity$!!!", chars_format::general, FloatType::kInfinity);
}
TEST(ParseFloat, NaN) {
ExpectFailedParse<10>("na", chars_format::general);
ExpectFailedParse<16>("na", chars_format::general);
ExpectFailedParse<10>("nah", chars_format::general);
ExpectFailedParse<16>("nah", chars_format::general);
ExpectSpecial("nan$", chars_format::general, FloatType::kNan);
ExpectSpecial("NaN$", chars_format::general, FloatType::kNan);
ExpectSpecial("nAn$", chars_format::general, FloatType::kNan);
ExpectSpecial("NAN$", chars_format::general, FloatType::kNan);
ExpectSpecial("NaN$aNaNaNaNaBatman!", chars_format::general, FloatType::kNan);
ExpectSpecial("nan([0xabcdef])$", chars_format::general, FloatType::kNan);
ExpectSpecial("nan([0xabcdef])$...", chars_format::general, FloatType::kNan);
ExpectSpecial("nan([0xabcdef])$)...", chars_format::general, FloatType::kNan);
ExpectSpecial("nan([])$", chars_format::general, FloatType::kNan);
ExpectSpecial("nan([aAzZ09_])$", chars_format::general, FloatType::kNan);
ExpectSpecial("nan$(bad-char)", chars_format::general, FloatType::kNan);
ExpectSpecial("nan$(0xabcdef", chars_format::general, FloatType::kNan);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/charconv_parse.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/charconv_parse_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
e4f7969a-884d-459f-a3f8-7c48d3440123 | cpp | google/cel-cpp | error_value | common/values/error_value.cc | common/values/error_value_test.cc | #include <string>
#include "absl/base/no_destructor.h"
#include "absl/log/absl_check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "common/json.h"
#include "common/type.h"
#include "common/value.h"
namespace cel {
namespace {
std::string ErrorDebugString(const absl::Status& value) {
ABSL_DCHECK(!value.ok()) << "use of moved-from ErrorValue";
return value.ToString(absl::StatusToStringMode::kWithEverything);
}
const absl::Status& DefaultErrorValue() {
static const absl::NoDestructor<absl::Status> value(
absl::UnknownError("unknown error"));
return *value;
}
}
ErrorValue::ErrorValue() : ErrorValue(DefaultErrorValue()) {}
ErrorValue NoSuchFieldError(absl::string_view field) {
return ErrorValue(absl::NotFoundError(
absl::StrCat("no_such_field", field.empty() ? "" : " : ", field)));
}
ErrorValue NoSuchKeyError(absl::string_view key) {
return ErrorValue(
absl::NotFoundError(absl::StrCat("Key not found in map : ", key)));
}
ErrorValue NoSuchTypeError(absl::string_view type) {
return ErrorValue(
absl::NotFoundError(absl::StrCat("type not found: ", type)));
}
ErrorValue DuplicateKeyError() {
return ErrorValue(absl::AlreadyExistsError("duplicate key in map"));
}
ErrorValue TypeConversionError(absl::string_view from, absl::string_view to) {
return ErrorValue(absl::InvalidArgumentError(
absl::StrCat("type conversion error from '", from, "' to '", to, "'")));
}
ErrorValue TypeConversionError(const Type& from, const Type& to) {
return TypeConversionError(from.DebugString(), to.DebugString());
}
bool IsNoSuchField(const ErrorValue& value) {
return absl::IsNotFound(value.NativeValue()) &&
absl::StartsWith(value.NativeValue().message(), "no_such_field");
}
bool IsNoSuchKey(const ErrorValue& value) {
return absl::IsNotFound(value.NativeValue()) &&
absl::StartsWith(value.NativeValue().message(),
"Key not found in map");
}
std::string ErrorValue::DebugString() const { return ErrorDebugString(value_); }
absl::Status ErrorValue::SerializeTo(AnyToJsonConverter&, absl::Cord&) const {
return absl::FailedPreconditionError(
absl::StrCat(GetTypeName(), " is unserializable"));
}
absl::StatusOr<Json> ErrorValue::ConvertToJson(AnyToJsonConverter&) const {
return absl::FailedPreconditionError(
absl::StrCat(GetTypeName(), " is not convertable to JSON"));
}
absl::Status ErrorValue::Equal(ValueManager&, const Value&,
Value& result) const {
result = BoolValue{false};
return absl::OkStatus();
}
} | #include <sstream>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/types/optional.h"
#include "common/casting.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::StatusIs;
using ::testing::_;
using ::testing::An;
using ::testing::IsEmpty;
using ::testing::Ne;
using ::testing::Not;
using ErrorValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(ErrorValueTest, Default) {
ErrorValue value;
EXPECT_THAT(value.NativeValue(), StatusIs(absl::StatusCode::kUnknown));
}
TEST_P(ErrorValueTest, OkStatus) {
EXPECT_DEBUG_DEATH(static_cast<void>(ErrorValue(absl::OkStatus())), _);
}
TEST_P(ErrorValueTest, Kind) {
EXPECT_EQ(ErrorValue(absl::CancelledError()).kind(), ErrorValue::kKind);
EXPECT_EQ(Value(ErrorValue(absl::CancelledError())).kind(),
ErrorValue::kKind);
}
TEST_P(ErrorValueTest, DebugString) {
{
std::ostringstream out;
out << ErrorValue(absl::CancelledError());
EXPECT_THAT(out.str(), Not(IsEmpty()));
}
{
std::ostringstream out;
out << Value(ErrorValue(absl::CancelledError()));
EXPECT_THAT(out.str(), Not(IsEmpty()));
}
}
TEST_P(ErrorValueTest, SerializeTo) {
absl::Cord value;
EXPECT_THAT(ErrorValue().SerializeTo(value_manager(), value),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(ErrorValueTest, ConvertToJson) {
EXPECT_THAT(ErrorValue().ConvertToJson(value_manager()),
StatusIs(absl::StatusCode::kFailedPrecondition));
}
TEST_P(ErrorValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(ErrorValue(absl::CancelledError())),
NativeTypeId::For<ErrorValue>());
EXPECT_EQ(NativeTypeId::Of(Value(ErrorValue(absl::CancelledError()))),
NativeTypeId::For<ErrorValue>());
}
TEST_P(ErrorValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<ErrorValue>(ErrorValue(absl::CancelledError())));
EXPECT_TRUE(
InstanceOf<ErrorValue>(Value(ErrorValue(absl::CancelledError()))));
}
TEST_P(ErrorValueTest, Cast) {
EXPECT_THAT(Cast<ErrorValue>(ErrorValue(absl::CancelledError())),
An<ErrorValue>());
EXPECT_THAT(Cast<ErrorValue>(Value(ErrorValue(absl::CancelledError()))),
An<ErrorValue>());
}
TEST_P(ErrorValueTest, As) {
EXPECT_THAT(As<ErrorValue>(Value(ErrorValue(absl::CancelledError()))),
Ne(absl::nullopt));
}
INSTANTIATE_TEST_SUITE_P(
ErrorValueTest, ErrorValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
ErrorValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/error_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/error_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
ffee8254-4227-43c8-9032-3f027718dad1 | cpp | google/cel-cpp | duration_type | common/types/duration_type.h | common/types/duration_type_test.cc | #ifndef THIRD_PARTY_CEL_CPP_COMMON_TYPES_DURATION_TYPE_H_
#define THIRD_PARTY_CEL_CPP_COMMON_TYPES_DURATION_TYPE_H_
#include <ostream>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "common/type_kind.h"
namespace cel {
class Type;
class TypeParameters;
class DurationType final {
public:
static constexpr TypeKind kKind = TypeKind::kDuration;
static constexpr absl::string_view kName = "google.protobuf.Duration";
DurationType() = default;
DurationType(const DurationType&) = default;
DurationType(DurationType&&) = default;
DurationType& operator=(const DurationType&) = default;
DurationType& operator=(DurationType&&) = default;
static TypeKind kind() { return kKind; }
static absl::string_view name() { return kName; }
static TypeParameters GetParameters();
static std::string DebugString() { return std::string(name()); }
constexpr void swap(DurationType&) noexcept {}
};
inline constexpr void swap(DurationType& lhs, DurationType& rhs) noexcept {
lhs.swap(rhs);
}
inline constexpr bool operator==(DurationType, DurationType) { return true; }
inline constexpr bool operator!=(DurationType lhs, DurationType rhs) {
return !operator==(lhs, rhs);
}
template <typename H>
H AbslHashValue(H state, DurationType) {
return std::move(state);
}
inline std::ostream& operator<<(std::ostream& out, const DurationType& type) {
return out << type.DebugString();
}
}
#endif | #include <sstream>
#include "absl/hash/hash.h"
#include "common/type.h"
#include "internal/testing.h"
namespace cel {
namespace {
TEST(DurationType, Kind) {
EXPECT_EQ(DurationType().kind(), DurationType::kKind);
EXPECT_EQ(Type(DurationType()).kind(), DurationType::kKind);
}
TEST(DurationType, Name) {
EXPECT_EQ(DurationType().name(), DurationType::kName);
EXPECT_EQ(Type(DurationType()).name(), DurationType::kName);
}
TEST(DurationType, DebugString) {
{
std::ostringstream out;
out << DurationType();
EXPECT_EQ(out.str(), DurationType::kName);
}
{
std::ostringstream out;
out << Type(DurationType());
EXPECT_EQ(out.str(), DurationType::kName);
}
}
TEST(DurationType, Hash) {
EXPECT_EQ(absl::HashOf(DurationType()), absl::HashOf(DurationType()));
}
TEST(DurationType, Equal) {
EXPECT_EQ(DurationType(), DurationType());
EXPECT_EQ(Type(DurationType()), DurationType());
EXPECT_EQ(DurationType(), Type(DurationType()));
EXPECT_EQ(Type(DurationType()), Type(DurationType()));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/duration_type.h | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/types/duration_type_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
5dcd7ac0-6bda-4243-81e7-3c128c6e2b43 | cpp | tensorflow/tensorflow | cast | tensorflow/lite/delegates/gpu/common/tasks/cast.cc | tensorflow/lite/delegates/gpu/cl/kernels/cast_test.cc | #include "tensorflow/lite/delegates/gpu/common/tasks/cast.h"
#include <map>
#include <string>
#include <utility>
#include <vector>
#include "absl/strings/substitute.h"
#include "tensorflow/lite/delegates/gpu/common/task/util.h"
namespace tflite {
namespace gpu {
GPUOperation CreateCast(const OperationDef& definition,
const GpuInfo& gpu_info) {
ElementwiseDescriptor op_desc;
const std::string conversion =
GetTypeConversion(gpu_info, definition.src_tensors[0].GetDataType(),
definition.dst_tensors[0].GetDataType(), 4);
op_desc.code =
"out_value = " + absl::Substitute(conversion, "in_value") + ";\n";
return CreateGpuOperation(definition, std::move(op_desc));
}
}
} | #include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h"
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/status.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/cast_test_util.h"
namespace tflite {
namespace gpu {
namespace cl {
namespace {
TEST_F(OpenCLOperationTest, Cast) {
auto status = CastTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CastToBool) {
auto status = CastToBoolTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
TEST_F(OpenCLOperationTest, CastFromBool) {
auto status = CastFromBoolTests(&exec_env_);
ASSERT_TRUE(status.ok()) << status.message();
}
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/common/tasks/cast.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/gpu/cl/kernels/cast_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
17e61239-1dca-4b8b-a5dd-20c400f1ed09 | cpp | google/libphonenumber | phonenumberutil | cpp/src/phonenumbers/phonenumberutil.cc | cpp/test/phonenumbers/phonenumberutil_test.cc | #include "phonenumbers/phonenumberutil.h"
#include <algorithm>
#include <cctype>
#include <cstring>
#include <iterator>
#include <map>
#include <utility>
#include <vector>
#include <unicode/uchar.h>
#include <unicode/utf8.h>
#include "phonenumbers/asyoutypeformatter.h"
#include "phonenumbers/base/basictypes.h"
#include "phonenumbers/base/logging.h"
#include "phonenumbers/base/memory/singleton.h"
#include "phonenumbers/default_logger.h"
#include "phonenumbers/encoding_utils.h"
#include "phonenumbers/matcher_api.h"
#include "phonenumbers/metadata.h"
#include "phonenumbers/normalize_utf8.h"
#include "phonenumbers/phonemetadata.pb.h"
#include "phonenumbers/phonenumber.h"
#include "phonenumbers/phonenumber.pb.h"
#include "phonenumbers/regex_based_matcher.h"
#include "phonenumbers/regexp_adapter.h"
#include "phonenumbers/regexp_cache.h"
#include "phonenumbers/regexp_factory.h"
#include "phonenumbers/region_code.h"
#include "phonenumbers/stl_util.h"
#include "phonenumbers/stringutil.h"
#include "phonenumbers/utf/unicodetext.h"
#include "phonenumbers/utf/utf.h"
namespace i18n {
namespace phonenumbers {
using google::protobuf::RepeatedField;
using gtl::OrderByFirst;
const size_t PhoneNumberUtil::kMinLengthForNsn;
const size_t PhoneNumberUtil::kMaxLengthForNsn;
const size_t PhoneNumberUtil::kMaxLengthCountryCode;
const int PhoneNumberUtil::kNanpaCountryCode;
const char PhoneNumberUtil::kPlusChars[] = "+\xEF\xBC\x8B";
const char PhoneNumberUtil::kValidPunctuation[] =
"-x\xE2\x80\x90-\xE2\x80\x95\xE2\x88\x92\xE3\x83\xBC\xEF\xBC\x8D-\xEF\xBC"
"\x8F \xC2\xA0\xC2\xAD\xE2\x80\x8B\xE2\x81\xA0\xE3\x80\x80()\xEF\xBC\x88"
"\xEF\xBC\x89\xEF\xBC\xBB\xEF\xBC\xBD.\\[\\]/~\xE2\x81\x93\xE2\x88\xBC";
const char PhoneNumberUtil::kCaptureUpToSecondNumberStart[] = "(.*)[\\\\/] *x";
const char PhoneNumberUtil::kRegionCodeForNonGeoEntity[] = "001";
namespace {
const char kPlusSign[] = "+";
const char kStarSign[] = "*";
const char kRfc3966ExtnPrefix[] = ";ext=";
const char kRfc3966Prefix[] = "tel:";
const char kRfc3966PhoneContext[] = ";phone-context=";
const char kRfc3966IsdnSubaddress[] = ";isub=";
const char kRfc3966VisualSeparator[] = "[\\-\\.\\(\\)]?";
const char kDigits[] = "\\p{Nd}";
const char kValidAlpha[] = "a-z";
const char kValidAlphaInclUppercase[] = "A-Za-z";
const char kDefaultExtnPrefix[] = " ext. ";
const char kPossibleSeparatorsBetweenNumberAndExtLabel[] =
"[ \xC2\xA0\\t,]*";
const char kPossibleCharsAfterExtLabel[] =
"[:\\.\xEF\xBC\x8E]?[ \xC2\xA0\\t,-]*";
const char kOptionalExtSuffix[] = "#?";
// Parses the compiled-in binary metadata blob into |metadata|. Returns false
// (and logs an error) if the embedded bytes cannot be parsed.
bool LoadCompiledInMetadata(PhoneMetadataCollection* metadata) {
  const bool parsed_ok =
      metadata->ParseFromArray(metadata_get(), metadata_size());
  if (!parsed_ok) {
    LOG(ERROR) << "Could not parse binary data.";
  }
  return parsed_ok;
}
// Returns the PhoneNumberDesc inside |metadata| that corresponds to the given
// phone-number |type|. FIXED_LINE_OR_MOBILE resolves to the fixed-line
// description; any type without a dedicated description (including UNKNOWN)
// resolves to the general description.
const PhoneNumberDesc* GetNumberDescByType(
    const PhoneMetadata& metadata,
    PhoneNumberUtil::PhoneNumberType type) {
  // Default to the general description; the switch overrides it for types
  // that carry their own description.
  const PhoneNumberDesc* desc = &metadata.general_desc();
  switch (type) {
    case PhoneNumberUtil::PREMIUM_RATE:
      desc = &metadata.premium_rate();
      break;
    case PhoneNumberUtil::TOLL_FREE:
      desc = &metadata.toll_free();
      break;
    case PhoneNumberUtil::MOBILE:
      desc = &metadata.mobile();
      break;
    case PhoneNumberUtil::FIXED_LINE:
    case PhoneNumberUtil::FIXED_LINE_OR_MOBILE:
      desc = &metadata.fixed_line();
      break;
    case PhoneNumberUtil::SHARED_COST:
      desc = &metadata.shared_cost();
      break;
    case PhoneNumberUtil::VOIP:
      desc = &metadata.voip();
      break;
    case PhoneNumberUtil::PERSONAL_NUMBER:
      desc = &metadata.personal_number();
      break;
    case PhoneNumberUtil::PAGER:
      desc = &metadata.pager();
      break;
    case PhoneNumberUtil::UAN:
      desc = &metadata.uan();
      break;
    case PhoneNumberUtil::VOICEMAIL:
      desc = &metadata.voicemail();
      break;
    default:
      break;
  }
  return desc;
}
// Prepends the country calling code to |formatted_number| in the shape
// required by |number_format|. NATIONAL (and any unrecognized format) leaves
// the number unchanged.
void PrefixNumberWithCountryCallingCode(
    int country_calling_code, PhoneNumberUtil::PhoneNumberFormat number_format,
    std::string* formatted_number) {
  if (number_format == PhoneNumberUtil::E164) {
    // E.g. "+41446681800".
    formatted_number->insert(0, StrCat(kPlusSign, country_calling_code));
  } else if (number_format == PhoneNumberUtil::INTERNATIONAL) {
    // E.g. "+41 44 668 1800".
    formatted_number->insert(0, StrCat(kPlusSign, country_calling_code, " "));
  } else if (number_format == PhoneNumberUtil::RFC3966) {
    // E.g. "tel:+41-44-668-1800".
    formatted_number->insert(
        0, StrCat(kRfc3966Prefix, kPlusSign, country_calling_code, "-"));
  }
}
// Returns true when the national significant number of one argument, written
// in decimal, is a suffix of the other's (in either direction).
bool IsNationalNumberSuffixOfTheOther(const PhoneNumber& first_number,
                                      const PhoneNumber& second_number) {
  const std::string first_nsn =
      SimpleItoa(static_cast<uint64>(first_number.national_number()));
  const std::string second_nsn =
      SimpleItoa(static_cast<uint64>(second_number.national_number()));
  if (HasSuffixString(first_nsn, second_nsn)) {
    return true;
  }
  return HasSuffixString(second_nsn, first_nsn);
}
// Decodes the first UTF-8-encoded character of |unicode_char| and returns its
// Unicode code point.
char32 ToUnicodeCodepoint(const char* unicode_char) {
  char32 code_point;
  EncodingUtils::DecodeUTF8Char(unicode_char, &code_point);
  return code_point;
}
// Returns a capturing-group pattern that matches between 1 and |max_length|
// digit characters (kDigits covers Unicode decimal digits).
std::string ExtnDigits(int max_length) {
  const std::string quantifier = StrCat("{1,", max_length, "}");
  return StrCat("([", kDigits, "]", quantifier, ")");
}
// Builds the regular expression that recognizes phone-number extensions.
// When |for_parsing| is true the pattern additionally accepts auto-dialler
// forms (",,"/";" and bare-comma separators), which are only safe to match
// when parsing a complete number rather than finding numbers in free text.
std::string CreateExtnPattern(bool for_parsing) {
  // Maximum extension digits accepted after each kind of label: the more
  // explicit the label, the more digits we are willing to accept.
  const int kLimitAfterExplicitLabel = 20;
  const int kLimitAfterLikelyLabel = 15;
  const int kLimitAfterAmbiguousChar = 9;
  const int kLimitWhenNotSure = 6;
  // Explicit labels such as "ext"/"extension" (including accented Spanish
  // forms), fullwidth variants, and other language-specific spellings.
  const std::string explicit_labels =
      "(?:e?xt(?:ensi(?:o\xCC\x81?|\xC3\xB3))?n?|(?:\xEF\xBD\x85)?"
      "\xEF\xBD\x98\xEF\xBD\x94(?:\xEF\xBD\x8E)?|\xD0\xB4\xD0\xBE\xD0\xB1|"
      "anexo)";
  // Characters that often, but not always, introduce an extension.
  const std::string ambiguous_labels =
      "(?:[x\xEF\xBD\x98#\xEF\xBC\x83~\xEF\xBD\x9E]|int|"
      "\xEF\xBD\x89\xEF\xBD\x8E\xEF\xBD\x94)";
  const std::string ambiguous_separator = "[- ]+";
  // RFC 3966 ";ext=" prefix followed by the extension digits.
  const std::string rfc_extn =
      StrCat(kRfc3966ExtnPrefix, ExtnDigits(kLimitAfterExplicitLabel));
  const std::string explicit_extn = StrCat(
      kPossibleSeparatorsBetweenNumberAndExtLabel, explicit_labels,
      kPossibleCharsAfterExtLabel, ExtnDigits(kLimitAfterExplicitLabel),
      kOptionalExtSuffix);
  const std::string ambiguous_extn = StrCat(
      kPossibleSeparatorsBetweenNumberAndExtLabel, ambiguous_labels,
      kPossibleCharsAfterExtLabel, ExtnDigits(kLimitAfterAmbiguousChar),
      kOptionalExtSuffix);
  // American-style extension written as "<number> 1234#".
  const std::string american_style_extn_with_suffix =
      StrCat(ambiguous_separator, ExtnDigits(kLimitWhenNotSure), "#");
  const std::string extension_pattern =
      StrCat(rfc_extn, "|", explicit_extn, "|", ambiguous_extn, "|",
             american_style_extn_with_suffix);
  if (!for_parsing) {
    return extension_pattern;
  }
  // Parsing-only alternatives: ",,"/";" used by auto-diallers before an
  // extension, and one or more commas as a bare separator.
  const std::string auto_dialling_labels = "(?:,{2}|;)";
  const std::string separators_no_comma = "[ \xC2\xA0\\t]*";
  const std::string auto_dialling_extn = StrCat(
      separators_no_comma, auto_dialling_labels, kPossibleCharsAfterExtLabel,
      ExtnDigits(kLimitAfterLikelyLabel), kOptionalExtSuffix);
  const std::string only_commas_extn = StrCat(
      separators_no_comma, "(?:,)+", kPossibleCharsAfterExtLabel,
      ExtnDigits(kLimitAfterAmbiguousChar), kOptionalExtSuffix);
  return StrCat(extension_pattern, "|", auto_dialling_extn, "|",
                only_commas_extn);
}
void NormalizeHelper(const std::map<char32, char>& normalization_replacements,
bool remove_non_matches,
string* number) {
DCHECK(number);
UnicodeText number_as_unicode;
number_as_unicode.PointToUTF8(number->data(), static_cast<int>(number->size()));
if (!number_as_unicode.UTF8WasValid()) {
number->clear();
return;
}
std::string normalized_number;
char unicode_char[5];
for (UnicodeText::const_iterator it = number_as_unicode.begin();
it != number_as_unicode.end();
++it) {
std::map<char32, char>::const_iterator found_glyph_pair =
normalization_replacements.find(*it);
if (found_glyph_pair != normalization_replacements.end()) {
normalized_number.push_back(found_glyph_pair->second);
} else if (!remove_non_matches) {
int char_len = it.get_utf8(unicode_char);
normalized_number.append(unicode_char, char_len);
}
}
number->assign(normalized_number);
}
// Returns true when |desc| carries real possible-length data. A single entry
// of -1 is the sentinel meaning "no numbers of this type exist".
bool DescHasPossibleNumberData(const PhoneNumberDesc& desc) {
  const bool only_sentinel =
      desc.possible_length_size() == 1 && desc.possible_length(0) == -1;
  return !only_sentinel;
}
// Returns true when |desc| contains any usable data: an example number,
// genuine possible lengths, or a national number pattern.
bool DescHasData(const PhoneNumberDesc& desc) {
  if (desc.has_example_number()) {
    return true;
  }
  if (DescHasPossibleNumberData(desc)) {
    return true;
  }
  return desc.has_national_number_pattern();
}
void GetSupportedTypesForMetadata(
const PhoneMetadata& metadata,
std::set<PhoneNumberUtil::PhoneNumberType>* types) {
DCHECK(types);
for (int i = 0; i <= static_cast<int>(PhoneNumberUtil::kMaxNumberType); ++i) {
PhoneNumberUtil::PhoneNumberType type =
static_cast<PhoneNumberUtil::PhoneNumberType>(i);
if (type == PhoneNumberUtil::FIXED_LINE_OR_MOBILE ||
type == PhoneNumberUtil::UNKNOWN) {
continue;
}
if (DescHasData(*GetNumberDescByType(metadata, type))) {
types->insert(type);
}
}
}
// Checks the length of |number| (a normalized national significant number)
// against the possible lengths that |metadata| records for numbers of the
// given |type|. Returns IS_POSSIBLE or IS_POSSIBLE_LOCAL_ONLY on a match,
// otherwise TOO_SHORT, TOO_LONG, or INVALID_LENGTH.
PhoneNumberUtil::ValidationResult TestNumberLength(
    const std::string& number, const PhoneMetadata& metadata,
    PhoneNumberUtil::PhoneNumberType type) {
  const PhoneNumberDesc* desc_for_type = GetNumberDescByType(metadata, type);
  // If the type-specific description has no length data of its own, fall
  // back to the general description's lengths.
  RepeatedField<int> possible_lengths =
      desc_for_type->possible_length_size() == 0
          ? metadata.general_desc().possible_length()
          : desc_for_type->possible_length();
  RepeatedField<int> local_lengths =
      desc_for_type->possible_length_local_only();
  if (type == PhoneNumberUtil::FIXED_LINE_OR_MOBILE) {
    // For the combined type, a match against either the fixed-line or the
    // mobile length set is acceptable, so merge the two sets.
    const PhoneNumberDesc* fixed_line_desc =
        GetNumberDescByType(metadata, PhoneNumberUtil::FIXED_LINE);
    if (!DescHasPossibleNumberData(*fixed_line_desc)) {
      // No fixed-line data at all: only mobile lengths are relevant.
      return TestNumberLength(number, metadata, PhoneNumberUtil::MOBILE);
    } else {
      const PhoneNumberDesc* mobile_desc =
          GetNumberDescByType(metadata, PhoneNumberUtil::MOBILE);
      if (DescHasPossibleNumberData(*mobile_desc)) {
        // Merge mobile lengths in, then re-sort so that element 0 is again
        // the minimum length (relied upon below).
        possible_lengths.MergeFrom(
            mobile_desc->possible_length_size() == 0
                ? metadata.general_desc().possible_length()
                : mobile_desc->possible_length());
        std::sort(possible_lengths.begin(), possible_lengths.end());
        if (local_lengths.size() == 0) {
          local_lengths = mobile_desc->possible_length_local_only();
        } else {
          local_lengths.MergeFrom(mobile_desc->possible_length_local_only());
          std::sort(local_lengths.begin(), local_lengths.end());
        }
      }
    }
  }
  // A single entry of -1 is the sentinel meaning numbers of this type never
  // exist in this region.
  if (possible_lengths.Get(0) == -1) {
    return PhoneNumberUtil::INVALID_LENGTH;
  }
  int actual_length = static_cast<int>(number.length());
  // Lengths that are only diallable within the region take precedence.
  if (std::find(local_lengths.begin(), local_lengths.end(), actual_length) !=
      local_lengths.end()) {
    return PhoneNumberUtil::IS_POSSIBLE_LOCAL_ONLY;
  }
  int minimum_length = possible_lengths.Get(0);
  if (minimum_length == actual_length) {
    return PhoneNumberUtil::IS_POSSIBLE;
  } else if (minimum_length > actual_length) {
    return PhoneNumberUtil::TOO_SHORT;
  } else if (*(possible_lengths.end() - 1) < actual_length) {
    return PhoneNumberUtil::TOO_LONG;
  }
  // Strictly between the minimum and maximum: possible only when the exact
  // length appears among the remaining entries.
  return std::find(possible_lengths.begin() + 1, possible_lengths.end(),
                   actual_length) != possible_lengths.end()
             ? PhoneNumberUtil::IS_POSSIBLE
             : PhoneNumberUtil::INVALID_LENGTH;
}
// Convenience overload: checks |number|'s length against |metadata| without
// restricting to a particular number type. UNKNOWN means the general
// description's possible lengths are used.
PhoneNumberUtil::ValidationResult TestNumberLength(
    const std::string& number, const PhoneMetadata& metadata) {
  return TestNumberLength(number, metadata, PhoneNumberUtil::UNKNOWN);
}
// Copies only the fields relevant for number identity (country code,
// national number, extension, leading-zero info) from |number| into
// |pruned_number|, discarding everything else such as raw input.
void CopyCoreFieldsOnly(const PhoneNumber& number, PhoneNumber* pruned_number) {
  pruned_number->set_country_code(number.country_code());
  pruned_number->set_national_number(number.national_number());
  const std::string& extension = number.extension();
  if (!extension.empty()) {
    pruned_number->set_extension(extension);
  }
  // Leading-zero information is only meaningful when the flag itself is set.
  if (number.italian_leading_zero()) {
    pruned_number->set_italian_leading_zero(true);
    pruned_number->set_number_of_leading_zeros(
        number.number_of_leading_zeros());
  }
}
// Returns whether |number| matches the national-number pattern of |desc|
// via the supplied matcher. The final argument (false) requests a full
// match rather than an allow-prefix match.
bool IsMatch(const MatcherApi& matcher_api,
             const std::string& number, const PhoneNumberDesc& desc) {
  return matcher_api.MatchNationalNumber(number, desc, false);
}
}
// Installs |logger| as this instance's logger and as the process-wide
// logging implementation. Takes ownership of |logger|.
void PhoneNumberUtil::SetLogger(Logger* logger) {
  // Reset first so logger_ owns the new object before it is published.
  logger_.reset(logger);
  Logger::set_logger_impl(logger_.get());
}
// Bundles every compiled regular expression and character-mapping table used
// by PhoneNumberUtil. A single instance is created by the PhoneNumberUtil
// constructor and lives for the lifetime of the singleton. The member
// initializer list below is order-critical: regexp members depend on the
// string members declared (and therefore initialized) before them.
class PhoneNumberRegExpsAndMappings {
 private:
  // Fills the character-mapping maps and country-code sets. Called once from
  // the constructor, after all regular-expression members are ready.
  void InitializeMapsAndSets() {
    // Characters that may appear in a dialable number unchanged.
    diallable_char_mappings_.insert(std::make_pair('+', '+'));
    diallable_char_mappings_.insert(std::make_pair('*', '*'));
    diallable_char_mappings_.insert(std::make_pair('#', '#'));
    // Grouping symbols tolerated after a leading '+': the various Unicode
    // dashes (fullwidth, hyphen, en/em/horizontal-bar, minus sign) all
    // normalize to '-'.
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("-"), '-'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xEF\xBC\x8D" ), '-'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE2\x80\x90" ), '-'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE2\x80\x91" ), '-'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE2\x80\x92" ), '-'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE2\x80\x93" ), '-'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE2\x80\x94" ), '-'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE2\x80\x95" ), '-'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE2\x88\x92" ), '-'));
    // ASCII and fullwidth solidus normalize to '/'.
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("/"), '/'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xEF\xBC\x8F" ), '/'));
    // Space variants (ideographic space, word joiner) normalize to ' '.
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint(" "), ' '));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE3\x80\x80" ), ' '));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xE2\x81\xA0"), ' '));
    // Dot variants (fullwidth full stop) normalize to '.'.
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("."), '.'));
    all_plus_number_grouping_symbols_.insert(
        std::make_pair(ToUnicodeCodepoint("\xEF\xBC\x8E" ), '.'));
    // Standard ITU letter-to-digit keypad mapping (vanity numbers).
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("A"), '2'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("B"), '2'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("C"), '2'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("D"), '3'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("E"), '3'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("F"), '3'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("G"), '4'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("H"), '4'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("I"), '4'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("J"), '5'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("K"), '5'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("L"), '5'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("M"), '6'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("N"), '6'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("O"), '6'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("P"), '7'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("Q"), '7'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("R"), '7'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("S"), '7'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("T"), '8'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("U"), '8'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("V"), '8'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("W"), '9'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("X"), '9'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("Y"), '9'));
    alpha_mappings_.insert(std::make_pair(ToUnicodeCodepoint("Z"), '9'));
    // Derive lower-case equivalents of the ASCII letters above, and collect
    // both cases as "letters" for the plus-number grouping table.
    std::map<char32, char> lower_case_mappings;
    std::map<char32, char> alpha_letters;
    for (std::map<char32, char>::const_iterator it = alpha_mappings_.begin();
         it != alpha_mappings_.end();
         ++it) {
      if (it->first < 128) {
        char letter_as_upper = static_cast<char>(it->first);
        char32 letter_as_lower = static_cast<char32>(tolower(letter_as_upper));
        lower_case_mappings.insert(std::make_pair(letter_as_lower, it->second));
        alpha_letters.insert(std::make_pair(letter_as_lower, letter_as_upper));
        alpha_letters.insert(std::make_pair(it->first, letter_as_upper));
      }
    }
    alpha_mappings_.insert(lower_case_mappings.begin(),
                           lower_case_mappings.end());
    alpha_phone_mappings_.insert(alpha_mappings_.begin(),
                                 alpha_mappings_.end());
    all_plus_number_grouping_symbols_.insert(alpha_letters.begin(),
                                             alpha_letters.end());
    // ASCII digits map to themselves in every table.
    for (char c = '0'; c <= '9'; ++c) {
      diallable_char_mappings_.insert(std::make_pair(c, c));
      alpha_phone_mappings_.insert(std::make_pair(c, c));
      all_plus_number_grouping_symbols_.insert(std::make_pair(c, c));
    }
    // Calling code 54 (Argentina): mobile numbers carry a '9' token.
    mobile_token_mappings_.insert(std::make_pair(54, '9'));
    // Calling code 52 (Mexico): area codes exist without a national prefix.
    countries_without_national_prefix_with_area_codes_.insert(52);
    // Calling code 86 (China): geographical mobiles without mobile area codes.
    geo_mobile_countries_without_mobile_area_codes_.insert(86);
    // Countries whose mobile numbers are geographical.
    geo_mobile_countries_.insert(52);
    geo_mobile_countries_.insert(54);
    geo_mobile_countries_.insert(55);
    geo_mobile_countries_.insert(62);
    geo_mobile_countries_.insert(
        geo_mobile_countries_without_mobile_area_codes_.begin(),
        geo_mobile_countries_without_mobile_area_codes_.end());
  }
  // Pattern-source strings; must be declared before the regexps built from
  // them so that initialization order is correct.
  const std::string valid_phone_number_;
  const std::string extn_patterns_for_parsing_;
  const std::string rfc3966_phone_digit_;
  const std::string alphanum_;
  const std::string rfc3966_domainlabel_;
  const std::string rfc3966_toplabel_;
 public:
  scoped_ptr<const AbstractRegExpFactory> regexp_factory_;
  scoped_ptr<RegExpCache> regexp_cache_;
  // Character-normalization tables populated by InitializeMapsAndSets().
  std::map<char32, char> diallable_char_mappings_;
  std::map<char32, char> alpha_mappings_;
  std::map<char32, char> alpha_phone_mappings_;
  std::map<char32, char> all_plus_number_grouping_symbols_;
  std::map<int, char> mobile_token_mappings_;
  std::set<int> countries_without_national_prefix_with_area_codes_;
  std::set<int> geo_mobile_countries_without_mobile_area_codes_;
  std::set<int> geo_mobile_countries_;
  // Pre-compiled regular expressions shared across the library.
  scoped_ptr<const RegExp> single_international_prefix_;
  scoped_ptr<const RegExp> digits_pattern_;
  scoped_ptr<const RegExp> capturing_digit_pattern_;
  scoped_ptr<const RegExp> capturing_ascii_digits_pattern_;
  scoped_ptr<const RegExp> valid_start_char_pattern_;
  scoped_ptr<const RegExp> capture_up_to_second_number_start_pattern_;
  scoped_ptr<const RegExp> unwanted_end_char_pattern_;
  scoped_ptr<const RegExp> separator_pattern_;
  const std::string extn_patterns_for_matching_;
  scoped_ptr<const RegExp> extn_pattern_;
  scoped_ptr<const RegExp> valid_phone_number_pattern_;
  scoped_ptr<const RegExp> valid_alpha_phone_pattern_;
  scoped_ptr<const RegExp> first_group_capturing_pattern_;
  scoped_ptr<const RegExp> carrier_code_pattern_;
  scoped_ptr<const RegExp> plus_chars_pattern_;
  std::unique_ptr<const RegExp> rfc3966_global_number_digits_pattern_;
  std::unique_ptr<const RegExp> rfc3966_domainname_pattern_;
  // Builds every pattern string and compiled regexp, then populates the
  // mapping tables. Initializer order mirrors declaration order above.
  PhoneNumberRegExpsAndMappings()
      : valid_phone_number_(
            StrCat(kDigits, "{", PhoneNumberUtil::kMinLengthForNsn, "}|[",
                   PhoneNumberUtil::kPlusChars, "]*(?:[",
                   PhoneNumberUtil::kValidPunctuation, kStarSign, "]*",
                   kDigits, "){3,}[", PhoneNumberUtil::kValidPunctuation,
                   kStarSign, kValidAlpha, kDigits, "]*")),
        extn_patterns_for_parsing_(CreateExtnPattern( true)),
        rfc3966_phone_digit_(
            StrCat("(", kDigits, "|", kRfc3966VisualSeparator, ")")),
        alphanum_(StrCat(kValidAlphaInclUppercase, kDigits)),
        rfc3966_domainlabel_(
            StrCat("[", alphanum_, "]+((\\-)*[", alphanum_, "])*")),
        rfc3966_toplabel_(StrCat("[", kValidAlphaInclUppercase,
                                 "]+((\\-)*[", alphanum_, "])*")),
        regexp_factory_(new RegExpFactory()),
        regexp_cache_(new RegExpCache(*regexp_factory_.get(), 128)),
        diallable_char_mappings_(),
        alpha_mappings_(),
        alpha_phone_mappings_(),
        all_plus_number_grouping_symbols_(),
        mobile_token_mappings_(),
        countries_without_national_prefix_with_area_codes_(),
        geo_mobile_countries_without_mobile_area_codes_(),
        geo_mobile_countries_(),
        single_international_prefix_(regexp_factory_->CreateRegExp(
            "[\\d]+(?:[~\xE2\x81\x93\xE2\x88\xBC\xEF\xBD\x9E][\\d]+)?")),
        digits_pattern_(
            regexp_factory_->CreateRegExp(StrCat("[", kDigits, "]*"))),
        capturing_digit_pattern_(
            regexp_factory_->CreateRegExp(StrCat("([", kDigits, "])"))),
        capturing_ascii_digits_pattern_(
            regexp_factory_->CreateRegExp("(\\d+)")),
        valid_start_char_pattern_(regexp_factory_->CreateRegExp(
            StrCat("[", PhoneNumberUtil::kPlusChars, kDigits, "]"))),
        capture_up_to_second_number_start_pattern_(
            regexp_factory_->CreateRegExp(
                PhoneNumberUtil::kCaptureUpToSecondNumberStart)),
        unwanted_end_char_pattern_(
            regexp_factory_->CreateRegExp("[^\\p{N}\\p{L}#]")),
        separator_pattern_(regexp_factory_->CreateRegExp(
            StrCat("[", PhoneNumberUtil::kValidPunctuation, "]+"))),
        extn_patterns_for_matching_(
            CreateExtnPattern( false)),
        extn_pattern_(regexp_factory_->CreateRegExp(
            StrCat("(?i)(?:", extn_patterns_for_parsing_, ")$"))),
        valid_phone_number_pattern_(regexp_factory_->CreateRegExp(
            StrCat("(?i)", valid_phone_number_,
                   "(?:", extn_patterns_for_parsing_, ")?"))),
        valid_alpha_phone_pattern_(regexp_factory_->CreateRegExp(
            StrCat("(?i)(?:.*?[", kValidAlpha, "]){3}"))),
        first_group_capturing_pattern_(
            regexp_factory_->CreateRegExp("(\\$\\d)")),
        carrier_code_pattern_(regexp_factory_->CreateRegExp("\\$CC")),
        plus_chars_pattern_(regexp_factory_->CreateRegExp(
            StrCat("[", PhoneNumberUtil::kPlusChars, "]+"))),
        rfc3966_global_number_digits_pattern_(regexp_factory_->CreateRegExp(
            StrCat("^\\", kPlusSign, rfc3966_phone_digit_, "*", kDigits,
                   rfc3966_phone_digit_, "*$"))),
        rfc3966_domainname_pattern_(regexp_factory_->CreateRegExp(StrCat(
            "^(", rfc3966_domainlabel_, "\\.)*", rfc3966_toplabel_, "\\.?$"))) {
    InitializeMapsAndSets();
  }
  PhoneNumberRegExpsAndMappings(const PhoneNumberRegExpsAndMappings&) = delete;
  PhoneNumberRegExpsAndMappings& operator=(
      const PhoneNumberRegExpsAndMappings&) = delete;
};
// Loads the compiled-in metadata and builds all lookup structures:
// per-region metadata, non-geographical metadata keyed by calling code, the
// NANPA region set, and the sorted calling-code -> region-list map.
PhoneNumberUtil::PhoneNumberUtil()
    : logger_(Logger::set_logger_impl(new NullLogger())),
      matcher_api_(new RegexBasedMatcher()),
      reg_exps_(new PhoneNumberRegExpsAndMappings),
      country_calling_code_to_region_code_map_(
          new std::vector<IntRegionsPair>()),
      nanpa_regions_(new absl::node_hash_set<std::string>()),
      region_to_metadata_map_(new absl::node_hash_map<std::string, PhoneMetadata>()),
      country_code_to_non_geographical_metadata_map_(
          new absl::node_hash_map<int, PhoneMetadata>) {
  Logger::set_logger_impl(logger_.get());
  PhoneMetadataCollection metadata_collection;
  if (!LoadCompiledInMetadata(&metadata_collection)) {
    // Without metadata the util is unusable; leave the maps empty.
    LOG(DFATAL) << "Could not parse compiled-in metadata.";
    return;
  }
  // Temporary map from calling code to the (ordered) list of regions using
  // it; ownership of the lists transfers to the member vector below.
  std::map<int, std::list<std::string>* > country_calling_code_to_region_map;
  for (RepeatedPtrField<PhoneMetadata>::const_iterator it =
           metadata_collection.metadata().begin();
       it != metadata_collection.metadata().end();
       ++it) {
    const std::string& region_code = it->id();
    if (region_code == RegionCode::GetUnknown()) {
      continue;
    }
    int country_calling_code = it->country_code();
    // Non-geographical entities are keyed by calling code, real regions by
    // their region code.
    if (kRegionCodeForNonGeoEntity == region_code) {
      country_code_to_non_geographical_metadata_map_->insert(
          std::make_pair(country_calling_code, *it));
    } else {
      region_to_metadata_map_->insert(std::make_pair(region_code, *it));
    }
    std::map<int, std::list<std::string>* >::iterator calling_code_in_map =
        country_calling_code_to_region_map.find(country_calling_code);
    if (calling_code_in_map != country_calling_code_to_region_map.end()) {
      // The main country for a shared calling code goes first in its list.
      if (it->main_country_for_code()) {
        calling_code_in_map->second->push_front(region_code);
      } else {
        calling_code_in_map->second->push_back(region_code);
      }
    } else {
      std::list<std::string>* list_with_region_code =
          new std::list<std::string>();
      list_with_region_code->push_back(region_code);
      country_calling_code_to_region_map.insert(
          std::make_pair(country_calling_code, list_with_region_code));
    }
    // Calling code 1 regions form the North American Numbering Plan.
    if (country_calling_code == kNanpaCountryCode) {
      nanpa_regions_->insert(region_code);
    }
  }
  country_calling_code_to_region_code_map_->insert(
      country_calling_code_to_region_code_map_->begin(),
      country_calling_code_to_region_map.begin(),
      country_calling_code_to_region_map.end());
  // Keep the vector sorted by calling code so lookups can binary-search.
  std::sort(country_calling_code_to_region_code_map_->begin(),
            country_calling_code_to_region_code_map_->end(), OrderByFirst());
}
// Frees the heap-allocated region lists owned by the calling-code map; all
// other members are released by their smart pointers.
PhoneNumberUtil::~PhoneNumberUtil() {
  gtl::STLDeleteContainerPairSecondPointers(
      country_calling_code_to_region_code_map_->begin(),
      country_calling_code_to_region_code_map_->end());
}
void PhoneNumberUtil::GetSupportedRegions(std::set<std::string>* regions)
const {
DCHECK(regions);
for (absl::node_hash_map<std::string, PhoneMetadata>::const_iterator it =
region_to_metadata_map_->begin(); it != region_to_metadata_map_->end();
++it) {
regions->insert(it->first);
}
}
// Fills |calling_codes| with every country calling code that belongs to a
// non-geographical entity (e.g. international toll-free numbers).
void PhoneNumberUtil::GetSupportedGlobalNetworkCallingCodes(
    std::set<int>* calling_codes) const {
  DCHECK(calling_codes);
  for (const auto& code_and_metadata :
       *country_code_to_non_geographical_metadata_map_) {
    calling_codes->insert(code_and_metadata.first);
  }
}
// Fills |calling_codes| with every country calling code the library knows,
// geographical or not.
void PhoneNumberUtil::GetSupportedCallingCodes(
    std::set<int>* calling_codes) const {
  DCHECK(calling_codes);
  for (const auto& code_and_regions :
       *country_calling_code_to_region_code_map_) {
    calling_codes->insert(code_and_regions.first);
  }
}
void PhoneNumberUtil::GetSupportedTypesForRegion(
const std::string& region_code,
std::set<PhoneNumberType>* types) const {
DCHECK(types);
if (!IsValidRegionCode(region_code)) {
LOG(WARNING) << "Invalid or unknown region code provided: " << region_code;
return;
}
const PhoneMetadata* metadata = GetMetadataForRegion(region_code);
GetSupportedTypesForMetadata(*metadata, types);
}
// Fills |types| with the number types supported by the non-geographical
// entity identified by |country_calling_code|. Logs a warning and leaves
// |types| untouched when the calling code is unknown.
void PhoneNumberUtil::GetSupportedTypesForNonGeoEntity(
    int country_calling_code,
    std::set<PhoneNumberType>* types) const {
  DCHECK(types);
  const PhoneMetadata* metadata =
      GetMetadataForNonGeographicalRegion(country_calling_code);
  if (metadata != NULL) {
    GetSupportedTypesForMetadata(*metadata, types);
    return;
  }
  LOG(WARNING) << "Unknown country calling code for a non-geographical "
               << "entity provided: "
               << country_calling_code;
}
// Returns the process-wide PhoneNumberUtil singleton.
PhoneNumberUtil* PhoneNumberUtil::GetInstance() {
  return Singleton<PhoneNumberUtil>::GetInstance();
}
// Returns the regular-expression source string used to match extensions
// when finding numbers in text.
const std::string& PhoneNumberUtil::GetExtnPatternsForMatching() const {
  return reg_exps_->extn_patterns_for_matching_;
}
bool PhoneNumberUtil::StartsWithPlusCharsPattern(
const std::string& number) const {
const scoped_ptr<RegExpInput> number_string_piece(
reg_exps_->regexp_factory_->CreateInput(number));
return reg_exps_->plus_chars_pattern_->Consume(number_string_piece.get());
}
// Returns true when |s| consists solely of valid digit characters (possibly
// empty), as defined by the library's digits pattern.
bool PhoneNumberUtil::ContainsOnlyValidDigits(const std::string& s) const {
  return reg_exps_->digits_pattern_->FullMatch(s);
}
// Strips trailing characters that cannot end a phone number (anything that
// is not a letter, digit, or '#') from |number| in place. Clears the string
// entirely when it is not valid UTF-8.
void PhoneNumberUtil::TrimUnwantedEndChars(std::string* number) const {
  DCHECK(number);
  UnicodeText number_as_unicode;
  number_as_unicode.PointToUTF8(number->data(), static_cast<int>(number->size()));
  if (!number_as_unicode.UTF8WasValid()) {
    number->clear();
    return;
  }
  char current_char[5];  // Room for a 4-byte UTF-8 sequence plus NUL.
  int len;
  // Walk backwards from the end until a character that may legitimately end
  // a number is found.
  UnicodeText::const_reverse_iterator reverse_it(number_as_unicode.end());
  for (; reverse_it.base() != number_as_unicode.begin(); ++reverse_it) {
    len = reverse_it.get_utf8(current_char);
    current_char[len] = '\0';
    if (!reg_exps_->unwanted_end_char_pattern_->FullMatch(current_char)) {
      break;
    }
  }
  // Keep everything up to and including the last acceptable character.
  number->assign(UnicodeText::UTF8Substring(number_as_unicode.begin(),
                                            reverse_it.base()));
}
bool PhoneNumberUtil::IsFormatEligibleForAsYouTypeFormatter(
const std::string& format) const {
const RegExp& eligible_format_pattern = reg_exps_->regexp_cache_->GetRegExp(
StrCat("[", kValidPunctuation, "]*", "\\$1",
"[", kValidPunctuation, "]*", "(\\$\\d",
"[", kValidPunctuation, "]*)*"));
return eligible_format_pattern.FullMatch(format);
}
// Returns whether |national_prefix_formatting_rule| is empty or consists
// solely of the first group ("$1"), optionally wrapped in parentheses.
bool PhoneNumberUtil::FormattingRuleHasFirstGroupOnly(
    const std::string& national_prefix_formatting_rule) const {
  // Compile (and cache) the pattern before the empty check so the cache is
  // populated exactly as before.
  const RegExp& first_group_only_prefix_pattern =
      reg_exps_->regexp_cache_->GetRegExp("\\(?\\$1\\)?");
  if (national_prefix_formatting_rule.empty()) {
    return true;
  }
  return first_group_only_prefix_pattern.FullMatch(
      national_prefix_formatting_rule);
}
// Writes the national direct-dialing prefix of |region_code| into
// *national_prefix. When |strip_non_digits| is set, the wait-for-dial-tone
// marker '~' is removed. Leaves the output untouched for unknown regions.
void PhoneNumberUtil::GetNddPrefixForRegion(
    const std::string& region_code, bool strip_non_digits,
    std::string* national_prefix) const {
  DCHECK(national_prefix);
  const PhoneMetadata* metadata = GetMetadataForRegion(region_code);
  if (metadata == NULL) {
    LOG(WARNING) << "Invalid or unknown region code (" << region_code
                 << ") provided.";
    return;
  }
  *national_prefix = metadata->national_prefix();
  if (strip_non_digits) {
    // '~' denotes waiting for a dial tone and is not an actual digit.
    strrmm(national_prefix, "~");
  }
}
// Returns whether metadata exists for |region_code| — i.e. whether it is a
// region the library supports.
bool PhoneNumberUtil::IsValidRegionCode(const std::string& region_code) const {
  return region_to_metadata_map_->count(region_code) > 0;
}
bool PhoneNumberUtil::HasValidCountryCallingCode(
int country_calling_code) const {
IntRegionsPair target_pair;
target_pair.first = country_calling_code;
return (std::binary_search(country_calling_code_to_region_code_map_->begin(),
country_calling_code_to_region_code_map_->end(),
target_pair, OrderByFirst()));
}
// Returns the metadata for |region_code|, or NULL when the region is
// unknown. The returned pointer is owned by this object.
const PhoneMetadata* PhoneNumberUtil::GetMetadataForRegion(
    const std::string& region_code) const {
  auto entry = region_to_metadata_map_->find(region_code);
  return entry == region_to_metadata_map_->end() ? NULL : &entry->second;
}
// Returns the metadata for the non-geographical entity with the given
// calling code, or NULL when none exists. The pointer is owned by this
// object.
const PhoneMetadata* PhoneNumberUtil::GetMetadataForNonGeographicalRegion(
    int country_calling_code) const {
  auto entry =
      country_code_to_non_geographical_metadata_map_->find(
          country_calling_code);
  if (entry == country_code_to_non_geographical_metadata_map_->end()) {
    return NULL;
  }
  return &entry->second;
}
// Formats |number| in the requested |number_format|, writing the result
// into *formatted_number. Falls back to the raw input when the national
// number is zero, and to the bare national significant number when the
// country calling code is unknown.
// Fix: restores "&region_code" where HTML-entity corruption had turned it
// into "(R)ion_code", which did not compile.
void PhoneNumberUtil::Format(const PhoneNumber& number,
                             PhoneNumberFormat number_format,
                             std::string* formatted_number) const {
  DCHECK(formatted_number);
  if (number.national_number() == 0) {
    // No digits were captured: reproduce the raw input if we have one.
    const std::string& raw_input = number.raw_input();
    if (!raw_input.empty()) {
      formatted_number->assign(raw_input);
      return;
    }
  }
  int country_calling_code = number.country_code();
  std::string national_significant_number;
  GetNationalSignificantNumber(number, &national_significant_number);
  if (number_format == E164) {
    // E164 needs no pattern application: digits plus the calling code.
    formatted_number->assign(national_significant_number);
    PrefixNumberWithCountryCallingCode(country_calling_code, E164,
                                       formatted_number);
    return;
  }
  if (!HasValidCountryCallingCode(country_calling_code)) {
    formatted_number->assign(national_significant_number);
    return;
  }
  std::string region_code;
  GetRegionCodeForCountryCode(country_calling_code, &region_code);
  const PhoneMetadata* metadata =
      GetMetadataForRegionOrCallingCode(country_calling_code, region_code);
  FormatNsn(national_significant_number, *metadata, number_format,
            formatted_number);
  MaybeAppendFormattedExtension(number, *metadata, number_format,
                                formatted_number);
  PrefixNumberWithCountryCallingCode(country_calling_code, number_format,
                                     formatted_number);
}
// Formats |number| using the caller-supplied |user_defined_formats| instead
// of the metadata's own patterns. "$NP"/"$FG" placeholders in the chosen
// pattern's national-prefix rule are substituted before formatting.
// Fix: restores "&region_code" where HTML-entity corruption had turned it
// into "(R)ion_code", which did not compile.
void PhoneNumberUtil::FormatByPattern(
    const PhoneNumber& number,
    PhoneNumberFormat number_format,
    const RepeatedPtrField<NumberFormat>& user_defined_formats,
    std::string* formatted_number) const {
  DCHECK(formatted_number);
  int country_calling_code = number.country_code();
  std::string national_significant_number;
  GetNationalSignificantNumber(number, &national_significant_number);
  if (!HasValidCountryCallingCode(country_calling_code)) {
    formatted_number->assign(national_significant_number);
    return;
  }
  std::string region_code;
  GetRegionCodeForCountryCode(country_calling_code, &region_code);
  const PhoneMetadata* metadata =
      GetMetadataForRegionOrCallingCode(country_calling_code, region_code);
  const NumberFormat* formatting_pattern =
      ChooseFormattingPatternForNumber(user_defined_formats,
                                       national_significant_number);
  if (!formatting_pattern) {
    // No user pattern applies: fall back to the plain significant number.
    formatted_number->assign(national_significant_number);
  } else {
    NumberFormat num_format_copy;
    // Work on a copy so the caller's patterns are never mutated.
    num_format_copy.MergeFrom(*formatting_pattern);
    std::string national_prefix_formatting_rule(
        formatting_pattern->national_prefix_formatting_rule());
    if (!national_prefix_formatting_rule.empty()) {
      const std::string& national_prefix = metadata->national_prefix();
      if (!national_prefix.empty()) {
        // Substitute the placeholders: $NP = national prefix, $FG = first
        // group.
        GlobalReplaceSubstring("$NP", national_prefix,
                               &national_prefix_formatting_rule);
        GlobalReplaceSubstring("$FG", "$1", &national_prefix_formatting_rule);
        num_format_copy.set_national_prefix_formatting_rule(
            national_prefix_formatting_rule);
      } else {
        // No national prefix in this region: drop the rule entirely.
        num_format_copy.clear_national_prefix_formatting_rule();
      }
    }
    FormatNsnUsingPattern(national_significant_number, num_format_copy,
                          number_format, formatted_number);
  }
  MaybeAppendFormattedExtension(number, *metadata, NATIONAL, formatted_number);
  PrefixNumberWithCountryCallingCode(country_calling_code, number_format,
                                     formatted_number);
}
void PhoneNumberUtil::FormatNationalNumberWithCarrierCode(
const PhoneNumber& number, const std::string& carrier_code,
std::string* formatted_number) const {
int country_calling_code = number.country_code();
std::string national_significant_number;
GetNationalSignificantNumber(number, &national_significant_number);
if (!HasValidCountryCallingCode(country_calling_code)) {
formatted_number->assign(national_significant_number);
return;
}
std::string region_code;
GetRegionCodeForCountryCode(country_calling_code, ®ion_code);
const PhoneMetadata* metadata =
GetMetadataForRegionOrCallingCode(country_calling_code, region_code);
FormatNsnWithCarrier(national_significant_number, *metadata, NATIONAL,
carrier_code, formatted_number);
MaybeAppendFormattedExtension(number, *metadata, NATIONAL, formatted_number);
PrefixNumberWithCountryCallingCode(country_calling_code, NATIONAL,
formatted_number);
}
// Returns the metadata for |region_code|, or — when the region code is the
// non-geographical sentinel — the metadata keyed by |country_calling_code|.
const PhoneMetadata* PhoneNumberUtil::GetMetadataForRegionOrCallingCode(
    int country_calling_code, const std::string& region_code) const {
  if (region_code == kRegionCodeForNonGeoEntity) {
    return GetMetadataForNonGeographicalRegion(country_calling_code);
  }
  return GetMetadataForRegion(region_code);
}
// Formats |number| in NATIONAL format using the carrier code stored on the
// number itself when present, otherwise |fallback_carrier_code|.
void PhoneNumberUtil::FormatNationalNumberWithPreferredCarrierCode(
    const PhoneNumber& number,
    const std::string& fallback_carrier_code,
    std::string* formatted_number) const {
  const std::string& preferred = number.preferred_domestic_carrier_code();
  const std::string& carrier_code =
      preferred.empty() ? fallback_carrier_code : preferred;
  FormatNationalNumberWithCarrierCode(number, carrier_code, formatted_number);
}
// Produces a string that can be dialed from a mobile phone in the region
// |calling_from|: national or international format depending on the regions
// involved, with various per-country special cases. When |with_formatting|
// is false the result is reduced to diallable characters only.
// Fix: restores "&region_code" where HTML-entity corruption had turned it
// into "(R)ion_code", which did not compile.
void PhoneNumberUtil::FormatNumberForMobileDialing(
    const PhoneNumber& number,
    const std::string& calling_from,
    bool with_formatting,
    std::string* formatted_number) const {
  int country_calling_code = number.country_code();
  if (!HasValidCountryCallingCode(country_calling_code)) {
    formatted_number->assign(number.has_raw_input() ? number.raw_input() : "");
    return;
  }
  formatted_number->assign("");
  // Extensions cannot be dialed, so drop them before formatting.
  PhoneNumber number_no_extension(number);
  number_no_extension.clear_extension();
  std::string region_code;
  GetRegionCodeForCountryCode(country_calling_code, &region_code);
  PhoneNumberType number_type = GetNumberType(number_no_extension);
  bool is_valid_number = (number_type != UNKNOWN);
  if (calling_from == region_code) {
    bool is_fixed_line_or_mobile =
        (number_type == FIXED_LINE) || (number_type == MOBILE) ||
        (number_type == FIXED_LINE_OR_MOBILE);
    if ((region_code == "BR") && (is_fixed_line_or_mobile)) {
      // Brazilian domestic dialing requires a carrier code; without one we
      // cannot produce a dialable string.
      if (!number_no_extension.preferred_domestic_carrier_code().empty()) {
        FormatNationalNumberWithPreferredCarrierCode(number_no_extension, "",
                                                     formatted_number);
      } else {
        formatted_number->assign("");
      }
    } else if (country_calling_code == kNanpaCountryCode) {
      // NANPA: prefer international format so the number works from both
      // landlines and mobiles, but avoid it for short (local-only) numbers.
      const PhoneMetadata* region_metadata = GetMetadataForRegion(calling_from);
      std::string national_number;
      GetNationalSignificantNumber(number_no_extension, &national_number);
      if (CanBeInternationallyDialled(number_no_extension) &&
          TestNumberLength(national_number, *region_metadata) != TOO_SHORT) {
        Format(number_no_extension, INTERNATIONAL, formatted_number);
      } else {
        Format(number_no_extension, NATIONAL, formatted_number);
      }
    } else {
      // Non-geographical numbers — and fixed-line/mobile numbers in MX, CL
      // and UZ — must be dialed in international format even domestically.
      if ((region_code == kRegionCodeForNonGeoEntity ||
           ((region_code == "MX" ||
             region_code == "CL" ||
             region_code == "UZ") &&
            is_fixed_line_or_mobile)) &&
          CanBeInternationallyDialled(number_no_extension)) {
        Format(number_no_extension, INTERNATIONAL, formatted_number);
      } else {
        Format(number_no_extension, NATIONAL, formatted_number);
      }
    }
  } else if (is_valid_number &&
             CanBeInternationallyDialled(number_no_extension)) {
    // Cross-region call: E164 dials everywhere; INTERNATIONAL is prettier.
    with_formatting
        ? Format(number_no_extension, INTERNATIONAL, formatted_number)
        : Format(number_no_extension, E164, formatted_number);
    return;
  }
  if (!with_formatting) {
    NormalizeDiallableCharsOnly(formatted_number);
  }
}
// Formats |number| for dialing from |calling_from|: national format for
// same-country calls (with special NANPA handling), otherwise the caller
// region's international dialing prefix followed by the number.
// Fix: restores "&region_code" where HTML-entity corruption had turned it
// into "(R)ion_code", which did not compile.
void PhoneNumberUtil::FormatOutOfCountryCallingNumber(
    const PhoneNumber& number, const std::string& calling_from,
    std::string* formatted_number) const {
  DCHECK(formatted_number);
  if (!IsValidRegionCode(calling_from)) {
    VLOG(1) << "Trying to format number from invalid region " << calling_from
            << ". International formatting applied.";
    Format(number, INTERNATIONAL, formatted_number);
    return;
  }
  int country_code = number.country_code();
  std::string national_significant_number;
  GetNationalSignificantNumber(number, &national_significant_number);
  if (!HasValidCountryCallingCode(country_code)) {
    formatted_number->assign(national_significant_number);
    return;
  }
  if (country_code == kNanpaCountryCode) {
    if (IsNANPACountry(calling_from)) {
      // Within NANPA, prefix the national format with the country code
      // (e.g. "1 415 ...") rather than using an IDD.
      Format(number, NATIONAL, formatted_number);
      formatted_number->insert(0, StrCat(country_code, " "));
      return;
    }
  } else if (country_code == GetCountryCodeForValidRegion(calling_from)) {
    // Same calling code: a domestic call, so use national format.
    Format(number, NATIONAL, formatted_number);
    return;
  }
  const PhoneMetadata* metadata_calling_from =
      GetMetadataForRegion(calling_from);
  const std::string& international_prefix =
      metadata_calling_from->international_prefix();
  // Use an explicit IDD only when the region prefers one, or when it has a
  // single unambiguous prefix; otherwise fall back to the '+' form.
  std::string international_prefix_for_formatting;
  if (metadata_calling_from->has_preferred_international_prefix()) {
    international_prefix_for_formatting =
        metadata_calling_from->preferred_international_prefix();
  } else if (reg_exps_->single_international_prefix_->FullMatch(
                 international_prefix)) {
    international_prefix_for_formatting = international_prefix;
  }
  std::string region_code;
  GetRegionCodeForCountryCode(country_code, &region_code);
  const PhoneMetadata* metadata_for_region =
      GetMetadataForRegionOrCallingCode(country_code, region_code);
  FormatNsn(national_significant_number, *metadata_for_region, INTERNATIONAL,
            formatted_number);
  MaybeAppendFormattedExtension(number, *metadata_for_region, INTERNATIONAL,
                                formatted_number);
  if (!international_prefix_for_formatting.empty()) {
    formatted_number->insert(
        0, StrCat(international_prefix_for_formatting, " ", country_code, " "));
  } else {
    PrefixNumberWithCountryCallingCode(country_code, INTERNATIONAL,
                                       formatted_number);
  }
}
void PhoneNumberUtil::FormatInOriginalFormat(
const PhoneNumber& number, const std::string& region_calling_from,
std::string* formatted_number) const {
DCHECK(formatted_number);
if (number.has_raw_input() && !HasFormattingPatternForNumber(number)) {
formatted_number->assign(number.raw_input());
return;
}
if (!number.has_country_code_source()) {
Format(number, NATIONAL, formatted_number);
return;
}
switch (number.country_code_source()) {
case PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN:
Format(number, INTERNATIONAL, formatted_number);
break;
case PhoneNumber::FROM_NUMBER_WITH_IDD:
FormatOutOfCountryCallingNumber(number, region_calling_from,
formatted_number);
break;
case PhoneNumber::FROM_NUMBER_WITHOUT_PLUS_SIGN:
Format(number, INTERNATIONAL, formatted_number);
formatted_number->erase(formatted_number->begin());
break;
case PhoneNumber::FROM_DEFAULT_COUNTRY:
default:
std::string region_code;
GetRegionCodeForCountryCode(number.country_code(), ®ion_code);
std::string national_prefix;
GetNddPrefixForRegion(region_code, true ,
&national_prefix);
if (national_prefix.empty()) {
Format(number, NATIONAL, formatted_number);
break;
}
if (RawInputContainsNationalPrefix(number.raw_input(), national_prefix,
region_code)) {
Format(number, NATIONAL, formatted_number);
break;
}
const PhoneMetadata* metadata = GetMetadataForRegion(region_code);
std::string national_number;
GetNationalSignificantNumber(number, &national_number);
const NumberFormat* format_rule =
ChooseFormattingPatternForNumber(metadata->number_format(),
national_number);
if (!format_rule) {
Format(number, NATIONAL, formatted_number);
break;
}
std::string candidate_national_prefix_rule(
format_rule->national_prefix_formatting_rule());
if (!candidate_national_prefix_rule.empty()) {
size_t index_of_first_group = candidate_national_prefix_rule.find("$1");
if (index_of_first_group == std::string::npos) {
LOG(ERROR) << "First group missing in national prefix rule: "
<< candidate_national_prefix_rule;
Format(number, NATIONAL, formatted_number);
break;
}
candidate_national_prefix_rule.erase(index_of_first_group);
NormalizeDigitsOnly(&candidate_national_prefix_rule);
}
if (candidate_national_prefix_rule.empty()) {
Format(number, NATIONAL, formatted_number);
break;
}
RepeatedPtrField<NumberFormat> number_formats;
NumberFormat* number_format = number_formats.Add();
number_format->MergeFrom(*format_rule);
number_format->clear_national_prefix_formatting_rule();
FormatByPattern(number, NATIONAL, number_formats, formatted_number);
break;
}
if (!formatted_number->empty() && !number.raw_input().empty()) {
std::string normalized_formatted_number(*formatted_number);
NormalizeDiallableCharsOnly(&normalized_formatted_number);
std::string normalized_raw_input(number.raw_input());
NormalizeDiallableCharsOnly(&normalized_raw_input);
if (normalized_formatted_number != normalized_raw_input) {
formatted_number->assign(number.raw_input());
}
}
}
// Returns whether |raw_input|, reduced to digits, starts with
// |national_prefix| AND the remainder still parses as a valid number for
// |region_code| — i.e. whether the prefix was genuinely dialed.
bool PhoneNumberUtil::RawInputContainsNationalPrefix(
    const std::string& raw_input, const std::string& national_prefix,
    const std::string& region_code) const {
  std::string normalized_national_number(raw_input);
  NormalizeDigitsOnly(&normalized_national_number);
  if (!HasPrefixString(normalized_national_number, national_prefix)) {
    return false;
  }
  // The prefix counts only if the remainder is itself a valid number;
  // otherwise those digits were probably part of the number proper.
  PhoneNumber number_without_national_prefix;
  if (Parse(normalized_national_number.substr(national_prefix.length()),
            region_code, &number_without_national_prefix) != NO_PARSING_ERROR) {
    return false;
  }
  return IsValidNumber(number_without_national_prefix);
}
// Returns true if a formatting pattern exists for this number's national
// significant number in the metadata of its country calling code.
bool PhoneNumberUtil::HasFormattingPatternForNumber(
    const PhoneNumber& number) const {
  int country_calling_code = number.country_code();
  std::string region_code;
  GetRegionCodeForCountryCode(country_calling_code, &region_code);
  const PhoneMetadata* metadata =
      GetMetadataForRegionOrCallingCode(country_calling_code, region_code);
  if (!metadata) {
    return false;
  }
  std::string national_number;
  GetNationalSignificantNumber(number, &national_number);
  const NumberFormat* format_rule =
      ChooseFormattingPatternForNumber(metadata->number_format(),
                                       national_number);
  // Make the pointer-to-bool conversion explicit rather than relying on the
  // implicit conversion of the raw pointer.
  return format_rule != nullptr;
}
// Formats a number for out-of-country dialing while keeping any alpha
// characters (e.g. vanity letters) that were present in the raw input.
// Falls back to FormatOutOfCountryCallingNumber when no raw input is stored.
void PhoneNumberUtil::FormatOutOfCountryKeepingAlphaChars(
    const PhoneNumber& number, const std::string& calling_from,
    std::string* formatted_number) const {
  // Without raw input there are no alpha characters to preserve.
  if (number.raw_input().empty()) {
    FormatOutOfCountryCallingNumber(number, calling_from, formatted_number);
    return;
  }
  int country_code = number.country_code();
  if (!HasValidCountryCallingCode(country_code)) {
    formatted_number->assign(number.raw_input());
    return;
  }
  // Strip grouping symbols and any leading plus from the raw input.
  std::string raw_input_copy(number.raw_input());
  NormalizeHelper(reg_exps_->all_plus_number_grouping_symbols_, true,
                  &raw_input_copy);
  std::string national_number;
  GetNationalSignificantNumber(number, &national_number);
  if (national_number.length() > 3) {
    // Drop any dialling-prefix characters that precede the first three
    // digits of the national significant number in the raw input.
    size_t first_national_number_digit =
        raw_input_copy.find(national_number.substr(0, 3));
    if (first_national_number_digit != std::string::npos) {
      raw_input_copy = raw_input_copy.substr(first_national_number_digit);
    }
  }
  const PhoneMetadata* metadata = GetMetadataForRegion(calling_from);
  if (country_code == kNanpaCountryCode) {
    // NANPA-internal calls: "1 <raw input>".
    if (IsNANPACountry(calling_from)) {
      StrAppend(formatted_number, country_code, " ", raw_input_copy);
      return;
    }
  } else if (metadata &&
             country_code == GetCountryCodeForValidRegion(calling_from)) {
    // Domestic call: reformat using a pattern relaxed to a single loose
    // group so alpha characters survive.
    const NumberFormat* formatting_pattern =
        ChooseFormattingPatternForNumber(metadata->number_format(),
                                         national_number);
    if (!formatting_pattern) {
      formatted_number->assign(raw_input_copy);
      return;
    }
    NumberFormat new_format;
    new_format.MergeFrom(*formatting_pattern);
    // Match the whole NSN as "first digits + rest" so the output keeps the
    // raw input's characters verbatim.
    new_format.set_pattern("(\\d+)(.*)");
    new_format.set_format("$1$2");
    FormatNsnUsingPattern(raw_input_copy, new_format, NATIONAL,
                          formatted_number);
    return;
  }
  std::string international_prefix_for_formatting;
  if (metadata) {
    // Use the international prefix only when it is unambiguous; otherwise
    // fall back to the preferred one from metadata.
    const std::string& international_prefix = metadata->international_prefix();
    international_prefix_for_formatting =
        reg_exps_->single_international_prefix_->FullMatch(international_prefix)
        ? international_prefix
        : metadata->preferred_international_prefix();
  }
  if (!international_prefix_for_formatting.empty()) {
    StrAppend(formatted_number, international_prefix_for_formatting, " ",
              country_code, " ", raw_input_copy);
  } else {
    if (!IsValidRegionCode(calling_from)) {
      VLOG(1) << "Trying to format number from invalid region " << calling_from
              << ". International formatting applied.";
    }
    formatted_number->assign(raw_input_copy);
    PrefixNumberWithCountryCallingCode(country_code, INTERNATIONAL,
                                       formatted_number);
  }
}
// Returns the first format in available_formats whose leading-digits pattern
// (last one, if any) matches the start of national_number and whose full
// pattern matches the whole number; NULL when none matches.
const NumberFormat* PhoneNumberUtil::ChooseFormattingPatternForNumber(
    const RepeatedPtrField<NumberFormat>& available_formats,
    const std::string& national_number) const {
  for (const NumberFormat& format : available_formats) {
    const int leading_digits_count = format.leading_digits_pattern_size();
    if (leading_digits_count > 0) {
      const scoped_ptr<RegExpInput> input(
          reg_exps_->regexp_factory_->CreateInput(national_number));
      // Only the last (most specific) leading-digits pattern is checked.
      if (!reg_exps_->regexp_cache_->GetRegExp(
              format.leading_digits_pattern(leading_digits_count - 1)).Consume(
                  input.get())) {
        continue;
      }
    }
    const RegExp& full_pattern(
        reg_exps_->regexp_cache_->GetRegExp(format.pattern()));
    if (full_pattern.FullMatch(national_number)) {
      return &format;
    }
  }
  return NULL;
}
// Formats a national significant number with the given pattern, optionally
// splicing in a domestic carrier code. The order of the Replace calls below
// matters: the carrier code is substituted into the carrier-code rule first,
// and that result then replaces the first group in the format rule.
void PhoneNumberUtil::FormatNsnUsingPatternWithCarrier(
    const std::string& national_number, const NumberFormat& formatting_pattern,
    PhoneNumberUtil::PhoneNumberFormat number_format,
    const std::string& carrier_code, std::string* formatted_number) const {
  DCHECK(formatted_number);
  std::string number_format_rule(formatting_pattern.format());
  if (number_format == PhoneNumberUtil::NATIONAL &&
      carrier_code.length() > 0 &&
      formatting_pattern.domestic_carrier_code_formatting_rule().length() > 0) {
    // Substitute the carrier code into the carrier formatting rule, then use
    // the result as the first group of the number format rule.
    std::string carrier_code_formatting_rule =
        formatting_pattern.domestic_carrier_code_formatting_rule();
    reg_exps_->carrier_code_pattern_->Replace(&carrier_code_formatting_rule,
                                              carrier_code);
    reg_exps_->first_group_capturing_pattern_->
        Replace(&number_format_rule, carrier_code_formatting_rule);
  } else {
    // No carrier code: apply the national prefix formatting rule if present.
    std::string national_prefix_formatting_rule =
        formatting_pattern.national_prefix_formatting_rule();
    if (number_format == PhoneNumberUtil::NATIONAL &&
        national_prefix_formatting_rule.length() > 0) {
      reg_exps_->first_group_capturing_pattern_->Replace(
          &number_format_rule, national_prefix_formatting_rule);
    }
  }
  formatted_number->assign(national_number);
  const RegExp& pattern_to_match(
      reg_exps_->regexp_cache_->GetRegExp(formatting_pattern.pattern()));
  pattern_to_match.GlobalReplace(formatted_number, number_format_rule);
  if (number_format == RFC3966) {
    // RFC 3966: no leading separator, and all separators become "-".
    const scoped_ptr<RegExpInput> number(
        reg_exps_->regexp_factory_->CreateInput(*formatted_number));
    if (reg_exps_->separator_pattern_->Consume(number.get())) {
      formatted_number->assign(number->ToString());
    }
    reg_exps_->separator_pattern_->GlobalReplace(formatted_number, "-");
  }
}
// Convenience overload: formats a national significant number with the given
// pattern and no domestic carrier code.
void PhoneNumberUtil::FormatNsnUsingPattern(
    const std::string& national_number, const NumberFormat& formatting_pattern,
    PhoneNumberUtil::PhoneNumberFormat number_format,
    std::string* formatted_number) const {
  DCHECK(formatted_number);
  // Delegate with an explicitly empty carrier code.
  FormatNsnUsingPatternWithCarrier(national_number, formatting_pattern,
                                   number_format, std::string(),
                                   formatted_number);
}
// Convenience overload: formats a national significant number without a
// domestic carrier code.
void PhoneNumberUtil::FormatNsn(const std::string& number,
                                const PhoneMetadata& metadata,
                                PhoneNumberFormat number_format,
                                std::string* formatted_number) const {
  DCHECK(formatted_number);
  // Delegate with an explicitly empty carrier code.
  FormatNsnWithCarrier(number, metadata, number_format, std::string(),
                       formatted_number);
}
// Formats a national significant number, choosing between the national and
// international format lists in the metadata and optionally inserting a
// domestic carrier code. Falls back to the unformatted number when no
// pattern matches.
void PhoneNumberUtil::FormatNsnWithCarrier(
    const std::string& number, const PhoneMetadata& metadata,
    PhoneNumberFormat number_format, const std::string& carrier_code,
    std::string* formatted_number) const {
  DCHECK(formatted_number);
  // Bind a const reference instead of copying the whole RepeatedPtrField:
  // the original declaration made a deep copy of every NumberFormat message
  // on each call. When no intl_number_format is defined, or when formatting
  // NATIONAL, the (national) number_format list is used for all formats.
  const RepeatedPtrField<NumberFormat>& available_formats =
      (metadata.intl_number_format_size() == 0 || number_format == NATIONAL)
          ? metadata.number_format()
          : metadata.intl_number_format();
  const NumberFormat* formatting_pattern =
      ChooseFormattingPatternForNumber(available_formats, number);
  if (!formatting_pattern) {
    // No pattern applies: leave the number as-is.
    formatted_number->assign(number);
  } else {
    FormatNsnUsingPatternWithCarrier(number, *formatting_pattern, number_format,
                                     carrier_code, formatted_number);
  }
}
// Appends the number's extension (if any) to formatted_number, using the
// RFC 3966 ";ext=" prefix, the region's preferred prefix, or the default.
void PhoneNumberUtil::MaybeAppendFormattedExtension(
    const PhoneNumber& number,
    const PhoneMetadata& metadata,
    PhoneNumberFormat number_format,
    std::string* formatted_number) const {
  DCHECK(formatted_number);
  // Nothing to append when there is no (non-empty) extension.
  if (!number.has_extension() || number.extension().length() == 0) {
    return;
  }
  if (number_format == RFC3966) {
    StrAppend(formatted_number, kRfc3966ExtnPrefix, number.extension());
    return;
  }
  if (metadata.has_preferred_extn_prefix()) {
    StrAppend(formatted_number, metadata.preferred_extn_prefix(),
              number.extension());
  } else {
    StrAppend(formatted_number, kDefaultExtnPrefix, number.extension());
  }
}
// Returns true if region_code belongs to the North American Numbering Plan.
bool PhoneNumberUtil::IsNANPACountry(const std::string& region_code) const {
  return nanpa_regions_->count(region_code) > 0;
}
// Appends to region_codes every region that shares country_calling_code.
// The map is a vector of (code, region-list) pairs sorted by code, so a
// binary search via equal_range locates the (unique) matching entry.
void PhoneNumberUtil::GetRegionCodesForCountryCallingCode(
    int country_calling_code,
    std::list<std::string>* region_codes) const {
  DCHECK(region_codes);
  // Only .first is compared by OrderByFirst; .second is left default.
  IntRegionsPair target_pair;
  target_pair.first = country_calling_code;
  typedef std::vector<IntRegionsPair>::const_iterator ConstIterator;
  std::pair<ConstIterator, ConstIterator> range =
      std::equal_range(country_calling_code_to_region_code_map_->begin(),
                       country_calling_code_to_region_code_map_->end(),
                       target_pair, OrderByFirst());
  if (range.first != range.second) {
    // At most one entry per calling code; copy its region list.
    region_codes->insert(region_codes->begin(),
                         range.first->second->begin(),
                         range.first->second->end());
  }
}
// Sets region_code to the main region for country_calling_code, or to the
// unknown-region sentinel when the code maps to no region.
void PhoneNumberUtil::GetRegionCodeForCountryCode(
    int country_calling_code,
    std::string* region_code) const {
  DCHECK(region_code);
  std::list<std::string> region_codes;
  GetRegionCodesForCountryCallingCode(country_calling_code, &region_codes);
  if (region_codes.empty()) {
    *region_code = RegionCode::GetUnknown();
  } else {
    // The first entry is the main region for this calling code.
    *region_code = region_codes.front();
  }
}
// Determines the region a number belongs to. When several regions share the
// number's country calling code, the per-region metadata is consulted.
void PhoneNumberUtil::GetRegionCodeForNumber(
    const PhoneNumber& number, std::string* region_code) const {
  DCHECK(region_code);
  std::list<std::string> region_codes;
  GetRegionCodesForCountryCallingCode(number.country_code(), &region_codes);
  if (region_codes.empty()) {
    VLOG(1) << "Missing/invalid country calling code ("
            << number.country_code() << ")";
    *region_code = RegionCode::GetUnknown();
  } else if (region_codes.size() == 1) {
    *region_code = region_codes.front();
  } else {
    // Several candidate regions: disambiguate via leading digits / type.
    GetRegionCodeForNumberFromRegionList(number, region_codes, region_code);
  }
}
void PhoneNumberUtil::GetRegionCodeForNumberFromRegionList(
const PhoneNumber& number,
const std::list<std::string>& region_codes,
std::string* region_code) const {
DCHECK(region_code);
std::string national_number;
GetNationalSignificantNumber(number, &national_number);
for (std::list<std::string>::const_iterator it = region_codes.begin();
it != region_codes.end(); ++it) {
const PhoneMetadata* metadata = GetMetadataForRegion(*it);
if (metadata->has_leading_digits()) {
const scoped_ptr<RegExpInput> number(
reg_exps_->regexp_factory_->CreateInput(national_number));
if (reg_exps_->regexp_cache_->
GetRegExp(metadata->leading_digits()).Consume(number.get())) {
*region_code = *it;
return;
}
} else if (GetNumberTypeHelper(national_number, *metadata) != UNKNOWN) {
*region_code = *it;
return;
}
}
*region_code = RegionCode::GetUnknown();
}
// Returns the country calling code for region_code, or 0 (with a warning)
// for an invalid/unknown region.
int PhoneNumberUtil::GetCountryCodeForRegion(const std::string& region_code) const {
  if (IsValidRegionCode(region_code)) {
    return GetCountryCodeForValidRegion(region_code);
  }
  LOG(WARNING) << "Invalid or unknown region code (" << region_code
               << ") provided.";
  return 0;
}
int PhoneNumberUtil::GetCountryCodeForValidRegion(
const std::string& region_code) const {
const PhoneMetadata* metadata = GetMetadataForRegion(region_code);
return metadata->country_code();
}
// Fills number with a valid fixed-line example number for region_code.
// Returns false for invalid regions or when no example is available.
bool PhoneNumberUtil::GetExampleNumber(const std::string& region_code,
                                       PhoneNumber* number) const {
  DCHECK(number);
  return GetExampleNumberForType(region_code, FIXED_LINE, number);
}
// Fills number with an INVALID number for region_code, built by truncating
// the region's fixed-line example number one digit at a time until it stops
// being valid. Returns false when no such number can be produced.
bool PhoneNumberUtil::GetInvalidExampleNumber(const std::string& region_code,
                                              PhoneNumber* number) const {
  DCHECK(number);
  if (!IsValidRegionCode(region_code)) {
    LOG(WARNING) << "Invalid or unknown region code (" << region_code
                 << ") provided.";
    return false;
  }
  const PhoneMetadata* region_metadata = GetMetadataForRegion(region_code);
  const PhoneNumberDesc* desc =
      GetNumberDescByType(*region_metadata, FIXED_LINE);
  if (!desc->has_example_number()) {
    return false;
  }
  const std::string& example_number = desc->example_number();
  // Guard against an empty example number: example_number.length() - 1 is
  // size_t arithmetic and would wrap around to SIZE_MAX below.
  if (example_number.empty()) {
    return false;
  }
  // Try progressively shorter prefixes, starting with one digit removed,
  // down to the minimum NSN length.
  for (size_t phone_number_length = example_number.length() - 1;
       phone_number_length >= kMinLengthForNsn;
       phone_number_length--) {
    std::string number_to_try = example_number.substr(0, phone_number_length);
    PhoneNumber possibly_valid_number;
    // NOTE(review): the Parse result is deliberately not checked here —
    // truncations of a valid example are assumed to at least parse.
    Parse(number_to_try, region_code, &possibly_valid_number);
    if (!IsValidNumber(possibly_valid_number)) {
      number->MergeFrom(possibly_valid_number);
      return true;
    }
  }
  return false;
}
// Fills number with a valid example number of the given type for
// region_code. Returns false for invalid regions, when no example exists,
// or when the stored example fails to parse.
bool PhoneNumberUtil::GetExampleNumberForType(
    const std::string& region_code,
    PhoneNumberUtil::PhoneNumberType type,
    PhoneNumber* number) const {
  DCHECK(number);
  if (!IsValidRegionCode(region_code)) {
    LOG(WARNING) << "Invalid or unknown region code (" << region_code
                 << ") provided.";
    return false;
  }
  const PhoneMetadata* region_metadata = GetMetadataForRegion(region_code);
  const PhoneNumberDesc* desc = GetNumberDescByType(*region_metadata, type);
  if (!desc || !desc->has_example_number()) {
    return false;
  }
  ErrorType success = Parse(desc->example_number(), region_code, number);
  if (success == NO_PARSING_ERROR) {
    return true;
  }
  // A parse failure here indicates inconsistent metadata.
  LOG(ERROR) << "Error parsing example number ("
             << static_cast<int>(success) << ")";
  return false;
}
// Fills number with an example number of the given type from ANY supported
// region, falling back to non-geographical entities. Returns false when no
// example of this type exists anywhere in the metadata.
bool PhoneNumberUtil::GetExampleNumberForType(
    PhoneNumberUtil::PhoneNumberType type,
    PhoneNumber* number) const {
  DCHECK(number);
  // First pass: every geographical region.
  std::set<std::string> regions;
  GetSupportedRegions(&regions);
  for (const std::string& region_code : regions) {
    if (GetExampleNumberForType(region_code, type, number)) {
      return true;
    }
  }
  // Second pass: non-geographical entities, addressed by calling code only.
  std::set<int> global_network_calling_codes;
  GetSupportedGlobalNetworkCallingCodes(&global_network_calling_codes);
  for (int country_calling_code : global_network_calling_codes) {
    const PhoneMetadata* metadata =
        GetMetadataForNonGeographicalRegion(country_calling_code);
    const PhoneNumberDesc* desc = GetNumberDescByType(*metadata, type);
    if (!desc->has_example_number()) {
      continue;
    }
    ErrorType success = Parse(StrCat(kPlusSign,
                                     country_calling_code,
                                     desc->example_number()),
                              RegionCode::GetUnknown(), number);
    if (success == NO_PARSING_ERROR) {
      return true;
    }
    LOG(ERROR) << "Error parsing example number ("
               << static_cast<int>(success) << ")";
  }
  return false;
}
bool PhoneNumberUtil::GetExampleNumberForNonGeoEntity(
int country_calling_code, PhoneNumber* number) const {
DCHECK(number);
const PhoneMetadata* metadata =
GetMetadataForNonGeographicalRegion(country_calling_code);
if (metadata) {
const int kNumberTypes = 7;
PhoneNumberDesc types[kNumberTypes] = {
metadata->mobile(), metadata->toll_free(), metadata->shared_cost(),
metadata->voip(), metadata->voicemail(), metadata->uan(),
metadata->premium_rate()};
for (int i = 0; i < kNumberTypes; ++i) {
if (types[i].has_example_number()) {
ErrorType success = Parse(StrCat(kPlusSign,
SimpleItoa(country_calling_code),
types[i].example_number()),
RegionCode::GetUnknown(), number);
if (success == NO_PARSING_ERROR) {
return true;
} else {
LOG(ERROR) << "Error parsing example number ("
<< static_cast<int>(success) << ")";
}
}
}
} else {
LOG(WARNING) << "Invalid or unknown country calling code provided: "
<< country_calling_code;
}
return false;
}
// Parses number_to_parse into number, using default_region to interpret
// numbers written without a country code. Region validity is checked; the
// raw input is NOT stored (see ParseAndKeepRawInput for that).
PhoneNumberUtil::ErrorType PhoneNumberUtil::Parse(
    absl::string_view number_to_parse, const std::string& default_region,
    PhoneNumber* number) const {
  DCHECK(number);
  return ParseHelper(number_to_parse, default_region, false, true, number);
}
// Same as Parse, but additionally records the raw input (and the carrier
// code / country-code source) in the resulting PhoneNumber.
PhoneNumberUtil::ErrorType PhoneNumberUtil::ParseAndKeepRawInput(
    absl::string_view number_to_parse, const std::string& default_region,
    PhoneNumber* number) const {
  DCHECK(number);
  return ParseHelper(number_to_parse, default_region, true, true, number);
}
// Returns true when parsing can proceed: either default_region is a valid
// region, or the number itself starts with a plus sign (so the country code
// can be derived from the number).
bool PhoneNumberUtil::CheckRegionForParsing(
    const std::string& number_to_parse,
    const std::string& default_region) const {
  if (IsValidRegionCode(default_region) || number_to_parse.empty()) {
    return true;
  }
  const scoped_ptr<RegExpInput> number(
      reg_exps_->regexp_factory_->CreateInput(number_to_parse));
  return reg_exps_->plus_chars_pattern_->Consume(number.get());
}
// Extracts the RFC 3966 "phone-context" parameter value starting at
// index_of_phone_context. Returns nullopt when the parameter is absent,
// "" when it is present but empty, otherwise the value up to the next ';'.
absl::optional<absl::string_view> PhoneNumberUtil::ExtractPhoneContext(
    const absl::string_view number_to_extract_from,
    const size_t index_of_phone_context) const {
  if (index_of_phone_context == std::string::npos) {
    return absl::nullopt;
  }
  const size_t value_start =
      index_of_phone_context + strlen(kRfc3966PhoneContext);
  // ";phone-context=" at the very end of the string means an empty value.
  if (value_start >= number_to_extract_from.length()) {
    return "";
  }
  const size_t value_end = number_to_extract_from.find(';', value_start);
  if (value_end == std::string::npos) {
    return number_to_extract_from.substr(value_start);
  }
  return number_to_extract_from.substr(value_start, value_end - value_start);
}
// Validates an extracted phone-context value per RFC 3966: either a global
// number (digits with a leading '+') or a domain name. An absent parameter
// is valid; a present-but-empty one is not.
bool PhoneNumberUtil::IsPhoneContextValid(
    const absl::optional<absl::string_view> phone_context) const {
  if (!phone_context.has_value()) {
    return true;
  }
  const std::string context_value{phone_context.value()};
  if (context_value.empty()) {
    return false;
  }
  return reg_exps_->rfc3966_global_number_digits_pattern_->FullMatch(
             context_value) ||
         reg_exps_->rfc3966_domainname_pattern_->FullMatch(context_value);
}
// Builds the string that ParseHelper will treat as the national number,
// honouring RFC 3966 syntax ("tel:" prefix, phone-context and
// isdn-subaddress parameters) when present.
PhoneNumberUtil::ErrorType PhoneNumberUtil::BuildNationalNumberForParsing(
    absl::string_view number_to_parse, std::string* national_number) const {
  size_t index_of_phone_context = number_to_parse.find(kRfc3966PhoneContext);
  absl::optional<absl::string_view> phone_context =
      ExtractPhoneContext(number_to_parse, index_of_phone_context);
  if (!IsPhoneContextValid(phone_context)) {
    VLOG(2) << "The phone-context value is invalid.";
    return NOT_A_NUMBER;
  }
  if (phone_context.has_value()) {
    // A global phone-context (starting with '+') supplies the country code;
    // prepend it so the combined string parses as an international number.
    if (phone_context.value().at(0) == kPlusSign[0]) {
      StrAppend(national_number, phone_context.value());
    }
    // Append the part between "tel:" (or the string start) and the
    // ";phone-context=" parameter.
    size_t index_of_rfc_prefix = number_to_parse.find(kRfc3966Prefix);
    int index_of_national_number = (index_of_rfc_prefix != std::string::npos) ?
        static_cast<int>(index_of_rfc_prefix + strlen(kRfc3966Prefix)) : 0;
    StrAppend(
        national_number,
        number_to_parse.substr(
            index_of_national_number,
            index_of_phone_context - index_of_national_number));
  } else {
    // Plain (non-RFC 3966) input: keep the likely phone-number portion only.
    ExtractPossibleNumber(number_to_parse, national_number);
  }
  // Drop any isdn-subaddress parameter and everything after it.
  size_t index_of_isdn = national_number->find(kRfc3966IsdnSubaddress);
  if (index_of_isdn != std::string::npos) {
    national_number->erase(index_of_isdn);
  }
  return NO_PARSING_ERROR;
}
// Core parsing routine backing Parse and ParseAndKeepRawInput. Builds the
// candidate national number, strips extension and country code, optionally
// strips the national prefix / carrier code, validates lengths, and only on
// full success swaps the result into phone_number.
PhoneNumberUtil::ErrorType PhoneNumberUtil::ParseHelper(
    absl::string_view number_to_parse, const std::string& default_region,
    bool keep_raw_input, bool check_region, PhoneNumber* phone_number) const {
  DCHECK(phone_number);
  std::string national_number;
  PhoneNumberUtil::ErrorType build_national_number_for_parsing_return =
      BuildNationalNumberForParsing(number_to_parse, &national_number);
  if (build_national_number_for_parsing_return != NO_PARSING_ERROR) {
    return build_national_number_for_parsing_return;
  }
  if (!IsViablePhoneNumber(national_number)) {
    VLOG(2) << "The string supplied did not seem to be a phone number.";
    return NOT_A_NUMBER;
  }
  // Either a valid default region or a leading '+' is required to determine
  // the country code.
  if (check_region &&
      !CheckRegionForParsing(national_number, default_region)) {
    VLOG(1) << "Missing or invalid default country.";
    return INVALID_COUNTRY_CODE_ERROR;
  }
  // Work on a temporary and swap at the end, so phone_number is untouched
  // on failure.
  PhoneNumber temp_number;
  if (keep_raw_input) {
    temp_number.set_raw_input(number_to_parse.data(), number_to_parse.size());
  }
  // The extension must be stripped before country-code extraction.
  std::string extension;
  MaybeStripExtension(&national_number, &extension);
  if (!extension.empty()) {
    temp_number.set_extension(extension);
  }
  const PhoneMetadata* country_metadata = GetMetadataForRegion(default_region);
  std::string normalized_national_number(national_number);
  ErrorType country_code_error =
      MaybeExtractCountryCode(country_metadata, keep_raw_input,
                              &normalized_national_number, &temp_number);
  if (country_code_error != NO_PARSING_ERROR) {
    // Retry once with the leading plus characters stripped: the input may
    // be "+<invalid idd><number>" where the '+' itself confused extraction.
    const scoped_ptr<RegExpInput> number_string_piece(
        reg_exps_->regexp_factory_->CreateInput(national_number));
    if ((country_code_error == INVALID_COUNTRY_CODE_ERROR) &&
        (reg_exps_->plus_chars_pattern_->Consume(number_string_piece.get()))) {
      normalized_national_number.assign(number_string_piece->ToString());
      MaybeExtractCountryCode(country_metadata,
                              keep_raw_input,
                              &normalized_national_number,
                              &temp_number);
      if (temp_number.country_code() == 0) {
        return INVALID_COUNTRY_CODE_ERROR;
      }
    } else {
      return country_code_error;
    }
  }
  int country_code = temp_number.country_code();
  if (country_code != 0) {
    // The extracted country code may imply a different region than the
    // default; reload metadata accordingly.
    std::string phone_number_region;
    GetRegionCodeForCountryCode(country_code, &phone_number_region);
    if (phone_number_region != default_region) {
      country_metadata =
          GetMetadataForRegionOrCallingCode(country_code, phone_number_region);
    }
  } else if (country_metadata) {
    // No country code in the input: fall back to the default region's code.
    country_code = country_metadata->country_code();
  }
  if (normalized_national_number.length() < kMinLengthForNsn) {
    VLOG(2) << "The string supplied is too short to be a phone number.";
    return TOO_SHORT_NSN;
  }
  if (country_metadata) {
    // Strip the national prefix / carrier code, but only keep the stripped
    // form when it still has an acceptable length.
    std::string carrier_code;
    std::string potential_national_number(normalized_national_number);
    MaybeStripNationalPrefixAndCarrierCode(*country_metadata,
                                           &potential_national_number,
                                           &carrier_code);
    ValidationResult validation_result =
        TestNumberLength(potential_national_number, *country_metadata);
    if (validation_result != TOO_SHORT &&
        validation_result != IS_POSSIBLE_LOCAL_ONLY &&
        validation_result != INVALID_LENGTH) {
      normalized_national_number.assign(potential_national_number);
      if (keep_raw_input && !carrier_code.empty()) {
        temp_number.set_preferred_domestic_carrier_code(carrier_code);
      }
    }
  }
  size_t normalized_national_number_length =
      normalized_national_number.length();
  if (normalized_national_number_length < kMinLengthForNsn) {
    VLOG(2) << "The string supplied is too short to be a phone number.";
    return TOO_SHORT_NSN;
  }
  if (normalized_national_number_length > kMaxLengthForNsn) {
    VLOG(2) << "The string supplied is too long to be a phone number.";
    return TOO_LONG_NSN;
  }
  temp_number.set_country_code(country_code);
  // Leading zeros are stored separately; the NSN itself is kept as uint64.
  SetItalianLeadingZerosForPhoneNumber(normalized_national_number,
                                       &temp_number);
  uint64 number_as_int;
  safe_strtou64(normalized_national_number, &number_as_int);
  temp_number.set_national_number(number_as_int);
  phone_number->Swap(&temp_number);
  return NO_PARSING_ERROR;
}
// Extracts the substring of `number` that could plausibly be a phone
// number: skips leading characters until a valid start character, trims
// unwanted trailing characters, and cuts off a possible second number.
// Clears the output for invalid UTF-8 or when no start character is found.
void PhoneNumberUtil::ExtractPossibleNumber(
    absl::string_view number, std::string* extracted_number) const {
  DCHECK(extracted_number);
  UnicodeText number_as_unicode;
  number_as_unicode.PointToUTF8(number.data(), static_cast<int>(number.size()));
  if (!number_as_unicode.UTF8WasValid()) {
    // Malformed UTF-8 cannot be processed safely; return nothing.
    extracted_number->clear();
    return;
  }
  // Scan code point by code point for the first valid start character.
  // A UTF-8 code point is at most 4 bytes, plus the terminating NUL.
  char current_char[5];
  int len;
  UnicodeText::const_iterator it;
  for (it = number_as_unicode.begin(); it != number_as_unicode.end(); ++it) {
    len = it.get_utf8(current_char);
    current_char[len] = '\0';
    if (reg_exps_->valid_start_char_pattern_->FullMatch(current_char)) {
      break;
    }
  }
  if (it == number_as_unicode.end()) {
    // No valid start character anywhere in the input.
    extracted_number->clear();
    return;
  }
  extracted_number->assign(
      UnicodeText::UTF8Substring(it, number_as_unicode.end()));
  TrimUnwantedEndChars(extracted_number);
  if (extracted_number->length() == 0) {
    return;
  }
  // If the remainder looks like two numbers, keep only the first.
  reg_exps_->capture_up_to_second_number_start_pattern_->
      PartialMatch(*extracted_number, extracted_number);
}
bool PhoneNumberUtil::IsPossibleNumber(const PhoneNumber& number) const {
ValidationResult result = IsPossibleNumberWithReason(number);
return result == IS_POSSIBLE || result == IS_POSSIBLE_LOCAL_ONLY;
}
bool PhoneNumberUtil::IsPossibleNumberForType(
const PhoneNumber& number, const PhoneNumberType type) const {
ValidationResult result = IsPossibleNumberForTypeWithReason(number, type);
return result == IS_POSSIBLE || result == IS_POSSIBLE_LOCAL_ONLY;
}
// Parses the string relative to region_dialing_from and reports whether the
// result is a possible number; unparseable input is not possible.
bool PhoneNumberUtil::IsPossibleNumberForString(
    absl::string_view number, const std::string& region_dialing_from) const {
  PhoneNumber parsed;
  if (Parse(number, region_dialing_from, &parsed) != NO_PARSING_ERROR) {
    return false;
  }
  return IsPossibleNumber(parsed);
}
// Type-agnostic possibility check; equivalent to the typed variant with
// UNKNOWN, which tests against the general description lengths.
PhoneNumberUtil::ValidationResult PhoneNumberUtil::IsPossibleNumberWithReason(
    const PhoneNumber& number) const {
  return IsPossibleNumberForTypeWithReason(number, PhoneNumberUtil::UNKNOWN);
}
// Checks only length plausibility (not actual validity) of the number for
// the given type, returning the precise reason when it is not possible.
PhoneNumberUtil::ValidationResult
PhoneNumberUtil::IsPossibleNumberForTypeWithReason(const PhoneNumber& number,
                                                   PhoneNumberType type) const {
  std::string national_number;
  GetNationalSignificantNumber(number, &national_number);
  int country_code = number.country_code();
  if (!HasValidCountryCallingCode(country_code)) {
    return INVALID_COUNTRY_CODE;
  }
  std::string region_code;
  GetRegionCodeForCountryCode(country_code, &region_code);
  // Metadata is keyed by region for geographical codes and by calling code
  // for non-geographical entities.
  const PhoneMetadata* metadata =
      GetMetadataForRegionOrCallingCode(country_code, region_code);
  return TestNumberLength(national_number, *metadata, type);
}
// Attempts to make an over-long number valid by repeatedly dropping its
// last digit. Returns true on success (number is updated in place) and
// false when truncation cannot yield a valid number; in the failure case
// `number` is left unchanged.
bool PhoneNumberUtil::TruncateTooLongNumber(PhoneNumber* number) const {
  if (IsValidNumber(*number)) {
    return true;
  }
  // Mutate a copy so the caller's number survives a failed attempt.
  PhoneNumber number_copy(*number);
  uint64 national_number = number->national_number();
  do {
    // Drop the last digit.
    national_number /= 10;
    number_copy.set_national_number(national_number);
    // Stop once the candidate is too short (or exhausted): further
    // truncation can never recover a valid number.
    if (IsPossibleNumberWithReason(number_copy) == TOO_SHORT ||
        national_number == 0) {
      return false;
    }
  } while (!IsValidNumber(number_copy));
  number->set_national_number(national_number);
  return true;
}
// Determines the type (fixed-line, mobile, toll-free, ...) of the number,
// or UNKNOWN when no metadata is available for its region / calling code.
PhoneNumberUtil::PhoneNumberType PhoneNumberUtil::GetNumberType(
    const PhoneNumber& number) const {
  std::string region_code;
  GetRegionCodeForNumber(number, &region_code);
  const PhoneMetadata* metadata =
      GetMetadataForRegionOrCallingCode(number.country_code(), region_code);
  if (metadata == NULL) {
    return UNKNOWN;
  }
  std::string national_significant_number;
  GetNationalSignificantNumber(number, &national_significant_number);
  return GetNumberTypeHelper(national_significant_number, *metadata);
}
// Full validity check: resolves the number's region and validates against
// that region's metadata.
bool PhoneNumberUtil::IsValidNumber(const PhoneNumber& number) const {
  std::string region_code;
  GetRegionCodeForNumber(number, &region_code);
  return IsValidNumberForRegion(number, region_code);
}
// Returns true if the number is valid when interpreted under region_code's
// metadata and its country code matches that region.
bool PhoneNumberUtil::IsValidNumberForRegion(const PhoneNumber& number,
                                             const std::string& region_code) const {
  const int country_code = number.country_code();
  const PhoneMetadata* metadata =
      GetMetadataForRegionOrCallingCode(country_code, region_code);
  if (!metadata) {
    return false;
  }
  // Geographical regions must agree with the number's country code; the
  // pseudo-region for non-geographical entities is exempt.
  if (kRegionCodeForNonGeoEntity != region_code &&
      country_code != GetCountryCodeForValidRegion(region_code)) {
    return false;
  }
  std::string national_number;
  GetNationalSignificantNumber(number, &national_number);
  return GetNumberTypeHelper(national_number, *metadata) != UNKNOWN;
}
// Convenience overload: derives the type from the number itself and
// delegates to the (type, country code) overload.
bool PhoneNumberUtil::IsNumberGeographical(
    const PhoneNumber& phone_number) const {
  return IsNumberGeographical(GetNumberType(phone_number),
                              phone_number.country_code());
}
// A number is geographical when it is fixed-line (or ambiguous
// fixed-or-mobile), or mobile in a country whose mobile ranges are
// geographically assigned.
bool PhoneNumberUtil::IsNumberGeographical(
    PhoneNumberType phone_number_type, int country_calling_code) const {
  if (phone_number_type == PhoneNumberUtil::FIXED_LINE ||
      phone_number_type == PhoneNumberUtil::FIXED_LINE_OR_MOBILE) {
    return true;
  }
  return phone_number_type == PhoneNumberUtil::MOBILE &&
         reg_exps_->geo_mobile_countries_.count(country_calling_code) > 0;
}
// Records leading zeros of the national number on the proto: sets
// italian_leading_zero when the number starts with '0', and
// number_of_leading_zeros when there is more than one.
void PhoneNumberUtil::SetItalianLeadingZerosForPhoneNumber(
    const std::string& national_number, PhoneNumber* phone_number) const {
  if (national_number.length() <= 1 || national_number[0] != '0') {
    return;
  }
  phone_number->set_italian_leading_zero(true);
  size_t zeros = 1;
  // Count leading zeros, but never the final character: for an all-zero
  // number the last zero stays part of the national number itself.
  while (zeros < national_number.length() - 1 &&
         national_number[zeros] == '0') {
    ++zeros;
  }
  // One leading zero is the default implied by italian_leading_zero.
  if (zeros != 1) {
    phone_number->set_number_of_leading_zeros(static_cast<int32_t>(zeros));
  }
}
// Returns true when national_number matches number_desc: its length must be
// listed in possible_length (when that list is non-empty), and its digits
// must match the desc's pattern via the matcher API.
bool PhoneNumberUtil::IsNumberMatchingDesc(
    const std::string& national_number, const PhoneNumberDesc& number_desc) const {
  int actual_length = static_cast<int>(national_number.length());
  // Cheap length pre-check before the (more expensive) regex match.
  if (number_desc.possible_length_size() > 0 &&
      std::find(number_desc.possible_length().begin(),
                number_desc.possible_length().end(),
                actual_length) == number_desc.possible_length().end()) {
    return false;
  }
  return IsMatch(*matcher_api_, national_number, number_desc);
}
// Classifies a national significant number against a region's metadata.
// The checks run in a fixed priority order (premium rate first, fixed
// line/mobile last); reordering them would change results for numbers that
// match more than one description.
PhoneNumberUtil::PhoneNumberType PhoneNumberUtil::GetNumberTypeHelper(
    const std::string& national_number, const PhoneMetadata& metadata) const {
  // The general description is a superset of all types; failing it means
  // the number cannot be of any type in this region.
  if (!IsNumberMatchingDesc(national_number, metadata.general_desc())) {
    VLOG(4) << "Number type unknown - doesn't match general national number"
            << " pattern.";
    return PhoneNumberUtil::UNKNOWN;
  }
  if (IsNumberMatchingDesc(national_number, metadata.premium_rate())) {
    VLOG(4) << "Number is a premium number.";
    return PhoneNumberUtil::PREMIUM_RATE;
  }
  if (IsNumberMatchingDesc(national_number, metadata.toll_free())) {
    VLOG(4) << "Number is a toll-free number.";
    return PhoneNumberUtil::TOLL_FREE;
  }
  if (IsNumberMatchingDesc(national_number, metadata.shared_cost())) {
    VLOG(4) << "Number is a shared cost number.";
    return PhoneNumberUtil::SHARED_COST;
  }
  if (IsNumberMatchingDesc(national_number, metadata.voip())) {
    VLOG(4) << "Number is a VOIP (Voice over IP) number.";
    return PhoneNumberUtil::VOIP;
  }
  if (IsNumberMatchingDesc(national_number, metadata.personal_number())) {
    VLOG(4) << "Number is a personal number.";
    return PhoneNumberUtil::PERSONAL_NUMBER;
  }
  if (IsNumberMatchingDesc(national_number, metadata.pager())) {
    VLOG(4) << "Number is a pager number.";
    return PhoneNumberUtil::PAGER;
  }
  if (IsNumberMatchingDesc(national_number, metadata.uan())) {
    VLOG(4) << "Number is a UAN.";
    return PhoneNumberUtil::UAN;
  }
  if (IsNumberMatchingDesc(national_number, metadata.voicemail())) {
    VLOG(4) << "Number is a voicemail number.";
    return PhoneNumberUtil::VOICEMAIL;
  }
  // Fixed-line vs mobile is resolved last; some regions cannot distinguish
  // the two, in which case FIXED_LINE_OR_MOBILE is returned.
  bool is_fixed_line =
      IsNumberMatchingDesc(national_number, metadata.fixed_line());
  if (is_fixed_line) {
    if (metadata.same_mobile_and_fixed_line_pattern()) {
      VLOG(4) << "Fixed-line and mobile patterns equal, number is fixed-line"
              << " or mobile";
      return PhoneNumberUtil::FIXED_LINE_OR_MOBILE;
    } else if (IsNumberMatchingDesc(national_number, metadata.mobile())) {
      VLOG(4) << "Fixed-line and mobile patterns differ, but number is "
              << "still fixed-line or mobile";
      return PhoneNumberUtil::FIXED_LINE_OR_MOBILE;
    }
    VLOG(4) << "Number is a fixed line number.";
    return PhoneNumberUtil::FIXED_LINE;
  }
  if (!metadata.same_mobile_and_fixed_line_pattern() &&
      IsNumberMatchingDesc(national_number, metadata.mobile())) {
    VLOG(4) << "Number is a mobile number.";
    return PhoneNumberUtil::MOBILE;
  }
  VLOG(4) << "Number type unknown - doesn\'t match any specific number type"
          << " pattern.";
  return PhoneNumberUtil::UNKNOWN;
}
// Appends the national significant number as a string, re-materialising any
// leading zeros that were stored separately on the proto (the national
// number itself is a uint64 and cannot carry them).
void PhoneNumberUtil::GetNationalSignificantNumber(
    const PhoneNumber& number, std::string* national_number) const {
  DCHECK(national_number);
  // std::max guards against a non-positive number_of_leading_zeros field.
  StrAppend(national_number, number.italian_leading_zero() ?
      std::string(std::max(number.number_of_leading_zeros(), 0), '0') : "");
  StrAppend(national_number, number.national_number());
}
// Returns the length of the geographical area code of the number, or 0 when
// the number has no area code (unknown region, no national prefix in a
// region that needs one, non-geographical number, ...).
int PhoneNumberUtil::GetLengthOfGeographicalAreaCode(
    const PhoneNumber& number) const {
  std::string region_code;
  GetRegionCodeForNumber(number, &region_code);
  const PhoneMetadata* metadata = GetMetadataForRegion(region_code);
  if (!metadata) {
    return 0;
  }
  PhoneNumberType type = GetNumberType(number);
  int country_calling_code = number.country_code();
  // Regions without a national prefix generally have no area codes, except
  // those explicitly listed as having area codes anyway.
  if (!metadata->has_national_prefix() && !number.italian_leading_zero() &&
      reg_exps_->countries_without_national_prefix_with_area_codes_.find(
          country_calling_code) ==
      reg_exps_->countries_without_national_prefix_with_area_codes_.end()) {
    return 0;
  }
  // Some countries assign mobile ranges geographically but without area
  // codes in the mobile numbers themselves.
  if (type == PhoneNumberUtil::MOBILE &&
      reg_exps_->geo_mobile_countries_without_mobile_area_codes_.find(
          country_calling_code) !=
      reg_exps_->geo_mobile_countries_without_mobile_area_codes_.end()) {
    return 0;
  }
  if (!IsNumberGeographical(type, country_calling_code)) {
    return 0;
  }
  return GetLengthOfNationalDestinationCode(number);
}
// Returns the length of the national destination code, derived from the
// INTERNATIONAL format: the NDC is the second group of digits (the first is
// the country code). Returns 0 when fewer than three groups exist.
int PhoneNumberUtil::GetLengthOfNationalDestinationCode(
    const PhoneNumber& number) const {
  // Format a copy without the extension, which would confuse group capture.
  PhoneNumber copied_proto(number);
  if (number.has_extension()) {
    copied_proto.clear_extension();
  }
  std::string formatted_number;
  Format(copied_proto, INTERNATIONAL, &formatted_number);
  const scoped_ptr<RegExpInput> i18n_number(
      reg_exps_->regexp_factory_->CreateInput(formatted_number));
  std::string digit_group;
  std::string ndc;
  std::string third_group;
  // Consume the first three digit groups: country code, NDC, and the group
  // after the NDC (needed for the mobile-token case below).
  for (int i = 0; i < 3; ++i) {
    if (!reg_exps_->capturing_ascii_digits_pattern_->FindAndConsume(
            i18n_number.get(), &digit_group)) {
      return 0;
    }
    if (i == 1) {
      ndc = digit_group;
    } else if (i == 2) {
      third_group = digit_group;
    }
  }
  if (GetNumberType(number) == MOBILE) {
    // For countries with a mobile token, the real NDC is the token plus the
    // group that follows it.
    std::string mobile_token;
    GetCountryMobileToken(number.country_code(), &mobile_token);
    if (!mobile_token.empty()) {
      return static_cast<int>(third_group.size() + mobile_token.size());
    }
  }
  return static_cast<int>(ndc.size());
}
// Writes the mobile token for the given country calling code into
// |mobile_token|, or the empty string when the country has none.
void PhoneNumberUtil::GetCountryMobileToken(int country_calling_code,
                                            std::string* mobile_token) const {
  DCHECK(mobile_token);
  std::map<int, char>::const_iterator entry =
      reg_exps_->mobile_token_mappings_.find(country_calling_code);
  if (entry == reg_exps_->mobile_token_mappings_.end()) {
    // No mobile token for this country calling code.
    mobile_token->clear();
  } else {
    // The mapping stores a single character; expose it as a string.
    mobile_token->assign(1, entry->second);
  }
}
void PhoneNumberUtil::NormalizeDigitsOnly(std::string* number) const {
DCHECK(number);
const RegExp& non_digits_pattern = reg_exps_->regexp_cache_->GetRegExp(
StrCat("[^", kDigits, "]"));
non_digits_pattern.GlobalReplace(number, "");
number->assign(NormalizeUTF8::NormalizeDecimalDigits(*number));
}
// Normalizes the number in place, keeping only characters that are diallable
// (digits and diallable symbols such as '+', '*', '#').
void PhoneNumberUtil::NormalizeDiallableCharsOnly(std::string* number) const {
  DCHECK(number);
  // true => remove characters that have no diallable mapping.
  NormalizeHelper(reg_exps_->diallable_char_mappings_,
                  true , number);
}
// Returns true if the number is a valid "vanity" (alpha) number, e.g.
// 1-800-FLOWERS, once any extension has been removed.
bool PhoneNumberUtil::IsAlphaNumber(const std::string& number) const {
  // A string too short or malformed to be a phone number cannot be a vanity
  // number either.
  if (!IsViablePhoneNumber(number)) {
    return false;
  }
  // Strip a possible extension before matching against the alpha pattern.
  std::string stripped_number(number);
  std::string extension;
  MaybeStripExtension(&stripped_number, &extension);
  return reg_exps_->valid_alpha_phone_pattern_->FullMatch(stripped_number);
}
// Converts alpha characters (vanity letters) in the number to their digit
// equivalents in place; all other characters are left untouched.
void PhoneNumberUtil::ConvertAlphaCharactersInNumber(
    std::string* number) const {
  DCHECK(number);
  // false => keep characters that have no mapping instead of removing them.
  NormalizeHelper(reg_exps_->alpha_phone_mappings_, false, number);
}
// Normalizes a phone number string in place: if it looks like a vanity
// number, letters are first converted to digits; then all non-digit
// characters are removed and non-ASCII digits converted to ASCII.
void PhoneNumberUtil::Normalize(std::string* number) const {
  DCHECK(number);
  if (reg_exps_->valid_alpha_phone_pattern_->PartialMatch(*number)) {
    // true => remove any character that has no digit mapping.
    NormalizeHelper(reg_exps_->alpha_phone_mappings_, true, number);
  }
  NormalizeDigitsOnly(number);
}
// Loosely checks whether the string could be a phone number: it must reach
// the minimum NSN length and match the general phone-number pattern.
bool PhoneNumberUtil::IsViablePhoneNumber(const std::string& number) const {
  return number.length() >= kMinLengthForNsn &&
         reg_exps_->valid_phone_number_pattern_->FullMatch(number);
}
bool PhoneNumberUtil::ParsePrefixAsIdd(const RegExp& idd_pattern,
std::string* number) const {
DCHECK(number);
const scoped_ptr<RegExpInput> number_copy(
reg_exps_->regexp_factory_->CreateInput(*number));
if (idd_pattern.Consume(number_copy.get())) {
std::string extracted_digit;
if (reg_exps_->capturing_digit_pattern_->PartialMatch(
number_copy->ToString(), &extracted_digit)) {
NormalizeDigitsOnly(&extracted_digit);
if (extracted_digit == "0") {
return false;
}
}
number->assign(number_copy->ToString());
return true;
}
return false;
}
// Strips any international prefix (a leading '+' or the region's IDD, such
// as 00 or 011) from |number|, normalizing the remainder, and reports how the
// country code should later be interpreted:
//   FROM_NUMBER_WITH_PLUS_SIGN  - a leading plus was found and consumed;
//   FROM_NUMBER_WITH_IDD        - the given IDD prefix was consumed;
//   FROM_DEFAULT_COUNTRY        - neither was present (number may be empty).
PhoneNumber::CountryCodeSource
PhoneNumberUtil::MaybeStripInternationalPrefixAndNormalize(
    const std::string& possible_idd_prefix, std::string* number) const {
  DCHECK(number);
  if (number->empty()) {
    return PhoneNumber::FROM_DEFAULT_COUNTRY;
  }
  const scoped_ptr<RegExpInput> number_string_piece(
      reg_exps_->regexp_factory_->CreateInput(*number));
  if (reg_exps_->plus_chars_pattern_->Consume(number_string_piece.get())) {
    number->assign(number_string_piece->ToString());
    // The '+' has been consumed; normalize what remains.
    Normalize(number);
    return PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN;
  }
  // No plus sign: normalize first, then try to consume the IDD prefix.
  const RegExp& idd_pattern =
      reg_exps_->regexp_cache_->GetRegExp(possible_idd_prefix);
  Normalize(number);
  return ParsePrefixAsIdd(idd_pattern, number)
      ? PhoneNumber::FROM_NUMBER_WITH_IDD
      : PhoneNumber::FROM_DEFAULT_COUNTRY;
}
// Strips any national prefix (e.g. a leading "0") and/or carrier code from
// |number|, using the parsing rules in |metadata|. If |carrier_code| is
// non-NULL it receives any carrier code that was removed. Returns true when
// something was stripped; the stripped result is only kept if the number
// remains a viable match for the region (or was not viable to begin with).
bool PhoneNumberUtil::MaybeStripNationalPrefixAndCarrierCode(
    const PhoneMetadata& metadata, std::string* number,
    std::string* carrier_code) const {
  DCHECK(number);
  std::string carrier_code_temp;
  const std::string& possible_national_prefix =
      metadata.national_prefix_for_parsing();
  if (number->empty() || possible_national_prefix.empty()) {
    // Nothing to strip: early exit.
    return false;
  }
  // Two independent inputs: one for the transform branch, one for the plain
  // branch, since Consume advances the input.
  const scoped_ptr<RegExpInput> number_copy(
      reg_exps_->regexp_factory_->CreateInput(*number));
  const scoped_ptr<RegExpInput> number_copy_without_transform(
      reg_exps_->regexp_factory_->CreateInput(*number));
  std::string number_string_copy(*number);
  std::string captured_part_of_prefix;
  const PhoneNumberDesc& general_desc = metadata.general_desc();
  // Record whether the original was viable, so stripping is rejected if it
  // would turn a viable number into a non-viable one.
  bool is_viable_original_number =
      IsMatch(*matcher_api_, *number, general_desc);
  const std::string& transform_rule = metadata.national_prefix_transform_rule();
  const RegExp& possible_national_prefix_pattern =
      reg_exps_->regexp_cache_->GetRegExp(possible_national_prefix);
  if (!transform_rule.empty() &&
      (possible_national_prefix_pattern.Consume(
          number_copy.get(), &carrier_code_temp, &captured_part_of_prefix) ||
       possible_national_prefix_pattern.Consume(
          number_copy.get(), &captured_part_of_prefix)) &&
      !captured_part_of_prefix.empty()) {
    // The prefix matched and captured something: apply the transform rule to
    // the whole string copy.
    possible_national_prefix_pattern.Replace(&number_string_copy,
                                             transform_rule);
    if (is_viable_original_number &&
        !IsMatch(*matcher_api_, number_string_copy, general_desc)) {
      // Stripping made a viable number invalid: keep the input untouched.
      return false;
    }
    number->assign(number_string_copy);
    if (carrier_code) {
      carrier_code->assign(carrier_code_temp);
    }
  } else if (possible_national_prefix_pattern.Consume(
                 number_copy_without_transform.get(), &carrier_code_temp) ||
             possible_national_prefix_pattern.Consume(
                 number_copy_without_transform.get())) {
    VLOG(4) << "Parsed the first digits as a national prefix.";
    const std::string number_copy_as_string =
        number_copy_without_transform->ToString();
    if (is_viable_original_number &&
        !IsMatch(*matcher_api_, number_copy_as_string, general_desc)) {
      return false;
    }
    number->assign(number_copy_as_string);
    if (carrier_code) {
      carrier_code->assign(carrier_code_temp);
    }
  } else {
    // Bug fix: this log statement previously appeared AFTER the return and
    // was unreachable; it must run before returning.
    VLOG(4) << "The first digits did not match the national prefix.";
    return false;
  }
  return true;
}
// Strips a trailing extension from |number|, if present, writing it into
// |extension|. Returns true only when an extension was found AND the
// remaining number is still viable; otherwise |number| is left unchanged.
bool PhoneNumberUtil::MaybeStripExtension(std::string* number,
                                          std::string* extension) const {
  DCHECK(number);
  DCHECK(extension);
  // The extension pattern has several alternatives, each with its own
  // capturing group; collect all six captures here.
  std::string captures[6];
  std::string number_copy(*number);
  const scoped_ptr<RegExpInput> number_copy_as_regexp_input(
      reg_exps_->regexp_factory_->CreateInput(number_copy));
  if (!reg_exps_->extn_pattern_->Consume(
          number_copy_as_regexp_input.get(), false, &captures[0],
          &captures[1], &captures[2], &captures[3], &captures[4],
          &captures[5])) {
    return false;
  }
  // Remove the matched extension text from the candidate number.
  reg_exps_->extn_pattern_->Replace(&number_copy, "");
  // Find the first alternative that actually captured something.
  const std::string* matched_extension = NULL;
  for (int i = 0; i < 6; ++i) {
    if (!captures[i].empty()) {
      matched_extension = &captures[i];
      break;
    }
  }
  // Only commit the strip when an extension was captured and what remains is
  // still a viable phone number.
  if (matched_extension != NULL && IsViablePhoneNumber(number_copy)) {
    number->assign(number_copy);
    extension->assign(*matched_extension);
    return true;
  }
  return false;
}
// Extracts a country calling code from the front of |national_number|,
// stripping it in place, and returns it; returns 0 when no valid code is
// found (or the number starts with '0', which no country code does).
int PhoneNumberUtil::ExtractCountryCode(std::string* national_number) const {
  if (national_number->empty() || (national_number->at(0) == '0')) {
    // Country calling codes never begin with a zero.
    return 0;
  }
  // Bug fix: initialize so we never branch on an indeterminate value should
  // safe_strto32 fail to parse the prefix. Also fixed "&region_code", which
  // had been mangled by an HTML-entity mojibake (&reg -> (R)).
  int potential_country_code = 0;
  for (size_t i = 1; i <= kMaxLengthCountryCode; ++i) {
    safe_strto32(national_number->substr(0, i), &potential_country_code);
    std::string region_code;
    GetRegionCodeForCountryCode(potential_country_code, &region_code);
    if (region_code != RegionCode::GetUnknown()) {
      // Found a known region: strip the matched digits and report the code.
      national_number->erase(0, i);
      return potential_country_code;
    }
  }
  return 0;
}
// Extracts a country calling code from |national_number| (after stripping
// any '+' or IDD prefix) and stores it on |phone_number|. When no
// international prefix was present, also checks whether the number starts
// with the default region's country code and strips it if doing so yields a
// better match. Returns NO_PARSING_ERROR, TOO_SHORT_AFTER_IDD, or
// INVALID_COUNTRY_CODE_ERROR.
PhoneNumberUtil::ErrorType PhoneNumberUtil::MaybeExtractCountryCode(
    const PhoneMetadata* default_region_metadata,
    bool keep_raw_input,
    std::string* national_number,
    PhoneNumber* phone_number) const {
  DCHECK(national_number);
  DCHECK(phone_number);
  // With no metadata, "NonMatch" guarantees the IDD pattern matches nothing.
  std::string possible_country_idd_prefix = default_region_metadata
      ? default_region_metadata->international_prefix()
      : "NonMatch";
  PhoneNumber::CountryCodeSource country_code_source =
      MaybeStripInternationalPrefixAndNormalize(possible_country_idd_prefix,
                                                national_number);
  if (keep_raw_input) {
    phone_number->set_country_code_source(country_code_source);
  }
  if (country_code_source != PhoneNumber::FROM_DEFAULT_COUNTRY) {
    // An international prefix was present: the country code must follow.
    if (national_number->length() <= kMinLengthForNsn) {
      VLOG(2) << "Phone number had an IDD, but after this was not "
              << "long enough to be a viable phone number.";
      return TOO_SHORT_AFTER_IDD;
    }
    int potential_country_code = ExtractCountryCode(national_number);
    if (potential_country_code != 0) {
      phone_number->set_country_code(potential_country_code);
      return NO_PARSING_ERROR;
    }
    return INVALID_COUNTRY_CODE_ERROR;
  } else if (default_region_metadata) {
    // No prefix: see if the number nevertheless starts with the default
    // region's country calling code.
    int default_country_code = default_region_metadata->country_code();
    std::string default_country_code_string(SimpleItoa(default_country_code));
    VLOG(4) << "Possible country calling code: " << default_country_code_string;
    std::string potential_national_number;
    if (TryStripPrefixString(*national_number,
                             default_country_code_string,
                             &potential_national_number)) {
      const PhoneNumberDesc& general_num_desc =
          default_region_metadata->general_desc();
      MaybeStripNationalPrefixAndCarrierCode(*default_region_metadata,
                                             &potential_national_number,
                                             NULL);
      VLOG(4) << "Number without country calling code prefix";
      // Accept the stripped form only if the original did not match but the
      // stripped one does, or the original is too long for the region.
      if ((!IsMatch(*matcher_api_, *national_number, general_num_desc) &&
           IsMatch(
               *matcher_api_, potential_national_number, general_num_desc)) ||
          TestNumberLength(*national_number, *default_region_metadata) ==
              TOO_LONG) {
        national_number->assign(potential_national_number);
        if (keep_raw_input) {
          phone_number->set_country_code_source(
              PhoneNumber::FROM_NUMBER_WITHOUT_PLUS_SIGN);
        }
        phone_number->set_country_code(default_country_code);
        return NO_PARSING_ERROR;
      }
    }
  }
  // No country calling code could be identified.
  phone_number->set_country_code(0);
  return NO_PARSING_ERROR;
}
// Compares two phone numbers for a match, looking only at their core fields.
// Returns EXACT_MATCH, NSN_MATCH, SHORT_NSN_MATCH, or NO_MATCH.
PhoneNumberUtil::MatchType PhoneNumberUtil::IsNumberMatch(
    const PhoneNumber& first_number_in,
    const PhoneNumber& second_number_in) const {
  // Raw input, country-code source etc. are irrelevant for matching, so copy
  // only the core fields.
  PhoneNumber first_number;
  CopyCoreFieldsOnly(first_number_in, &first_number);
  PhoneNumber second_number;
  CopyCoreFieldsOnly(second_number_in, &second_number);
  // Differing extensions can never match.
  if (first_number.has_extension() && second_number.has_extension() &&
      first_number.extension() != second_number.extension()) {
    return NO_MATCH;
  }
  const int first_country_code = first_number.country_code();
  const int second_country_code = second_number.country_code();
  if (first_country_code != 0 && second_country_code != 0) {
    // Both numbers carry explicit country codes.
    if (ExactlySameAs(first_number, second_number)) {
      return EXACT_MATCH;
    }
    if (first_country_code == second_country_code &&
        IsNationalNumberSuffixOfTheOther(first_number, second_number)) {
      // Likely a national prefix folded into one of the NSNs.
      return SHORT_NSN_MATCH;
    }
    return NO_MATCH;
  }
  // At least one country code is missing: compare as if they were equal.
  first_number.set_country_code(second_country_code);
  if (ExactlySameAs(first_number, second_number)) {
    return NSN_MATCH;
  }
  if (IsNationalNumberSuffixOfTheOther(first_number, second_number)) {
    return SHORT_NSN_MATCH;
  }
  return NO_MATCH;
}
// Compares two numbers given as strings. Tries progressively looser parses:
// first a full parse of each, then (when country codes are unknown) a
// keep-raw-input-free helper parse of both, finally delegating to
// IsNumberMatch. Returns INVALID_NUMBER when neither string can be parsed.
PhoneNumberUtil::MatchType PhoneNumberUtil::IsNumberMatchWithTwoStrings(
    absl::string_view first_number, absl::string_view second_number) const {
  PhoneNumber first_number_as_proto;
  ErrorType error_type =
      Parse(first_number, RegionCode::GetUnknown(), &first_number_as_proto);
  if (error_type == NO_PARSING_ERROR) {
    return IsNumberMatchWithOneString(first_number_as_proto, second_number);
  }
  if (error_type == INVALID_COUNTRY_CODE_ERROR) {
    // Retry with the roles swapped: perhaps only the second number carries a
    // country code.
    PhoneNumber second_number_as_proto;
    ErrorType error_type = Parse(second_number, RegionCode::GetUnknown(),
                                 &second_number_as_proto);
    if (error_type == NO_PARSING_ERROR) {
      return IsNumberMatchWithOneString(second_number_as_proto, first_number);
    }
    if (error_type == INVALID_COUNTRY_CODE_ERROR) {
      // Neither has a country code: fall back to lenient ParseHelper on both.
      error_type = ParseHelper(first_number, RegionCode::GetUnknown(), false,
                               false, &first_number_as_proto);
      if (error_type == NO_PARSING_ERROR) {
        error_type = ParseHelper(second_number, RegionCode::GetUnknown(), false,
                                 false, &second_number_as_proto);
        if (error_type == NO_PARSING_ERROR) {
          return IsNumberMatch(first_number_as_proto, second_number_as_proto);
        }
      }
    }
  }
  return INVALID_NUMBER;
}
// Compares a parsed number with a string. If the string cannot be parsed on
// its own, it is re-parsed using the first number's region; an EXACT_MATCH
// obtained that way is downgraded to NSN_MATCH, since the country code was
// inferred rather than present in the string.
PhoneNumberUtil::MatchType PhoneNumberUtil::IsNumberMatchWithOneString(
    const PhoneNumber& first_number, absl::string_view second_number) const {
  PhoneNumber second_number_as_proto;
  ErrorType error_type =
      Parse(second_number, RegionCode::GetUnknown(), &second_number_as_proto);
  if (error_type == NO_PARSING_ERROR) {
    return IsNumberMatch(first_number, second_number_as_proto);
  }
  if (error_type == INVALID_COUNTRY_CODE_ERROR) {
    // Borrow the first number's region for parsing the second string.
    std::string first_number_region;
    GetRegionCodeForCountryCode(first_number.country_code(),
                                &first_number_region);
    if (first_number_region != RegionCode::GetUnknown()) {
      PhoneNumber second_number_with_first_number_region;
      Parse(second_number, first_number_region,
            &second_number_with_first_number_region);
      MatchType match = IsNumberMatch(first_number,
                                      second_number_with_first_number_region);
      if (match == EXACT_MATCH) {
        // The country code was inferred, so the match is only NSN-level.
        return NSN_MATCH;
      }
      return match;
    } else {
      // Unknown region: fall back to a lenient helper parse.
      error_type = ParseHelper(second_number, RegionCode::GetUnknown(), false,
                               false, &second_number_as_proto);
      if (error_type == NO_PARSING_ERROR) {
        return IsNumberMatch(first_number, second_number_as_proto);
      }
    }
  }
  return INVALID_NUMBER;
}
// Returns a new AsYouTypeFormatter for the given region. The caller takes
// ownership of the returned object.
AsYouTypeFormatter* PhoneNumberUtil::GetAsYouTypeFormatter(
    const std::string& region_code) const {
  return new AsYouTypeFormatter(region_code);
}
// Returns true unless the region's metadata explicitly lists the number as
// not internationally diallable. With no metadata, assumes dialling is
// possible.
bool PhoneNumberUtil::CanBeInternationallyDialled(
    const PhoneNumber& number) const {
  // Bug fix: "&region_code" had been mangled to the (R) sign by an
  // HTML-entity mojibake (&reg -> (R)).
  std::string region_code;
  GetRegionCodeForNumber(number, &region_code);
  const PhoneMetadata* metadata = GetMetadataForRegion(region_code);
  if (!metadata) {
    // Note numbers belonging to non-geographical entities (global networks)
    // have no metadata here; assume they can be dialled internationally.
    return true;
  }
  std::string national_significant_number;
  GetNationalSignificantNumber(number, &national_significant_number);
  return !IsNumberMatchingDesc(
      national_significant_number, metadata->no_international_dialling());
}
}
} | #include "phonenumbers/phonenumberutil.h"
#include <algorithm>
#include <iostream>
#include <list>
#include <set>
#include <string>
#include <gtest/gtest.h>
#include <unicode/uchar.h>
#include "phonenumbers/default_logger.h"
#include "phonenumbers/normalize_utf8.h"
#include "phonenumbers/phonemetadata.pb.h"
#include "phonenumbers/phonenumber.h"
#include "phonenumbers/phonenumber.pb.h"
#include "phonenumbers/test_util.h"
namespace i18n {
namespace phonenumbers {
using std::find;
using std::ostream;
using google::protobuf::RepeatedPtrField;
static const int kInvalidCountryCode = 2;
// Test fixture that exposes PhoneNumberUtil's private helpers to the tests
// (the fixture is expected to be a friend of PhoneNumberUtil). Each
// protected method is a thin forwarding wrapper around the corresponding
// private member of phone_util_.
class PhoneNumberUtilTest : public testing::Test {
 public:
  // The fixture holds a reference to the singleton; it must not be copied.
  PhoneNumberUtilTest(const PhoneNumberUtilTest&) = delete;
  PhoneNumberUtilTest& operator=(const PhoneNumberUtilTest&) = delete;

 protected:
  PhoneNumberUtilTest() : phone_util_(*PhoneNumberUtil::GetInstance()) {
    PhoneNumberUtil::GetInstance()->SetLogger(new StdoutLogger());
  }

  const PhoneMetadata* GetPhoneMetadata(const string& region_code) const {
    return phone_util_.GetMetadataForRegion(region_code);
  }

  const PhoneMetadata* GetMetadataForNonGeographicalRegion(
      int country_code) const {
    return phone_util_.GetMetadataForNonGeographicalRegion(country_code);
  }

  void ExtractPossibleNumber(const string& number,
                             string* extracted_number) const {
    phone_util_.ExtractPossibleNumber(number, extracted_number);
  }

  bool IsViablePhoneNumber(const string& number) const {
    return phone_util_.IsViablePhoneNumber(number);
  }

  void Normalize(string* number) const {
    phone_util_.Normalize(number);
  }

  PhoneNumber::CountryCodeSource MaybeStripInternationalPrefixAndNormalize(
      const string& possible_idd_prefix,
      string* number) const {
    return phone_util_.MaybeStripInternationalPrefixAndNormalize(
        possible_idd_prefix,
        number);
  }

  void MaybeStripNationalPrefixAndCarrierCode(const PhoneMetadata& metadata,
                                              string* number,
                                              string* carrier_code) const {
    phone_util_.MaybeStripNationalPrefixAndCarrierCode(metadata, number,
                                                       carrier_code);
  }

  bool MaybeStripExtension(string* number, string* extension) const {
    return phone_util_.MaybeStripExtension(number, extension);
  }

  PhoneNumberUtil::ErrorType MaybeExtractCountryCode(
      const PhoneMetadata* default_region_metadata,
      bool keep_raw_input,
      string* national_number,
      PhoneNumber* phone_number) const {
    return phone_util_.MaybeExtractCountryCode(default_region_metadata,
                                               keep_raw_input,
                                               national_number,
                                               phone_number);
  }

  bool ContainsOnlyValidDigits(const string& s) const {
    return phone_util_.ContainsOnlyValidDigits(s);
  }

  // Asserts that parsing the given string reports NOT_A_NUMBER (used for
  // numbers with an invalid RFC3966 phone-context).
  void AssertThrowsForInvalidPhoneContext(const string number_to_parse) {
    PhoneNumber actual_number;
    EXPECT_EQ(
        PhoneNumberUtil::NOT_A_NUMBER,
        phone_util_.Parse(number_to_parse, RegionCode::ZZ(), &actual_number));
  }

  // Reference to the singleton under test.
  const PhoneNumberUtil& phone_util_;
};
// Verifies that ContainsOnlyValidDigits accepts ASCII and full-width digits
// (and the empty string) but rejects letters.
TEST_F(PhoneNumberUtilTest, ContainsOnlyValidDigits) {
  EXPECT_TRUE(ContainsOnlyValidDigits(""));
  EXPECT_TRUE(ContainsOnlyValidDigits("2"));
  EXPECT_TRUE(ContainsOnlyValidDigits("25"));
  // Full-width digit six.
  EXPECT_TRUE(ContainsOnlyValidDigits("\xEF\xBC\x96" ));
  EXPECT_FALSE(ContainsOnlyValidDigits("a"));
  EXPECT_FALSE(ContainsOnlyValidDigits("2a"));
}
// Verifies that valid UTF-8 separators survive normalization and parsing,
// while invalid or non-interchange code points are rejected.
TEST_F(PhoneNumberUtilTest, InterchangeInvalidCodepoints) {
  PhoneNumber phone_number;

  std::vector<string> valid_inputs = {
    // U+2013 en-dash used as a separator.
    "+44" "\xE2\x80\x93" "2087654321",
  };
  for (auto input : valid_inputs) {
    EXPECT_EQ(input, NormalizeUTF8::NormalizeDecimalDigits(input));
    EXPECT_TRUE(IsViablePhoneNumber(input));
    EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
              phone_util_.Parse(input, RegionCode::GB(), &phone_number));
  }

  std::vector<string> invalid_inputs = {
    // Invalid UTF-8 byte, C1 control character, and a non-character.
    "+44" "\x96" "2087654321",
    "+44" "\xC2\x96" "2087654321",
    "+44" "\xEF\xBF\xBE" "2087654321",
  };
  for (auto input : invalid_inputs) {
    EXPECT_TRUE(NormalizeUTF8::NormalizeDecimalDigits(input).empty());
    EXPECT_FALSE(IsViablePhoneNumber(input));
    EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
              phone_util_.Parse(input, RegionCode::GB(), &phone_number));
  }
}
// Verifies that at least one region is reported as supported.
TEST_F(PhoneNumberUtilTest, GetSupportedRegions) {
  std::set<string> regions;
  // Bug fix: "&regions" had been mangled to the (R) sign by an HTML-entity
  // mojibake (&reg -> (R)).
  phone_util_.GetSupportedRegions(&regions);
  EXPECT_GT(regions.size(), 0U);
}
// Verifies that every supported global-network calling code is positive and
// maps to the non-geographical region "001".
TEST_F(PhoneNumberUtilTest, GetSupportedGlobalNetworkCallingCodes) {
  std::set<int> calling_codes;
  phone_util_.GetSupportedGlobalNetworkCallingCodes(&calling_codes);
  EXPECT_GT(calling_codes.size(), 0U);
  for (std::set<int>::const_iterator it = calling_codes.begin();
       it != calling_codes.end(); ++it) {
    EXPECT_GT(*it, 0);
    string region_code;
    // Bug fix: "&region_code" had been mangled to the (R) sign by an
    // HTML-entity mojibake (&reg -> (R)).
    phone_util_.GetRegionCodeForCountryCode(*it, &region_code);
    EXPECT_EQ(RegionCode::UN001(), region_code);
  }
}
// Verifies that the set of all supported calling codes is a strict superset
// of the global-network ones, with every code mapping to a known region.
TEST_F(PhoneNumberUtilTest, GetSupportedCallingCodes) {
  std::set<int> calling_codes;
  phone_util_.GetSupportedCallingCodes(&calling_codes);
  EXPECT_GT(calling_codes.size(), 0U);
  for (std::set<int>::const_iterator it = calling_codes.begin();
       it != calling_codes.end(); ++it) {
    EXPECT_GT(*it, 0);
    string region_code;
    // Bug fix: "&region_code" had been mangled to the (R) sign by an
    // HTML-entity mojibake (&reg -> (R)).
    phone_util_.GetRegionCodeForCountryCode(*it, &region_code);
    EXPECT_NE(RegionCode::ZZ(), region_code);
  }
  std::set<int> supported_global_network_calling_codes;
  phone_util_.GetSupportedGlobalNetworkCallingCodes(
      &supported_global_network_calling_codes);
  EXPECT_GT(calling_codes.size(),
            supported_global_network_calling_codes.size());
  // 979 (International Premium Rate Service) must be present.
  EXPECT_NE(calling_codes.find(979), calling_codes.end());
}
// Verifies the supported number types for several regions, including that an
// unknown region reports no types at all.
TEST_F(PhoneNumberUtilTest, GetSupportedTypesForRegion) {
  std::set<PhoneNumberUtil::PhoneNumberType> types;
  phone_util_.GetSupportedTypesForRegion(RegionCode::BR(), &types);
  EXPECT_NE(types.find(PhoneNumberUtil::FIXED_LINE), types.end());
  // In the test metadata Brazil has no mobile data; UNKNOWN is never listed.
  EXPECT_EQ(types.find(PhoneNumberUtil::MOBILE), types.end());
  EXPECT_EQ(types.find(PhoneNumberUtil::UNKNOWN), types.end());

  types.clear();
  phone_util_.GetSupportedTypesForRegion(RegionCode::US(), &types);
  EXPECT_NE(types.find(PhoneNumberUtil::FIXED_LINE), types.end());
  EXPECT_NE(types.find(PhoneNumberUtil::MOBILE), types.end());
  // FIXED_LINE_OR_MOBILE is a synthetic type and is never returned.
  EXPECT_EQ(types.find(PhoneNumberUtil::FIXED_LINE_OR_MOBILE), types.end());

  types.clear();
  phone_util_.GetSupportedTypesForRegion(RegionCode::ZZ(), &types);
  EXPECT_EQ(0u, types.size());
}
// Verifies supported types for non-geographical entities: an unknown calling
// code yields nothing, while 979 supports premium-rate numbers.
TEST_F(PhoneNumberUtilTest, GetSupportedTypesForNonGeoEntity) {
  std::set<PhoneNumberUtil::PhoneNumberType> types;
  // 999 is not a supported non-geographical calling code.
  phone_util_.GetSupportedTypesForNonGeoEntity(999, &types);
  EXPECT_EQ(0u, types.size());

  types.clear();
  phone_util_.GetSupportedTypesForNonGeoEntity(979, &types);
  EXPECT_NE(types.find(PhoneNumberUtil::PREMIUM_RATE), types.end());
  EXPECT_EQ(types.find(PhoneNumberUtil::MOBILE), types.end());
  EXPECT_EQ(types.find(PhoneNumberUtil::UNKNOWN), types.end());
}
// Verifies the region lists for several calling codes, including the shared
// NANPA code (1), the UN001 code (800), and an invalid code.
TEST_F(PhoneNumberUtilTest, GetRegionCodesForCountryCallingCode) {
  std::list<string> regions;
  // Bug fix: every "&regions" below had been mangled to the (R) sign by an
  // HTML-entity mojibake (&reg -> (R)).
  phone_util_.GetRegionCodesForCountryCallingCode(1, &regions);
  EXPECT_TRUE(find(regions.begin(), regions.end(), RegionCode::US())
              != regions.end());
  EXPECT_TRUE(find(regions.begin(), regions.end(), RegionCode::BS())
              != regions.end());

  regions.clear();
  phone_util_.GetRegionCodesForCountryCallingCode(44, &regions);
  EXPECT_TRUE(find(regions.begin(), regions.end(), RegionCode::GB())
              != regions.end());

  regions.clear();
  phone_util_.GetRegionCodesForCountryCallingCode(49, &regions);
  EXPECT_TRUE(find(regions.begin(), regions.end(), RegionCode::DE())
              != regions.end());

  regions.clear();
  phone_util_.GetRegionCodesForCountryCallingCode(800, &regions);
  EXPECT_TRUE(find(regions.begin(), regions.end(), RegionCode::UN001())
              != regions.end());

  regions.clear();
  phone_util_.GetRegionCodesForCountryCallingCode(
      kInvalidCountryCode, &regions);
  EXPECT_TRUE(regions.empty());
}
// Spot-checks the loaded US metadata: ids, prefixes, formats, and patterns.
TEST_F(PhoneNumberUtilTest, GetInstanceLoadUSMetadata) {
  const PhoneMetadata* metadata = GetPhoneMetadata(RegionCode::US());
  EXPECT_EQ("US", metadata->id());
  EXPECT_EQ(1, metadata->country_code());
  EXPECT_EQ("011", metadata->international_prefix());
  EXPECT_TRUE(metadata->has_national_prefix());
  ASSERT_EQ(2, metadata->number_format_size());
  EXPECT_EQ("(\\d{3})(\\d{3})(\\d{4})",
            metadata->number_format(1).pattern());
  EXPECT_EQ("$1 $2 $3", metadata->number_format(1).format());
  EXPECT_EQ("[13-689]\\d{9}|2[0-35-9]\\d{8}",
            metadata->general_desc().national_number_pattern());
  EXPECT_EQ("[13-689]\\d{9}|2[0-35-9]\\d{8}",
            metadata->fixed_line().national_number_pattern());
  EXPECT_EQ(1, metadata->general_desc().possible_length_size());
  EXPECT_EQ(10, metadata->general_desc().possible_length(0));
  // Possible lengths are the same as the general description, so no lengths
  // are stored separately for toll-free numbers.
  EXPECT_EQ(0, metadata->toll_free().possible_length_size());
  EXPECT_EQ("900\\d{7}", metadata->premium_rate().national_number_pattern());
  // No shared-cost data is available, so its national number pattern is
  // absent.
  EXPECT_FALSE(metadata->shared_cost().has_national_number_pattern());
}
// Spot-checks the loaded German metadata, including leading-digit patterns
// and local-only possible lengths.
TEST_F(PhoneNumberUtilTest, GetInstanceLoadDEMetadata) {
  const PhoneMetadata* metadata = GetPhoneMetadata(RegionCode::DE());
  EXPECT_EQ("DE", metadata->id());
  EXPECT_EQ(49, metadata->country_code());
  EXPECT_EQ("00", metadata->international_prefix());
  EXPECT_EQ("0", metadata->national_prefix());
  ASSERT_EQ(6, metadata->number_format_size());
  EXPECT_EQ(1, metadata->number_format(5).leading_digits_pattern_size());
  EXPECT_EQ("900", metadata->number_format(5).leading_digits_pattern(0));
  EXPECT_EQ("(\\d{3})(\\d{3,4})(\\d{4})",
            metadata->number_format(5).pattern());
  EXPECT_EQ(2, metadata->general_desc().possible_length_local_only_size());
  EXPECT_EQ(8, metadata->general_desc().possible_length_size());
  // Nothing is stored as fixed-line lengths match the general description.
  EXPECT_EQ(0, metadata->fixed_line().possible_length_size());
  EXPECT_EQ(2, metadata->mobile().possible_length_size());
  EXPECT_EQ("$1 $2 $3", metadata->number_format(5).format());
  EXPECT_EQ("(?:[24-6]\\d{2}|3[03-9]\\d|[789](?:0[2-9]|[1-9]\\d))\\d{1,8}",
            metadata->fixed_line().national_number_pattern());
  EXPECT_EQ("30123456", metadata->fixed_line().example_number());
  EXPECT_EQ(10, metadata->toll_free().possible_length(0));
  EXPECT_EQ("900([135]\\d{6}|9\\d{7})",
            metadata->premium_rate().national_number_pattern());
}
// Spot-checks the loaded Argentinian metadata, which exercises the national
// prefix transform rule and separate international formats.
TEST_F(PhoneNumberUtilTest, GetInstanceLoadARMetadata) {
  const PhoneMetadata* metadata = GetPhoneMetadata(RegionCode::AR());
  EXPECT_EQ("AR", metadata->id());
  EXPECT_EQ(54, metadata->country_code());
  EXPECT_EQ("00", metadata->international_prefix());
  EXPECT_EQ("0", metadata->national_prefix());
  EXPECT_EQ("0(?:(11|343|3715)15)?", metadata->national_prefix_for_parsing());
  EXPECT_EQ("9$1", metadata->national_prefix_transform_rule());
  ASSERT_EQ(5, metadata->number_format_size());
  EXPECT_EQ("$2 15 $3-$4", metadata->number_format(2).format());
  EXPECT_EQ("(\\d)(\\d{4})(\\d{2})(\\d{4})",
            metadata->number_format(3).pattern());
  EXPECT_EQ("(\\d)(\\d{4})(\\d{2})(\\d{4})",
            metadata->intl_number_format(3).pattern());
  EXPECT_EQ("$1 $2 $3 $4", metadata->intl_number_format(3).format());
}
// Spot-checks the non-geographical metadata for the international toll-free
// calling code 800 (region id "001").
TEST_F(PhoneNumberUtilTest, GetInstanceLoadInternationalTollFreeMetadata) {
  const PhoneMetadata* metadata = GetMetadataForNonGeographicalRegion(800);
  EXPECT_FALSE(metadata == NULL);
  EXPECT_EQ("001", metadata->id());
  EXPECT_EQ(800, metadata->country_code());
  EXPECT_EQ("$1 $2", metadata->number_format(0).format());
  EXPECT_EQ("(\\d{4})(\\d{4})", metadata->number_format(0).pattern());
  EXPECT_EQ(0, metadata->general_desc().possible_length_local_only_size());
  EXPECT_EQ(1, metadata->general_desc().possible_length_size());
  EXPECT_EQ("12345678", metadata->toll_free().example_number());
}
// Verifies GetNationalSignificantNumber for a plain number, an Italian
// number with a leading zero, and a non-geographical (800) number.
TEST_F(PhoneNumberUtilTest, GetNationalSignificantNumber) {
  PhoneNumber number;
  number.set_country_code(1);
  number.set_national_number(uint64{6502530000});
  string national_significant_number;
  phone_util_.GetNationalSignificantNumber(number,
                                           &national_significant_number);
  EXPECT_EQ("6502530000", national_significant_number);

  // An Italian mobile number.
  national_significant_number.clear();
  number.set_country_code(39);
  number.set_national_number(uint64{312345678});
  phone_util_.GetNationalSignificantNumber(number,
                                           &national_significant_number);
  EXPECT_EQ("312345678", national_significant_number);

  // An Italian fixed line number: the leading zero must be preserved.
  national_significant_number.clear();
  number.set_country_code(39);
  number.set_national_number(uint64{236618300});
  number.set_italian_leading_zero(true);
  phone_util_.GetNationalSignificantNumber(number,
                                           &national_significant_number);
  EXPECT_EQ("0236618300", national_significant_number);

  national_significant_number.clear();
  number.Clear();
  number.set_country_code(800);
  number.set_national_number(uint64{12345678});
  phone_util_.GetNationalSignificantNumber(number,
                                           &national_significant_number);
  EXPECT_EQ("12345678", national_significant_number);
}
// Verifies that number_of_leading_zeros is honored, and that a (corrupt)
// negative count is clamped so no zeros are prepended.
TEST_F(PhoneNumberUtilTest, GetNationalSignificantNumber_ManyLeadingZeros) {
  PhoneNumber number;
  number.set_country_code(1);
  number.set_national_number(uint64{650});
  number.set_italian_leading_zero(true);
  number.set_number_of_leading_zeros(2);
  string national_significant_number;
  phone_util_.GetNationalSignificantNumber(number,
                                           &national_significant_number);
  EXPECT_EQ("00650", national_significant_number);

  // Set a bogus value: we shouldn't crash, and shouldn't add any zeros.
  number.set_number_of_leading_zeros(-3);
  national_significant_number.clear();
  phone_util_.GetNationalSignificantNumber(number,
                                           &national_significant_number);
  EXPECT_EQ("650", national_significant_number);
}
// Verifies GetExampleNumber / GetExampleNumberForType across regions and
// types, including types with no example data and invalid regions.
TEST_F(PhoneNumberUtilTest, GetExampleNumber) {
  PhoneNumber de_number;
  de_number.set_country_code(49);
  de_number.set_national_number(uint64{30123456});
  PhoneNumber test_number;
  bool success = phone_util_.GetExampleNumber(RegionCode::DE(), &test_number);
  EXPECT_TRUE(success);
  EXPECT_EQ(de_number, test_number);

  success = phone_util_.GetExampleNumberForType(
      RegionCode::DE(), PhoneNumberUtil::FIXED_LINE, &test_number);
  EXPECT_TRUE(success);
  EXPECT_EQ(de_number, test_number);

  success = phone_util_.GetExampleNumberForType(
      RegionCode::DE(), PhoneNumberUtil::FIXED_LINE_OR_MOBILE, &test_number);
  EXPECT_EQ(de_number, test_number);

  success = phone_util_.GetExampleNumberForType(
      RegionCode::DE(), PhoneNumberUtil::MOBILE, &test_number);
  // US voicemail has no example number in the metadata, so this must fail
  // and leave the output number in its default state.
  success = phone_util_.GetExampleNumberForType(
      RegionCode::US(), PhoneNumberUtil::VOICEMAIL, &test_number);
  test_number.Clear();
  EXPECT_FALSE(success);
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);

  success = phone_util_.GetExampleNumberForType(
      RegionCode::US(), PhoneNumberUtil::FIXED_LINE, &test_number);
  EXPECT_TRUE(success);
  EXPECT_NE(PhoneNumber::default_instance(), test_number);
  success = phone_util_.GetExampleNumberForType(
      RegionCode::US(), PhoneNumberUtil::MOBILE, &test_number);
  EXPECT_TRUE(success);
  EXPECT_NE(PhoneNumber::default_instance(), test_number);

  // CS is an invalid (deprecated) region, so no example number is returned.
  test_number.Clear();
  EXPECT_FALSE(phone_util_.GetExampleNumberForType(
      RegionCode::CS(), PhoneNumberUtil::MOBILE, &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);

  // RegionCode 001 is reserved for non-geographical entities and is not
  // accepted by the region-based lookup.
  EXPECT_FALSE(phone_util_.GetExampleNumber(RegionCode::UN001(), &test_number));
}
// Verifies GetInvalidExampleNumber: fails for non-geographical or invalid
// regions, and produces a non-empty US number with country code 1.
TEST_F(PhoneNumberUtilTest, GetInvalidExampleNumber) {
  // RegionCode 001 is reserved for non-geographical entities.
  PhoneNumber test_number;
  EXPECT_FALSE(phone_util_.GetInvalidExampleNumber(RegionCode::UN001(),
                                                   &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // CS is an invalid (deprecated) region code.
  EXPECT_FALSE(phone_util_.GetInvalidExampleNumber(RegionCode::CS(),
                                                   &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);

  EXPECT_TRUE(phone_util_.GetInvalidExampleNumber(RegionCode::US(),
                                                  &test_number));
  // At least the country calling code should be set correctly.
  EXPECT_EQ(1, test_number.country_code());
  EXPECT_NE(0u, test_number.national_number());
}
// Verifies example numbers for the non-geographical entities 800
// (international toll-free) and 979 (universal premium rate).
TEST_F(PhoneNumberUtilTest, GetExampleNumberForNonGeoEntity) {
  PhoneNumber toll_free_number;
  toll_free_number.set_country_code(800);
  toll_free_number.set_national_number(uint64{12345678});
  PhoneNumber test_number;
  bool success =
      phone_util_.GetExampleNumberForNonGeoEntity(800 , &test_number);
  EXPECT_TRUE(success);
  EXPECT_EQ(toll_free_number, test_number);

  PhoneNumber universal_premium_rate;
  universal_premium_rate.set_country_code(979);
  universal_premium_rate.set_national_number(uint64{123456789});
  success = phone_util_.GetExampleNumberForNonGeoEntity(979 , &test_number);
  EXPECT_TRUE(success);
  EXPECT_EQ(universal_premium_rate, test_number);
}
// Verifies that region-free example-number lookup returns some number for
// common types. In the test metadata we know the numbers exist; in
// production metadata no types are guaranteed, so only non-emptiness is
// checked.
TEST_F(PhoneNumberUtilTest, GetExampleNumberWithoutRegion) {
  PhoneNumber test_number;
  bool success = phone_util_.GetExampleNumberForType(
      PhoneNumberUtil::FIXED_LINE,
      &test_number);
  EXPECT_TRUE(success);
  EXPECT_NE(PhoneNumber::default_instance(), test_number);
  test_number.Clear();
  success = phone_util_.GetExampleNumberForType(PhoneNumberUtil::MOBILE,
                                                &test_number);
  EXPECT_TRUE(success);
  EXPECT_NE(PhoneNumber::default_instance(), test_number);
  test_number.Clear();
  success = phone_util_.GetExampleNumberForType(PhoneNumberUtil::PREMIUM_RATE,
                                                &test_number);
  EXPECT_TRUE(success);
  EXPECT_NE(PhoneNumber::default_instance(), test_number);
}
TEST_F(PhoneNumberUtilTest, FormatUSNumber) {
  // Checks NATIONAL / INTERNATIONAL (and one RFC3966) formatting of US
  // numbers, plus the fallback behaviour for a national number of zero.
  PhoneNumber test_number;
  string formatted_number;
  test_number.set_country_code(1);
  test_number.set_national_number(uint64{6502530000});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("650 253 0000", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+1 650 253 0000", formatted_number);
  // 800-prefixed number.
  test_number.set_national_number(uint64{8002530000});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("800 253 0000", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+1 800 253 0000", formatted_number);
  // 900-prefixed number; RFC3966 output uses a "tel:" scheme with hyphens.
  test_number.set_national_number(uint64{9002530000});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("900 253 0000", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+1 900 253 0000", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::RFC3966, &formatted_number);
  EXPECT_EQ("tel:+1-900-253-0000", formatted_number);
  // A national number of 0 is formatted as just its digits...
  test_number.set_national_number(uint64{0});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("0", formatted_number);
  // ...unless raw input is available, in which case the raw input is used.
  test_number.set_raw_input("000-000-0000");
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("000-000-0000", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatBSNumber) {
  // Bahamas numbers share country calling code 1.
  PhoneNumber bs_number;
  string formatted;
  bs_number.set_country_code(1);
  bs_number.set_national_number(uint64{2421234567});
  phone_util_.Format(bs_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("242 123 4567", formatted);
  phone_util_.Format(bs_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+1 242 123 4567", formatted);
  // 800-prefixed number.
  bs_number.set_national_number(uint64{8002530000});
  phone_util_.Format(bs_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("800 253 0000", formatted);
  phone_util_.Format(bs_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+1 800 253 0000", formatted);
  // 900-prefixed number.
  bs_number.set_national_number(uint64{9002530000});
  phone_util_.Format(bs_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("900 253 0000", formatted);
  phone_util_.Format(bs_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+1 900 253 0000", formatted);
}
TEST_F(PhoneNumberUtilTest, FormatGBNumber) {
  // The GB national prefix "0" appears (inside parentheses) in NATIONAL
  // output but is dropped in INTERNATIONAL output.
  PhoneNumber gb_number;
  string formatted;
  gb_number.set_country_code(44);
  gb_number.set_national_number(uint64{2087389353});
  phone_util_.Format(gb_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("(020) 8738 9353", formatted);
  phone_util_.Format(gb_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+44 20 8738 9353", formatted);
  gb_number.set_national_number(uint64{7912345678});
  phone_util_.Format(gb_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("(07912) 345 678", formatted);
  phone_util_.Format(gb_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+44 7912 345 678", formatted);
}
TEST_F(PhoneNumberUtilTest, FormatDENumber) {
  // Checks formatting of German numbers of several different lengths, which
  // exercise different formatting patterns in the test metadata.
  PhoneNumber test_number;
  string formatted_number;
  test_number.set_country_code(49);
  test_number.set_national_number(uint64{301234});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("030/1234", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+49 30/1234", formatted_number);
  // RFC3966 replaces the pattern's "/" separator with hyphens.
  phone_util_.Format(test_number, PhoneNumberUtil::RFC3966, &formatted_number);
  EXPECT_EQ("tel:+49-30-1234", formatted_number);
  test_number.set_national_number(uint64{291123});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("0291 123", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+49 291 123", formatted_number);
  test_number.set_national_number(uint64{29112345678});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("0291 12345678", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+49 291 12345678", formatted_number);
  test_number.set_national_number(uint64{9123123});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("09123 123", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+49 9123 123", formatted_number);
  test_number.set_national_number(uint64{80212345});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("08021 2345", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+49 1234", formatted_number) /* see below */;
  // A short number that matches no pattern is emitted digits-as-is
  // nationally, and with just the country code internationally.
  test_number.set_national_number(uint64{1234});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("1234", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+49 1234", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatITNumber) {
  PhoneNumber it_number;
  string formatted;
  // In Italy a leading zero is significant; it is represented via the
  // italian_leading_zero field rather than in the national number itself.
  it_number.set_country_code(39);
  it_number.set_national_number(uint64{236618300});
  it_number.set_italian_leading_zero(true);
  phone_util_.Format(it_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("02 3661 8300", formatted);
  phone_util_.Format(it_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+39 02 3661 8300", formatted);
  phone_util_.Format(it_number, PhoneNumberUtil::E164, &formatted);
  EXPECT_EQ("+390236618300", formatted);
  // Number without a leading zero.
  it_number.set_national_number(uint64{345678901});
  it_number.set_italian_leading_zero(false);
  phone_util_.Format(it_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("345 678 901", formatted);
  phone_util_.Format(it_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+39 345 678 901", formatted);
  phone_util_.Format(it_number, PhoneNumberUtil::E164, &formatted);
  EXPECT_EQ("+39345678901", formatted);
}
TEST_F(PhoneNumberUtilTest, FormatAUNumber) {
  PhoneNumber au_number;
  string formatted;
  au_number.set_country_code(61);
  au_number.set_national_number(uint64{236618300});
  phone_util_.Format(au_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("02 3661 8300", formatted);
  phone_util_.Format(au_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+61 2 3661 8300", formatted);
  phone_util_.Format(au_number, PhoneNumberUtil::E164, &formatted);
  EXPECT_EQ("+61236618300", formatted);
  // 1800-prefixed number: formatted without a national prefix.
  au_number.set_national_number(uint64{1800123456});
  phone_util_.Format(au_number, PhoneNumberUtil::NATIONAL, &formatted);
  EXPECT_EQ("1800 123 456", formatted);
  phone_util_.Format(au_number, PhoneNumberUtil::INTERNATIONAL, &formatted);
  EXPECT_EQ("+61 1800 123 456", formatted);
  phone_util_.Format(au_number, PhoneNumberUtil::E164, &formatted);
  EXPECT_EQ("+611800123456", formatted);
}
TEST_F(PhoneNumberUtilTest, FormatARNumber) {
  // Checks formatting of Argentinian numbers.
  PhoneNumber test_number;
  string formatted_number;
  test_number.set_country_code(54);
  test_number.set_national_number(uint64{1187654321});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("011 8765-4321", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+54 11 8765-4321", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::E164,
                     &formatted_number);
  EXPECT_EQ("+541187654321", formatted_number);
  // Number with a leading 9 after the country code: nationally the "15" is
  // inserted after the area code, internationally the 9 is kept.
  test_number.set_national_number(uint64{91187654321});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("011 15 8765-4321", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+54 9 11 8765 4321", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::E164,
                     &formatted_number);
  EXPECT_EQ("+5491187654321", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatMXNumber) {
  // Checks NATIONAL / INTERNATIONAL / E164 formatting of Mexican numbers,
  // including numbers with a leading 1 after the country code, which are
  // dialled nationally with the "045" prefix.
  PhoneNumber test_number;
  string formatted_number;
  test_number.set_country_code(52);
  test_number.set_national_number(uint64{12345678900});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("045 234 567 8900", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+52 1 234 567 8900", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::E164,
                     &formatted_number);
  EXPECT_EQ("+5212345678900", formatted_number);
  test_number.set_national_number(uint64{15512345678});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("045 55 1234 5678", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+52 1 55 1234 5678", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::E164,
                     &formatted_number);
  EXPECT_EQ("+5215512345678", formatted_number);
  // Numbers without the leading 1 are dialled nationally with "01".
  // Note: uint64{...} is used below (instead of the LL suffix the file used
  // here previously) for consistency with the other literals in this file.
  test_number.set_national_number(uint64{3312345678});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("01 33 1234 5678", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+52 33 1234 5678", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::E164,
                     &formatted_number);
  EXPECT_EQ("+523312345678", formatted_number);
  test_number.set_national_number(uint64{8211234567});
  phone_util_.Format(test_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("01 821 123 4567", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+52 821 123 4567", formatted_number);
  phone_util_.Format(test_number, PhoneNumberUtil::E164,
                     &formatted_number);
  EXPECT_EQ("+528211234567", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatOutOfCountryCallingNumber) {
  // Checks that the correct international dialling prefix for the calling
  // region is prepended, and that same-region calls use national format.
  PhoneNumber test_number;
  string formatted_number;
  // US number dialled from DE: DE's "00" prefix is used.
  test_number.set_country_code(1);
  test_number.set_national_number(uint64{9002530000});
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::DE(),
                                              &formatted_number);
  EXPECT_EQ("00 1 900 253 0000", formatted_number);
  // US number dialled from BS (same country calling code 1): no prefix.
  test_number.set_national_number(uint64{6502530000});
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::BS(),
                                              &formatted_number);
  EXPECT_EQ("1 650 253 0000", formatted_number);
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::PL(),
                                              &formatted_number);
  EXPECT_EQ("00 1 650 253 0000", formatted_number);
  // GB number dialled from the US uses the US "011" prefix.
  test_number.set_country_code(44);
  test_number.set_national_number(uint64{7912345678});
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::US(),
                                              &formatted_number);
  EXPECT_EQ("011 44 7912 345 678", formatted_number);
  test_number.set_country_code(49);
  test_number.set_national_number(uint64{1234});
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::GB(),
                                              &formatted_number);
  EXPECT_EQ("00 49 1234", formatted_number);
  // Calling a DE number from DE itself produces the national form.
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::DE(),
                                              &formatted_number);
  EXPECT_EQ("1234", formatted_number);
  test_number.set_country_code(39);
  test_number.set_national_number(uint64{236618300});
  test_number.set_italian_leading_zero(true);
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::US(),
                                              &formatted_number);
  EXPECT_EQ("011 39 02 3661 8300", formatted_number);
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::IT(),
                                              &formatted_number);
  EXPECT_EQ("02 3661 8300", formatted_number);
  // SG has no single international prefix in the test metadata, so the "+"
  // form is produced instead.
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::SG(),
                                              &formatted_number);
  EXPECT_EQ("+39 02 3661 8300", formatted_number);
  // SG-to-SG call: national format.
  test_number.set_country_code(65);
  test_number.set_national_number(uint64{94777892});
  test_number.set_italian_leading_zero(false);
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::SG(),
                                              &formatted_number);
  EXPECT_EQ("9477 7892", formatted_number);
  // Non-geographical toll-free number dialled from the US.
  test_number.set_country_code(800);
  test_number.set_national_number(uint64{12345678});
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::US(),
                                              &formatted_number);
  EXPECT_EQ("011 800 1234 5678", formatted_number);
  // AR mobile number, with and without an extension, from several regions.
  test_number.set_country_code(54);
  test_number.set_national_number(uint64{91187654321});
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::US(),
                                              &formatted_number);
  EXPECT_EQ("011 54 9 11 8765 4321", formatted_number);
  test_number.set_extension("1234");
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::US(),
                                              &formatted_number);
  EXPECT_EQ("011 54 9 11 8765 4321 ext. 1234", formatted_number);
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::AU(),
                                              &formatted_number);
  EXPECT_EQ("0011 54 9 11 8765 4321 ext. 1234", formatted_number);
  // From AR itself: national format including the extension.
  phone_util_.FormatOutOfCountryCallingNumber(test_number, RegionCode::AR(),
                                              &formatted_number);
  EXPECT_EQ("011 15 8765-4321 ext. 1234", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatOutOfCountryWithInvalidRegion) {
  PhoneNumber us_number;
  string formatted;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{6502530000});
  // Calling from a region without metadata falls back to the international
  // "+"-prefixed format.
  phone_util_.FormatOutOfCountryCallingNumber(us_number, RegionCode::AQ(),
                                              &formatted);
  EXPECT_EQ("+1 650 253 0000", formatted);
  // Same for the non-geographical "world" region.
  phone_util_.FormatOutOfCountryCallingNumber(us_number, RegionCode::UN001(),
                                              &formatted);
  EXPECT_EQ("+1 650 253 0000", formatted);
}
TEST_F(PhoneNumberUtilTest, FormatOutOfCountryWithPreferredIntlPrefix) {
  PhoneNumber it_number;
  string formatted;
  it_number.set_country_code(39);
  it_number.set_national_number(uint64{236618300});
  it_number.set_italian_leading_zero(true);
  // From AU the usual "0011" international prefix is used.
  phone_util_.FormatOutOfCountryCallingNumber(it_number, RegionCode::AU(),
                                              &formatted);
  EXPECT_EQ("0011 39 02 3661 8300", formatted);
  // From UZ the preferred international prefix "8~10" is used instead.
  phone_util_.FormatOutOfCountryCallingNumber(it_number, RegionCode::UZ(),
                                              &formatted);
  EXPECT_EQ("8~10 39 02 3661 8300", formatted);
}
TEST_F(PhoneNumberUtilTest, FormatOutOfCountryKeepingAlphaChars) {
  // Checks that alpha characters from the raw input are preserved (and
  // upper-cased) when formatting for dialling from another country.
  PhoneNumber alpha_numeric_number;
  string formatted_number;
  alpha_numeric_number.set_country_code(1);
  alpha_numeric_number.set_national_number(uint64{8007493524});
  alpha_numeric_number.set_raw_input("1800 six-flag");
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AU(),
                                                  &formatted_number);
  EXPECT_EQ("0011 1 800 SIX-FLAG", formatted_number);
  // Raw input with its own separators keeps those separators.
  formatted_number.clear();
  alpha_numeric_number.set_raw_input("1-800-SIX-flag");
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AU(),
                                                  &formatted_number);
  EXPECT_EQ("0011 1 800-SIX-FLAG", formatted_number);
  // Leading text and a foreign dialling prefix in the raw input are stripped.
  formatted_number.clear();
  alpha_numeric_number.set_raw_input("Call us from UK: 00 1 800 SIX-flag");
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AU(),
                                                  &formatted_number);
  EXPECT_EQ("0011 1 800 SIX-FLAG", formatted_number);
  formatted_number.clear();
  alpha_numeric_number.set_raw_input("800 SIX-flag");
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AU(),
                                                  &formatted_number);
  EXPECT_EQ("0011 1 800 SIX-FLAG", formatted_number);
  // Within the same country-calling-code area no prefix is added.
  formatted_number.clear();
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::US(),
                                                  &formatted_number);
  EXPECT_EQ("1 800 SIX-FLAG", formatted_number);
  formatted_number.clear();
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::BS(),
                                                  &formatted_number);
  EXPECT_EQ("1 800 SIX-FLAG", formatted_number);
  // Without raw input we fall back to ordinary out-of-country formatting.
  alpha_numeric_number.clear_raw_input();
  formatted_number.clear();
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::DE(),
                                                  &formatted_number);
  EXPECT_EQ("00 1 800 749 3524", formatted_number);
  // AU number dialled from AU: national format, keeping alpha chars.
  alpha_numeric_number.set_country_code(61);
  alpha_numeric_number.set_national_number(uint64{827493524});
  alpha_numeric_number.set_raw_input("+61 82749-FLAG");
  formatted_number.clear();
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AU(),
                                                  &formatted_number);
  EXPECT_EQ("082749-FLAG", formatted_number);
  alpha_numeric_number.set_raw_input("082749-FLAG");
  formatted_number.clear();
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AU(),
                                                  &formatted_number);
  EXPECT_EQ("082749-FLAG", formatted_number);
  alpha_numeric_number.set_national_number(uint64{18007493524});
  alpha_numeric_number.set_raw_input("1-800-SIX-flag");
  formatted_number.clear();
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AU(),
                                                  &formatted_number);
  // The leading 1 from the raw input is not treated as a national prefix.
  EXPECT_EQ("1-800-SIX-FLAG", formatted_number);
  formatted_number.clear();
  alpha_numeric_number.set_national_number(uint64{1800749352});
  phone_util_.FormatOutOfCountryCallingNumber(alpha_numeric_number,
                                              RegionCode::AU(),
                                              &formatted_number);
  EXPECT_EQ("1800 749 352", formatted_number);
  // From SG (no single international prefix) the "+" form is used.
  formatted_number.clear();
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::SG(),
                                                  &formatted_number);
  EXPECT_EQ("+61 1-800-SIX-FLAG", formatted_number);
  // Same for a region with no metadata.
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AQ(),
                                                  &formatted_number);
  EXPECT_EQ("+61 1-800-SIX-FLAG", formatted_number);
  // Invalid country code 0: the raw input is returned untouched.
  formatted_number.clear();
  alpha_numeric_number.set_country_code(0);
  alpha_numeric_number.set_national_number(uint64{18007493524});
  alpha_numeric_number.set_raw_input("1-800-SIX-flag");
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::DE(),
                                                  &formatted_number);
  EXPECT_EQ("1-800-SIX-flag", formatted_number);
  // An invalid US-like number still gets the calling region's prefix.
  formatted_number.clear();
  alpha_numeric_number.set_country_code(1);
  alpha_numeric_number.set_national_number(uint64{80749});
  alpha_numeric_number.set_raw_input("180-SIX");
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::DE(),
                                                  &formatted_number);
  EXPECT_EQ("00 1 180-SIX", formatted_number);
  phone_util_.FormatOutOfCountryKeepingAlphaChars(alpha_numeric_number,
                                                  RegionCode::AQ(),
                                                  &formatted_number);
  EXPECT_EQ("+1 180-SIX", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatWithCarrierCode) {
  // In Argentina the carrier code is inserted into the national format.
  PhoneNumber ar_number;
  string formatted_number;
  ar_number.set_country_code(54);
  ar_number.set_national_number(uint64{91234125678});
  phone_util_.Format(ar_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("01234 12-5678", formatted_number);
  phone_util_.FormatNationalNumberWithCarrierCode(ar_number, "15",
                                                  &formatted_number);
  EXPECT_EQ("01234 15 12-5678", formatted_number);
  // An empty carrier code gives plain national format.
  phone_util_.FormatNationalNumberWithCarrierCode(ar_number, "",
                                                  &formatted_number);
  EXPECT_EQ("01234 12-5678", formatted_number);
  // The carrier code never appears in E164 output.
  phone_util_.Format(ar_number, PhoneNumberUtil::E164, &formatted_number);
  EXPECT_EQ("+5491234125678", formatted_number);
  // US formats have no carrier-code slot, so the code is ignored.
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{4241231234});
  phone_util_.Format(us_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("424 123 1234", formatted_number);
  phone_util_.FormatNationalNumberWithCarrierCode(us_number, "15",
                                                  &formatted_number);
  EXPECT_EQ("424 123 1234", formatted_number);
  // An invalid country code falls back to the raw national number digits.
  PhoneNumber invalid_number;
  invalid_number.set_country_code(kInvalidCountryCode);
  invalid_number.set_national_number(uint64{12345});
  phone_util_.FormatNationalNumberWithCarrierCode(invalid_number, "89",
                                                  &formatted_number);
  EXPECT_EQ("12345", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatWithPreferredCarrierCode) {
  // The preferred_domestic_carrier_code stored on the number takes priority
  // over the fallback carrier code passed in.
  PhoneNumber ar_number;
  string formatted_number;
  ar_number.set_country_code(54);
  ar_number.set_national_number(uint64{91234125678});
  // No preferred code set yet: the fallback is used.
  phone_util_.FormatNationalNumberWithPreferredCarrierCode(ar_number, "15",
                                                           &formatted_number);
  EXPECT_EQ("01234 15 12-5678", formatted_number);
  phone_util_.FormatNationalNumberWithPreferredCarrierCode(ar_number, "",
                                                           &formatted_number);
  EXPECT_EQ("01234 12-5678", formatted_number);
  ar_number.set_preferred_domestic_carrier_code("19");
  // Plain Format() never inserts a carrier code.
  phone_util_.Format(ar_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("01234 12-5678", formatted_number);
  // The preferred code ("19") wins over the fallback ("15").
  phone_util_.FormatNationalNumberWithPreferredCarrierCode(ar_number, "15",
                                                           &formatted_number);
  EXPECT_EQ("01234 19 12-5678", formatted_number);
  phone_util_.FormatNationalNumberWithPreferredCarrierCode(ar_number, "",
                                                           &formatted_number);
  EXPECT_EQ("01234 19 12-5678", formatted_number);
  // A whitespace-only preferred code suppresses the carrier code entirely.
  ar_number.set_preferred_domestic_carrier_code(" ");
  phone_util_.FormatNationalNumberWithPreferredCarrierCode(ar_number, "15",
                                                           &formatted_number);
  EXPECT_EQ("01234 12-5678", formatted_number);
  // An empty (but present) preferred code behaves like no preferred code.
  ar_number.set_preferred_domestic_carrier_code("");
  phone_util_.FormatNationalNumberWithPreferredCarrierCode(ar_number, "15",
                                                           &formatted_number);
  EXPECT_EQ("01234 15 12-5678", formatted_number);
  // US formats have no carrier-code slot, so nothing is inserted.
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{4241231234});
  us_number.set_preferred_domestic_carrier_code("99");
  phone_util_.Format(us_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("424 123 1234", formatted_number);
  phone_util_.FormatNationalNumberWithPreferredCarrierCode(us_number, "15",
                                                           &formatted_number);
  EXPECT_EQ("424 123 1234", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatNumberForMobileDialing) {
  // Checks the region-aware mobile-dialling format: national form when
  // dialling within the region, "+"-prefixed international form otherwise,
  // and the empty string when the number cannot be dialled from there.
  PhoneNumber test_number;
  string formatted_number;
  // CO fixed-line number dialled within CO.
  test_number.set_country_code(57);
  test_number.set_national_number(uint64{6012345678});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::CO(), false,
      &formatted_number);
  EXPECT_EQ("6012345678", formatted_number);
  // DE number: national form at home, international form from CH.
  test_number.set_country_code(49);
  test_number.set_national_number(uint64{30123456});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::DE(), false,
      &formatted_number);
  EXPECT_EQ("030123456", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::CH(), false,
      &formatted_number);
  EXPECT_EQ("+4930123456", formatted_number);
  // Extensions are always stripped for mobile dialling.
  test_number.set_extension("1234");
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::DE(), false,
      &formatted_number);
  EXPECT_EQ("030123456", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::CH(), false,
      &formatted_number);
  EXPECT_EQ("+4930123456", formatted_number);
  // US 800 number: diallable only from within the US; empty from CN.
  test_number.set_country_code(1);
  test_number.clear_extension();
  test_number.set_national_number(uint64{8002530000});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), true,
      &formatted_number);
  EXPECT_EQ("800 253 0000", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::CN(), true, &formatted_number);
  EXPECT_EQ("", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false,
      &formatted_number);
  EXPECT_EQ("8002530000", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::CN(), false, &formatted_number);
  EXPECT_EQ("", formatted_number);
  // Ordinary US geographic number: "+" form even at home; with_formatting
  // controls whether spaces are kept.
  test_number.set_national_number(uint64{6502530000});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), true, &formatted_number);
  EXPECT_EQ("+1 650 253 0000", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false, &formatted_number);
  EXPECT_EQ("+16502530000", formatted_number);
  test_number.set_extension("1234");
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), true, &formatted_number);
  EXPECT_EQ("+1 650 253 0000", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false, &formatted_number);
  EXPECT_EQ("+16502530000", formatted_number);
  // An invalid (too-long) US number is still emitted in "+" form.
  test_number.set_national_number(uint64{65025300001});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), true, &formatted_number);
  EXPECT_EQ("+1 65025300001", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false, &formatted_number);
  EXPECT_EQ("+165025300001", formatted_number);
  // JP star-prefixed short number is preserved verbatim.
  test_number.set_country_code(81);
  test_number.set_national_number(uint64{2345});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::JP(), true, &formatted_number);
  EXPECT_EQ("*2345", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::JP(), false, &formatted_number);
  EXPECT_EQ("*2345", formatted_number);
  // International toll-free (800) number dialled from JP.
  test_number.set_country_code(800);
  test_number.set_national_number(uint64{12345678});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::JP(), false, &formatted_number);
  EXPECT_EQ("+80012345678", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::JP(), true, &formatted_number);
  EXPECT_EQ("+800 1234 5678", formatted_number);
  // UAE UAN number: "+" form from abroad, national digits at home.
  test_number.set_country_code(971);
  test_number.set_national_number(uint64{600123456});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::JP(), false, &formatted_number);
  EXPECT_EQ("+971600123456", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::AE(), true, &formatted_number);
  EXPECT_EQ("600123456", formatted_number);
  // MX fixed-line and mobile numbers: "+" form from home and from the US.
  test_number.set_country_code(52);
  test_number.set_national_number(uint64{3312345678});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::MX(), false, &formatted_number);
  EXPECT_EQ("+523312345678", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false, &formatted_number);
  EXPECT_EQ("+523312345678", formatted_number);
  // UZ fixed-line and mobile numbers behave the same way.
  test_number.set_country_code(998);
  test_number.set_national_number(uint64{612201234});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::UZ(), false, &formatted_number);
  EXPECT_EQ("+998612201234", formatted_number);
  test_number.set_country_code(998);
  test_number.set_national_number(uint64{950123456});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::UZ(), false, &formatted_number);
  EXPECT_EQ("+998950123456", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false, &formatted_number);
  EXPECT_EQ("+998950123456", formatted_number);
  // Non-geographical numbers keep the "+" form from any region.
  test_number.set_country_code(800);
  test_number.set_national_number(uint64{12345678});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false, &formatted_number);
  EXPECT_EQ("+80012345678", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::UN001(), false, &formatted_number);
  EXPECT_EQ("+80012345678", formatted_number);
  // DE short number: diallable only from within DE.
  test_number.set_country_code(49);
  test_number.set_national_number(123L);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::DE(), false, &formatted_number);
  EXPECT_EQ("123", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::IT(), false, &formatted_number);
  EXPECT_EQ("", formatted_number);
  // NANPA number from US/CA/BR: full "+" form everywhere.
  test_number.set_country_code(1);
  test_number.set_national_number(uint64{6502530000});
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false, &formatted_number);
  EXPECT_EQ("+16502530000", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::CA(), false, &formatted_number);
  EXPECT_EQ("+16502530000", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::BR(), false, &formatted_number);
  EXPECT_EQ("+16502530000", formatted_number);
  // US emergency number 911: diallable only from the US itself.
  test_number.set_national_number(911L);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::US(), false, &formatted_number);
  EXPECT_EQ("911", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::CA(), false, &formatted_number);
  EXPECT_EQ("", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::BR(), false, &formatted_number);
  EXPECT_EQ("", formatted_number);
  // AU "000": a zero national number with two leading zeros; only diallable
  // from within AU.
  test_number.set_country_code(61);
  test_number.set_national_number(0L);
  test_number.set_italian_leading_zero(true);
  test_number.set_number_of_leading_zeros(2);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::AU(), false, &formatted_number);
  EXPECT_EQ("000", formatted_number);
  phone_util_.FormatNumberForMobileDialing(
      test_number, RegionCode::NZ(), false, &formatted_number);
  EXPECT_EQ("", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatByPattern) {
  // Checks formatting against caller-supplied NumberFormat patterns instead
  // of the metadata's own patterns. Note that number_format is mutated in
  // place between sections, so statement order matters throughout.
  PhoneNumber test_number;
  string formatted_number;
  test_number.set_country_code(1);
  test_number.set_national_number(uint64{6502530000});
  RepeatedPtrField<NumberFormat> number_formats;
  NumberFormat* number_format = number_formats.Add();
  number_format->set_pattern("(\\d{3})(\\d{3})(\\d{4})");
  number_format->set_format("($1) $2-$3");
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::NATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("(650) 253-0000", formatted_number);
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::INTERNATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("+1 (650) 253-0000", formatted_number);
  // RFC3966 output normalizes the custom separators to hyphens.
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::RFC3966,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("tel:+1-650-253-0000", formatted_number);
  // $NP is replaced by the national prefix and $FG by the first group; this
  // rule applies only to NATIONAL formatting.
  number_format->set_national_prefix_formatting_rule("$NP ($FG)");
  number_format->set_format("$1 $2-$3");
  test_number.set_country_code(1);
  test_number.set_national_number(uint64{4168819999});
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::NATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("1 (416) 881-9999", formatted_number);
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::INTERNATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("+1 416 881-9999", formatted_number);
  // Italian number: the leading zero is included when formatting.
  test_number.set_country_code(39);
  test_number.set_national_number(uint64{236618300});
  test_number.set_italian_leading_zero(true);
  number_format->set_pattern("(\\d{2})(\\d{5})(\\d{3})");
  number_format->set_format("$1-$2 $3");
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::NATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("02-36618 300", formatted_number);
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::INTERNATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("+39 02-36618 300", formatted_number);
  // GB number with different national-prefix formatting rules.
  test_number.set_country_code(44);
  test_number.set_national_number(uint64{2012345678});
  test_number.set_italian_leading_zero(false);
  number_format->set_national_prefix_formatting_rule("$NP$FG")
;
  number_format->set_pattern("(\\d{2})(\\d{4})(\\d{4})");
  number_format->set_format("$1 $2 $3");
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::NATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("020 1234 5678", formatted_number);
  number_format->set_national_prefix_formatting_rule("($NP$FG)");
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::NATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("(020) 1234 5678", formatted_number);
  // With no rule, the national prefix is omitted.
  number_format->set_national_prefix_formatting_rule("");
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::NATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("20 1234 5678", formatted_number);
  number_format->set_national_prefix_formatting_rule("");
  phone_util_.FormatByPattern(test_number, PhoneNumberUtil::INTERNATIONAL,
                              number_formats,
                              &formatted_number);
  EXPECT_EQ("+44 20 1234 5678", formatted_number);
}
TEST_F(PhoneNumberUtilTest, FormatE164Number) {
  PhoneNumber number;
  string formatted;
  // US number.
  number.set_country_code(1);
  number.set_national_number(uint64{6502530000});
  phone_util_.Format(number, PhoneNumberUtil::E164, &formatted);
  EXPECT_EQ("+16502530000", formatted);
  // DE number.
  number.set_country_code(49);
  number.set_national_number(uint64{301234});
  phone_util_.Format(number, PhoneNumberUtil::E164, &formatted);
  EXPECT_EQ("+49301234", formatted);
  // Non-geographical number (country calling code 800).
  number.set_country_code(800);
  number.set_national_number(uint64{12345678});
  phone_util_.Format(number, PhoneNumberUtil::E164, &formatted);
  EXPECT_EQ("+80012345678", formatted);
}
// Verifies that a number's extension is appended to the formatted number,
// using the prefix appropriate to the format/region.
TEST_F(PhoneNumberUtilTest, FormatNumberWithExtension) {
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(uint64{33316005});
  nz_number.set_extension("1234");
  string formatted_number;
  // NZ national formatting appends " ext. " before the extension.
  phone_util_.Format(nz_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("03-331 6005 ext. 1234", formatted_number);
  // RFC 3966 formatting always uses ";ext=".
  phone_util_.Format(nz_number, PhoneNumberUtil::RFC3966, &formatted_number);
  EXPECT_EQ("tel:+64-3-331-6005;ext=1234", formatted_number);
  // The US case expects a different extension prefix (" extn. ").
  PhoneNumber us_number_with_extension;
  us_number_with_extension.set_country_code(1);
  us_number_with_extension.set_national_number(uint64{6502530000});
  us_number_with_extension.set_extension("4567");
  phone_util_.Format(us_number_with_extension,
                     PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("650 253 0000 extn. 4567", formatted_number);
}
// Verifies GetLengthOfGeographicalAreaCode: the expected value is the number
// of leading national-number digits treated as the geographical area code,
// or 0 when the number has none (mobile, toll-free, invalid, etc.).
TEST_F(PhoneNumberUtilTest, GetLengthOfGeographicalAreaCode) {
  PhoneNumber number;
  // US geographical number with a 3-digit area code.
  number.set_country_code(1);
  number.set_national_number(uint64{6502530000});
  EXPECT_EQ(3, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // US 800-prefixed number: no area code expected.
  number.set_country_code(1);
  number.set_national_number(uint64{8002530000});
  EXPECT_EQ(0, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // Too-short US number: no area code expected.
  number.set_country_code(1);
  number.set_national_number(uint64{650253000});
  EXPECT_EQ(0, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // GB geographical number: 2-digit area code expected.
  number.set_country_code(44);
  number.set_national_number(uint64{2070313000});
  EXPECT_EQ(2, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // GB mobile: no area code expected.
  number.set_country_code(44);
  number.set_national_number(uint64{7912345678});
  EXPECT_EQ(0, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // AR numbers: 2-digit and 3-digit area codes expected.
  number.set_country_code(54);
  number.set_national_number(uint64{1155303000});
  EXPECT_EQ(2, phone_util_.GetLengthOfGeographicalAreaCode(number));
  number.set_country_code(54);
  number.set_national_number(uint64{91187654321});
  EXPECT_EQ(3, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // AU number: 1-digit area code expected.
  number.set_country_code(61);
  number.set_national_number(uint64{293744000});
  EXPECT_EQ(1, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // MX number: 2-digit area code expected.
  // NOTE: spelled uint64{...} (not uint64_t{...}) for consistency with the
  // rest of this file.
  number.set_country_code(52);
  number.set_national_number(uint64{3312345678});
  EXPECT_EQ(2, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // IT number with an italian leading zero: 2-digit area code expected.
  number.set_country_code(39);
  number.set_national_number(uint64{236618300});
  number.set_italian_leading_zero(true);
  EXPECT_EQ(2, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // SG number: no area code expected.
  number.set_country_code(65);
  number.set_national_number(uint64{65218000});
  number.set_italian_leading_zero(false);
  EXPECT_EQ(0, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // Non-geographical calling code 800: no area code expected.
  number.set_country_code(800);
  number.set_national_number(uint64{12345678});
  EXPECT_EQ(0, phone_util_.GetLengthOfGeographicalAreaCode(number));
  // CN mobile: no area code expected.
  PhoneNumber cn_mobile;
  cn_mobile.set_country_code(86);
  cn_mobile.set_national_number(uint64{18912341234});
  EXPECT_EQ(0, phone_util_.GetLengthOfGeographicalAreaCode(cn_mobile));
}
// Verifies GetLengthOfNationalDestinationCode: the expected value is the
// number of digits forming the national destination code, or 0 when none can
// be determined.
TEST_F(PhoneNumberUtilTest, GetLengthOfNationalDestinationCode) {
  PhoneNumber number;
  // US geographical number: 3-digit NDC expected.
  number.set_country_code(1);
  number.set_national_number(uint64{6502530000});
  EXPECT_EQ(3, phone_util_.GetLengthOfNationalDestinationCode(number));
  // US 800-prefixed number: unlike the area-code test, a 3-digit NDC is
  // still expected.
  number.set_country_code(1);
  number.set_national_number(uint64{8002530000});
  EXPECT_EQ(3, phone_util_.GetLengthOfNationalDestinationCode(number));
  // GB geographical number: 2-digit NDC expected.
  number.set_country_code(44);
  number.set_national_number(uint64{2070313000});
  EXPECT_EQ(2, phone_util_.GetLengthOfNationalDestinationCode(number));
  // GB mobile: 4-digit NDC expected (mobiles have one even without an area
  // code).
  number.set_country_code(44);
  number.set_national_number(uint64{7912345678});
  EXPECT_EQ(4, phone_util_.GetLengthOfNationalDestinationCode(number));
  // AR numbers.
  number.set_country_code(54);
  number.set_national_number(uint64{1155303000});
  EXPECT_EQ(2, phone_util_.GetLengthOfNationalDestinationCode(number));
  number.set_country_code(54);
  number.set_national_number(uint64{91187654321});
  EXPECT_EQ(3, phone_util_.GetLengthOfNationalDestinationCode(number));
  // AU number.
  number.set_country_code(61);
  number.set_national_number(uint64{293744000});
  EXPECT_EQ(1, phone_util_.GetLengthOfNationalDestinationCode(number));
  // SG number.
  number.set_country_code(65);
  number.set_national_number(uint64{65218000});
  EXPECT_EQ(4, phone_util_.GetLengthOfNationalDestinationCode(number));
  // Invalid (too-short) US number: no NDC.
  number.set_country_code(1);
  number.set_national_number(uint64{650253000});
  EXPECT_EQ(0, phone_util_.GetLengthOfNationalDestinationCode(number));
  // Unrecognised country calling code: no NDC.
  number.set_country_code(123);
  number.set_national_number(uint64{650253000});
  EXPECT_EQ(0, phone_util_.GetLengthOfNationalDestinationCode(number));
  // AD number: no NDC.
  number.set_country_code(376);
  number.set_national_number(uint64{12345});
  EXPECT_EQ(0, phone_util_.GetLengthOfNationalDestinationCode(number));
  // Setting an extension must not change the result.
  number.set_country_code(376);
  number.set_national_number(uint64{12345});
  number.set_extension("321");
  EXPECT_EQ(0, phone_util_.GetLengthOfNationalDestinationCode(number));
  // Clear() drops the extension before the next case.
  number.Clear();
  number.set_country_code(800);
  number.set_national_number(uint64{12345678});
  EXPECT_EQ(4, phone_util_.GetLengthOfNationalDestinationCode(number));
  // CN mobile: 3-digit NDC expected.
  PhoneNumber cn_mobile;
  cn_mobile.set_country_code(86);
  cn_mobile.set_national_number(uint64{18912341234});
  EXPECT_EQ(3, phone_util_.GetLengthOfNationalDestinationCode(cn_mobile));
}
// Verifies GetCountryMobileToken: Argentina's calling code yields "9", while
// Sweden's yields an empty string (no mobile token defined).
TEST_F(PhoneNumberUtilTest, GetCountryMobileToken) {
  string token;
  int calling_code = phone_util_.GetCountryCodeForRegion(RegionCode::AR());
  phone_util_.GetCountryMobileToken(calling_code, &token);
  EXPECT_EQ("9", token);
  // No mobile token is defined for Sweden.
  calling_code = phone_util_.GetCountryCodeForRegion(RegionCode::SE());
  phone_util_.GetCountryMobileToken(calling_code, &token);
  EXPECT_EQ("", token);
}
// Verifies ExtractPossibleNumber: leading non-number characters (e.g. "Tel:")
// and trailing punctuation are stripped; alpha characters and a leading "+"
// inside the number are kept.
// NOTE: the escaped byte sequences are UTF-8: \xEF\xBC\x9x are fullwidth
// digits (U+FF10..), \xE2\x80\x8F is the right-to-left mark (U+200F).
TEST_F(PhoneNumberUtilTest, ExtractPossibleNumber) {
  string extracted_number;
  // "Tel:" prefix is removed; dashes within the number are kept.
  ExtractPossibleNumber("Tel:0800-345-600", &extracted_number);
  EXPECT_EQ("0800-345-600", extracted_number);
  // Alpha "numbers" are preserved.
  ExtractPossibleNumber("Tel:0800 FOR PIZZA", &extracted_number);
  EXPECT_EQ("0800 FOR PIZZA", extracted_number);
  // A leading plus survives extraction.
  ExtractPossibleNumber("Tel:+800-345-600", &extracted_number);
  EXPECT_EQ("+800-345-600", extracted_number);
  // Fullwidth digits are accepted unchanged.
  ExtractPossibleNumber("\xEF\xBC\x90\xEF\xBC\x92\xEF\xBC\x93" ,
                        &extracted_number);
  EXPECT_EQ("\xEF\xBC\x90\xEF\xBC\x92\xEF\xBC\x93" ,
            extracted_number);
  // Leading "Num-" is stripped before the fullwidth digits.
  ExtractPossibleNumber("Num-\xEF\xBC\x91\xEF\xBC\x92\xEF\xBC\x93"
                        , &extracted_number);
  EXPECT_EQ("\xEF\xBC\x91\xEF\xBC\x92\xEF\xBC\x93" ,
            extracted_number);
  // No digits at all: the result is empty.
  ExtractPossibleNumber("Num-....", &extracted_number);
  EXPECT_EQ("", extracted_number);
  // Leading "(" is stripped; the unmatched ")" remains.
  ExtractPossibleNumber("(650) 253-0000", &extracted_number);
  EXPECT_EQ("650) 253-0000", extracted_number);
  // Trailing punctuation runs are removed.
  ExtractPossibleNumber("(650) 253-0000..- ..", &extracted_number);
  EXPECT_EQ("650) 253-0000", extracted_number);
  ExtractPossibleNumber("(650) 253-0000.", &extracted_number);
  EXPECT_EQ("650) 253-0000", extracted_number);
  // A trailing right-to-left mark is removed.
  ExtractPossibleNumber("(650) 253-0000\xE2\x80\x8F"
                        , &extracted_number);
  EXPECT_EQ("650) 253-0000", extracted_number);
}
// Verifies IsNANPACountry: true only for regions in the North American
// Numbering Plan; false for other regions and for the unknown/001 regions.
TEST_F(PhoneNumberUtilTest, IsNANPACountry) {
  EXPECT_TRUE(phone_util_.IsNANPACountry(RegionCode::US()));
  EXPECT_TRUE(phone_util_.IsNANPACountry(RegionCode::BS()));
  EXPECT_FALSE(phone_util_.IsNANPACountry(RegionCode::DE()));
  EXPECT_FALSE(phone_util_.IsNANPACountry(RegionCode::GetUnknown()));
  EXPECT_FALSE(phone_util_.IsNANPACountry(RegionCode::UN001()));
}
// Verifies IsValidNumber accepts well-formed numbers from several regions,
// including non-geographical calling codes (800, 979).
TEST_F(PhoneNumberUtilTest, IsValidNumber) {
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{6502530000});
  EXPECT_TRUE(phone_util_.IsValidNumber(us_number));
  // IT number that is only valid with its leading zero.
  PhoneNumber it_number;
  it_number.set_country_code(39);
  it_number.set_national_number(uint64{236618300});
  it_number.set_italian_leading_zero(true);
  EXPECT_TRUE(phone_util_.IsValidNumber(it_number));
  PhoneNumber gb_number;
  gb_number.set_country_code(44);
  gb_number.set_national_number(uint64{7912345678});
  EXPECT_TRUE(phone_util_.IsValidNumber(gb_number));
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(uint64{21387835});
  EXPECT_TRUE(phone_util_.IsValidNumber(nz_number));
  // Numbers with non-geographical calling codes are also valid.
  PhoneNumber intl_toll_free_number;
  intl_toll_free_number.set_country_code(800);
  intl_toll_free_number.set_national_number(uint64{12345678});
  EXPECT_TRUE(phone_util_.IsValidNumber(intl_toll_free_number));
  PhoneNumber universal_premium_rate;
  universal_premium_rate.set_country_code(979);
  universal_premium_rate.set_national_number(uint64{123456789});
  EXPECT_TRUE(phone_util_.IsValidNumber(universal_premium_rate));
}
// Verifies IsValidNumberForRegion, especially for regions that share a
// country calling code (BS/US share 1; RE/YT share 262).
TEST_F(PhoneNumberUtilTest, IsValidForRegion) {
  // A BS number is valid, and valid for BS but not for US.
  PhoneNumber bs_number;
  bs_number.set_country_code(1);
  bs_number.set_national_number(uint64{2423232345});
  EXPECT_TRUE(phone_util_.IsValidNumber(bs_number));
  EXPECT_TRUE(phone_util_.IsValidNumberForRegion(bs_number, RegionCode::BS()));
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(bs_number, RegionCode::US()));
  bs_number.set_national_number(uint64{2421232345});
  EXPECT_FALSE(phone_util_.IsValidNumber(bs_number));
  // RE and YT share country calling code 262.
  PhoneNumber re_number;
  re_number.set_country_code(262);
  re_number.set_national_number(uint64{262123456});
  EXPECT_TRUE(phone_util_.IsValidNumber(re_number));
  EXPECT_TRUE(phone_util_.IsValidNumberForRegion(re_number, RegionCode::RE()));
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(re_number, RegionCode::YT()));
  // This prefix belongs to YT, not RE.
  re_number.set_national_number(uint64{269601234});
  EXPECT_TRUE(phone_util_.IsValidNumberForRegion(re_number, RegionCode::YT()));
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(re_number, RegionCode::RE()));
  // Valid for neither region.
  re_number.set_national_number(uint64{269123456});
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(re_number, RegionCode::YT()));
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(re_number, RegionCode::RE()));
  EXPECT_FALSE(phone_util_.IsValidNumber(re_number));
  // Even though invalid, the region lookup still resolves to YT.
  string region_code;
  phone_util_.GetRegionCodeForNumber(re_number, &region_code);
  EXPECT_EQ(RegionCode::YT(), region_code);
  // This number is valid in both RE and YT.
  re_number.set_national_number(uint64{800123456});
  EXPECT_TRUE(phone_util_.IsValidNumberForRegion(re_number, RegionCode::YT()));
  EXPECT_TRUE(phone_util_.IsValidNumberForRegion(re_number, RegionCode::RE()));
  // Non-geographical numbers are valid only for the 001 region.
  PhoneNumber intl_toll_free_number;
  intl_toll_free_number.set_country_code(800);
  intl_toll_free_number.set_national_number(uint64{12345678});
  EXPECT_TRUE(phone_util_.IsValidNumberForRegion(intl_toll_free_number,
                                                 RegionCode::UN001()));
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(intl_toll_free_number,
                                                  RegionCode::US()));
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(intl_toll_free_number,
                                                  RegionCode::ZZ()));
  // Numbers with an unrecognised country calling code are valid nowhere.
  PhoneNumber invalid_number;
  invalid_number.set_country_code(3923);
  invalid_number.set_national_number(uint64{2366});
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(invalid_number,
                                                  RegionCode::ZZ()));
  invalid_number.set_country_code(3923);
  invalid_number.set_national_number(uint64{2366});
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(invalid_number,
                                                  RegionCode::UN001()));
  invalid_number.set_country_code(0);
  invalid_number.set_national_number(uint64{2366});
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(invalid_number,
                                                  RegionCode::UN001()));
  invalid_number.set_country_code(0);
  EXPECT_FALSE(phone_util_.IsValidNumberForRegion(invalid_number,
                                                  RegionCode::ZZ()));
}
// Verifies IsValidNumber rejects malformed numbers: wrong length for the
// region, unrecognised country calling codes, and over-long 800 numbers.
TEST_F(PhoneNumberUtilTest, IsNotValidNumber) {
  // Too short for US.
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{2530000});
  EXPECT_FALSE(phone_util_.IsValidNumber(us_number));
  // Too long for IT, even with the leading zero.
  PhoneNumber it_number;
  it_number.set_country_code(39);
  it_number.set_national_number(uint64{23661830000});
  it_number.set_italian_leading_zero(true);
  EXPECT_FALSE(phone_util_.IsValidNumber(it_number));
  // Too short for a GB mobile.
  PhoneNumber gb_number;
  gb_number.set_country_code(44);
  gb_number.set_national_number(uint64{791234567});
  EXPECT_FALSE(phone_util_.IsValidNumber(gb_number));
  // Too short for DE.
  PhoneNumber de_number;
  de_number.set_country_code(49);
  de_number.set_national_number(uint64{1234});
  EXPECT_FALSE(phone_util_.IsValidNumber(de_number));
  // Too short for NZ.
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(uint64{3316005});
  EXPECT_FALSE(phone_util_.IsValidNumber(nz_number));
  // Unrecognised country calling codes (3923 and 0).
  PhoneNumber invalid_number;
  invalid_number.set_country_code(3923);
  invalid_number.set_national_number(uint64{2366});
  EXPECT_FALSE(phone_util_.IsValidNumber(invalid_number));
  invalid_number.set_country_code(0);
  EXPECT_FALSE(phone_util_.IsValidNumber(invalid_number));
  // One digit too long for an 800 number.
  PhoneNumber intl_toll_free_number_too_long;
  intl_toll_free_number_too_long.set_country_code(800);
  intl_toll_free_number_too_long.set_national_number(uint64{123456789});
  EXPECT_FALSE(phone_util_.IsValidNumber(intl_toll_free_number_too_long));
}
// Verifies GetRegionCodeForCountryCode: for a shared calling code (1) the
// main region (US) is returned; non-geographical codes map to the 001 region.
TEST_F(PhoneNumberUtilTest, GetRegionCodeForCountryCode) {
  string region;
  phone_util_.GetRegionCodeForCountryCode(1, &region);
  EXPECT_EQ(RegionCode::US(), region);
  phone_util_.GetRegionCodeForCountryCode(44, &region);
  EXPECT_EQ(RegionCode::GB(), region);
  phone_util_.GetRegionCodeForCountryCode(49, &region);
  EXPECT_EQ(RegionCode::DE(), region);
  // Non-geographical calling codes resolve to UN001.
  phone_util_.GetRegionCodeForCountryCode(800, &region);
  EXPECT_EQ(RegionCode::UN001(), region);
  phone_util_.GetRegionCodeForCountryCode(979, &region);
  EXPECT_EQ(RegionCode::UN001(), region);
}
// Verifies GetRegionCodeForNumber resolves a number to its region, including
// disambiguating regions that share calling code 1 (BS vs US) and mapping
// non-geographical numbers to the 001 region.
TEST_F(PhoneNumberUtilTest, GetRegionCodeForNumber) {
  string region_code;
  // 242 prefix resolves to BS even though the calling code is 1.
  PhoneNumber bs_number;
  bs_number.set_country_code(1);
  bs_number.set_national_number(uint64{2423232345});
  phone_util_.GetRegionCodeForNumber(bs_number, &region_code);
  EXPECT_EQ(RegionCode::BS(), region_code);
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{4241231234});
  phone_util_.GetRegionCodeForNumber(us_number, &region_code);
  EXPECT_EQ(RegionCode::US(), region_code);
  PhoneNumber gb_mobile;
  gb_mobile.set_country_code(44);
  gb_mobile.set_national_number(uint64{7912345678});
  phone_util_.GetRegionCodeForNumber(gb_mobile, &region_code);
  EXPECT_EQ(RegionCode::GB(), region_code);
  // Non-geographical calling codes (800, 979) resolve to UN001.
  PhoneNumber intl_toll_free_number;
  intl_toll_free_number.set_country_code(800);
  intl_toll_free_number.set_national_number(uint64{12345678});
  phone_util_.GetRegionCodeForNumber(intl_toll_free_number, &region_code);
  EXPECT_EQ(RegionCode::UN001(), region_code);
  PhoneNumber universal_premium_rate;
  universal_premium_rate.set_country_code(979);
  universal_premium_rate.set_national_number(uint64{123456789});
  phone_util_.GetRegionCodeForNumber(universal_premium_rate, &region_code);
  EXPECT_EQ(RegionCode::UN001(), region_code);
}
// Verifies IsPossibleNumber (length-based check only, weaker than
// IsValidNumber) for parsed numbers and for raw strings via
// IsPossibleNumberForString.
TEST_F(PhoneNumberUtilTest, IsPossibleNumber) {
  PhoneNumber number;
  number.set_country_code(1);
  number.set_national_number(uint64{6502530000});
  EXPECT_TRUE(phone_util_.IsPossibleNumber(number));
  // A 7-digit US number is still possible (local-only length).
  number.set_country_code(1);
  number.set_national_number(uint64{2530000});
  EXPECT_TRUE(phone_util_.IsPossibleNumber(number));
  number.set_country_code(44);
  number.set_national_number(uint64{2070313000});
  EXPECT_TRUE(phone_util_.IsPossibleNumber(number));
  number.set_country_code(800);
  number.set_national_number(uint64{12345678});
  EXPECT_TRUE(phone_util_.IsPossibleNumber(number));
  // String variants: formatting, alpha characters and partial numbers are
  // all accepted as "possible".
  EXPECT_TRUE(phone_util_.IsPossibleNumberForString("+1 650 253 0000",
                                                    RegionCode::US()));
  EXPECT_TRUE(phone_util_.IsPossibleNumberForString("+1 650 GOO OGLE",
                                                    RegionCode::US()));
  EXPECT_TRUE(phone_util_.IsPossibleNumberForString("(650) 253-0000",
                                                    RegionCode::US()));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForString("253-0000", RegionCode::US()));
  // A number with a "+" prefix is checked against its own region even when a
  // different default region is supplied.
  EXPECT_TRUE(phone_util_.IsPossibleNumberForString("+1 650 253 0000",
                                                    RegionCode::GB()));
  EXPECT_TRUE(phone_util_.IsPossibleNumberForString("+44 20 7031 3000",
                                                    RegionCode::GB()));
  EXPECT_TRUE(phone_util_.IsPossibleNumberForString("(020) 7031 300",
                                                    RegionCode::GB()));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForString("7031 3000", RegionCode::GB()));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForString("3331 6005", RegionCode::NZ()));
  EXPECT_TRUE(phone_util_.IsPossibleNumberForString("+800 1234 5678",
                                                    RegionCode::UN001()));
}
// Verifies IsPossibleNumberForType where different number types have
// different allowed lengths (AR, calling code 54): as the national number
// grows, the set of types it is possible for changes.
TEST_F(PhoneNumberUtilTest, IsPossibleNumberForType_DifferentTypeLengths) {
  PhoneNumber number;
  number.set_country_code(54);
  // 5 digits: too short for anything.
  number.set_national_number(uint64{12345});
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::UNKNOWN));
  // 6 digits: possible for fixed-line (and therefore UNKNOWN), but not for
  // mobile or toll-free.
  number.set_national_number(uint64{123456});
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::UNKNOWN));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::MOBILE));
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::TOLL_FREE));
  // 9 digits: same pattern as 6 digits.
  number.set_national_number(uint64{123456789});
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::UNKNOWN));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::MOBILE));
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::TOLL_FREE));
  // 10 digits: possible for every tested type.
  number.set_national_number(uint64{1234567890});
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::UNKNOWN));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::MOBILE));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::TOLL_FREE));
  // 11 digits: only possible for mobile (and therefore UNKNOWN).
  number.set_national_number(uint64{12345678901});
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::UNKNOWN));
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::MOBILE));
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::TOLL_FREE));
}
// Verifies IsPossibleNumberForType with a local-only length: a 2-digit DE
// number counts as possible for UNKNOWN and FIXED_LINE, but not MOBILE.
TEST_F(PhoneNumberUtilTest, IsPossibleNumberForType_LocalOnly) {
  PhoneNumber short_number;
  short_number.set_country_code(49);
  short_number.set_national_number(uint64{12});
  EXPECT_TRUE(phone_util_.IsPossibleNumberForType(short_number,
                                                  PhoneNumberUtil::UNKNOWN));
  EXPECT_TRUE(phone_util_.IsPossibleNumberForType(
      short_number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_FALSE(phone_util_.IsPossibleNumberForType(short_number,
                                                   PhoneNumberUtil::MOBILE));
}
// Verifies IsPossibleNumberForType for BR (calling code 55), where both a
// shorter (local-only) and a full-length fixed-line number are possible.
TEST_F(PhoneNumberUtilTest, IsPossibleNumberForType_DataMissingForSizeReasons) {
  PhoneNumber number;
  // 8-digit number: possible.
  number.set_country_code(55);
  number.set_national_number(uint64{12345678});
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::UNKNOWN));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
  // 10-digit number: also possible.
  number.set_national_number(uint64{1234567890});
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::UNKNOWN));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
}
// Verifies IsPossibleNumberForType when the requested type does not exist
// for the region: for BR (55) there are no mobile patterns of this length,
// and for 979 only premium-rate numbers exist.
// NOTE: national numbers use the file's uniform uint64{...} brace-init
// spelling (previously plain long literals with an L suffix).
TEST_F(PhoneNumberUtilTest,
       IsPossibleNumberForType_NumberTypeNotSupportedForRegion) {
  PhoneNumber number;
  // Not possible as a mobile, but possible as fixed-line (and hence as
  // FIXED_LINE_OR_MOBILE).
  number.set_country_code(55);
  number.set_national_number(uint64{12345678});
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::MOBILE));
  EXPECT_TRUE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_TRUE(phone_util_.IsPossibleNumberForType(
      number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  // Calling code 979: only premium-rate numbers are possible.
  number.set_country_code(979);
  number.set_national_number(uint64{123456789});
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::MOBILE));
  EXPECT_FALSE(
      phone_util_.IsPossibleNumberForType(number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_FALSE(phone_util_.IsPossibleNumberForType(
      number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  EXPECT_TRUE(phone_util_.IsPossibleNumberForType(
      number, PhoneNumberUtil::PREMIUM_RATE));
}
// Verifies IsPossibleNumberWithReason returns the specific validation result
// (IS_POSSIBLE, IS_POSSIBLE_LOCAL_ONLY, INVALID_COUNTRY_CODE, TOO_SHORT,
// TOO_LONG) rather than just a boolean.
TEST_F(PhoneNumberUtilTest, IsPossibleNumberWithReason) {
  PhoneNumber number;
  // Full-length US number.
  number.set_country_code(1);
  number.set_national_number(uint64{6502530000});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberWithReason(number));
  // 7-digit US number: only possible as a local dial.
  number.set_country_code(1);
  number.set_national_number(uint64{2530000});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE_LOCAL_ONLY,
            phone_util_.IsPossibleNumberWithReason(number));
  // Country calling code 0 is never valid.
  number.set_country_code(0);
  number.set_national_number(uint64{2530000});
  EXPECT_EQ(PhoneNumberUtil::INVALID_COUNTRY_CODE,
            phone_util_.IsPossibleNumberWithReason(number));
  // 6 digits: too short even for local dialling in the US.
  number.set_country_code(1);
  number.set_national_number(uint64{253000});
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberWithReason(number));
  // 11 digits: too long for the US.
  number.set_country_code(1);
  number.set_national_number(uint64{65025300000});
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberWithReason(number));
  number.set_country_code(44);
  number.set_national_number(uint64{2070310000});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberWithReason(number));
  number.set_country_code(49);
  number.set_national_number(uint64{30123456});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberWithReason(number));
  number.set_country_code(65);
  number.set_national_number(uint64{1234567890});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberWithReason(number));
  // One digit too long for an 800 number.
  number.set_country_code(800);
  number.set_national_number(uint64{123456789});
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberWithReason(number));
}
// Verifies IsPossibleNumberForTypeWithReason for AR (54), where allowed
// lengths differ by type; mirrors the boolean
// IsPossibleNumberForType_DifferentTypeLengths test above but checks the
// specific reason returned.
TEST_F(PhoneNumberUtilTest,
       IsPossibleNumberForTypeWithReason_DifferentTypeLengths) {
  PhoneNumber number;
  number.set_country_code(54);
  // 5 digits: too short for anything.
  number.set_national_number(uint64{12345});
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::UNKNOWN));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  // 6 digits: possible for fixed-line; too short for mobile/toll-free.
  number.set_national_number(uint64{123456});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::UNKNOWN));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::TOLL_FREE));
  // 9 digits: same pattern as 6 digits.
  number.set_national_number(uint64{123456789});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::UNKNOWN));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::TOLL_FREE));
  // 10 digits: possible for every tested type.
  number.set_national_number(uint64{1234567890});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::UNKNOWN));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::TOLL_FREE));
  // 11 digits: possible only for mobile; too long for the others.
  number.set_national_number(uint64{12345678901});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::UNKNOWN));
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::TOLL_FREE));
}
// Verifies IsPossibleNumberForTypeWithReason with a local-only length: a
// 2-digit DE number yields IS_POSSIBLE_LOCAL_ONLY for UNKNOWN/FIXED_LINE and
// TOO_SHORT for MOBILE.
TEST_F(PhoneNumberUtilTest, IsPossibleNumberForTypeWithReason_LocalOnly) {
  PhoneNumber short_number;
  short_number.set_country_code(49);
  short_number.set_national_number(uint64{12});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE_LOCAL_ONLY,
            phone_util_.IsPossibleNumberForTypeWithReason(
                short_number, PhoneNumberUtil::UNKNOWN));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE_LOCAL_ONLY,
            phone_util_.IsPossibleNumberForTypeWithReason(
                short_number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                short_number, PhoneNumberUtil::MOBILE));
}
// Verifies IsPossibleNumberForTypeWithReason for BR (55): the 8-digit form
// is only possible as a local dial, the 10-digit form is fully possible.
TEST_F(PhoneNumberUtilTest,
       IsPossibleNumberForTypeWithReason_DataMissingForSizeReasons) {
  PhoneNumber number;
  // 8 digits: local-only.
  number.set_country_code(55);
  number.set_national_number(uint64{12345678});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE_LOCAL_ONLY,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::UNKNOWN));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE_LOCAL_ONLY,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  // 10 digits: fully possible.
  number.set_national_number(uint64{1234567890});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::UNKNOWN));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
}
// Verifies IsPossibleNumberForTypeWithReason when the requested type has no
// pattern at all for the region: INVALID_LENGTH is expected rather than
// TOO_SHORT/TOO_LONG.
TEST_F(PhoneNumberUtilTest,
       IsPossibleNumberForTypeWithReason_NumberTypeNotSupportedForRegion) {
  PhoneNumber number;
  // BR (55), 8 digits: no mobile match (INVALID_LENGTH), but local-only
  // possible for the combined fixed-line-or-mobile type.
  number.set_country_code(55);
  number.set_national_number(uint64{12345678});
  EXPECT_EQ(PhoneNumberUtil::INVALID_LENGTH,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE_LOCAL_ONLY,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  // BR, 7 digits: still no mobile match; too short for fixed-line.
  number.set_national_number(uint64{1234567});
  EXPECT_EQ(PhoneNumberUtil::INVALID_LENGTH,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  // Calling code 882: the fixed-line type has no pattern, so the result for
  // it is INVALID_LENGTH while mobile is merely TOO_SHORT.
  number.set_country_code(882);
  number.set_national_number(uint64{1234567});
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  EXPECT_EQ(PhoneNumberUtil::INVALID_LENGTH,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  // Calling code 979: only premium-rate numbers exist.
  number.set_country_code(979);
  number.set_national_number(uint64{123456789});
  EXPECT_EQ(PhoneNumberUtil::INVALID_LENGTH,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::INVALID_LENGTH,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::INVALID_LENGTH,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::PREMIUM_RATE));
}
// Verifies how FIXED_LINE_OR_MOBILE reasons are combined from the separate
// fixed-line and mobile results (country calling code 290), including the
// case where one type is TOO_SHORT and the other TOO_LONG, which yields
// INVALID_LENGTH for the combined type.
TEST_F(PhoneNumberUtilTest,
       IsPossibleNumberForTypeWithReason_FixedLineOrMobile) {
  PhoneNumber number;
  // 4 digits: too short for fixed-line, possible for mobile, so the
  // combined type is possible.
  number.set_country_code(290);
  number.set_national_number(uint64{1234});
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  // 5 digits: too short for fixed-line AND too long for mobile ->
  // INVALID_LENGTH for the combined type.
  number.set_national_number(uint64{12345});
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::INVALID_LENGTH,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  // 6 digits: possible for fixed-line, so the combined type is possible.
  number.set_national_number(uint64{123456});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  // 7 digits: too long for both, and so for the combined type.
  number.set_national_number(uint64{1234567});
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE));
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::MOBILE));
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
  // 8 digits: possible as toll-free, but still too long for the combined
  // fixed-line-or-mobile type.
  number.set_national_number(uint64{12345678});
  EXPECT_EQ(PhoneNumberUtil::IS_POSSIBLE,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::TOLL_FREE));
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG,
            phone_util_.IsPossibleNumberForTypeWithReason(
                number, PhoneNumberUtil::FIXED_LINE_OR_MOBILE));
}
// Verifies IsPossibleNumber / IsPossibleNumberForString reject numbers that
// are the wrong length for their region.
TEST_F(PhoneNumberUtilTest, IsNotPossibleNumber) {
  PhoneNumber number;
  // Too long for the US.
  number.set_country_code(1);
  number.set_national_number(uint64{65025300000});
  EXPECT_FALSE(phone_util_.IsPossibleNumber(number));
  // Too long for an 800 number.
  number.set_country_code(800);
  number.set_national_number(uint64{123456789});
  EXPECT_FALSE(phone_util_.IsPossibleNumber(number));
  // Too short for the US.
  number.set_country_code(1);
  number.set_national_number(uint64{253000});
  EXPECT_FALSE(phone_util_.IsPossibleNumber(number));
  // Too short for GB.
  number.set_country_code(44);
  number.set_national_number(uint64{300});
  EXPECT_FALSE(phone_util_.IsPossibleNumber(number));
  // String variants of the same length violations.
  EXPECT_FALSE(phone_util_.IsPossibleNumberForString("+1 650 253 00000",
                                                     RegionCode::US()));
  EXPECT_FALSE(phone_util_.IsPossibleNumberForString("(650) 253-00000",
                                                     RegionCode::US()));
  EXPECT_FALSE(phone_util_.IsPossibleNumberForString("I want a Pizza",
                                                     RegionCode::US()));
  EXPECT_FALSE(phone_util_.IsPossibleNumberForString("253-000",
                                                     RegionCode::US()));
  EXPECT_FALSE(phone_util_.IsPossibleNumberForString("1 3000",
                                                     RegionCode::GB()));
  EXPECT_FALSE(phone_util_.IsPossibleNumberForString("+44 300",
                                                     RegionCode::GB()));
  EXPECT_FALSE(phone_util_.IsPossibleNumberForString("+800 1234 5678 9",
                                                     RegionCode::UN001()));
}
// Verifies TruncateTooLongNumber: returns true and strips trailing digits
// until the number is valid; returns false (leaving the number unchanged)
// when no truncation can make it valid.
TEST_F(PhoneNumberUtilTest, TruncateTooLongNumber) {
  // US: one extra digit is dropped.
  PhoneNumber too_long_number;
  too_long_number.set_country_code(1);
  too_long_number.set_national_number(uint64{65025300001});
  PhoneNumber valid_number;
  valid_number.set_country_code(1);
  valid_number.set_national_number(uint64{6502530000});
  EXPECT_TRUE(phone_util_.TruncateTooLongNumber(&too_long_number));
  EXPECT_EQ(valid_number, too_long_number);
  // 800 number: one extra digit is dropped.
  too_long_number.set_country_code(800);
  too_long_number.set_national_number(uint64{123456789});
  valid_number.set_country_code(800);
  valid_number.set_national_number(uint64{12345678});
  EXPECT_TRUE(phone_util_.TruncateTooLongNumber(&too_long_number));
  EXPECT_EQ(valid_number, too_long_number);
  // GB: four extra digits are dropped.
  too_long_number.set_country_code(44);
  too_long_number.set_national_number(uint64{80123456780123});
  valid_number.set_country_code(44);
  valid_number.set_national_number(uint64{8012345678});
  EXPECT_TRUE(phone_util_.TruncateTooLongNumber(&too_long_number));
  EXPECT_EQ(valid_number, too_long_number);
  // IT: truncation preserves the italian_leading_zero flag.
  too_long_number.set_country_code(39);
  too_long_number.set_national_number(uint64{2234567890123});
  too_long_number.set_italian_leading_zero(true);
  valid_number.set_country_code(39);
  valid_number.set_national_number(uint64{2234567890});
  valid_number.set_italian_leading_zero(true);
  EXPECT_TRUE(phone_util_.TruncateTooLongNumber(&too_long_number));
  EXPECT_EQ(valid_number, too_long_number);
  // An already-valid number is left untouched.
  PhoneNumber valid_number_copy(valid_number);
  EXPECT_TRUE(phone_util_.TruncateTooLongNumber(&valid_number));
  EXPECT_EQ(valid_number_copy, valid_number);
  // A number invalid for reasons other than length cannot be fixed by
  // truncation and is left unchanged.
  PhoneNumber number_with_invalid_prefix;
  number_with_invalid_prefix.set_country_code(1);
  number_with_invalid_prefix.set_national_number(uint64{2401234567});
  PhoneNumber invalid_number_copy(number_with_invalid_prefix);
  EXPECT_FALSE(phone_util_.TruncateTooLongNumber(&number_with_invalid_prefix));
  EXPECT_EQ(invalid_number_copy, number_with_invalid_prefix);
  // A too-short number cannot be fixed by truncation either.
  PhoneNumber too_short_number;
  too_short_number.set_country_code(1);
  too_short_number.set_national_number(uint64{1234});
  PhoneNumber too_short_number_copy(too_short_number);
  EXPECT_FALSE(phone_util_.TruncateTooLongNumber(&too_short_number));
  EXPECT_EQ(too_short_number_copy, too_short_number);
}
// IsNumberGeographical() should be true only for numbers tied to a physical
// area — fixed lines, and mobiles in regions where mobile numbers are
// geographically assigned.
TEST_F(PhoneNumberUtilTest, IsNumberGeographical) {
  PhoneNumber number;
  // Country code 1 (NANPA) mobile range: not geographical.
  number.set_country_code(1);
  number.set_national_number(uint64{2423570000});
  EXPECT_FALSE(phone_util_.IsNumberGeographical(number));
  // AU (61) fixed line: geographical.
  number.set_country_code(61);
  number.set_national_number(uint64{236618300});
  EXPECT_TRUE(phone_util_.IsNumberGeographical(number));
  // International toll-free (800): not geographical.
  number.set_country_code(800);
  number.set_national_number(uint64{12345678});
  EXPECT_FALSE(phone_util_.IsNumberGeographical(number));
  // AR (54) mobile: counted as geographical.
  number.set_country_code(54);
  number.set_national_number(uint64{91187654321});
  EXPECT_TRUE(phone_util_.IsNumberGeographical(number));
  // MX (52): both of these ranges are treated as geographical.
  number.set_country_code(52);
  number.set_national_number(uint64{12345678900});
  EXPECT_TRUE(phone_util_.IsNumberGeographical(number));
  number.set_country_code(52);
  number.set_national_number(uint64{15512345678});
  EXPECT_TRUE(phone_util_.IsNumberGeographical(number));
}
// FormatInOriginalFormat() should reproduce (a cleaned-up version of) the way
// the user originally dialed the number, falling back to national formatting
// when no raw input was kept, and echoing raw input back when the number
// cannot be confidently reformatted.
TEST_F(PhoneNumberUtilTest, FormatInOriginalFormat) {
  PhoneNumber phone_number;
  string formatted_number;
  // Dialed with "+": international format.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("+442087654321", RegionCode::GB(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::GB(),
                                     &formatted_number);
  EXPECT_EQ("+44 20 8765 4321", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Dialed nationally: national format.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("02087654321", RegionCode::GB(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::GB(),
                                     &formatted_number);
  EXPECT_EQ("(020) 8765 4321", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Dialed with the US IDD "011": out-of-country format.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("011442087654321",
                                             RegionCode::US(), &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::US(),
                                     &formatted_number);
  EXPECT_EQ("011 44 20 8765 4321", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Dialed with a bare country code (no "+").
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("442087654321", RegionCode::GB(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::GB(),
                                     &formatted_number);
  EXPECT_EQ("44 20 8765 4321", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Parsed without keeping raw input: falls back to national format.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+442087654321", RegionCode::GB(),
                              &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::GB(),
                                     &formatted_number);
  EXPECT_EQ("(020) 8765 4321", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // US number dialed without the leading "1".
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("7345678901", RegionCode::US(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::US(),
                                     &formatted_number);
  EXPECT_EQ("734 567 8901", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Unexpected leading "0" in the US: the raw input is echoed back untouched.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("0734567 8901", RegionCode::US(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::US(),
                                     &formatted_number);
  EXPECT_EQ("0734567 8901", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // KR number dialed exactly in national format is preserved as dialed.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("02-4567-8900", RegionCode::KR(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::KR(),
                                     &formatted_number);
  EXPECT_EQ("02-4567-8900", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Universal toll-free number dialed via the US IDD.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("01180012345678",
                                             RegionCode::US(), &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::US(),
                                     &formatted_number);
  EXPECT_EQ("011 800 1234 5678", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Universal toll-free number dialed with "+".
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("+80012345678", RegionCode::KR(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::KR(),
                                     &formatted_number);
  EXPECT_EQ("+800 1234 5678", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Seven-digit US local number.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("2530000", RegionCode::US(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::US(),
                                     &formatted_number);
  EXPECT_EQ("253 0000", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Dialed with the US national prefix "1".
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("18003456789", RegionCode::US(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::US(),
                                     &formatted_number);
  EXPECT_EQ("1 800 345 6789", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // GB number dialed without its national prefix "0".
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("2087654321", RegionCode::GB(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::GB(),
                                     &formatted_number);
  EXPECT_EQ("20 8765 4321", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Again without raw input: national format is used.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+442087654321", RegionCode::GB(),
                              &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::GB(),
                                     &formatted_number);
  EXPECT_EQ("(020) 8765 4321", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // MX numbers dialed with and without the "01" national prefix.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("013312345678", RegionCode::MX(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::MX(),
                                     &formatted_number);
  EXPECT_EQ("01 33 1234 5678", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("3312345678", RegionCode::MX(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::MX(),
                                     &formatted_number);
  EXPECT_EQ("33 1234 5678", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // IT number keeps its significant leading zero.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("0212345678", RegionCode::IT(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::IT(),
                                     &formatted_number);
  EXPECT_EQ("02 1234 5678", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // JP short numbers: the first is reformatted, the second is not.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("00777012", RegionCode::JP(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::JP(),
                                     &formatted_number);
  EXPECT_EQ("0077-7012", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("0777012", RegionCode::JP(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::JP(),
                                     &formatted_number);
  EXPECT_EQ("0777012", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // BR number with a carrier code: raw input is preserved.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("012 3121286979", RegionCode::BR(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::BR(),
                                     &formatted_number);
  EXPECT_EQ("012 3121286979", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // MX "044"-prefixed input is echoed back as dialed...
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("044(33)1234-5678",
                                             RegionCode::MX(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::MX(),
                                     &formatted_number);
  EXPECT_EQ("044(33)1234-5678", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // ...while "045"-prefixed input is reformatted.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("045(33)1234-5678",
                                             RegionCode::MX(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::MX(),
                                     &formatted_number);
  EXPECT_EQ("045 33 1234 5678", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // AU: "0012" is echoed as dialed...
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("0012 16502530000",
                                             RegionCode::AU(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::AU(),
                                     &formatted_number);
  EXPECT_EQ("0012 16502530000", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // ...while "0011" is recognized and the rest is formatted out-of-country.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("0011 16502530000",
                                             RegionCode::AU(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::AU(),
                                     &formatted_number);
  EXPECT_EQ("0011 1 650 253 0000", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Star-prefixed and short JP inputs are echoed back as dialed.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("*1234",
                                             RegionCode::JP(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::JP(),
                                     &formatted_number);
  EXPECT_EQ("*1234", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("1234",
                                             RegionCode::JP(),
                                             &phone_number));
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::JP(),
                                     &formatted_number);
  EXPECT_EQ("1234", formatted_number);
  phone_number.Clear();
  formatted_number.clear();
  // Hand-built number with no raw input and an invalid national number:
  // the bare digits are output.
  phone_number.set_country_code_source(PhoneNumber::FROM_DEFAULT_COUNTRY);
  phone_number.set_country_code(1);
  phone_number.set_national_number(uint64{650253000});
  phone_util_.FormatInOriginalFormat(phone_number, RegionCode::US(),
                                     &formatted_number);
  EXPECT_EQ("650253000", formatted_number);
}
// GetNumberType() must classify each of these numbers as PREMIUM_RATE.
TEST_F(PhoneNumberUtilTest, IsPremiumRate) {
  PhoneNumber premium_number;
  premium_number.set_country_code(1);
  premium_number.set_national_number(uint64{9004433030});
  EXPECT_EQ(PhoneNumberUtil::PREMIUM_RATE,
            phone_util_.GetNumberType(premium_number));
  premium_number.set_country_code(39);
  premium_number.set_national_number(uint64{892123});
  EXPECT_EQ(PhoneNumberUtil::PREMIUM_RATE,
            phone_util_.GetNumberType(premium_number));
  premium_number.set_country_code(44);
  premium_number.set_national_number(uint64{9187654321});
  EXPECT_EQ(PhoneNumberUtil::PREMIUM_RATE,
            phone_util_.GetNumberType(premium_number));
  premium_number.set_country_code(49);
  premium_number.set_national_number(uint64{9001654321});
  EXPECT_EQ(PhoneNumberUtil::PREMIUM_RATE,
            phone_util_.GetNumberType(premium_number));
  premium_number.set_country_code(49);
  premium_number.set_national_number(uint64{90091234567});
  EXPECT_EQ(PhoneNumberUtil::PREMIUM_RATE,
            phone_util_.GetNumberType(premium_number));
  // Universal premium-rate country calling code 979.
  premium_number.set_country_code(979);
  premium_number.set_national_number(uint64{123456789});
  EXPECT_EQ(PhoneNumberUtil::PREMIUM_RATE,
            phone_util_.GetNumberType(premium_number));
}
// GetNumberType() must classify each of these numbers as TOLL_FREE.
TEST_F(PhoneNumberUtilTest, IsTollFree) {
  PhoneNumber toll_free_number;
  toll_free_number.set_country_code(1);
  toll_free_number.set_national_number(uint64{8881234567});
  EXPECT_EQ(PhoneNumberUtil::TOLL_FREE,
            phone_util_.GetNumberType(toll_free_number));
  toll_free_number.set_country_code(39);
  toll_free_number.set_national_number(uint64{803123});
  EXPECT_EQ(PhoneNumberUtil::TOLL_FREE,
            phone_util_.GetNumberType(toll_free_number));
  toll_free_number.set_country_code(44);
  toll_free_number.set_national_number(uint64{8012345678});
  EXPECT_EQ(PhoneNumberUtil::TOLL_FREE,
            phone_util_.GetNumberType(toll_free_number));
  toll_free_number.set_country_code(49);
  toll_free_number.set_national_number(uint64{8001234567});
  EXPECT_EQ(PhoneNumberUtil::TOLL_FREE,
            phone_util_.GetNumberType(toll_free_number));
  // Universal toll-free country calling code 800.
  toll_free_number.set_country_code(800);
  toll_free_number.set_national_number(uint64{12345678});
  EXPECT_EQ(PhoneNumberUtil::TOLL_FREE,
            phone_util_.GetNumberType(toll_free_number));
}
// GetNumberType() must classify each of these numbers as MOBILE.
TEST_F(PhoneNumberUtilTest, IsMobile) {
  PhoneNumber mobile_number;
  mobile_number.set_country_code(1);
  mobile_number.set_national_number(uint64{2423570000});
  EXPECT_EQ(PhoneNumberUtil::MOBILE, phone_util_.GetNumberType(mobile_number));
  mobile_number.set_country_code(39);
  mobile_number.set_national_number(uint64{312345678});
  EXPECT_EQ(PhoneNumberUtil::MOBILE, phone_util_.GetNumberType(mobile_number));
  mobile_number.set_country_code(44);
  mobile_number.set_national_number(uint64{7912345678});
  EXPECT_EQ(PhoneNumberUtil::MOBILE, phone_util_.GetNumberType(mobile_number));
  mobile_number.set_country_code(49);
  mobile_number.set_national_number(uint64{15123456789});
  EXPECT_EQ(PhoneNumberUtil::MOBILE, phone_util_.GetNumberType(mobile_number));
  mobile_number.set_country_code(54);
  mobile_number.set_national_number(uint64{91187654321});
  EXPECT_EQ(PhoneNumberUtil::MOBILE, phone_util_.GetNumberType(mobile_number));
}
// GetNumberType() must classify each of these numbers as FIXED_LINE.
TEST_F(PhoneNumberUtilTest, IsFixedLine) {
  PhoneNumber fixed_line_number;
  fixed_line_number.set_country_code(1);
  fixed_line_number.set_national_number(uint64{2423651234});
  EXPECT_EQ(PhoneNumberUtil::FIXED_LINE,
            phone_util_.GetNumberType(fixed_line_number));
  // An Italian number whose leading zero is significant.
  fixed_line_number.Clear();
  fixed_line_number.set_country_code(39);
  fixed_line_number.set_national_number(uint64{236618300});
  fixed_line_number.set_italian_leading_zero(true);
  EXPECT_EQ(PhoneNumberUtil::FIXED_LINE,
            phone_util_.GetNumberType(fixed_line_number));
  fixed_line_number.Clear();
  fixed_line_number.set_country_code(44);
  fixed_line_number.set_national_number(uint64{2012345678});
  EXPECT_EQ(PhoneNumberUtil::FIXED_LINE,
            phone_util_.GetNumberType(fixed_line_number));
  fixed_line_number.set_country_code(49);
  fixed_line_number.set_national_number(uint64{301234});
  EXPECT_EQ(PhoneNumberUtil::FIXED_LINE,
            phone_util_.GetNumberType(fixed_line_number));
}
// Numbers in ranges shared between fixed-line and mobile must come back as
// FIXED_LINE_OR_MOBILE.
TEST_F(PhoneNumberUtilTest, IsFixedLineAndMobile) {
  PhoneNumber shared_range_number;
  shared_range_number.set_country_code(1);
  shared_range_number.set_national_number(uint64{6502531111});
  EXPECT_EQ(PhoneNumberUtil::FIXED_LINE_OR_MOBILE,
            phone_util_.GetNumberType(shared_range_number));
  shared_range_number.set_country_code(54);
  shared_range_number.set_national_number(uint64{1987654321});
  EXPECT_EQ(PhoneNumberUtil::FIXED_LINE_OR_MOBILE,
            phone_util_.GetNumberType(shared_range_number));
}
// A GB shared-cost range number must be typed as SHARED_COST.
TEST_F(PhoneNumberUtilTest, IsSharedCost) {
  PhoneNumber shared_cost_number;
  shared_cost_number.set_country_code(44);
  shared_cost_number.set_national_number(uint64{8431231234});
  EXPECT_EQ(PhoneNumberUtil::SHARED_COST,
            phone_util_.GetNumberType(shared_cost_number));
}
// A GB VoIP range number must be typed as VOIP.
TEST_F(PhoneNumberUtilTest, IsVoip) {
  PhoneNumber voip_number;
  voip_number.set_country_code(44);
  voip_number.set_national_number(uint64{5631231234});
  EXPECT_EQ(PhoneNumberUtil::VOIP, phone_util_.GetNumberType(voip_number));
}
// A GB personal-number range must be typed as PERSONAL_NUMBER.
TEST_F(PhoneNumberUtilTest, IsPersonalNumber) {
  PhoneNumber personal_number;
  personal_number.set_country_code(44);
  personal_number.set_national_number(uint64{7031231234});
  EXPECT_EQ(PhoneNumberUtil::PERSONAL_NUMBER,
            phone_util_.GetNumberType(personal_number));
}
// A number that matches no known range gets type UNKNOWN.
TEST_F(PhoneNumberUtilTest, IsUnknown) {
  PhoneNumber unknown_number;
  unknown_number.set_country_code(1);
  unknown_number.set_national_number(uint64{65025311111});
  EXPECT_EQ(PhoneNumberUtil::UNKNOWN,
            phone_util_.GetNumberType(unknown_number));
}
// GetCountryCodeForRegion() maps a region code to its country calling code,
// returning 0 for unknown, non-geographical or deprecated regions.
TEST_F(PhoneNumberUtilTest, GetCountryCodeForRegion) {
  EXPECT_EQ(1, phone_util_.GetCountryCodeForRegion(RegionCode::US()));
  EXPECT_EQ(64, phone_util_.GetCountryCodeForRegion(RegionCode::NZ()));
  // Unknown region.
  EXPECT_EQ(0, phone_util_.GetCountryCodeForRegion(RegionCode::GetUnknown()));
  // The non-geographical "world" region 001.
  EXPECT_EQ(0, phone_util_.GetCountryCodeForRegion(RegionCode::UN001()));
  // CS is a deprecated region code.
  EXPECT_EQ(0, phone_util_.GetCountryCodeForRegion(RegionCode::CS()));
}
// GetNddPrefixForRegion() returns the region's national dialling prefix; the
// boolean argument requests stripping of any non-digit characters in it.
TEST_F(PhoneNumberUtilTest, GetNationalDiallingPrefixForRegion) {
  string prefix;
  phone_util_.GetNddPrefixForRegion(RegionCode::US(), false, &prefix);
  EXPECT_EQ("1", prefix);
  prefix.clear();
  phone_util_.GetNddPrefixForRegion(RegionCode::BS(), false, &prefix);
  EXPECT_EQ("1", prefix);
  prefix.clear();
  phone_util_.GetNddPrefixForRegion(RegionCode::NZ(), false, &prefix);
  EXPECT_EQ("0", prefix);
  prefix.clear();
  // AO's prefix contains a non-digit "~": kept verbatim when not stripping,
  // removed when stripping is requested.
  phone_util_.GetNddPrefixForRegion(RegionCode::AO(), false, &prefix);
  EXPECT_EQ("0~0", prefix);
  prefix.clear();
  phone_util_.GetNddPrefixForRegion(RegionCode::AO(), true, &prefix);
  EXPECT_EQ("00", prefix);
  prefix.clear();
  // Unknown, non-geographical and deprecated regions have no prefix.
  phone_util_.GetNddPrefixForRegion(RegionCode::GetUnknown(), false, &prefix);
  EXPECT_EQ("", prefix);
  prefix.clear();
  phone_util_.GetNddPrefixForRegion(RegionCode::UN001(), false, &prefix);
  EXPECT_EQ("", prefix);
  prefix.clear();
  phone_util_.GetNddPrefixForRegion(RegionCode::CS(), false, &prefix);
  EXPECT_EQ("", prefix);
}
// IsViablePhoneNumber() is a quick plausibility check: enough digits, a "+"
// only at the start, and only characters that can occur in phone numbers.
TEST_F(PhoneNumberUtilTest, IsViablePhoneNumber) {
  // A single digit is not viable.
  EXPECT_FALSE(IsViablePhoneNumber("1"));
  // A "+" anywhere other than the start is not allowed.
  EXPECT_FALSE(IsViablePhoneNumber("1+1+1"));
  EXPECT_FALSE(IsViablePhoneNumber("80+0"));
  // Two digits is enough to be viable.
  EXPECT_TRUE(IsViablePhoneNumber("00"));
  EXPECT_TRUE(IsViablePhoneNumber("111"));
  // Alpha ("vanity") numbers are viable when preceded by enough digits.
  EXPECT_TRUE(IsViablePhoneNumber("0800-4-pizza"));
  EXPECT_TRUE(IsViablePhoneNumber("0800-4-PIZZA"));
  // Too few digits before the alpha characters.
  EXPECT_FALSE(IsViablePhoneNumber("08-PIZZA"));
  EXPECT_FALSE(IsViablePhoneNumber("8-PIZZA"));
  EXPECT_FALSE(IsViablePhoneNumber("12. March"));
}
// Viability must also accept numbers containing non-ASCII separators such as
// the ideographic space (U+3000) and fullwidth parentheses (U+FF08/U+FF09).
TEST_F(PhoneNumberUtilTest, IsViablePhoneNumberNonAscii) {
  // Digits separated by an ideographic space are viable...
  EXPECT_TRUE(IsViablePhoneNumber("1\xE3\x80\x80" "34" ));
  // ...but a misplaced "+" still makes the number non-viable.
  EXPECT_FALSE(IsViablePhoneNumber("1\xE3\x80\x80" "3+4" ));
  // Fullwidth parentheses count as acceptable punctuation.
  EXPECT_TRUE(IsViablePhoneNumber("\xEF\xBC\x88" "1\xEF\xBC\x89\xE3\x80\x80"
                                  "3456789" ));
  EXPECT_TRUE(IsViablePhoneNumber("+1\xEF\xBC\x89\xE3\x80\x80"
                                  "3456789" ));
}
// ConvertAlphaCharactersInNumber() replaces letters with their keypad digits
// while leaving other characters (separators, fullwidth chars) untouched.
TEST_F(PhoneNumberUtilTest, ConvertAlphaCharactersInNumber) {
  string input("1800-ABC-DEF");
  phone_util_.ConvertAlphaCharactersInNumber(&input);
  static const string kExpectedOutput = "1800-222-333";
  EXPECT_EQ(kExpectedOutput, input);
  // The ideographic space (U+3000) and fullwidth "(" (U+FF08) must survive
  // the conversion unchanged.
  input.assign("1\xE3\x80\x80\xEF\xBC\x88" "800) ABC-DEF"
               );
  static const string kExpectedFullwidthOutput =
      "1\xE3\x80\x80\xEF\xBC\x88" "800) 222-333" ;
  phone_util_.ConvertAlphaCharactersInNumber(&input);
  EXPECT_EQ(kExpectedFullwidthOutput, input);
}
// Normalize() must strip punctuation, including the soft hyphen (U+00AD).
TEST_F(PhoneNumberUtilTest, NormaliseRemovePunctuation) {
  string number("034-56&+#2" "\xC2\xAD" "34");
  Normalize(&number);
  EXPECT_EQ("03456234", number)
      << "Conversion did not correctly remove punctuation";
}
// Normalize() must convert alpha characters to their keypad digits.
TEST_F(PhoneNumberUtilTest, NormaliseReplaceAlphaCharacters) {
  string number("034-I-am-HUNGRY");
  Normalize(&number);
  EXPECT_EQ("034426486479", number)
      << "Conversion did not correctly replace alpha characters";
}
// Normalize() must convert non-Latin decimal digits into ASCII digits.
TEST_F(PhoneNumberUtilTest, NormaliseOtherDigits) {
  // U+FF12 (fullwidth "2") and U+0665 (Arabic-Indic "5").
  string input_number("\xEF\xBC\x92" "5\xD9\xA5" );
  Normalize(&input_number);
  static const string kExpectedOutput("255");
  EXPECT_EQ(kExpectedOutput, input_number)
      << "Conversion did not correctly replace non-latin digits";
  // U+06F5 and U+06F0 (extended Arabic-Indic "5" and "0").
  string eastern_arabic_input_number("\xDB\xB5" "2\xDB\xB0" );
  Normalize(&eastern_arabic_input_number);
  static const string kExpectedOutput2("520");
  EXPECT_EQ(kExpectedOutput2, eastern_arabic_input_number)
      << "Conversion did not correctly replace non-latin digits";
}
// NormalizeDigitsOnly() keeps digits only, dropping alpha and punctuation.
TEST_F(PhoneNumberUtilTest, NormaliseStripAlphaCharacters) {
  string number("034-56&+a#234");
  phone_util_.NormalizeDigitsOnly(&number);
  EXPECT_EQ("03456234", number)
      << "Conversion did not correctly remove alpha characters";
}
// NormalizeDiallableCharsOnly() keeps digits and diallable symbols
// ("*", "+", "#") while dropping everything else.
TEST_F(PhoneNumberUtilTest, NormaliseStripNonDiallableCharacters) {
  string number("03*4-56&+1a#234");
  phone_util_.NormalizeDiallableCharsOnly(&number);
  EXPECT_EQ("03*456+1#234", number)
      << "Conversion did not correctly remove non-diallable characters";
}
// MaybeStripInternationalPrefixAndNormalize() removes a leading IDD matching
// the supplied pattern (or a "+") and reports where the country code came
// from.
TEST_F(PhoneNumberUtilTest, MaybeStripInternationalPrefix) {
  string international_prefix("00[39]");
  string number_to_strip("0034567700-3898003");
  string stripped_number("45677003898003");
  EXPECT_EQ(PhoneNumber::FROM_NUMBER_WITH_IDD,
            MaybeStripInternationalPrefixAndNormalize(international_prefix,
                                                      &number_to_strip));
  EXPECT_EQ(stripped_number, number_to_strip)
      << "The number was not stripped of its international prefix.";
  // Stripping the same number again finds no prefix left.
  EXPECT_EQ(PhoneNumber::FROM_DEFAULT_COUNTRY,
            MaybeStripInternationalPrefixAndNormalize(international_prefix,
                                                      &number_to_strip));
  number_to_strip.assign("00945677003898003");
  EXPECT_EQ(PhoneNumber::FROM_NUMBER_WITH_IDD,
            MaybeStripInternationalPrefixAndNormalize(international_prefix,
                                                      &number_to_strip));
  EXPECT_EQ(stripped_number, number_to_strip)
      << "The number was not stripped of its international prefix.";
  // Spaces inside the IDD are tolerated.
  number_to_strip.assign("00 9 45677003898003");
  EXPECT_EQ(PhoneNumber::FROM_NUMBER_WITH_IDD,
            MaybeStripInternationalPrefixAndNormalize(international_prefix,
                                                      &number_to_strip));
  EXPECT_EQ(stripped_number, number_to_strip)
      << "The number was not stripped of its international prefix.";
  EXPECT_EQ(PhoneNumber::FROM_DEFAULT_COUNTRY,
            MaybeStripInternationalPrefixAndNormalize(international_prefix,
                                                      &number_to_strip));
  // A leading plus sign counts as an international prefix too.
  number_to_strip.assign("+45677003898003");
  stripped_number.assign("45677003898003");
  EXPECT_EQ(PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN,
            MaybeStripInternationalPrefixAndNormalize(international_prefix,
                                                      &number_to_strip));
  EXPECT_EQ(stripped_number, number_to_strip)
      << "The number supplied was not stripped of the plus symbol.";
  // If a "0" immediately follows the would-be IDD, the prefix is not
  // stripped (the number is still normalized).
  number_to_strip.assign("0090112-3123");
  stripped_number.assign("00901123123");
  EXPECT_EQ(PhoneNumber::FROM_DEFAULT_COUNTRY,
            MaybeStripInternationalPrefixAndNormalize(international_prefix,
                                                      &number_to_strip));
  EXPECT_EQ(stripped_number, number_to_strip)
      << "The number had a 0 after the match so shouldn't be stripped.";
  // Same when spaces and punctuation separate the "0" from the rest.
  number_to_strip.assign("009 0-112-3123");
  EXPECT_EQ(PhoneNumber::FROM_DEFAULT_COUNTRY,
            MaybeStripInternationalPrefixAndNormalize(international_prefix,
                                                      &number_to_strip));
}
// MaybeStripNationalPrefixAndCarrierCode() strips the national prefix (and
// any captured carrier code) only when the remainder still matches the
// general number pattern; a transform rule may rewrite the number instead.
TEST_F(PhoneNumberUtilTest, MaybeStripNationalPrefixAndCarrierCode) {
  PhoneMetadata metadata;
  metadata.set_national_prefix_for_parsing("34");
  metadata.mutable_general_desc()->set_national_number_pattern("\\d{4,8}");
  string number_to_strip("34356778");
  string stripped_number("356778");
  string carrier_code;
  MaybeStripNationalPrefixAndCarrierCode(metadata, &number_to_strip,
                                         &carrier_code);
  EXPECT_EQ(stripped_number, number_to_strip)
      << "Should have had national prefix stripped.";
  EXPECT_EQ("", carrier_code) << "Should have had no carrier code stripped.";
  // Stripping a second time is a no-op: the prefix is already gone.
  MaybeStripNationalPrefixAndCarrierCode(metadata, &number_to_strip,
                                         &carrier_code);
  EXPECT_EQ(stripped_number, number_to_strip)
      << "Should have had no change - no national prefix present.";
  // With no prefix configured, nothing is stripped.
  metadata.clear_national_prefix_for_parsing();
  MaybeStripNationalPrefixAndCarrierCode(metadata, &number_to_strip,
                                         &carrier_code);
  EXPECT_EQ(stripped_number, number_to_strip)
      << "Should have had no change - empty national prefix.";
  // The prefix is kept when removal would leave a number that fails the
  // general pattern.
  metadata.set_national_prefix_for_parsing("3");
  number_to_strip.assign("3123");
  stripped_number.assign("3123");
  MaybeStripNationalPrefixAndCarrierCode(metadata, &number_to_strip,
                                         &carrier_code);
  EXPECT_EQ(stripped_number, number_to_strip)
      << "Should have had no change - after stripping, it wouldn't have "
      << "matched the national rule.";
  // A capturing group in the prefix pattern yields the carrier code.
  metadata.set_national_prefix_for_parsing("0(81)?");
  number_to_strip.assign("08122123456");
  stripped_number.assign("22123456");
  MaybeStripNationalPrefixAndCarrierCode(metadata, &number_to_strip,
                                         &carrier_code);
  EXPECT_EQ("81", carrier_code) << "Should have had carrier code stripped.";
  EXPECT_EQ(stripped_number, number_to_strip)
      << "Should have had national prefix and carrier code stripped.";
  // A transform rule rewrites the number using the captured group.
  metadata.set_national_prefix_transform_rule("5$15");
  metadata.set_national_prefix_for_parsing("0(\\d{2})");
  number_to_strip.assign("031123");
  string transformed_number("5315123");
  MaybeStripNationalPrefixAndCarrierCode(metadata, &number_to_strip,
                                         &carrier_code);
  EXPECT_EQ(transformed_number, number_to_strip)
      << "Was not successfully transformed.";
}
// MaybeStripExtension() removes a recognized extension suffix from the number
// and returns it separately; numbers without an extension are left alone.
TEST_F(PhoneNumberUtilTest, MaybeStripExtension) {
  // "ext."-style extension.
  string number("1234576 ext. 1234");
  string extension;
  string expected_extension("1234");
  string stripped_number("1234576");
  EXPECT_TRUE(MaybeStripExtension(&number, &extension));
  EXPECT_EQ(stripped_number, number);
  EXPECT_EQ(expected_extension, extension);
  // A plain hyphenated number has no extension.
  number.assign("1234-576");
  extension.clear();
  stripped_number.assign("1234-576");
  EXPECT_FALSE(MaybeStripExtension(&number, &extension));
  EXPECT_EQ(stripped_number, number);
  EXPECT_TRUE(extension.empty());
  // "-123#" style extension.
  number.assign("1234576-123#");
  extension.clear();
  expected_extension.assign("123");
  stripped_number.assign("1234576");
  EXPECT_TRUE(MaybeStripExtension(&number, &extension));
  EXPECT_EQ(stripped_number, number);
  EXPECT_EQ(expected_extension, extension);
  // "ext.123#" style extension (no space after "ext.").
  number.assign("1234576 ext.123#");
  extension.clear();
  EXPECT_TRUE(MaybeStripExtension(&number, &extension));
  EXPECT_EQ(stripped_number, number);
  EXPECT_EQ(expected_extension, extension);
}
// MaybeExtractCountryCode() pulls the country calling code off the front of a
// number — via an IDD, a leading "+", or a bare country code — and records
// where it came from in country_code_source when keep_raw_input is true.
TEST_F(PhoneNumberUtilTest, MaybeExtractCountryCode) {
  PhoneNumber number;
  const PhoneMetadata* metadata = GetPhoneMetadata(RegionCode::US());
  // Country code preceded by the US IDD "011".
  string phone_number("011112-3456789");
  string stripped_number("123456789");
  int expected_country_code = 1;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            MaybeExtractCountryCode(metadata, true, &phone_number, &number));
  EXPECT_EQ(expected_country_code, number.country_code());
  EXPECT_EQ(PhoneNumber::FROM_NUMBER_WITH_IDD, number.country_code_source());
  EXPECT_EQ(stripped_number, phone_number);
  number.Clear();
  // "+" followed by the universal toll-free code 800.
  phone_number.assign("+80012345678");
  stripped_number.assign("12345678");
  expected_country_code = 800;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            MaybeExtractCountryCode(metadata, true, &phone_number, &number));
  EXPECT_EQ(expected_country_code, number.country_code());
  EXPECT_EQ(PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN,
            number.country_code_source());
  EXPECT_EQ(stripped_number, phone_number);
  number.Clear();
  // "+" followed by country code 64.
  phone_number.assign("+6423456789");
  stripped_number.assign("23456789");
  expected_country_code = 64;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            MaybeExtractCountryCode(metadata, true, &phone_number, &number));
  EXPECT_EQ(expected_country_code, number.country_code());
  EXPECT_EQ(PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN,
            number.country_code_source());
  EXPECT_EQ(stripped_number, phone_number);
  number.Clear();
  // No prefix at all: no country code is extracted.
  expected_country_code = 0;
  phone_number.assign("2345-6789");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            MaybeExtractCountryCode(metadata, true, &phone_number, &number));
  EXPECT_EQ(expected_country_code, number.country_code());
  EXPECT_EQ(PhoneNumber::FROM_DEFAULT_COUNTRY, number.country_code_source());
  EXPECT_EQ(stripped_number, phone_number);
  // IDD followed by "999", which is not a country calling code: error.
  expected_country_code = 0;
  phone_number.assign("0119991123456789");
  stripped_number.assign(phone_number);
  EXPECT_EQ(PhoneNumberUtil::INVALID_COUNTRY_CODE_ERROR,
            MaybeExtractCountryCode(metadata, true, &phone_number, &number));
  number.Clear();
  // A bare leading country code "1" is recognized and stripped here.
  phone_number.assign("(1 610) 619 4466");
  stripped_number.assign("6106194466");
  expected_country_code = 1;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            MaybeExtractCountryCode(metadata, true, &phone_number, &number));
  EXPECT_EQ(expected_country_code, number.country_code());
  EXPECT_EQ(PhoneNumber::FROM_NUMBER_WITHOUT_PLUS_SIGN,
            number.country_code_source());
  EXPECT_EQ(stripped_number, phone_number);
  number.Clear();
  // Same input with keep_raw_input=false: the source field stays unset.
  phone_number.assign("(1 610) 619 4466");
  stripped_number.assign("6106194466");
  expected_country_code = 1;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            MaybeExtractCountryCode(metadata, false, &phone_number, &number));
  EXPECT_EQ(expected_country_code, number.country_code());
  EXPECT_FALSE(number.has_country_code_source());
  EXPECT_EQ(stripped_number, phone_number);
  number.Clear();
  // Here the leading "1" is kept and no country code is extracted.
  phone_number.assign("(1 610) 619 446");
  stripped_number.assign("1610619446");
  expected_country_code = 0;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            MaybeExtractCountryCode(metadata, false, &phone_number, &number));
  EXPECT_EQ(expected_country_code, number.country_code());
  EXPECT_FALSE(number.has_country_code_source());
  EXPECT_EQ(stripped_number, phone_number);
  number.Clear();
  phone_number.assign("(1 610) 619");
  stripped_number.assign("1610619");
  expected_country_code = 0;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            MaybeExtractCountryCode(metadata, true, &phone_number, &number));
  EXPECT_EQ(expected_country_code, number.country_code());
  EXPECT_EQ(PhoneNumber::FROM_DEFAULT_COUNTRY, number.country_code_source());
  EXPECT_EQ(stripped_number, phone_number);
}
// Formatting must still work for a region (AD, country code 376) whose test
// metadata carries no detailed number-type descriptions; such numbers get
// type UNKNOWN and are not considered valid.
TEST_F(PhoneNumberUtilTest, CountryWithNoNumberDesc) {
  string formatted_number;
  PhoneNumber ad_number;
  ad_number.set_country_code(376);
  ad_number.set_national_number(uint64{12345});
  phone_util_.Format(ad_number, PhoneNumberUtil::INTERNATIONAL,
                     &formatted_number);
  EXPECT_EQ("+376 12345", formatted_number);
  phone_util_.Format(ad_number, PhoneNumberUtil::E164, &formatted_number);
  EXPECT_EQ("+37612345", formatted_number);
  phone_util_.Format(ad_number, PhoneNumberUtil::NATIONAL, &formatted_number);
  EXPECT_EQ("12345", formatted_number);
  EXPECT_EQ(PhoneNumberUtil::UNKNOWN, phone_util_.GetNumberType(ad_number));
  EXPECT_FALSE(phone_util_.IsValidNumber(ad_number));
  // Out-of-country formatting from such a region still works.
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{6502530000});
  phone_util_.FormatOutOfCountryCallingNumber(us_number, RegionCode::AD(),
                                              &formatted_number);
  EXPECT_EQ("00 1 650 253 0000", formatted_number);
}
// A number with an invalid country calling code is never valid, but E164
// formatting should still emit its digits rather than fail.
TEST_F(PhoneNumberUtilTest, UnknownCountryCallingCode) {
  PhoneNumber bogus_number;
  bogus_number.set_country_code(kInvalidCountryCode);
  bogus_number.set_national_number(uint64{12345});
  EXPECT_FALSE(phone_util_.IsValidNumber(bogus_number));
  string formatted;
  phone_util_.Format(bogus_number, PhoneNumberUtil::E164, &formatted);
  EXPECT_EQ("+212345", formatted);
}
// IsNumberMatch (and its string variants) should report EXACT_MATCH for the
// same number written with different separators, IDD styles, extension
// syntax or alpha characters.
TEST_F(PhoneNumberUtilTest, IsNumberMatchMatches) {
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331 6005",
                                                    "+64 03 331 6005"));
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+800 1234 5678",
                                                    "+80012345678"));
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 03 331-6005",
                                                    "+64 03331 6005"));
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+643 331-6005",
                                                    "+64033316005"));
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+643 331-6005",
                                                    "+6433316005"));
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005",
                                                    "+6433316005"));
  // A tel: URI with an isub parameter matches the plain number.
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings(
                "+64 3 331-6005", "tel:+64-3-331-6005;isub=123"));
  // Alpha characters are converted to digits before comparing.
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+1800 siX-Flags",
                                                    "+1 800 7493 5247"));
  // Different extension notations ("extn", "#", ";", and the Cyrillic
  // abbreviation in UTF-8) all match.
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005 extn 1234",
                                                    "+6433316005#1234"));
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005 extn 1234",
                                                    "+6433316005;1234"));
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings(
                "+7 423 202-25-11 ext 100",
                "+7 4232022511 \xd0\xb4\xd0\xbe\xd0\xb1. 100"));
  // Proto vs. string, including a matching extension.
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(uint64{33316005});
  nz_number.set_extension("3456");
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithOneString(nz_number,
                                                   "+643 331 6005 ext 3456"));
  // A cleared extension matches a number without one...
  nz_number.clear_extension();
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithOneString(nz_number,
                                                   "+643 331 6005"));
  // ...and so does an extension explicitly set to the empty string.
  nz_number.set_extension("");
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatchWithOneString(nz_number,
                                                   "+643 331 6005"));
  // Proto vs. proto.
  PhoneNumber nz_number_2;
  nz_number_2.set_country_code(64);
  nz_number_2.set_national_number(uint64{33316005});
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatch(nz_number, nz_number_2));
}
// Numbers that differ only in their leading zeros should compare as
// SHORT_NSN_MATCH, not EXACT_MATCH.
TEST_F(PhoneNumberUtilTest, IsNumberMatchShortMatchIfDiffNumLeadingZeros) {
  PhoneNumber nz_number_one;
  nz_number_one.set_country_code(64);
  nz_number_one.set_national_number(uint64{33316005});
  nz_number_one.set_italian_leading_zero(true);
  PhoneNumber nz_number_two;
  nz_number_two.set_country_code(64);
  nz_number_two.set_national_number(uint64{33316005});
  nz_number_two.set_italian_leading_zero(true);
  // Same digits but two leading zeros instead of one.
  nz_number_two.set_number_of_leading_zeros(2);
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatch(nz_number_one, nz_number_two));
  // One number with a leading zero vs. one without is also only a short
  // match, even though both set number_of_leading_zeros to 1.
  nz_number_one.set_italian_leading_zero(false);
  nz_number_one.set_number_of_leading_zeros(1);
  nz_number_two.set_italian_leading_zero(true);
  nz_number_two.set_number_of_leading_zeros(1);
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatch(nz_number_one, nz_number_two));
}
// Leaving number_of_leading_zeros unset should be treated the same as
// setting it explicitly to its proto default of 1, so the pair below is an
// exact match.
TEST_F(PhoneNumberUtilTest, IsNumberMatchAcceptsProtoDefaultsAsMatch) {
  PhoneNumber number_with_default_unset;
  number_with_default_unset.set_country_code(64);
  number_with_default_unset.set_national_number(uint64{33316005});
  number_with_default_unset.set_italian_leading_zero(true);

  PhoneNumber number_with_default_set;
  number_with_default_set.set_country_code(64);
  number_with_default_set.set_national_number(uint64{33316005});
  number_with_default_set.set_italian_leading_zero(true);
  number_with_default_set.set_number_of_leading_zeros(1);
  EXPECT_EQ(
      PhoneNumberUtil::EXACT_MATCH,
      phone_util_.IsNumberMatch(number_with_default_unset,
                                number_with_default_set));
}
// When italian_leading_zero is not set, differing values of
// number_of_leading_zeros do not prevent an exact match (the count is only
// meaningful when the leading-zero flag is on).
TEST_F(PhoneNumberUtilTest,
       IsNumberMatchMatchesDiffLeadingZerosIfItalianLeadingZeroFalse) {
  PhoneNumber plain_number;
  plain_number.set_country_code(64);
  plain_number.set_national_number(uint64{33316005});

  PhoneNumber number_with_zero_count;
  number_with_zero_count.set_country_code(64);
  number_with_zero_count.set_national_number(uint64{33316005});
  number_with_zero_count.set_number_of_leading_zeros(1);
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatch(plain_number, number_with_zero_count));

  // Even an implausibly large zero count makes no difference here.
  number_with_zero_count.set_number_of_leading_zeros(10);
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatch(plain_number, number_with_zero_count));
}
// Matching should consider only the core number fields: differing
// country_code_source, preferred_domestic_carrier_code and raw_input must
// not spoil an exact match.
TEST_F(PhoneNumberUtilTest, IsNumberMatchIgnoresSomeFields) {
  PhoneNumber first_br_number;
  first_br_number.set_country_code(55);
  first_br_number.set_national_number(uint64{3121286979});
  first_br_number.set_country_code_source(
      PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN);
  first_br_number.set_preferred_domestic_carrier_code("12");
  first_br_number.set_raw_input("012 3121286979");

  PhoneNumber second_br_number;
  second_br_number.set_country_code(55);
  second_br_number.set_national_number(uint64{3121286979});
  second_br_number.set_country_code_source(PhoneNumber::FROM_DEFAULT_COUNTRY);
  second_br_number.set_preferred_domestic_carrier_code("14");
  second_br_number.set_raw_input("143121286979");
  EXPECT_EQ(PhoneNumberUtil::EXACT_MATCH,
            phone_util_.IsNumberMatch(first_br_number, second_br_number));
}
// Pairs of inputs that must NOT match: differing NSNs, differing country
// codes, differing extensions, plus inputs that are not parseable and so
// yield INVALID_NUMBER rather than NO_MATCH.
TEST_F(PhoneNumberUtilTest, IsNumberMatchNonMatches) {
  // National significant numbers differ.
  EXPECT_EQ(PhoneNumberUtil::NO_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("03 331 6005",
                                                    "03 331 6006"));
  // Different country calling codes (universal 800 vs US 1).
  EXPECT_EQ(PhoneNumberUtil::NO_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+800 1234 5678",
                                                    "+1 800 1234 5678"));
  EXPECT_EQ(PhoneNumberUtil::NO_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005",
                                                    "+16433316005"));
  EXPECT_EQ(PhoneNumberUtil::NO_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005",
                                                    "+6133316005"));
  // Same base number but different extensions.
  EXPECT_EQ(PhoneNumberUtil::NO_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005 extn 1234",
                                                    "+0116433316005#1235"));
  EXPECT_EQ(PhoneNumberUtil::NO_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings(
                "+64 3 331-6005 extn 1234", "tel:+64-3-331-6005;ext=1235"));
  EXPECT_EQ(PhoneNumberUtil::NO_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005 ext.1235",
                                                    "3 331 6005#1234"));
  // Inputs that cannot be parsed as viable numbers report INVALID_NUMBER.
  EXPECT_EQ(PhoneNumberUtil::INVALID_NUMBER,
            phone_util_.IsNumberMatchWithTwoStrings("4", "3 331 6043"));
  EXPECT_EQ(PhoneNumberUtil::INVALID_NUMBER,
            phone_util_.IsNumberMatchWithTwoStrings("+43", "+64 3 331 6005"));
  EXPECT_EQ(PhoneNumberUtil::INVALID_NUMBER,
            phone_util_.IsNumberMatchWithTwoStrings("+43", "64 3 331 6005"));
  EXPECT_EQ(PhoneNumberUtil::INVALID_NUMBER,
            phone_util_.IsNumberMatchWithTwoStrings("Dog", "64 3 331 6005"));
}
// Pairs whose national significant numbers agree but where at least one
// side lacks a country code: these report NSN_MATCH.
TEST_F(PhoneNumberUtilTest, IsNumberMatchNsnMatches) {
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005",
                                                    "03 331 6005"));
  // tel: URIs with isub/phone-context parameters are handled too.
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings(
                "+64 3 331-6005",
                "tel:03-331-6005;isub=1234;phone-context=abc.nz"));
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(uint64{33316005});
  // An empty extension is treated the same as no extension.
  nz_number.set_extension("");
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithOneString(nz_number, "03 331 6005"));
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithOneString(nz_number,
                                                   "(64-3) 331 6005"));
  // US-style case: "1" read as a national prefix rather than country code.
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{2345678901});
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithOneString(us_number,
                                                   "1-234-567-8901"));
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithOneString(us_number, "2345678901"));
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+1 234-567 8901",
                                                    "1 234 567 8901"));
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("1 234-567 8901",
                                                    "1 234 567 8901"));
  EXPECT_EQ(PhoneNumberUtil::NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("1 234-567 8901",
                                                    "+1 234 567 8901"));
  // With country code 41 the leading "1" cannot be a national prefix, so
  // only a SHORT_NSN_MATCH is reported.
  PhoneNumber random_number;
  random_number.set_country_code(41);
  random_number.set_national_number(uint64{2345678901});
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithOneString(random_number,
                                                   "1-234-567-8901"));
}
// Pairs where one national significant number is a trailing substring of
// the other (or leading zeros / extensions disagree): SHORT_NSN_MATCH.
TEST_F(PhoneNumberUtilTest, IsNumberMatchShortNsnMatches) {
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005",
                                                    "331 6005"));
  // Short forms expressed as tel: URIs, with and without extra parameters.
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings(
                "+64 3 331-6005", "tel:331-6005;phone-context=abc.nz"));
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings(
                "+64 3 331-6005",
                "tel:331-6005;isub=1234;phone-context=abc.nz"));
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings(
                "+64 3 331-6005",
                "tel:331-6005;isub=1234;phone-context=abc.nz;a=%A1"));
  // Neither side has a country code here; one NSN is a suffix of the other.
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("3 331-6005",
                                                    "03 331 6005"));
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("3 331-6005",
                                                    "331 6005"));
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings(
                "3 331-6005", "tel:331-6005;phone-context=abc.nz"));
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("3 331-6005",
                                                    "+64 331 6005"));
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("03 331-6005",
                                                    "331 6005"));
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("1 234 345 6789",
                                                    "345 6789"));
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+1 (234) 345 6789",
                                                    "345 6789"));
  // One side carries an extension the other lacks.
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatchWithTwoStrings("+64 3 331-6005",
                                                    "3 331 6005#1234"));
  // Proto-level comparisons: leading-zero flag and extension differences
  // also downgrade the result to SHORT_NSN_MATCH.
  PhoneNumber it_number_1, it_number_2;
  it_number_1.set_country_code(39);
  it_number_1.set_national_number(uint64{1234});
  it_number_1.set_italian_leading_zero(true);
  it_number_2.set_country_code(39);
  it_number_2.set_national_number(uint64{1234});
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatch(it_number_1, it_number_2));
  it_number_1.set_extension("1234");
  it_number_1.clear_italian_leading_zero();
  it_number_2.set_extension("");
  EXPECT_EQ(PhoneNumberUtil::SHORT_NSN_MATCH,
            phone_util_.IsNumberMatch(it_number_1, it_number_2));
}
// Core Parse() coverage: national formats, tel: URIs with phone-context,
// IDD prefixes, plus signs (including doubled/zero-prefixed variants), and
// a few special cases (star numbers, short numbers, leading zeros).
TEST_F(PhoneNumberUtilTest, ParseNationalNumber) {
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(uint64{33316005});
  PhoneNumber test_number;
  // National format with the "0" national prefix attached.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("033316005", RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // NOTE(review): these two assertions inspect nz_number, the locally built
  // expected value, rather than the parsed test_number. Since the protos
  // compared equal above this is equivalent, but confirm it is intentional.
  EXPECT_FALSE(nz_number.has_country_code_source())
  EXPECT_EQ(PhoneNumber::UNSPECIFIED, nz_number.country_code_source());
  // National prefix missing and various separator styles.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("33316005", RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03-331 6005", RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 331 6005", RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // RFC 3966 tel: URIs; phone-context supplies the country code.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:03-331-6005;phone-context=+64",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:331-6005;phone-context=+64-3",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // phone-context wins over the supplied default region (US here).
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:331-6005;phone-context=+64-3",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // Leading text before the tel: URI is tolerated.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("My number is tel:03-331-6005;phone-context=+64",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // Trailing URI parameters and isub values are ignored.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:03-331-6005;phone-context=+64;a=%A1",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:03-331-6005;isub=12345;phone-context=+64",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:+64-3-331-6005;isub=12345",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // phone-context without the tel: scheme prefix.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03-331-6005;phone-context=+64",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // International dialling prefixes: NZ 00, US 011, and leading "+" before
  // an IDD.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0064 3 331 6005",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("01164 3 331 6005",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+64 3 331 6005",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+01164 3 331 6005",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+0064 3 331 6005",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+ 00 64 3 331 6005",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // A local US number given via a non-numeric phone-context domain.
  PhoneNumber us_local_number;
  us_local_number.set_country_code(1);
  us_local_number.set_national_number(uint64{2530000});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:253-0000;phone-context=www.google.com",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(us_local_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse(
                "tel:253-0000;isub=12345;phone-context=www.google.com",
                RegionCode::US(), &test_number));
  EXPECT_EQ(us_local_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:2530000;isub=12345;phone-context=1234.com",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(us_local_number, test_number);
  // "(0)" after the country code is dropped, even when the NSN itself
  // starts with the country-code digits.
  nz_number.Clear();
  nz_number.set_country_code(64);
  nz_number.set_national_number(uint64{64123456});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+64(0)64123456",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // "/" is an acceptable separator (common in Germany).
  PhoneNumber de_number;
  de_number.set_country_code(49);
  de_number.set_national_number(uint64{12345678});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("123/45678", RegionCode::DE(), &test_number));
  EXPECT_EQ(de_number, test_number);
  PhoneNumber us_number;
  us_number.set_country_code(1);
  // The number starts with "1" but that is not stripped as a country code.
  us_number.set_national_number(uint64{1234567890});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("123-456-7890", RegionCode::US(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // "*" is preserved for star (short-code style) numbers.
  PhoneNumber star_number;
  star_number.set_country_code(81);
  star_number.set_national_number(uint64{2345});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+81 *2345", RegionCode::JP(), &test_number));
  EXPECT_EQ(star_number, test_number);
  // Very short numbers still parse.
  PhoneNumber short_number;
  short_number.set_country_code(64);
  short_number.set_national_number(uint64{12});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("12", RegionCode::NZ(), &test_number));
  EXPECT_EQ(short_number, test_number);
  // GB: the leading zero is kept as italian_leading_zero for short numbers.
  short_number.set_country_code(44);
  short_number.set_national_number(123456);
  short_number.set_italian_leading_zero(true);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0123456", RegionCode::GB(), &test_number));
  EXPECT_EQ(short_number, test_number);
}
// Vanity letters in a dialled string should map to keypad digits, and a
// stray letter among the digits does not change the parsed result here.
TEST_F(PhoneNumberUtilTest, ParseNumberWithAlphaCharacters) {
  PhoneNumber parsed_number;

  PhoneNumber expected_toll_free;
  expected_toll_free.set_country_code(64);
  expected_toll_free.set_national_number(uint64{800332005});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0800 DDA 005", RegionCode::NZ(),
                              &parsed_number));
  EXPECT_EQ(expected_toll_free, parsed_number);

  PhoneNumber expected_premium;
  expected_premium.set_country_code(64);
  expected_premium.set_national_number(uint64{9003326005});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0900 DDA 6005", RegionCode::NZ(),
                              &parsed_number));
  EXPECT_EQ(expected_premium, parsed_number);
  // Single letters in various positions, lower and upper case: the parsed
  // digits are unchanged in each case.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0900 332 6005a", RegionCode::NZ(),
                              &parsed_number));
  EXPECT_EQ(expected_premium, parsed_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0900 332 600a5", RegionCode::NZ(),
                              &parsed_number));
  EXPECT_EQ(expected_premium, parsed_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0900 332 600A5", RegionCode::NZ(),
                              &parsed_number));
  EXPECT_EQ(expected_premium, parsed_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0900 a332 600A5", RegionCode::NZ(),
                              &parsed_number));
  EXPECT_EQ(expected_premium, parsed_number);
}
// Parsing with various international call prefixes: "+", regional IDDs,
// tilde-separated prefixes, a doubled plus, and fullwidth/ideographic
// Unicode digits and punctuation (given as UTF-8 byte escapes).
TEST_F(PhoneNumberUtilTest, ParseWithInternationalPrefixes) {
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(uint64{6503336000});
  PhoneNumber test_number;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+1 (650) 333-6000",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(us_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+1-650-333-6000",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // Singapore IDD "001"-family prefixes.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0011-650-333-6000",
                              RegionCode::SG(), &test_number));
  EXPECT_EQ(us_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0081-650-333-6000",
                              RegionCode::SG(), &test_number));
  EXPECT_EQ(us_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0191-650-333-6000",
                              RegionCode::SG(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // Polish IDD written with a "~" wait-for-dial-tone marker.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0~01-650-333-6000",
                              RegionCode::PL(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // A doubled leading plus is tolerated.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("++1 (650) 333-6000",
                              RegionCode::PL(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // Fullwidth plus sign (U+FF0B).
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("\xEF\xBC\x8B" "1 (650) 333-6000",
                              RegionCode::SG(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // Soft hyphen (U+00AD) embedded in the number is ignored.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("1 (650) 333" "\xC2\xAD" "-6000",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // The whole number written in fullwidth digits and punctuation.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("\xEF\xBC\x8B\xEF\xBC\x91\xE3\x80\x80\xEF\xBC\x88"
                              "\xEF\xBC\x96\xEF\xBC\x95\xEF\xBC\x90\xEF\xBC\x89"
                              "\xE3\x80\x80\xEF\xBC\x93\xEF\xBC\x93\xEF\xBC\x93"
                              "\xEF\xBC\x8D\xEF\xBC\x96\xEF\xBC\x90\xEF\xBC\x90"
                              "\xEF\xBC\x90",
                              RegionCode::SG(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // As above but with a katakana-hiragana prolonged sound mark as the dash.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("\xEF\xBC\x8B\xEF\xBC\x91\xE3\x80\x80\xEF\xBC\x88"
                              "\xEF\xBC\x96\xEF\xBC\x95\xEF\xBC\x90\xEF\xBC\x89"
                              "\xE3\x80\x80\xEF\xBC\x93\xEF\xBC\x93\xEF\xBC\x93"
                              "\xE3\x83\xBC\xEF\xBC\x96\xEF\xBC\x90\xEF\xBC\x90"
                              "\xEF\xBC\x90",
                              RegionCode::SG(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // Universal toll-free (country code 800) via the US IDD.
  PhoneNumber toll_free_number;
  toll_free_number.set_country_code(800);
  toll_free_number.set_national_number(uint64{12345678});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("011 800 1234 5678",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(toll_free_number, test_number);
}
// Italian numbers keep their leading zero; the parser records it via the
// italian_leading_zero flag on the proto.
TEST_F(PhoneNumberUtilTest, ParseWithLeadingZero) {
  PhoneNumber expected_it_number;
  expected_it_number.set_country_code(39);
  expected_it_number.set_national_number(uint64{236618300});
  expected_it_number.set_italian_leading_zero(true);

  PhoneNumber parsed_number;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+39 02-36618 300",
                              RegionCode::NZ(), &parsed_number));
  EXPECT_EQ(expected_it_number, parsed_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("02-36618 300", RegionCode::IT(),
                              &parsed_number));
  EXPECT_EQ(expected_it_number, parsed_number);

  // A number with no leading zero parses without the flag being set.
  expected_it_number.Clear();
  expected_it_number.set_country_code(39);
  expected_it_number.set_national_number(uint64{312345678});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("312 345 678", RegionCode::IT(),
                              &parsed_number));
  EXPECT_EQ(expected_it_number, parsed_number);
}
// Argentinian mobile and fixed-line numbers: the national-format variants
// (with the "15" mobile token or the "0" national prefix) must parse to the
// same proto as the corresponding international "+54 ..." form.
TEST_F(PhoneNumberUtilTest, ParseNationalNumberArgentina) {
  PhoneNumber ar_number;
  ar_number.set_country_code(54);
  ar_number.set_national_number(uint64{93435551212});
  PhoneNumber test_number;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+54 9 343 555 1212", RegionCode::AR(),
                              &test_number));
  EXPECT_EQ(ar_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0343 15 555 1212", RegionCode::AR(),
                              &test_number));
  EXPECT_EQ(ar_number, test_number);
  ar_number.set_national_number(uint64{93715654320});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+54 9 3715 65 4320", RegionCode::AR(),
                              &test_number));
  EXPECT_EQ(ar_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03715 15 65 4320", RegionCode::AR(),
                              &test_number));
  EXPECT_EQ(ar_number, test_number);
  // Fixed-line examples (no mobile "9"/"15" token).
  ar_number.set_national_number(uint64{1137970000});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+54 11 3797 0000", RegionCode::AR(),
                              &test_number));
  EXPECT_EQ(ar_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("011 3797 0000", RegionCode::AR(), &test_number));
  EXPECT_EQ(ar_number, test_number);
  ar_number.set_national_number(uint64{3715654321});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+54 3715 65 4321", RegionCode::AR(),
                              &test_number));
  EXPECT_EQ(ar_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03715 65 4321", RegionCode::AR(), &test_number));
  EXPECT_EQ(ar_number, test_number);
  ar_number.set_national_number(uint64{2312340000});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+54 23 1234 0000", RegionCode::AR(),
                              &test_number));
  EXPECT_EQ(ar_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("023 1234 0000", RegionCode::AR(), &test_number));
  EXPECT_EQ(ar_number, test_number);
}
// Argentinian conventions write "x"/"xx" inside a number (e.g. "(0xx)");
// these must not be mistaken for extension markers.
TEST_F(PhoneNumberUtilTest, ParseWithXInNumber) {
  PhoneNumber expected_ar_number;
  expected_ar_number.set_country_code(54);
  expected_ar_number.set_national_number(uint64{123456789});

  PhoneNumber parsed_number;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0123456789", RegionCode::AR(), &parsed_number));
  EXPECT_EQ(expected_ar_number, parsed_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("(0) 123456789", RegionCode::AR(),
                              &parsed_number));
  EXPECT_EQ(expected_ar_number, parsed_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0 123456789", RegionCode::AR(),
                              &parsed_number));
  EXPECT_EQ(expected_ar_number, parsed_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("(0xx) 123456789", RegionCode::AR(),
                              &parsed_number));
  EXPECT_EQ(expected_ar_number, parsed_number);

  // "xx" right after the US IDD "011": the remainder still parses as an
  // Argentinian number rather than a number with an extension.
  PhoneNumber expected_ar_from_us;
  expected_ar_from_us.set_country_code(54);
  expected_ar_from_us.set_national_number(uint64{81429712});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("011xx5481429712", RegionCode::US(),
                              &parsed_number));
  EXPECT_EQ(expected_ar_from_us, parsed_number);
}
// Mexican numbers: fixed-line forms (with/without the "01" national
// prefix) and mobile forms that carry the "1" after the country code (or
// the "044"/"045" national mobile prefixes).
TEST_F(PhoneNumberUtilTest, ParseNumbersMexico) {
  PhoneNumber mx_number;
  mx_number.set_country_code(52);
  mx_number.set_national_number(uint64{4499780001});
  PhoneNumber test_number;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+52 (449)978-0001", RegionCode::MX(),
                              &test_number));
  EXPECT_EQ(mx_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("01 (449)978-0001", RegionCode::MX(),
                              &test_number));
  EXPECT_EQ(mx_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("(449)978-0001", RegionCode::MX(),
                              &test_number));
  EXPECT_EQ(mx_number, test_number);
  // Mobile number: note the "1" retained after the country code.
  mx_number.Clear();
  mx_number.set_country_code(52);
  mx_number.set_national_number(uint64{13312345678});
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+52 1 33 1234-5678", RegionCode::MX(),
                              &test_number));
  EXPECT_EQ(mx_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("044 (33) 1234-5678", RegionCode::MX(),
                              &test_number));
  EXPECT_EQ(mx_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("045 33 1234-5678", RegionCode::MX(),
                              &test_number));
  EXPECT_EQ(mx_number, test_number);
}
// Inputs that must fail to parse, each with its specific error code; in
// every case the output proto is left cleared (default instance).
TEST_F(PhoneNumberUtilTest, FailedParseOnInvalidNumbers) {
  PhoneNumber test_number;
  // No digits at all, or not enough digits among the letters.
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("This is not a phone number", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("1 Still not a number", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("1 MICROSOFT", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("12 MICROSOFT", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // Too many digits for any plausible number.
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG_NSN,
            phone_util_.Parse("01495 72553301873 810104", RegionCode::GB(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // Plus sign followed by no usable digits.
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("+---", RegionCode::DE(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("+***", RegionCode::DE(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("+*******91", RegionCode::DE(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // Only one digit left after the country code.
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT_NSN,
            phone_util_.Parse("+49 0", RegionCode::DE(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // Unassigned or unknown country codes / regions.
  EXPECT_EQ(PhoneNumberUtil::INVALID_COUNTRY_CODE_ERROR,
            phone_util_.Parse("+210 3456 56789", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::INVALID_COUNTRY_CODE_ERROR,
            phone_util_.Parse("+ 00 210 3 331 6005", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::INVALID_COUNTRY_CODE_ERROR,
            phone_util_.Parse("123 456 7890", RegionCode::GetUnknown(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // CS (Serbia & Montenegro) is a deprecated region code.
  EXPECT_EQ(PhoneNumberUtil::INVALID_COUNTRY_CODE_ERROR,
            phone_util_.Parse("123 456 7890", RegionCode::CS(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // IDD recognised but nothing (or too little) follows it.
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT_AFTER_IDD,
            phone_util_.Parse("0044-----", RegionCode::GB(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT_AFTER_IDD,
            phone_util_.Parse("0044", RegionCode::GB(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT_AFTER_IDD,
            phone_util_.Parse("011", RegionCode::US(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::TOO_SHORT_AFTER_IDD,
            phone_util_.Parse("0119", RegionCode::US(),
                              &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // tel: URIs with an unusable or missing phone-context.
  EXPECT_EQ(PhoneNumberUtil::INVALID_COUNTRY_CODE_ERROR,
            phone_util_.Parse("tel:555-1234;phone-context=www.google.com",
                              RegionCode::ZZ(), &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("tel:555-1234;phone-context=1-331",
                              RegionCode::ZZ(), &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse(";phone-context=",
                              RegionCode::ZZ(), &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
}
// With an unknown default region, parsing still succeeds whenever the
// input itself supplies the country code: a leading "+" (ASCII or
// fullwidth) or a tel: URI phone-context.
TEST_F(PhoneNumberUtilTest, ParseNumbersWithPlusWithNoRegion) {
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(uint64{33316005});
  PhoneNumber result_proto;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+64 3 331 6005", RegionCode::GetUnknown(),
                              &result_proto));
  EXPECT_EQ(nz_number, result_proto);
  result_proto.Clear();
  // Fullwidth plus sign (U+FF0B) instead of ASCII "+".
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("\xEF\xBC\x8B" "64 3 331 6005",
                              RegionCode::GetUnknown(), &result_proto));
  EXPECT_EQ(nz_number, result_proto);
  // Leading whitespace before the plus is tolerated.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse(" +64 3 331 6005", RegionCode::GetUnknown(),
                              &result_proto));
  EXPECT_EQ(nz_number, result_proto);
  // Non-geographical country codes also work with no default region.
  PhoneNumber toll_free_number;
  toll_free_number.set_country_code(800);
  toll_free_number.set_national_number(uint64{12345678});
  result_proto.Clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+800 1234 5678",
                              RegionCode::GetUnknown(), &result_proto));
  EXPECT_EQ(toll_free_number, result_proto);
  PhoneNumber universal_premium_rate;
  universal_premium_rate.set_country_code(979);
  universal_premium_rate.set_national_number(uint64{123456789});
  result_proto.Clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+979 123 456 789",
                              RegionCode::GetUnknown(), &result_proto));
  EXPECT_EQ(universal_premium_rate, result_proto);
  // tel: URIs: the phone-context supplies the country code.
  result_proto.Clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:03-331-6005;phone-context=+64",
                              RegionCode::GetUnknown(), &result_proto));
  EXPECT_EQ(nz_number, result_proto);
  result_proto.Clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse(" tel:03-331-6005;phone-context=+64",
                              RegionCode::GetUnknown(), &result_proto));
  EXPECT_EQ(nz_number, result_proto);
  result_proto.Clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:03-331-6005;isub=12345;phone-context=+64",
                              RegionCode::GetUnknown(), &result_proto));
  EXPECT_EQ(nz_number, result_proto);
  // ParseAndKeepRawInput additionally records raw_input and the
  // country_code_source.
  nz_number.set_raw_input("+64 3 331 6005");
  nz_number.set_country_code_source(PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN);
  result_proto.Clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("+64 3 331 6005",
                                             RegionCode::GetUnknown(),
                                             &result_proto));
  EXPECT_EQ(nz_number, result_proto);
}
// Belarus uses "8" as a national prefix. For the short inputs below,
// stripping the 8 would leave too few digits, so the 8 stays part of the
// number; only the final, longer input has its prefix removed.
TEST_F(PhoneNumberUtilTest, ParseNumberTooShortIfNationalPrefixStripped) {
  PhoneNumber parsed_number;
  PhoneNumber expected_by_number;
  expected_by_number.set_country_code(375);
  expected_by_number.set_national_number(8123L);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("8123", RegionCode::BY(),
                              &parsed_number));
  EXPECT_EQ(expected_by_number, parsed_number);
  expected_by_number.set_national_number(81234L);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("81234", RegionCode::BY(),
                              &parsed_number));
  EXPECT_EQ(expected_by_number, parsed_number);
  expected_by_number.set_national_number(812345L);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("812345", RegionCode::BY(),
                              &parsed_number));
  EXPECT_EQ(expected_by_number, parsed_number);
  // Long enough now: the leading 8 is stripped as the national prefix.
  expected_by_number.set_national_number(123456L);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("8123456", RegionCode::BY(),
                              &parsed_number));
  EXPECT_EQ(expected_by_number, parsed_number);
}
TEST_F(PhoneNumberUtilTest, ParseExtensions) {
PhoneNumber nz_number;
nz_number.set_country_code(64);
nz_number.set_national_number(uint64{33316005});
nz_number.set_extension("3456");
PhoneNumber test_number;
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("03 331 6005 ext 3456", RegionCode::NZ(),
&test_number));
EXPECT_EQ(nz_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("03 331 6005x3456", RegionCode::NZ(),
&test_number));
EXPECT_EQ(nz_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("03-331 6005 int.3456", RegionCode::NZ(),
&test_number));
EXPECT_EQ(nz_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("03 331 6005 #3456", RegionCode::NZ(),
&test_number));
EXPECT_EQ(nz_number, test_number);
PhoneNumber non_extn_number;
non_extn_number.set_country_code(1);
non_extn_number.set_national_number(uint64{80074935247});
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("1800 six-flags", RegionCode::US(),
&test_number));
EXPECT_EQ(non_extn_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("1800 SIX-FLAGS", RegionCode::US(),
&test_number));
EXPECT_EQ(non_extn_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("0~0 1800 7493 5247", RegionCode::PL(),
&test_number));
EXPECT_EQ(non_extn_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(1800) 7493.5247", RegionCode::US(),
&test_number));
EXPECT_EQ(non_extn_number, test_number);
PhoneNumber extn_number;
extn_number.set_country_code(1);
extn_number.set_national_number(uint64{80074935247});
extn_number.set_extension("1234");
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("0~0 1800 7493 5247 ~1234", RegionCode::PL(),
&test_number));
EXPECT_EQ(extn_number, test_number);
PhoneNumber uk_number;
uk_number.set_country_code(44);
uk_number.set_national_number(uint64{2034567890});
uk_number.set_extension("456");
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44 2034567890x456", RegionCode::NZ(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44 2034567890x456", RegionCode::GB(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44 2034567890 x456", RegionCode::GB(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44 2034567890 X456", RegionCode::GB(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44 2034567890 X 456", RegionCode::GB(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44 2034567890 X 456", RegionCode::GB(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44 2034567890 x 456 ", RegionCode::GB(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44 2034567890 X 456", RegionCode::GB(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44-2034567890;ext=456", RegionCode::GB(),
&test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("tel:2034567890;ext=456;phone-context=+44",
RegionCode::ZZ(), &test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse(
"+442034567890\xEF\xBD\x85\xEF\xBD\x98\xEF\xBD\x94\xEF\xBD\x8E"
"456", RegionCode::GB(), &test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse(
"+44-2034567890\xEF\xBD\x98\xEF\xBD\x94\xEF\xBD\x8E""456",
RegionCode::GB(), &test_number));
EXPECT_EQ(uk_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+44-2034567890\xEF\xBD\x98\xEF\xBD\x94""456",
RegionCode::GB(), &test_number));
EXPECT_EQ(uk_number, test_number);
PhoneNumber us_with_extension;
us_with_extension.set_country_code(1);
us_with_extension.set_national_number(uint64{8009013355});
us_with_extension.set_extension("7246433");
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355 x 7246433", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355 , ext 7246433", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355 ; 7246433", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355;7246433", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355 ,extension 7246433",
RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355 ,extensi\xC3\xB3n 7246433",
RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355 ,extensio\xCC\x81n 7246433",
RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355 , 7246433", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(800) 901-3355 ext: 7246433", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
PhoneNumber ru_with_extension;
ru_with_extension.set_country_code(7);
ru_with_extension.set_national_number(4232022511L);
ru_with_extension.set_extension("100");
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse(
"8 (423) 202-25-11, \xd0\xb4\xd0\xbe\xd0\xb1. 100",
RegionCode::RU(), &test_number));
EXPECT_EQ(ru_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse(
"8 (423) 202-25-11 \xd0\xb4\xd0\xbe\xd0\xb1. 100",
RegionCode::RU(), &test_number));
EXPECT_EQ(ru_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse(
"8 (423) 202-25-11, \xd0\xb4\xd0\xbe\xd0\xb1 100",
RegionCode::RU(), &test_number));
EXPECT_EQ(ru_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse(
"8 (423) 202-25-11 \xd0\xb4\xd0\xbe\xd0\xb1 100",
RegionCode::RU(), &test_number));
EXPECT_EQ(ru_with_extension, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse(
"8 (423) 202-25-11\xd0\xb4\xd0\xbe\xd0\xb1 100",
RegionCode::RU(), &test_number));
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse(
"8 (423) 202-25-11 \xd0\x94\xd0\x9e\xd0\x91 100",
RegionCode::RU(), &test_number));
EXPECT_EQ(ru_with_extension, test_number);
PhoneNumber us_with_two_extensions_number;
us_with_two_extensions_number.set_country_code(1);
us_with_two_extensions_number.set_national_number(uint64{2121231234});
us_with_two_extensions_number.set_extension("508");
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(212)123-1234 x508/x1234", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_two_extensions_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(212)123-1234 x508/ x1234", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_two_extensions_number, test_number);
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("(212)123-1234 x508\\x1234", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_two_extensions_number, test_number);
us_with_extension.Clear();
us_with_extension.set_country_code(1);
us_with_extension.set_national_number(uint64{6451231234});
us_with_extension.set_extension("910");
EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
phone_util_.Parse("+1 (645) 123 1234-910#", RegionCode::US(),
&test_number));
EXPECT_EQ(us_with_extension, test_number);
}
// Verifies that extensions introduced by an explicit label (";ext=", "ext:",
// "extension", misspellings like "xtn"/"xtensio", and the Russian "доб") are
// accepted up to 20 digits, and that 21 digits are rejected.
TEST_F(PhoneNumberUtilTest, TestParseHandlesLongExtensionsWithExplicitLabels) {
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(33316005ULL);
  PhoneNumber test_number;
  // Lower bound: a single-digit RFC 3966 ";ext=" extension parses.
  nz_number.set_extension("0");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:+6433316005;ext=0", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(nz_number, test_number);
  // Upper bound: 20 digits after ";ext=" still parse.
  nz_number.set_extension("01234567890123456789");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:+6433316005;ext=01234567890123456789",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // 21 digits after ";ext=" is rejected outright.
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("tel:+6433316005;ext=012345678901234567890",
                              RegionCode::NZ(), &test_number));
  // Non-RFC3966 labels: single digit works with "ext:".
  nz_number.set_extension("1");
  EXPECT_EQ(
      PhoneNumberUtil::NO_PARSING_ERROR,
      phone_util_.Parse("03 3316005ext:1", RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // 20 digits accepted after a variety of explicit-label spellings.
  nz_number.set_extension("12345678901234567890");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 xtn:12345678901234567890",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 extension\t12345678901234567890",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 xtensio:12345678901234567890",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 xtensión, 12345678901234567890#",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005extension.12345678901234567890",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // Russian extension label "доб" (UTF-8) also gets the 20-digit limit.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 доб:12345678901234567890",
                              RegionCode::NZ(), &test_number));
  EXPECT_EQ(nz_number, test_number);
  // 21 digits after a non-RFC3966 label fails as a too-long NSN.
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG_NSN,
            phone_util_.Parse("03 3316005 extension 123456789012345678901",
                              RegionCode::NZ(), &test_number));
}
// Verifies that extensions introduced by auto-dialling separators ("," ","",
// ";") are accepted up to 15 digits and rejected at 16.
TEST_F(PhoneNumberUtilTest,
       TestParseHandlesLongExtensionsWithAutoDiallingLabels) {
  PhoneNumber us_number_user_input;
  us_number_user_input.set_country_code(1);
  us_number_user_input.set_national_number(2679000000ULL);
  PhoneNumber test_number;
  // Upper bound: a 15-digit extension after ",," parses.
  us_number_user_input.set_extension("123456789012345");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+12679000000,,123456789012345#",
                              RegionCode::US(), &test_number));
  EXPECT_EQ(us_number_user_input, test_number);
  // Same extension after a ";" separator.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+12679000000;123456789012345#", RegionCode::US(),
                              &test_number));
  EXPECT_EQ(us_number_user_input, test_number);
  PhoneNumber uk_number_user_input;
  uk_number_user_input.set_country_code(44);
  uk_number_user_input.set_national_number(2034000000ULL);
  uk_number_user_input.set_extension("123456789");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+442034000000,,123456789#", RegionCode::GB(),
                              &test_number));
  // Bug fix: the parse result was previously never compared against the
  // expected UK number, leaving the expectation dead and the case able to
  // regress silently.
  EXPECT_EQ(uk_number_user_input, test_number);
  // 16 digits after ",," exceeds the auto-dialling extension limit.
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("+12679000000,,1234567890123456#",
                              RegionCode::US(), &test_number));
}
// Verifies that extensions introduced by an ambiguous single character
// ("x", "x.", "#", "~") are accepted up to 9 digits and rejected at 10.
TEST_F(PhoneNumberUtilTest, TestParseHandlesShortExtensionsWithAmbiguousChar) {
  PhoneNumber nz_number;
  nz_number.set_country_code(64);
  nz_number.set_national_number(33316005ULL);
  PhoneNumber test_number;
  // Upper bound: 9-digit extension parses after each ambiguous marker.
  nz_number.set_extension("123456789");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 x 123456789", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 x. 123456789", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 #123456789#", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(nz_number, test_number);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("03 3316005 ~ 123456789", RegionCode::NZ(),
                              &test_number));
  EXPECT_EQ(nz_number, test_number);
  // 10 digits after "~" exceeds the limit and fails as too-long NSN.
  EXPECT_EQ(PhoneNumberUtil::TOO_LONG_NSN,
            phone_util_.Parse("03 3316005 ~ 1234567890", RegionCode::NZ(),
                              &test_number));
}
// Verifies extension limits when there is no label at all: a trailing "#"
// allows up to 6 digits, a "-" separator allows a single digit, and 7 digits
// with no label are rejected.
TEST_F(PhoneNumberUtilTest, TestParseHandlesShortExtensionsWhenNotSureOfLabel) {
  PhoneNumber us_number;
  us_number.set_country_code(1);
  us_number.set_national_number(1234567890ULL);
  PhoneNumber test_number;
  // 6 digits before a trailing "#" parse as an extension.
  us_number.set_extension("666666");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("+1123-456-7890 666666#", RegionCode::US(),
                              &test_number));
  EXPECT_EQ(us_number, test_number);
  // A single digit after "-" and before "#" parses as an extension.
  us_number.set_extension("6");
  EXPECT_EQ(
      PhoneNumberUtil::NO_PARSING_ERROR,
      phone_util_.Parse("+11234567890-6#", RegionCode::US(), &test_number));
  EXPECT_EQ(us_number, test_number);
  // 7 digits with no explicit label are rejected.
  EXPECT_EQ(PhoneNumberUtil::NOT_A_NUMBER,
            phone_util_.Parse("+1123-456-7890 7777777#", RegionCode::US(),
                              &test_number));
}
// Verifies ParseAndKeepRawInput: the raw input string, the detected
// country_code_source, and (for KR) the preferred domestic carrier code are
// all preserved on the parsed number.
TEST_F(PhoneNumberUtilTest, ParseAndKeepRaw) {
  PhoneNumber alpha_numeric_number;
  alpha_numeric_number.set_country_code(1);
  alpha_numeric_number.set_national_number(uint64{80074935247});
  alpha_numeric_number.set_raw_input("800 six-flags");
  // No prefix at all: country code comes from the default region.
  alpha_numeric_number.set_country_code_source(
      PhoneNumber::FROM_DEFAULT_COUNTRY);
  PhoneNumber test_number;
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("800 six-flags", RegionCode::US(),
                                             &test_number));
  EXPECT_EQ(alpha_numeric_number, test_number);
  // Leading "1" without "+": source is FROM_NUMBER_WITHOUT_PLUS_SIGN.
  alpha_numeric_number.set_national_number(uint64{8007493524});
  alpha_numeric_number.set_raw_input("1800 six-flag");
  alpha_numeric_number.set_country_code_source(
      PhoneNumber::FROM_NUMBER_WITHOUT_PLUS_SIGN);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("1800 six-flag", RegionCode::US(),
                                             &test_number));
  EXPECT_EQ(alpha_numeric_number, test_number);
  // Explicit "+1": source is FROM_NUMBER_WITH_PLUS_SIGN, even with a
  // non-matching default region (CN).
  alpha_numeric_number.set_raw_input("+1800 six-flag");
  alpha_numeric_number.set_country_code_source(
      PhoneNumber::FROM_NUMBER_WITH_PLUS_SIGN);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("+1800 six-flag", RegionCode::CN(),
                                             &test_number));
  EXPECT_EQ(alpha_numeric_number, test_number);
  // IDD prefix "00" (valid from NZ): source is FROM_NUMBER_WITH_IDD.
  alpha_numeric_number.set_raw_input("001800 six-flag");
  alpha_numeric_number.set_country_code_source(
      PhoneNumber::FROM_NUMBER_WITH_IDD);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("001800 six-flag",
                                             RegionCode::NZ(),
                                             &test_number));
  EXPECT_EQ(alpha_numeric_number, test_number);
  // Invalid region code (CS): parse fails and the output proto is untouched.
  test_number.Clear();
  EXPECT_EQ(PhoneNumberUtil::INVALID_COUNTRY_CODE_ERROR,
            phone_util_.Parse("123 456 7890", RegionCode::CS(), &test_number));
  EXPECT_EQ(PhoneNumber::default_instance(), test_number);
  // KR number dialled with carrier code "81": the carrier code is captured as
  // preferred_domestic_carrier_code.
  PhoneNumber korean_number;
  korean_number.set_country_code(82);
  korean_number.set_national_number(22123456);
  korean_number.set_raw_input("08122123456");
  korean_number.set_country_code_source(PhoneNumber::FROM_DEFAULT_COUNTRY);
  korean_number.set_preferred_domestic_carrier_code("81");
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.ParseAndKeepRawInput("08122123456",
                                             RegionCode::KR(),
                                             &test_number));
  EXPECT_EQ(korean_number, test_number);
}
// Verifies that leading zeros are preserved via italian_leading_zero and
// number_of_leading_zeros rather than being lost in the numeric
// national_number field.
TEST_F(PhoneNumberUtilTest, ParseItalianLeadingZeros) {
  PhoneNumber zeros_number;
  zeros_number.set_country_code(61);
  PhoneNumber test_number;
  // "011": one leading zero (the default number_of_leading_zeros), NSN 11.
  zeros_number.set_national_number(11L);
  zeros_number.set_italian_leading_zero(true);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("011", RegionCode::AU(),
                              &test_number));
  EXPECT_EQ(zeros_number, test_number);
  // "001": two leading zeros, NSN 1.
  zeros_number.set_national_number(1L);
  zeros_number.set_italian_leading_zero(true);
  zeros_number.set_number_of_leading_zeros(2);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("001", RegionCode::AU(),
                              &test_number));
  EXPECT_EQ(zeros_number, test_number);
  // "000": two leading zeros with NSN 0 (the final zero is the NSN itself).
  zeros_number.set_national_number(0L);
  zeros_number.set_italian_leading_zero(true);
  zeros_number.set_number_of_leading_zeros(2);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("000", RegionCode::AU(),
                              &test_number));
  EXPECT_EQ(zeros_number, test_number);
  // "0000": three leading zeros with NSN 0.
  zeros_number.set_national_number(0L);
  zeros_number.set_italian_leading_zero(true);
  zeros_number.set_number_of_leading_zeros(3);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("0000", RegionCode::AU(),
                              &test_number));
  EXPECT_EQ(zeros_number, test_number);
}
// Verifies RFC 3966 "phone-context" handling: valid global-number-digit and
// domainname contexts are accepted (the former supplying the country/prefix
// digits), and malformed contexts cause a parse failure.
TEST_F(PhoneNumberUtilTest, ParseWithPhoneContext) {
  // context    = ";phone-context=" descriptor
  // descriptor = domainname / global-number-digits
  PhoneNumber expected_number;
  expected_number.set_country_code(64);
  expected_number.set_national_number(33316005L);
  PhoneNumber actual_number;
  // Plain "+64" context supplies the country code.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=+64",
                              RegionCode::ZZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  // Anything after a ";" following the context is ignored.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=+64;{this isn't "
                              "part of phone-context anymore!}",
                              RegionCode::ZZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  // "+64-3": the context may carry extra prefix digits beyond the country
  // code; they are prepended to the national number.
  expected_number.set_national_number(3033316005L);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=+64-3",
                              RegionCode::ZZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  // Visual separators like parentheses are permitted in the context digits.
  expected_number.set_country_code(55);
  expected_number.set_national_number(5033316005L);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=+(555)",
                              RegionCode::ZZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  // Mixed separators ("-", ".", "()") are all stripped from context digits.
  expected_number.set_country_code(1);
  expected_number.set_national_number(23033316005L);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=+-1-2.3()",
                              RegionCode::ZZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  // Domainname contexts carry no digits; the default region (NZ) is used.
  expected_number.set_country_code(64);
  expected_number.set_national_number(33316005L);
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=abc.nz",
                              RegionCode::NZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  EXPECT_EQ(
      PhoneNumberUtil::NO_PARSING_ERROR,
      phone_util_.Parse("tel:033316005;phone-context=www.PHONE-numb3r.com",
                        RegionCode::NZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  // Minimal valid domainnames: a single label, a digit-led label with a
  // trailing dot, and a label containing consecutive hyphens.
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=a", RegionCode::NZ(),
                              &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=3phone.J.",
                              RegionCode::NZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  actual_number.Clear();
  EXPECT_EQ(PhoneNumberUtil::NO_PARSING_ERROR,
            phone_util_.Parse("tel:033316005;phone-context=a--z",
                              RegionCode::NZ(), &actual_number));
  EXPECT_EQ(expected_number, actual_number);
  // Invalid contexts: empty, bare/doubled "+", non-digit global numbers,
  // bare dot, label ending in a digit-only token, label ending in "-",
  // and characters outside the domainname alphabet.
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=");
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=+");
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=64");
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=++64");
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=+abc");
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=.");
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=3phone");
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=a-.nz");
  AssertThrowsForInvalidPhoneContext("tel:033316005;phone-context=a{b}c");
}
// Verifies CanBeInternationallyDialled for US toll-free (not diallable),
// regular US, short US, NZ, and universal toll-free (+800) numbers.
TEST_F(PhoneNumberUtilTest, CanBeInternationallyDialled) {
  PhoneNumber number;
  // US toll-free is not reachable from abroad.
  number.set_country_code(1);
  number.set_national_number(uint64{8002530000});
  EXPECT_FALSE(phone_util_.CanBeInternationallyDialled(number));
  // An ordinary US geographic number is.
  number.set_national_number(uint64{6502530000});
  EXPECT_TRUE(phone_util_.CanBeInternationallyDialled(number));
  // A short (invalid-length) US number is also treated as diallable.
  number.set_national_number(uint64{2530000});
  EXPECT_TRUE(phone_util_.CanBeInternationallyDialled(number));
  // An NZ number is diallable internationally.
  number.set_country_code(64);
  number.set_national_number(uint64{33316005});
  EXPECT_TRUE(phone_util_.CanBeInternationallyDialled(number));
  // Universal international toll-free (country code 800).
  number.set_country_code(800);
  number.set_national_number(uint64{12345678});
  EXPECT_TRUE(phone_util_.CanBeInternationallyDialled(number));
}
// Verifies IsAlphaNumber: a number counts as "alpha" when the portion before
// any extension contains letters and the prefix is long enough to be a
// plausible number; pure-digit numbers and too-short prefixes do not.
TEST_F(PhoneNumberUtilTest, IsAlphaNumber) {
  EXPECT_TRUE(phone_util_.IsAlphaNumber("1800 six-flags"));
  // The extension is stripped before the alpha check.
  EXPECT_TRUE(phone_util_.IsAlphaNumber("1800 six-flags ext. 1234"));
  EXPECT_TRUE(phone_util_.IsAlphaNumber("+800 six-flags"));
  EXPECT_TRUE(phone_util_.IsAlphaNumber("180 six-flags"));
  // All-digit numbers are not alpha numbers.
  EXPECT_FALSE(phone_util_.IsAlphaNumber("1800 123-1234"));
  // Prefixes of one or two digits are too short to qualify.
  EXPECT_FALSE(phone_util_.IsAlphaNumber("1 six-flags"));
  EXPECT_FALSE(phone_util_.IsAlphaNumber("18 six-flags"));
  EXPECT_FALSE(phone_util_.IsAlphaNumber("1800 123-1234 extension: 1234"));
  EXPECT_FALSE(phone_util_.IsAlphaNumber("+800 1234-1234"));
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/phonenumberutil.cc | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/phonenumberutil_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
99f5a28d-e886-4282-beb8-9ab9d5c4ff0d | cpp | tensorflow/tensorflow | optimize_function_graph_utils | tensorflow/core/common_runtime/optimize_function_graph_utils.cc | tensorflow/core/common_runtime/optimize_function_graph_utils_test.cc | #include "tensorflow/core/common_runtime/optimize_function_graph_utils.h"
#include <algorithm>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "tensorflow/core/common_runtime/composite_device.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_body.h"
#include "tensorflow/core/common_runtime/function_def_utils.h"
#include "tensorflow/core/common_runtime/function_optimization_registry.h"
#include "tensorflow/core/common_runtime/function_utils.h"
#include "tensorflow/core/common_runtime/local_device.h"
#include "tensorflow/core/common_runtime/optimization_registry.h"
#include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include "tensorflow/core/common_runtime/partitioning_utils.h"
#include "tensorflow/core/common_runtime/placer.h"
#include "tensorflow/core/common_runtime/replicate_per_replica_nodes.h"
#include "tensorflow/core/framework/device.h"
#include "tensorflow/core/framework/function.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/framework/optimized_function_graph.pb.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/graph/graph_node_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/refcount.h"
#include "tensorflow/core/util/debug_data_dumper.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/host_info.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace tensorflow {
namespace {
// Returns InvalidArgument if any arg in `args` is declared as a list of
// tensors (has a number_attr or type_list_attr); multi-device functions
// only support single-tensor inputs/outputs. `arg_type` ("input"/"output")
// and `function_name` are used in the error message only.
Status ValidateNoListArguments(
    const protobuf::RepeatedPtrField<OpDef::ArgDef>& args, const char* arg_type,
    const string& function_name) {
  for (const OpDef::ArgDef& arg : args) {
    const bool is_tensor_list =
        !arg.number_attr().empty() || !arg.type_list_attr().empty();
    if (!is_tensor_list) continue;
    return errors::InvalidArgument(
        "Function ", function_name, " has an ", arg_type, " named \"",
        arg.name(),
        "\" that is a list of tensors."
        " Multi-device functions support only single-tensor inputs "
        " and outputs");
  }
  return absl::OkStatus();
}
// Validates that `fdef` can be instantiated as a multi-device function under
// `options`: no list-typed args/outputs, no ints-on-device attribute, and
// input/output device lists whose lengths match the signature's arity.
Status ValidateMultiDeviceOptions(
    const FunctionDef& fdef,
    const FunctionLibraryRuntime::InstantiateOptions& options) {
  const OpDef& signature = fdef.signature();
  // Multi-device functions accept only single-tensor inputs and outputs.
  TF_RETURN_IF_ERROR(ValidateNoListArguments(signature.input_arg(), "input",
                                             signature.name()));
  TF_RETURN_IF_ERROR(ValidateNoListArguments(signature.output_arg(), "output",
                                             signature.name()));
  // The kIntsOnDeviceAttr attribute is not supported in this code path.
  if (fdef.attr().count(FunctionLibraryDefinition::kIntsOnDeviceAttr) != 0 &&
      fdef.attr().at(FunctionLibraryDefinition::kIntsOnDeviceAttr).b()) {
    return errors::Unimplemented(
        "Function '", signature.name(), "' has `",
        FunctionLibraryDefinition::kIntsOnDeviceAttr,
        "` attribute set. This attribute is not currently supported by "
        "multi-device functions.");
  }
  // Every argument must have exactly one input device.
  if (options.input_devices.size() != signature.input_arg_size()) {
    return errors::InvalidArgument(
        "InstantiateOptions.input_devices must have the same length "
        "as the number of arguments: input_devices length = ",
        options.input_devices.size(),
        " number of arguments = ", signature.input_arg_size());
  }
  // Output devices are optional, but if given must cover every output.
  if (!options.output_devices.empty() &&
      options.output_devices.size() != signature.output_arg_size()) {
    return errors::InvalidArgument(
        "InstantiateOptions.output_devices must either be empty or have the "
        "same length as the number of arguments: output_devices length = ",
        options.output_devices.size(),
        " number of arguments = ", signature.output_arg_size());
  }
  return absl::OkStatus();
}
// For each _Arg node of resource type whose index appears in
// `input_resource_dtypes_and_shapes`, attaches "_handle_dtypes" and
// "_handle_shapes" attributes describing the dtype/shape of the tensor the
// resource handle points to (used downstream, e.g. by shape inference).
Status SetArgShape(const std::unordered_map<int, DtypeAndPartialTensorShape>&
                       input_resource_dtypes_and_shapes,
                   const std::vector<Node*>& arg_nodes) {
  for (Node* n : arg_nodes) {
    // "index" identifies which function argument this _Arg node represents.
    int index;
    TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "index", &index));
    DataType dtype;
    TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "T", &dtype));
    // Only resource-typed args carry handle dtype/shape metadata.
    if (dtype == DT_RESOURCE) {
      auto dtype_and_shape_iter = input_resource_dtypes_and_shapes.find(index);
      if (dtype_and_shape_iter != input_resource_dtypes_and_shapes.end()) {
        AttrValue dtype_attr_value;
        dtype_attr_value.mutable_list()->add_type(
            dtype_and_shape_iter->second.dtype);
        n->AddAttr("_handle_dtypes", dtype_attr_value);
        TensorShapeProto shape_proto;
        dtype_and_shape_iter->second.shape.AsProto(&shape_proto);
        AttrValue shape_attr_value;
        *shape_attr_value.mutable_list()->add_shape() = shape_proto;
        n->AddAttr("_handle_shapes", shape_attr_value);
      }
    }
  }
  return absl::OkStatus();
}
// Returns the node's assigned device name if one has been set, otherwise its
// requested device name. The returned pointer aliases `node`'s storage.
const string* AssignedOrRequestedDeviceName(const Node& node) {
  return node.has_assigned_device_name() ? &node.assigned_device_name()
                                         : &node.requested_device();
}
// Writes the first colocation group listed on `node`'s _class attribute into
// `*group`; leaves `*group` untouched if the attribute is absent or empty.
void GetColocationGroup(const Node* node, string* group) {
  static const StringPiece kColocationAttrNameStringPiece(kColocationAttrName);
  const AttrValue* attr_value =
      node->attrs().Find(kColocationAttrNameStringPiece);
  if (attr_value == nullptr) return;
  if (!attr_value->has_list() || attr_value->list().s_size() <= 0) return;
  *group = attr_value->list().s(0);
}
// Serializes `optimized_function_graph_info` to `file_name` under `dir_name`,
// creating the directory if needed. Writes to a uniquely named temp file
// first and renames it into place so readers never observe a partial write
// (atomic only if the filesystem supports atomic moves — a warning is logged
// otherwise).
Status WriteToCache(const std::string& dir_name, const std::string& file_name,
                    OptimizedFunctionGraphInfo& optimized_function_graph_info,
                    Env* env) {
  const absl::Time cache_writing_start_time = absl::Now();
  OptimizedFunctionGraph optimized_function_graph_proto;
  string optimized_function_graph_proto_str;
  optimized_function_graph_proto =
      OptimizedFunctionGraphInfo::ToProto(optimized_function_graph_info);
  optimized_function_graph_proto.SerializeToString(
      &optimized_function_graph_proto_str);
  // Ensure the cache directory exists before writing into it.
  if (!env->FileExists(dir_name).ok()) {
    TF_RETURN_IF_ERROR(env->RecursivelyCreateDir(dir_name));
  }
  {
    // Warn (rate-limited) when rename is not atomic: concurrent writers may
    // then race on the cache entry.
    bool has_atomic_move = false;
    TF_RETURN_IF_ERROR(env->HasAtomicMove(dir_name, &has_atomic_move));
    if (!has_atomic_move) {
      LOG_EVERY_POW_2(WARNING)
          << "Filesystem for OptimizedFunctionGraphInfo persistent cache at "
          << dir_name
          << " does not support atomic moves. Therefore the "
             "persistent cache is racy if you have multiple optimizations "
             "occurring simultaneously!";
    }
  }
  // Write-then-rename: the temp file gets a unique ".pb.tmp" suffix so
  // concurrent writers do not clobber each other's in-progress writes.
  std::string temp_file_name = file_name;
  if (!env->CreateUniqueFileName(&temp_file_name, ".pb.tmp")) {
    return absl::UnavailableError(
        absl::StrCat("Could not create a unique file inside ", dir_name));
  }
  TF_RETURN_IF_ERROR(tsl::WriteStringToFile(
      env, temp_file_name, optimized_function_graph_proto_str));
  TF_RETURN_IF_ERROR(env->RenameFile(temp_file_name, file_name));
  const absl::Duration cache_writing_duration =
      absl::Now() - cache_writing_start_time;
  VLOG(3) << "Finished writing Tensorflow optimized graph into cache; took "
          << absl::ToInt64Milliseconds(cache_writing_duration)
          << " msecs, file name: " << file_name;
  return absl::OkStatus();
}
// Reads a serialized OptimizedFunctionGraph proto from `file_name` and
// reconstructs the corresponding OptimizedFunctionGraphInfo. Returns an error
// status if the file cannot be read or the proto cannot be converted.
// NOTE(review): a ParseFromString failure is not checked here — a corrupt
// file would surface only via FromProto, if at all.
absl::StatusOr<OptimizedFunctionGraphInfo> ReadFromCache(
    const string& file_name, Env* env) {
  absl::Time cache_reading_start_time = absl::Now();
  OptimizedFunctionGraph optimized_function_graph_proto;
  string optimized_function_graph_proto_str;
  TF_RETURN_IF_ERROR(tsl::ReadFileToString(
      env, file_name, &optimized_function_graph_proto_str));
  optimized_function_graph_proto.ParseFromString(
      optimized_function_graph_proto_str);
  TF_ASSIGN_OR_RETURN(absl::StatusOr<OptimizedFunctionGraphInfo>
                          optimized_function_graph_info_restored,
                      OptimizedFunctionGraphInfo::FromProto(
                          std::move(optimized_function_graph_proto)));
  const absl::Duration cache_reading_duration =
      absl::Now() - cache_reading_start_time;
  VLOG(3) << "Finished reading Tensorflow optimized graph from cache; took "
          << absl::ToInt64Milliseconds(cache_reading_duration) << " msecs";
  return optimized_function_graph_info_restored;
}
// Builds the cache-file path for `function_name` inside `dir_name`. The
// token after the final '_' (a per-instantiation suffix) is stripped so
// repeated instantiations of the same function map to one cache entry; the
// job name, task id, and the function's node count are appended to keep
// entries distinct across jobs/tasks and function revisions.
string GetFileCacheName(const string& dir_name, const string& function_name,
                        const FunctionDef* fdef) {
  string plain_func_name = function_name;
  // Equivalent to: split on '_', drop the last token, rejoin with '_'.
  const string::size_type last_underscore = function_name.rfind('_');
  if (last_underscore != string::npos) {
    plain_func_name = function_name.substr(0, last_underscore);
  }
  return absl::StrCat(dir_name, "/", tsl::port::JobName(), "_",
                      tsl::port::TaskId(), "_", plain_func_name, "_",
                      fdef->node_def_size());
}
// Instantiates `fdef` into a FunctionBody and extracts everything the caller
// needs for multi-device optimization: ownership of the graph, the _Arg and
// _Retval nodes, the ret/control-ret node names, and the return dtypes.
// Consumes `fdef`.
Status GetGraphAndArgRets(const string& function_name, AttrSlice attrs,
                          core::RefCountPtr<FunctionRecord>&& fdef,
                          const FunctionLibraryDefinition* lib_def,
                          std::unique_ptr<Graph>* graph,
                          std::vector<Node*>* arg_nodes,
                          std::vector<Node*>* ret_nodes,
                          std::vector<string>* ret_node_names,
                          DataTypeVector* ret_types,
                          std::vector<string>* control_ret_node_names) {
  std::unique_ptr<FunctionBody> fbody;
  TF_RETURN_IF_ERROR(
      FunctionDefToBodyHelper(std::move(fdef), attrs, lib_def, &fbody));
  if (!fbody) {
    LOG(ERROR) << "Failed to get FunctionBody for \"" << function_name << "\"";
    return errors::Internal("Failed to construct FunctionBody for ",
                            function_name);
  }
  // Transfer graph ownership out of fbody into the caller's unique_ptr...
  *graph = std::unique_ptr<Graph>(fbody->graph);
  arg_nodes->reserve(fbody->arg_nodes.size());
  std::copy(fbody->arg_nodes.begin(), fbody->arg_nodes.end(),
            std::back_inserter(*arg_nodes));
  ret_nodes->reserve(fbody->ret_nodes.size());
  std::copy(fbody->ret_nodes.begin(), fbody->ret_nodes.end(),
            std::back_inserter(*ret_nodes));
  // ...and null it out so fbody's destructor does not double-free the graph.
  fbody->graph = nullptr;
  ret_node_names->reserve(fbody->ret_nodes.size());
  for (const Node* node : fbody->ret_nodes) {
    ret_node_names->push_back(node->name());
  }
  for (const auto& ret_type : fbody->ret_types) {
    ret_types->push_back(ret_type);
  }
  control_ret_node_names->reserve(fbody->control_ret_nodes.size());
  for (const Node* node : fbody->control_ret_nodes) {
    control_ret_node_names->push_back(node->name());
  }
  return absl::OkStatus();
}
}
// Pins _Arg and _Retval nodes to devices before placement.
//
// Args are pinned directly to `input_devices[index]`. Rets are pinned to
// `output_devices[index]` when output devices were specified; otherwise each
// ret's device is derived from its (non-control) input: colocate with the
// source's colocation group if it has one, else adopt the source's
// assigned/requested device when it resolves to a usable device in
// `device_set`. `default_device` (may be null) is used as a fallback and as a
// tie-breaker when a device spec matches several devices.
Status PinArgsAndRets(const std::vector<string>& input_devices,
                      const std::vector<string>& output_devices,
                      const DeviceSet& device_set,
                      const std::vector<Node*>& arg_nodes,
                      const std::vector<Node*>& ret_nodes,
                      const FunctionLibraryDefinition* lib_def,
                      Device* default_device) {
  // Args: one input device per argument index, assigned unconditionally.
  for (Node* node : arg_nodes) {
    const AttrValue* attr_value;
    TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
    int64_t index = attr_value->i();
    node->set_assigned_device_name(input_devices[index]);
    VLOG(3) << "Setting device to " << input_devices[index] << " for node "
            << node->name();
  }
  for (Node* node : ret_nodes) {
    if (output_devices.empty()) {
      // No explicit output devices: infer a device from the ret's producer.
      DataType dtype;
      TF_RETURN_IF_ERROR(GetNodeAttr(node->attrs(), "T", &dtype));
      VLOG(3) << "Trying to determine device for node " << node->name()
              << "[T=" << DataTypeString(dtype) << "]";
      for (const auto& it : node->in_edges()) {
        if (it->IsControlEdge()) continue;
        Node* src_node = it->src();
        const string* src_device = AssignedOrRequestedDeviceName(*src_node);
        string colocation_group = "";
        GetColocationGroup(src_node, &colocation_group);
        VLOG(3) << "Considering src: " << src_node->name()
                << " src_device: " << *src_device
                << " colo group: " << colocation_group;
        // Walk back through device-less, ungrouped Identity nodes to find
        // the real producer's device/colocation constraints.
        while (src_device->empty() && colocation_group.empty() &&
               src_node->IsIdentity()) {
          Node* input_node;
          TF_RETURN_IF_ERROR(src_node->input_node(0, &input_node));
          src_node = input_node;
          src_device = AssignedOrRequestedDeviceName(*src_node);
          GetColocationGroup(src_node, &colocation_group);
          VLOG(3) << "Considering src: " << src_node->name()
                  << " src_device: " << *src_device
                  << " colo group: " << colocation_group;
        }
        // A resource produced by a function call may live on a different
        // device than the call itself, so its device cannot be trusted.
        const bool can_use_src_node_device =
            !(dtype == DT_RESOURCE && IsFunctionCall(*lib_def, *src_node));
        if (!colocation_group.empty()) {
          // Prefer colocation: copy the source's colocation group onto the
          // ret and let the placer resolve the actual device.
          AttrValue::ListValue colo_attr;
          colo_attr.add_s(colocation_group);
          std::vector<string> colo_slice = {colocation_group};
          node->AddAttr(kColocationAttrName, colo_slice);
        } else if (!src_device->empty() && can_use_src_node_device) {
          // DT_VARIANT outputs (e.g. datasets/mutexes) are left to the
          // placer unless the source is an _Arg with a known device.
          if (dtype == DT_VARIANT && !src_node->IsArg()) {
            continue;
          }
          DeviceNameUtils::ParsedName parsed;
          if (!DeviceNameUtils::ParseFullName(*src_device, &parsed)) {
            return errors::InvalidArgument(
                "Failed to parse explicit device specification ", *src_device);
          }
          std::vector<Device*> matching_devices;
          device_set.FindMatchingDevices(parsed, &matching_devices);
          if (matching_devices.empty()) {
            // Spec matches nothing in the set: fall back to the default
            // device if one was provided, else fail.
            if (default_device != nullptr) {
              matching_devices.push_back(default_device);
            } else {
              return errors::InvalidArgument(
                  "Unable to find any devices for spec ", *src_device);
            }
          } else if (matching_devices.size() != 1) {
            // Ambiguous spec. Tolerate it when all matches share one address
            // space (placer will pick), or when exactly one match is in the
            // default device's address space; otherwise it is an error.
            bool on_same_task = true;
            for (int i = 1; i < matching_devices.size(); ++i) {
              if (!DeviceNameUtils::IsSameAddressSpace(
                      matching_devices.at(0)->parsed_name(),
                      matching_devices.at(i)->parsed_name())) {
                on_same_task = false;
                break;
              }
            }
            if (on_same_task) {
              continue;
            }
            if (default_device != nullptr) {
              int colocated_on_default_device = 0;
              for (int i = 0; i < matching_devices.size(); ++i) {
                if (DeviceNameUtils::IsSameAddressSpace(
                        default_device->parsed_name(),
                        matching_devices.at(i)->parsed_name())) {
                  colocated_on_default_device++;
                }
              }
              if (colocated_on_default_device == 1) {
                continue;
              }
            }
            // Build a "[dev1, dev2, ...]" listing for the error message.
            string devices = "[";
            for (Device* device : matching_devices) {
              devices.append(device->name());
              devices.append(", ");
            }
            if (devices.size() > 2) {
              devices.resize(devices.size() - 2);
            }
            devices.append("]");
            return errors::InvalidArgument(
                *src_device,
                "When FunctionLibraryRuntime::Options.output_devices are "
                "not specified for a multi-device function, the device "
                "specification on the output node must match exactly one "
                "device. Matched devices are ",
                devices);
          }
          VLOG(3) << "Setting output device to " << matching_devices[0]->name()
                  << " for node " << SummarizeNode(*node);
          node->set_assigned_device_name(matching_devices[0]->name());
        } else if (!src_device->empty() && !can_use_src_node_device) {
          VLOG(3) << "Did not set device for a resource output node "
                  << SummarizeNode(*node);
        }
      }
    } else {
      // Explicit output devices: assign by ret index.
      const AttrValue* attr_value;
      TF_RETURN_IF_ERROR(node->attrs().Find("index", &attr_value));
      int64_t index = attr_value->i();
      DCHECK_GT(output_devices.size(), index);
      VLOG(3) << "Setting output device to " << output_devices[index]
              << " for return at index " << index;
      node->set_assigned_device_name(output_devices[index]);
    }
  }
  return absl::OkStatus();
}
// Instantiates function `function_name` (looked up in `options.lib_def` if
// set, otherwise `input_lib_def`) as a Graph and runs the full
// pre-partitioning optimization pipeline over it: PRE_PLACEMENT passes,
// Placer, POST_PLACEMENT passes, the caller-supplied graph optimizer, and
// POST_REWRITE_FOR_EXEC passes.
//
// Returns the optimized graph together with its pruned function library,
// the node-name -> control-ret mapping, return types/count, the wall-clock
// optimization duration, and `optimization_source` (JIT vs AOT).
// Returns InvalidArgument if the function is not found in the library.
absl::StatusOr<OptimizedFunctionGraphInfo> OptimizeFunctionGraph(
    const string& function_name, AttrSlice attrs,
    const FunctionLibraryRuntime::InstantiateOptions& options,
    const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
    const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
    Device* default_device, Env* env,
    OptimizedFunctionGraph::OptimizationSource optimization_source) {
  // Start of the wall-clock window reported in the returned info.
  const uint64_t graph_optimization_start_time_usecs = env->NowMicros();
  // An explicitly provided library overrides the input one.
  const FunctionLibraryDefinition* lib_def =
      options.lib_def == nullptr ? input_lib_def : options.lib_def;
  core::RefCountPtr<FunctionRecord> fdef = lib_def->FindRecord(function_name);
  if (fdef == nullptr) {
    return errors::InvalidArgument("Failed to find function \"", function_name,
                                   "\" in function library: ", lib_def);
  }
  TF_RETURN_IF_ERROR(ValidateMultiDeviceOptions(fdef->fdef(), options));

  // Instantiate the FunctionDef as a Graph and collect arg/ret metadata.
  std::unique_ptr<Graph> graph;
  std::vector<Node*> arg_nodes, ret_nodes;
  std::vector<string> ret_node_names;
  DataTypeVector ret_types;
  std::vector<string> control_ret_node_names;
  TF_RETURN_IF_ERROR(GetGraphAndArgRets(
      function_name, attrs, fdef.GetNewRef(), lib_def, &graph, &arg_nodes,
      &ret_nodes, &ret_node_names, &ret_types, &control_ret_node_names));
  DEBUG_DATA_DUMPER()->DumpOpCreationStackTraces(
      function_name, kDebugGroupOpStacktrace, "before_opt", graph.get());

  // Restrict the library to definitions actually reachable from the graph.
  GraphDef graph_def;
  graph->ToGraphDef(&graph_def);
  FunctionLibraryDefinition reachable_lib_def =
      lib_def->ReachableDefinitions(graph_def);
  *graph_def.mutable_library() = reachable_lib_def.ToProto();
  if (options.graph_collector != nullptr) {
    options.graph_collector->CollectRawGraph(graph_def);
  }
  DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain, "initial",
                                 graph.get(), &reachable_lib_def, false);

  // When XLA compilation is requested, tag every op node and (if available)
  // pin it to the default device.
  if (!options.xla_compile_device_type.empty()) {
    for (Node* node : graph->op_nodes()) {
      node->AddAttr("_xla_compile_device_type",
                    options.xla_compile_device_type);
      if (default_device) {
        node->set_assigned_device_name(default_device->name());
      }
    }
  }

  TF_RETURN_IF_ERROR(
      SetArgShape(options.input_resource_dtypes_and_shapes, arg_nodes));
  // Assign devices to _Arg/_Retval nodes from the instantiate options; the
  // default device is only offered when soft placement is allowed.
  TF_RETURN_IF_ERROR(PinArgsAndRets(
      options.input_devices, options.output_devices, dev_set, arg_nodes,
      ret_nodes, lib_def,
      options.config_proto.allow_soft_placement() ? default_device : nullptr));
  DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                 "before_bridge", graph.get(),
                                 &reachable_lib_def, false);
  // From here on the graph resolves function lookups against the reachable
  // library; its own embedded library is cleared.
  graph->mutable_flib_def()->set_default_registry(&reachable_lib_def);
  graph->mutable_flib_def()->Clear();

  // Component functions skip the optimization passes: they were already
  // optimized as part of their parent function.
  const bool should_run_optimization_passes = !options.is_component_function;
  if (!should_run_optimization_passes) {
    VLOG(1) << "Skipping function/graph optimization passes when instantiating "
               "component function "
            << function_name;
  }

  std::unordered_map<string, string> node_name_to_control_ret;
  bool control_rets_updated = false;
  if (should_run_optimization_passes) {
    FunctionOptimizationPass::FunctionOptions function_options{
        options.xla_compile_device_type, options.allow_soft_placement};
    TF_RETURN_IF_ERROR(FunctionOptimizationPassRegistry::Global().Run(
        function_name, dev_set, options.config_proto, function_options, &graph,
        &reachable_lib_def, &control_ret_node_names, &control_rets_updated));
  }
  // Build the node-name -> control-ret mapping: either from the (possibly
  // rewritten) names produced by the function optimization pass, or from the
  // original FunctionDef's control_ret map.
  if (control_rets_updated) {
    for (const auto& control_ret : control_ret_node_names) {
      node_name_to_control_ret.emplace(control_ret, control_ret);
    }
  } else {
    for (const auto& control_ret : fdef->fdef().control_ret()) {
      node_name_to_control_ret.emplace(control_ret.second, control_ret.first);
    }
  }

  // Shared options for all grouped optimization passes below.
  GraphOptimizationPassOptions optimization_options;
  SessionOptions session_options;
  session_options.env = env;
  session_options.config = options.config_proto;
  optimization_options.session_options = &session_options;
  optimization_options.graph = &graph;
  optimization_options.flib_def = &reachable_lib_def;
  optimization_options.device_set = &dev_set;
  optimization_options.is_function_graph = true;
  optimization_options.composite_devices = &composite_devices;
  optimization_options.default_function_device = default_device;
  optimization_options.function_def = &fdef->fdef();
  optimization_options.shape_inference_on_tfe_dialect_import =
      options.shape_inference_on_tfe_dialect_import;
  optimization_options.debug_filename_prefix = function_name;

  // PRE_PLACEMENT passes, then device placement.
  DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                 "before_pre_placement_passes", graph.get(),
                                 &reachable_lib_def, false);
  if (should_run_optimization_passes) {
    TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
        OptimizationPassRegistry::PRE_PLACEMENT, optimization_options));
  }
  DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                 "before_placer", graph.get(),
                                 &reachable_lib_def, false);
  Placer placer(graph.get(), function_name, optimization_options.flib_def,
                &dev_set, default_device,
                options.config_proto.allow_soft_placement(),
                options.config_proto.log_device_placement());
  TF_RETURN_IF_ERROR(placer.Run(optimization_options));

  // POST_PLACEMENT passes.
  DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                 "before_post_placement_passes", graph.get(),
                                 &reachable_lib_def, false);
  if (should_run_optimization_passes) {
    TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
        OptimizationPassRegistry::POST_PLACEMENT, optimization_options));
  }

  // Caller-supplied optimizer (e.g. Grappler). Failures here are logged and
  // deliberately non-fatal: the unoptimized graph is still executable.
  if (options.optimize_graph_fn) {
    DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                   "before_graph_optimization", graph.get(),
                                   &reachable_lib_def, false);
    Status status = options.optimize_graph_fn(
        std::move(ret_node_names), std::move(control_ret_node_names),
        &reachable_lib_def, dev_set, cpu_device, &graph);
    if (!status.ok()) {
      LOG(WARNING) << "Ignoring multi-device function optimization failure: "
                   << status;
    }
    DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                   "after_graph_optimization", graph.get(),
                                   &reachable_lib_def, false);
  }

  // POST_REWRITE_FOR_EXEC passes.
  DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                 "before_post_rewrite_for_exec_passes",
                                 graph.get(), &reachable_lib_def, false);
  if (should_run_optimization_passes) {
    TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
        OptimizationPassRegistry::POST_REWRITE_FOR_EXEC, optimization_options));
  }
  DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                 "after_post_rewrite_for_exec_passes",
                                 graph.get(), &reachable_lib_def, false);

  // Detach the graph from the (stack-local) registry and prune the library
  // down to what the optimized graph still reaches.
  graph->mutable_flib_def()->set_default_registry(nullptr);
  graph->mutable_flib_def()->Clear();
  FunctionLibraryDefinition pruned_lib_def =
      reachable_lib_def.ReachableDefinitions(*graph);
  return OptimizedFunctionGraphInfo(
      function_name, std::move(graph), std::move(pruned_lib_def),
      node_name_to_control_ret, ret_types, ret_nodes.size(),
      env->NowMicros() - graph_optimization_start_time_usecs,
      optimization_source);
}
// Same as OptimizeFunctionGraph(), but consults an on-disk cache when graph
// caching is enabled via the `kGraphCachingEnvVariableName` environment
// variable. On a cache hit the optimized graph is deserialized from disk; on
// a miss the optimization runs and, if it took at least
// `caching_threshold_duration`, the result is written back to the cache.
// Component functions are never cached.
//
// Fixes over the previous revision:
//   * typo in the write-failure log message ("cotinue" -> "continue");
//   * the "Successfully wrote..." message was logged even when WriteToCache
//     failed; it is now only logged on success.
absl::StatusOr<OptimizedFunctionGraphInfo>
OptimizeFunctionGraphOrReadFromFileCache(
    const string& function_name, AttrSlice attrs,
    const FunctionLibraryRuntime::InstantiateOptions& options,
    const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
    const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
    Device* default_device, Env* env,
    absl::Duration caching_threshold_duration) {
  // getenv() may return nullptr; StrCat turns that into an empty string,
  // which disables caching.
  const string dir_name = absl::StrCat(getenv(kGraphCachingEnvVariableName));
  if (dir_name.empty() || options.is_component_function) {
    return OptimizeFunctionGraph(function_name, attrs, options, dev_set,
                                 input_lib_def, composite_devices, cpu_device,
                                 default_device, env,
                                 OptimizedFunctionGraph::JIT);
  }

  const FunctionLibraryDefinition* lib_def =
      options.lib_def == nullptr ? input_lib_def : options.lib_def;
  const FunctionDef* fdef = lib_def->Find(function_name);
  if (fdef == nullptr) {
    return absl::AbortedError(absl::StrCat(
        "Failed to find function ", function_name,
        " in function library: ", lib_def->ToProto().DebugString()));
  }
  const string file_name = GetFileCacheName(dir_name, function_name, fdef);

  // Cache-hit path: try to restore the optimized graph from disk. A failed
  // read is non-fatal; we fall back to running the optimization passes.
  if (env->FileExists(file_name).ok()) {
    LOG(INFO)
        << "TensorFlow graph cache existed; reading from cache; function name: "
        << function_name << ", full cache file path: " << file_name;
    absl::StatusOr<OptimizedFunctionGraphInfo> optimized_function_graph_info =
        ReadFromCache(file_name, env);
    if (optimized_function_graph_info.ok()) {
      metrics::UpdateFunctionGraphOptimizationSavingTime(
          optimized_function_graph_info->optimization_duration_usecs,
          metrics::GraphOptimizationSource::kJit);
      metrics::IncrementFunctionGraphOptimizationCacheHitCount(
          1, metrics::GraphOptimizationSource::kJit);
      LOG(INFO)
          << "Successfully restored the Tensorflow optimized graph from "
             "the cache for the function: "
          << function_name << ", saved optimized time: "
          << absl::ToInt64Milliseconds(absl::Microseconds(
                 optimized_function_graph_info->optimization_duration_usecs))
          << " msecs";
      return optimized_function_graph_info;
    }
    metrics::IncrementFunctionGraphOptimizationCacheFailureCount(
        1, metrics::GraphOptimizationSource::kJit);
    LOG(ERROR)
        << "Reading from Tensorflow graph optimization cache failed. Continue "
           "to run the Tensorflow graph optimization passes instead. Error: "
        << optimized_function_graph_info.status();
    return OptimizeFunctionGraph(function_name, attrs, options, dev_set,
                                 input_lib_def, composite_devices, cpu_device,
                                 default_device, env,
                                 OptimizedFunctionGraph::JIT);
  }

  // Cache-miss path: run the optimization passes and time them.
  metrics::IncrementFunctionGraphOptimizationCacheMissCount(
      1, metrics::GraphOptimizationSource::kJit);
  VLOG(3) << "No cache existed; run the optimization passes. function name:"
          << " " << function_name;
  absl::Time optimization_start_time = absl::Now();
  TF_ASSIGN_OR_RETURN(
      absl::StatusOr<OptimizedFunctionGraphInfo> optimized_function_graph_info,
      OptimizeFunctionGraph(function_name, attrs, options, dev_set,
                            input_lib_def, composite_devices, cpu_device,
                            default_device, env, OptimizedFunctionGraph::JIT));
  const absl::Duration graph_optimization_duration =
      absl::Now() - optimization_start_time;
  VLOG(3) << "Finished running the optimization passes; took "
          << absl::ToInt64Seconds(graph_optimization_duration)
          << " secs; function name: " << function_name;

  // Only cache results that were expensive enough to be worth re-reading.
  if (graph_optimization_duration >= caching_threshold_duration) {
    LOG(INFO)
        << "Writing the optimized TensorFlow graph into cache: function name: "
        << function_name << ", full cache file path: " << file_name;
    Status s = WriteToCache(dir_name, file_name,
                            optimized_function_graph_info.value(), env);
    if (!s.ok()) {
      LOG(ERROR) << "Caching the Tensorflow graph optimization results failed; "
                    "continue without caching. Error message: "
                 << s;
    } else {
      LOG(INFO) << "Successfully wrote the optimized Tensorflow graph into "
                   "cache for the function: "
                << function_name << ", graph optimization time ( / threshold): "
                << absl::ToInt64Milliseconds(graph_optimization_duration)
                << " / (" << absl::ToInt64Milliseconds(caching_threshold_duration)
                << ") msecs";
    }
  }
  return optimized_function_graph_info;
}
// Replicates per-replica (composite-device) nodes, partitions the optimized
// function graph across the devices in `dev_set`, and runs the
// POST_PARTITIONING optimization passes on each per-device subgraph.
// `input_optimized_graph.function_graph` is consumed. Returns a map from
// device name to the subgraph placed on that device.
absl::StatusOr<
    std::unique_ptr<std::unordered_map<string, std::unique_ptr<Graph>>>>
PreprocessAndPartitionGraph(
    const std::string& function_name,
    OptimizedFunctionGraphInfo& input_optimized_graph,
    const FunctionLibraryRuntime::InstantiateOptions& options,
    const DeviceSet& dev_set, const FunctionLibraryDefinition* input_lib_def,
    const std::vector<CompositeDevice*>& composite_devices, Device* cpu_device,
    Env* env) {
  std::unique_ptr<Graph>& graph = input_optimized_graph.function_graph;

  // Expand nodes assigned to composite (multi-replica) devices into one node
  // per underlying device before partitioning.
  TF_RETURN_IF_ERROR(ReplicatePerReplicaNodesInFunctionGraph(
      options.composite_devices, graph.get()));

  const FunctionLibraryDefinition* lib_def =
      options.lib_def == nullptr ? input_lib_def : options.lib_def;
  if (options.graph_collector != nullptr) {
    GraphDef def;
    graph->ToGraphDef(&def);
    *def.mutable_library() = lib_def->ReachableDefinitions(def).ToProto();
    options.graph_collector->CollectOptimizedGraph(def);
  }
  DEBUG_DATA_DUMPER()->DumpGraph(function_name, kDebugGroupMain,
                                 "before_partition", graph.get(),
                                 &input_optimized_graph.lib_def, VLOG_IS_ON(4));

  // Partition by assigned device name; `graph` is moved from here.
  auto device_name_to_subgraphs =
      std::make_unique<std::unordered_map<string, std::unique_ptr<Graph>>>();
  TF_RETURN_IF_ERROR(PartitionFunctionGraph(dev_set, std::move(graph),
                                            device_name_to_subgraphs.get()));
  for (const auto& pair : *device_name_to_subgraphs) {
    std::string partitioned_func_name =
        absl::StrCat(function_name, "_partition_" + pair.first);
    const auto* optimized_subgraph = pair.second.get();
    DEBUG_DATA_DUMPER()->DumpGraph(
        partitioned_func_name, kDebugGroupMain, "before_partition_passes",
        optimized_subgraph, &input_optimized_graph.lib_def, false);
  }

  GraphOptimizationPassOptions optimization_options;
  SessionOptions session_options;
  session_options.env = env;
  session_options.config = options.config_proto;
  optimization_options.session_options = &session_options;
  optimization_options.flib_def = &(input_optimized_graph.lib_def);
  optimization_options.is_function_graph = true;
  // Post-partitioning passes operate on the per-device subgraphs, not on a
  // single whole graph, so `graph`/`device_set` are intentionally null.
  optimization_options.graph = nullptr;
  optimization_options.device_set = nullptr;
  optimization_options.partition_graphs = device_name_to_subgraphs.get();
  optimization_options.debug_filename_prefix = function_name;

  // BUG FIX: the previous condition used
  // `std::is_same<decltype(cpu_device), LocalDevice>::value`, which compares
  // the static type `Device*` against `LocalDevice` and is therefore always
  // false, leaving this branch dead. Use a runtime downcast to express the
  // evident intent: only a LocalDevice carries meaningful worker threads.
  if (cpu_device != nullptr &&
      dynamic_cast<LocalDevice*>(cpu_device) != nullptr &&
      cpu_device->tensorflow_cpu_worker_threads() != nullptr) {
    session_options.config.set_intra_op_parallelism_threads(
        cpu_device->tensorflow_cpu_worker_threads()->num_threads);
  }

  // Component functions were already optimized with their parent function.
  const bool should_run_optimization_passes = !options.is_component_function;
  if (should_run_optimization_passes) {
    TF_RETURN_IF_ERROR(OptimizationPassRegistry::Global()->RunGrouping(
        OptimizationPassRegistry::POST_PARTITIONING, optimization_options));
  }
  for (const auto& pair : *device_name_to_subgraphs) {
    std::string partitioned_func_name =
        absl::StrCat(function_name, "_partition_" + pair.first);
    const auto* optimized_subgraph = pair.second.get();
    DEBUG_DATA_DUMPER()->DumpGraph(partitioned_func_name, kDebugGroupMain,
                                   "after_partition_passes", optimized_subgraph,
                                   &input_optimized_graph.lib_def, false);
  }
  return std::move(device_name_to_subgraphs);
}
} | #include "tensorflow/core/common_runtime/optimize_function_graph_utils.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "tensorflow/core/common_runtime/device.h"
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/device_set.h"
#include "tensorflow/core/common_runtime/function_testlib.h"
#include "tensorflow/core/common_runtime/optimized_function_graph_info.h"
#include "tensorflow/core/framework/metrics.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/public/session_options.h"
#include "tsl/platform/env.h"
#include "tsl/platform/status.h"
namespace tensorflow {
namespace {
using ::testing::ElementsAre;
constexpr absl::string_view kDevicePrefix = "/job:a/replica:0/task:0/device:";
// Registers `num_devices` CPU devices under "/job:a/replica:0/task:0" and
// appends them to `devices`. NOTE(review): `name_prefix` is currently
// unused; the job/replica/task prefix is hard-coded below.
void CreateCpuDeviceList(absl::string_view name_prefix, int num_devices,
                         std::vector<std::unique_ptr<Device>>& devices) {
  SessionOptions session_options;
  (*session_options.config.mutable_device_count())["CPU"] = num_devices;
  TF_ASSERT_OK(DeviceFactory::AddDevices(
      session_options, "/job:a/replica:0/task:0", &devices));
}
void TestOptimizeFunctionGraphWithFunctionNotFound(bool load_from_cache) {
FunctionLibraryRuntime::InstantiateOptions opts;
opts.is_multi_device_function = true;
auto lib_def =
std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global());
std::vector<std::unique_ptr<Device>> devices;
CreateCpuDeviceList(kDevicePrefix, 1, devices);
DeviceSet device_set;
for (const auto& device : devices) {
device_set.AddDevice(device.get());
}
absl::StatusOr<OptimizedFunctionGraphInfo> optimized_function_graph_info;
if (load_from_cache) {
optimized_function_graph_info = OptimizeFunctionGraphOrReadFromFileCache(
"FindDevice", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[0].get(),
Env::Default(), absl::ZeroDuration());
} else {
optimized_function_graph_info = OptimizeFunctionGraph(
"FindDevice", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[0].get(),
Env::Default(), OptimizedFunctionGraph::AOT);
}
EXPECT_TRUE(absl::IsInvalidArgument(optimized_function_graph_info.status()))
<< "Actual status: " << optimized_function_graph_info.status();
EXPECT_TRUE(
absl::StrContains(optimized_function_graph_info.status().message(),
"Failed to find function"))
<< "Actual error message: "
<< optimized_function_graph_info.status().message();
}
// Verifies that the non-caching entry point surfaces an InvalidArgument
// error when the requested function does not exist in the library.
TEST(OptimizeFunctionGraphTest,
     OptimizeFunctionGraphReturnsErrorIfNoFunctionFound) {
  TestOptimizeFunctionGraphWithFunctionNotFound(false);
}
// Happy path: optimizing the "FindDevice" test function should succeed and
// report the expected metadata (name, a single string return, a strictly
// positive duration, and the AOT source tag).
TEST(OptimizeFunctionGraphTest, OptimizeFunctionGraphReturnsCorrectResult) {
  FunctionLibraryRuntime::InstantiateOptions opts;
  opts.is_multi_device_function = true;
  // Library containing only the "FindDevice" test function.
  FunctionDefLibrary proto;
  *(proto.add_function()) = test::function::FindDevice();
  auto lib_def =
      std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(), proto);
  std::vector<std::unique_ptr<Device>> devices;
  CreateCpuDeviceList(kDevicePrefix, 3, devices);
  DeviceSet device_set;
  for (const auto& device : devices) {
    device_set.AddDevice(device.get());
  }
  const absl::StatusOr<OptimizedFunctionGraphInfo> aot_result =
      OptimizeFunctionGraph("FindDevice", {}, opts, device_set, lib_def.get(),
                            {}, devices[0].get(),
                            devices[1].get(), Env::Default(),
                            OptimizedFunctionGraph::AOT);
  TF_EXPECT_OK(aot_result.status());
  EXPECT_EQ(aot_result->name, "FindDevice");
  EXPECT_EQ(aot_result->num_return_nodes, 1);
  EXPECT_THAT(aot_result->ret_types, ElementsAre(DT_STRING));
  // The duration is wall-clock based and must be strictly positive.
  EXPECT_GT(aot_result->optimization_duration_usecs, 0);
  EXPECT_EQ(aot_result->optimization_source, OptimizedFunctionGraph::AOT);
}
// Same missing-function check, but through the cache-aware entry point
// OptimizeFunctionGraphOrReadFromFileCache.
TEST(OptimizeFunctionGraphTest, ReloadFromCacheReturnsErrorIfNoFunctionFound) {
  TestOptimizeFunctionGraphWithFunctionNotFound(true);
}
TEST(OptimizeFunctionGraphTest, OptimizeFunctionGraphAndWriteToCache) {
Env* env = Env::Default();
const string temp_dir = "/tmp/testing_cache_direcroty";
EXPECT_TRUE(env->RecursivelyCreateDir(temp_dir).ok());
setenv(kGraphCachingEnvVariableName, temp_dir.c_str(), 1);
std::vector<string> empty_file_list;
TF_ASSERT_OK(
env->GetMatchingPaths(absl::StrCat(temp_dir, "{}, devices[0].get(), devices[1].get(),
Env::Default(), absl::Hours(48));
TF_ASSERT_OK(optimized_info.status());
std::vector<string> file_list;
TF_ASSERT_OK(env->GetMatchingPaths(absl::StrCat(temp_dir, "{}, devices[0].get(), devices[1].get(),
Env::Default(), absl::ZeroDuration());
TF_ASSERT_OK(optimized_info.status());
file_list.clear();
TF_ASSERT_OK(env->GetMatchingPaths(
absl::StrCat(temp_dir, "/_-1_FindDevice_1"), &file_list));
EXPECT_EQ(file_list.size(), 1);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationSavingTimeUsecs(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheHitCount(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheMissCount(
metrics::GraphOptimizationSource::kJit),
2);
optimized_info = OptimizeFunctionGraphOrReadFromFileCache(
"FindDevice_1234", {}, opts, device_set, lib_def.get(),
{}, devices[0].get(), devices[1].get(),
Env::Default(), absl::ZeroDuration());
TF_ASSERT_OK(optimized_info.status());
file_list.clear();
TF_ASSERT_OK(env->GetMatchingPaths(
absl::StrCat(temp_dir, "/_-1_FindDevice_1"), &file_list));
EXPECT_EQ(file_list.size(), 1);
EXPECT_GT(metrics::GetFunctionGraphOptimizationSavingTimeUsecs(
metrics::GraphOptimizationSource::kJit),
0);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheHitCount(
metrics::GraphOptimizationSource::kJit),
1);
EXPECT_EQ(metrics::GetFunctionGraphOptimizationCacheMissCount(
metrics::GraphOptimizationSource::kJit),
2);
EXPECT_EQ(optimized_info->name, "FindDevice_1234");
EXPECT_EQ(optimized_info->num_return_nodes, 1);
EXPECT_THAT(optimized_info->ret_types, ElementsAre(DT_STRING));
int64_t undeleted_files;
int64_t undeleted_dirs;
TF_EXPECT_OK(
env->DeleteRecursively(temp_dir, &undeleted_files, &undeleted_dirs));
EXPECT_EQ(undeleted_files, 0);
EXPECT_EQ(undeleted_dirs, 0);
TF_ASSERT_OK(
env->GetMatchingPaths(absl::StrCat(temp_dir, "/*"), &empty_file_list));
ASSERT_TRUE(empty_file_list.empty());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimize_function_graph_utils.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/common_runtime/optimize_function_graph_utils_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e065e452-5515-4f94-a09b-a7f41b0f0597 | cpp | tensorflow/tensorflow | gemv_rewriter | third_party/xla/xla/service/gpu/transforms/gemv_rewriter.cc | third_party/xla/xla/service/gpu/transforms/gemv_rewriter_test.cc | #include "xla/service/gpu/transforms/gemv_rewriter.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
// Returns a descending (dim-0-major) layout with one more dimension than
// `layout`. Fails with InvalidArgument unless `layout` is normalized.
absl::StatusOr<Layout> GetLayoutWithNewMinorMostDimension(
    const Layout& layout) {
  // Appending a minor-most dimension is only well-defined for normalized
  // (monotonic, dim-0-major) layouts.
  if (!LayoutUtil::IsMonotonicWithDim0Major(layout)) {
    return absl::InvalidArgumentError("Layout is not normalized.");
  }
  const int64_t new_rank = layout.minor_to_major_size() + 1;
  return LayoutUtil::MakeDescendingLayout(new_rank);
}
// Rewrites a matrix-vector or vector-matrix dot (gemv) into a matrix-matrix
// dot (gemm) by bitcasting a degenerate (size-1) non-contracting dimension
// onto the vector-shaped operand and onto the result. The lhs/rhs reshape
// logic was previously duplicated inline; it is factored into
// AddBitcastWithTrailingUnitDim().
class GemvRewriterVisitor : public DfsHloRewriteVisitor {
 public:
  absl::Status HandleDot(HloInstruction* instr) override {
    HloDotInstruction* dot = Cast<HloDotInstruction>(instr);
    const DotDimensionNumbers& dim_numbers = dot->dot_dimension_numbers();
    HloInstruction* lhs = dot->mutable_operand(0);
    HloInstruction* rhs = dot->mutable_operand(1);

    // An operand has a non-contracting dimension iff its rank exceeds the
    // number of batch + contracting dimensions by exactly one.
    bool lhs_has_non_contracting_dim =
        lhs->shape().rank() ==
        dim_numbers.lhs_batch_dimensions_size() +
            dim_numbers.lhs_contracting_dimensions_size() + 1;
    bool rhs_has_non_contracting_dim =
        rhs->shape().rank() ==
        dim_numbers.rhs_batch_dimensions_size() +
            dim_numbers.rhs_contracting_dimensions_size() + 1;

    // Matrix-matrix: already a gemm, nothing to do.
    if (lhs_has_non_contracting_dim && rhs_has_non_contracting_dim) {
      return absl::OkStatus();
    }
    // Vector-vector: not handled by this rewrite.
    if (!lhs_has_non_contracting_dim && !rhs_has_non_contracting_dim) {
      return absl::OkStatus();
    }
    // Dynamic shapes cannot be bitcast safely here.
    if (dot->shape().is_dynamic()) {
      return absl::OkStatus();
    }
    changed_ = true;

    HloComputation* computation = dot->parent();
    HloInstruction* new_lhs = lhs;
    if (!lhs_has_non_contracting_dim) {
      TF_ASSIGN_OR_RETURN(new_lhs, AddBitcastWithTrailingUnitDim(lhs));
    }
    HloInstruction* new_rhs = rhs;
    if (!rhs_has_non_contracting_dim) {
      TF_ASSIGN_OR_RETURN(new_rhs, AddBitcastWithTrailingUnitDim(rhs));
    }

    // Build the output shape of the new dot by inserting the degenerate
    // non-contracting dimension at the position matching the expanded side.
    std::vector<int64_t> new_out_dimensions;
    new_out_dimensions.reserve(dot->shape().dimensions().size() + 1);
    for (int64_t dim_size : dot->shape().dimensions()) {
      new_out_dimensions.push_back(dim_size);
    }
    if (!lhs_has_non_contracting_dim) {
      // The lhs unit dimension precedes the rhs non-contracting dimension,
      // which is currently last.
      int non_contracting_dim_size = new_out_dimensions.back();
      new_out_dimensions[new_out_dimensions.size() - 1] = 1;
      new_out_dimensions.push_back(non_contracting_dim_size);
    } else {
      // The rhs unit dimension is appended last.
      new_out_dimensions.push_back(1);
    }
    Shape new_out_shape(
        dot->shape().element_type(), new_out_dimensions,
        absl::InlinedVector<bool, 4>(new_out_dimensions.size(), false),
        {});
    TF_ASSIGN_OR_RETURN(
        *new_out_shape.mutable_layout(),
        GetLayoutWithNewMinorMostDimension(dot->shape().layout()));

    HloInstruction* new_dot =
        computation->AddInstruction(HloInstruction::CreateDot(
            new_out_shape, new_lhs, new_rhs, dot->dot_dimension_numbers(),
            dot->precision_config()));
    // Bitcast back to the original dot shape so users are unaffected.
    HloInstruction* bitcast = computation->AddInstruction(
        HloInstruction::CreateBitcast(dot->shape(), new_dot));
    return computation->ReplaceInstruction(dot, bitcast);
  }

  bool changed() const { return changed_; }

 private:
  // Adds (to `operand`'s computation) a bitcast of `operand` whose shape has
  // a degenerate dimension of size 1 appended as the new minor-most
  // dimension. Fails if the operand's layout is not normalized.
  static absl::StatusOr<HloInstruction*> AddBitcastWithTrailingUnitDim(
      HloInstruction* operand) {
    const Shape& shape = operand->shape();
    absl::Span<const int64_t> dimensions = shape.dimensions();
    std::vector<int64_t> new_dimensions(dimensions.begin(), dimensions.end());
    new_dimensions.push_back(1);
    Shape new_shape(
        shape.element_type(), new_dimensions,
        absl::InlinedVector<bool, 4>(new_dimensions.size(), false),
        {});
    TF_ASSIGN_OR_RETURN(*new_shape.mutable_layout(),
                        GetLayoutWithNewMinorMostDimension(shape.layout()));
    return operand->parent()->AddInstruction(
        HloInstruction::CreateBitcast(new_shape, operand));
  }

  bool changed_ = false;
};
}
// Runs the gemv->gemm rewrite over every non-fusion computation in `module`
// restricted to `execution_threads`. Returns true iff any dot was rewritten.
absl::StatusOr<bool> GemvRewriter::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  GemvRewriterVisitor visitor;
  for (HloComputation* const computation :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_RETURN_IF_ERROR(computation->Accept(&visitor));
  }
  return visitor.changed();
}
}
} | #include "xla/service/gpu/transforms/gemv_rewriter.h"
#include <memory>
#include <optional>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
// Test fixture; inherits HLO parsing and pass-running helpers from
// HloTestBase.
class GemvRewriterTest : public HloTestBase {};
TEST_F(GemvRewriterTest, RewriteMatrixVectorMultiplicationToGemm) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[32,7] parameter(0)
p1 = f32[7] parameter(1)
ROOT d = f32[32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
TEST_F(GemvRewriterTest, RewriteVectorMatrixMultiplicationToGemm) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[7] parameter(0)
p1 = f32[7,32] parameter(1)
ROOT d = f32[32] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
TEST_F(GemvRewriterTest, RewriteMatrixVectorMultiplicationWithBatch) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[2,5,32,7] parameter(0)
p1 = f32[2,5,7] parameter(1)
ROOT d = f32[2,5,32] dot(p0, p1),
lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
lhs_contracting_dims={3}, rhs_contracting_dims={2}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
// A vector-vector dot (scalar result) has no non-contracting dimension on
// either side; the rewriter must leave it untouched (std::nullopt expected
// pattern means "no change").
TEST_F(GemvRewriterTest, DotNotRewriteVectorVectorMultiplication) {
  const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[7] parameter(0)
p1 = f32[7] parameter(1)
ROOT d = f32[] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})";
  RunAndFilecheckHloRewrite(hlo, GemvRewriter(), std::nullopt);
}
// A matrix-matrix dot is already a gemm; the rewriter must leave it
// untouched (std::nullopt expected pattern means "no change").
TEST_F(GemvRewriterTest, DotNotRewriteMatrixMatrixMultiplication) {
  const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[5,7] parameter(0)
p1 = f32[7,32] parameter(1)
ROOT d = f32[5,32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
  RunAndFilecheckHloRewrite(hlo, GemvRewriter(), std::nullopt);
}
// Operands/result with non-normalized layouts are unsupported: the pass must
// fail (not silently no-op), surfacing the "Layout is not normalized."
// error from GetLayoutWithNewMinorMostDimension.
TEST_F(GemvRewriterTest, DoNotRewriteDotsWithNonNormalizedLayout) {
  const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[5,32,7]{2,1,0} parameter(0)
p1 = f32[5,7]{0,1} parameter(1)
ROOT d = f32[5,32]{0,1} dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(hlo));
  GemvRewriter rewriter;
  absl::StatusOr<bool> result = this->RunHloPass(&rewriter, module.get());
  EXPECT_FALSE(result.ok());
  EXPECT_EQ(result.status().message(), "Layout is not normalized.");
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemv_rewriter.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemv_rewriter_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e9184746-8b58-4033-bae2-5a7c164051b9 | cpp | tensorflow/tensorflow | copy_insertion | third_party/xla/xla/service/copy_insertion.cc | third_party/xla/xla/service/copy_insertion_test.cc | #include "xla/service/copy_insertion.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/frontend_attributes.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_reachability.h"
#include "xla/map_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/compile_time_cap.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_ordering.h"
#include "xla/service/hlo_value.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using absl::StrAppend;
bool IsReadonlyEntryParameterValue(const HloValue& value) {
const HloComputation* computation = value.defining_instruction()->parent();
return value.defining_instruction()->opcode() == HloOpcode::kParameter &&
computation == computation->parent()->entry_computation() &&
!computation->parent()->input_output_alias_config().ParameterHasAlias(
value.defining_instruction()->parameter_number(), value.index());
}
bool IsConstantValue(const HloValue& value) {
return value.defining_instruction()->opcode() == HloOpcode::kConstant;
}
// A value is read-only when the program must not mutate its buffer: it is a
// constant or a non-aliased entry-computation parameter.
bool ValueIsReadOnly(const HloValue& value) {
  if (IsConstantValue(value)) {
    return true;
  }
  return IsReadonlyEntryParameterValue(value);
}
// Knobs controlling which values get special-case copies inserted.
struct SpecialCaseCopyPolicy {
  // Presumably: copy buffers that appear more than once in the computation
  // root (usage not visible in this chunk — confirm at call sites).
  bool copy_root_replicated_buffers = false;
  // Copy read-only values (constants and non-aliased entry parameters) that
  // reach the computation root; see ShouldCopyRootValue().
  bool copy_parameters_and_constants = false;
};
// Builds the special-case copy policy for `computation`. Both flags are
// enabled only for the module's entry computation; `node` is currently
// unused.
SpecialCaseCopyPolicy GetSpecialCaseCopyPolicy(const CallGraphNode& node,
                                               HloModule* module,
                                               HloComputation* computation) {
  const bool is_entry = (computation == module->entry_computation());
  SpecialCaseCopyPolicy policy;
  policy.copy_parameters_and_constants = is_entry;
  policy.copy_root_replicated_buffers = is_entry;
  return policy;
}
// Returns whether `value`, which reaches the computation root, must be
// copied under `policy`: only read-only values are copied, and only when
// the policy requests copying parameters and constants.
bool ShouldCopyRootValue(const HloValue& value,
                         const SpecialCaseCopyPolicy& policy) {
  return policy.copy_parameters_and_constants && ValueIsReadOnly(value);
}
// Deep-copies `from` and `to` (which must have compatible shapes) at the
// leaf indices flagged true in `indices_to_copy`, then adds a control edge
// from each copy of `from` to the corresponding copy of `to` so the reads
// complete before the writes. Returns the pair (deep copy of `from`, deep
// copy of `to`).
absl::StatusOr<std::pair<HloInstruction*, HloInstruction*>>
DeepCopyAndAddControlEdges(HloInstruction* from, HloInstruction* to,
                           const ShapeTree<bool>& indices_to_copy) {
  DCHECK(ShapeUtil::Compatible(from->shape(), to->shape()));
  // Filled in with the kCopy instruction created at each copied index of
  // `from` (nullptr where no copy was made).
  ShapeTree<HloInstruction*> from_copy_tree(from->shape(),
                                            nullptr);
  TF_ASSIGN_OR_RETURN(HloInstruction * from_deep_copy,
                      from->parent()->DeepCopyInstruction(
                          from, &indices_to_copy, &from_copy_tree));
  // Same for `to`.
  ShapeTree<HloInstruction*> to_copy_tree(to->shape(), nullptr);
  TF_ASSIGN_OR_RETURN(
      HloInstruction * to_deep_copy,
      to->parent()->DeepCopyInstruction(to, &indices_to_copy, &to_copy_tree));
  // Pair up the copies index-by-index and sequence each from-copy before its
  // matching to-copy. Both trees were driven by the same `indices_to_copy`,
  // so copies must exist (or not) at exactly the same indices.
  for (const auto& pair : from_copy_tree) {
    const ShapeIndex& index = pair.first;
    HloInstruction* from_copy = pair.second;
    HloInstruction* to_copy = to_copy_tree.element(index);
    if (from_copy == nullptr) {
      TF_RET_CHECK(to_copy == nullptr);
      continue;
    }
    TF_RET_CHECK(to_copy != nullptr);
    TF_RETURN_IF_ERROR(from_copy->AddControlDependencyTo(to_copy));
  }
  return std::make_pair(from_deep_copy, to_deep_copy);
}
// Fills `indices_to_copy` with, for each shape index of `xla_while`, whether
// the value there may differ between the while instruction and its init
// operand — i.e. whether a copy is needed. Returns true if any index needs
// a copy.
bool IndicesToCopyForWhile(const HloDataflowAnalysis& dataflow,
                           const HloInstruction* xla_while,
                           ShapeTree<bool>* indices_to_copy) {
  DCHECK(ShapeUtil::Compatible(indices_to_copy->shape(), xla_while->shape()));
  const HloInstruction* init = xla_while->operand(0);
  bool any_copies = false;
  for (auto& [index, should_copy] : *indices_to_copy) {
    if (dataflow.GetValueSet(init, index).values().size() > 1 ||
        dataflow.GetValueSet(xla_while, index).values().size() > 1) {
      // Multiple values reach this index on either side: conservatively copy.
      should_copy = true;
    } else {
      // Unique values on both sides: copy only if they differ.
      should_copy = dataflow.GetUniqueValueAt(xla_while, index) !=
                    dataflow.GetUniqueValueAt(init, index);
    }
    any_copies |= should_copy;
  }
  return any_copies;
}
// Fills `indices_to_copy` with, for each shape index of `xla_conditional`,
// whether the value there is a phi defined by the conditional itself (such
// indices require a copy). Returns true if any index needs a copy.
bool IndicesToCopyForConditional(const HloDataflowAnalysis& dataflow,
                                 const HloInstruction* xla_conditional,
                                 ShapeTree<bool>* indices_to_copy) {
  DCHECK(ShapeUtil::Compatible(indices_to_copy->shape(),
                               xla_conditional->shape()));
  bool any_copies = false;
  for (auto& [index, should_copy] : *indices_to_copy) {
    // Conditionals are expected to have exactly one value per output index.
    CHECK_EQ(dataflow.GetValueSet(xla_conditional, index).values().size(), 1);
    const auto* value = dataflow.GetValueSet(xla_conditional, index).values()[0];
    should_copy =
        value->is_phi() && value->defining_instruction() == xla_conditional;
    any_copies |= should_copy;
  }
  return any_copies;
}
// Inserts copies around `xla_while` so that its init operand, body parameter
// and body root do not improperly share buffers: copies are added only at
// the shape indices identified by IndicesToCopyForWhile.
absl::Status AddCopiesForWhile(const HloAliasAnalysis& alias_analysis,
                               HloInstruction* xla_while) {
  VLOG(2) << "Adding copies for kWhile instruction " << xla_while->name();
  TF_RET_CHECK(xla_while->opcode() == HloOpcode::kWhile);
  ShapeTree<bool> indices_to_copy(xla_while->shape());
  if (!IndicesToCopyForWhile(alias_analysis.dataflow_analysis(), xla_while,
                             &indices_to_copy)) {
    VLOG(2) << "No copies necessary for kWhile instruction "
            << xla_while->name();
    return absl::OkStatus();
  }
  VLOG(2) << "Adding copies for " << xla_while->name() << " at indices:";
  for (auto& pair : indices_to_copy) {
    if (pair.second) {
      VLOG(2) << "  " << pair.first;
    }
  }
  // Copy the init operand and feed the copy to the while instead.
  HloInstruction* while_init = xla_while->mutable_operand(0);
  TF_ASSIGN_OR_RETURN(
      HloInstruction * while_init_copy,
      xla_while->parent()->DeepCopyInstruction(while_init, &indices_to_copy));
  TF_RETURN_IF_ERROR(while_init->ReplaceUseWith(xla_while, while_init_copy));
  // Copy the body parameter and root at the same indices, with control edges
  // ordering parameter copies before root copies.
  HloComputation* body = xla_while->while_body();
  HloInstruction* param = body->parameter_instruction(0);
  HloInstruction* root = body->root_instruction();
  TF_RET_CHECK(param != root);
  // Snapshot the users first: ReplaceUseWith below mutates the user list.
  std::vector<HloInstruction*> param_users = param->users();
  TF_ASSIGN_OR_RETURN(auto pair,
                      DeepCopyAndAddControlEdges(param, root, indices_to_copy));
  HloInstruction* param_copy = pair.first;
  HloInstruction* root_copy = pair.second;
  // Redirect all previous parameter users to the parameter's copy, and make
  // the root's copy the new body root.
  for (HloInstruction* user : param_users) {
    TF_RETURN_IF_ERROR(param->ReplaceUseWith(user, param_copy));
  }
  body->set_root_instruction(root_copy);
  return absl::OkStatus();
}
// Inserts a deep copy of operand `operand_number` of `in_place_op` and makes
// the op consume the copy instead, so the in-place operation does not
// clobber a buffer shared with other users. `alias_analysis` is part of the
// signature but unused here.
absl::Status AddCopiesForInPlaceOperation(
    const HloAliasAnalysis& alias_analysis, HloInstruction* in_place_op,
    int64_t operand_number) {
  VLOG(2) << "Adding copies for in-place operation " << in_place_op->name();
  HloInstruction* operand = in_place_op->mutable_operand(operand_number);
  TF_ASSIGN_OR_RETURN(HloInstruction * deep_copy,
                      in_place_op->parent()->DeepCopyInstruction(operand));
  TF_RETURN_IF_ERROR(
      operand->ReplaceUseWith(in_place_op, operand_number, deep_copy));
  return absl::OkStatus();
}
// For entry computations with input/output aliasing, deep-copies the aliased
// parameter indices and the aliased output indices, and adds control edges
// so each parameter copy completes before the corresponding output copy.
// This guarantees the aliased input buffer is fully read before it is
// overwritten with the output.
absl::Status AddCopiesForAliasedInputOutputs(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  HloComputation* entry = module->entry_computation();
  // Only process the entry if its thread is among the requested ones.
  if (!HloInstruction::IsThreadIncluded(entry->execution_thread(),
                                        execution_threads)) {
    return absl::OkStatus();
  }
  HloInstruction* root = entry->root_instruction();
  // Output indices involved in an alias (filled in per-parameter below).
  ShapeTree<bool> output_indices_to_copy(root->shape());
  // For each parameter that was copied, the tree of copy instructions made
  // for it (indexed by parameter number); nullopt if not copied.
  std::vector<std::optional<ShapeTree<HloInstruction*>>> copied_parameters(
      entry->num_parameters());
  bool has_alias = false;
  for (auto* param : entry->parameter_instructions()) {
    bool param_has_alias = false;
    ShapeTree<bool> param_indices_to_copy(param->shape());
    // Mark the parameter and output indices participating in an alias.
    module->input_output_alias_config().ForEachAlias(
        [&](const ShapeIndex& output_index,
            const HloInputOutputAliasConfig::Alias& alias) {
          if (alias.parameter_number == param->parameter_number()) {
            param_has_alias = true;
            *(param_indices_to_copy.mutable_element(alias.parameter_index)) =
                true;
            *(output_indices_to_copy.mutable_element(output_index)) = true;
          }
        });
    if (!param_has_alias) {
      continue;
    }
    TF_RET_CHECK(param->parameter_number() < entry->num_parameters());
    TF_RET_CHECK(!copied_parameters[param->parameter_number()]);
    has_alias = true;
    // Snapshot users before rewriting; ReplaceUseWith mutates the list.
    std::vector<HloInstruction*> users = param->users();
    ShapeTree<HloInstruction*> param_copy_tree(param->shape(),
                                               nullptr);
    TF_ASSIGN_OR_RETURN(HloInstruction * copied,
                        entry->DeepCopyInstruction(
                            param, &param_indices_to_copy, &param_copy_tree));
    // If the parameter is itself the root, the copy becomes the root.
    if (param == root) {
      entry->set_root_instruction(copied);
      root = copied;
    }
    for (HloInstruction* user : users) {
      TF_RETURN_IF_ERROR(param->ReplaceUseWith(user, copied));
    }
    copied_parameters[param->parameter_number()] = param_copy_tree;
  }
  if (!has_alias) {
    return absl::OkStatus();
  }
  // Deep-copy the root at the aliased output indices.
  ShapeTree<HloInstruction*> output_copy_tree(root->shape(),
                                              nullptr);
  TF_ASSIGN_OR_RETURN(HloInstruction * root_copied,
                      root->parent()->DeepCopyInstruction(
                          root, &output_indices_to_copy, &output_copy_tree));
  // For every alias, order the parameter-side copy before the output-side
  // copy with a control edge.
  TF_RETURN_IF_ERROR(module->input_output_alias_config().ForEachAliasWithStatus(
      [&](const ShapeIndex& output_index,
          const HloInputOutputAliasConfig::Alias& alias) -> absl::Status {
        if (!copied_parameters[alias.parameter_number]) {
          return absl::OkStatus();
        }
        HloInstruction* from =
            copied_parameters[alias.parameter_number]->element(
                alias.parameter_index);
        HloInstruction* to = output_copy_tree.element(output_index);
        TF_RET_CHECK(from != nullptr);
        TF_RET_CHECK(to != nullptr);
        TF_RETURN_IF_ERROR(from->AddControlDependencyTo(to));
        return absl::OkStatus();
      }));
  entry->set_root_instruction(root_copied);
  return absl::OkStatus();
}
// Removes every incoming and outgoing control edge of `instruction`.
absl::Status StripControlDependenciesFrom(HloInstruction* instruction) {
  // Detach outgoing edges one at a time; removal shrinks the successor list.
  while (!instruction->control_successors().empty()) {
    HloInstruction* successor = instruction->control_successors().front();
    TF_RETURN_IF_ERROR(instruction->RemoveControlDependencyTo(successor));
  }
  // Likewise for incoming edges.
  while (!instruction->control_predecessors().empty()) {
    HloInstruction* predecessor = instruction->control_predecessors().front();
    TF_RETURN_IF_ERROR(predecessor->RemoveControlDependencyTo(instruction));
  }
  return absl::OkStatus();
}
class LiveRangeRegions {
public:
struct InstructionInfo {
InstructionInfo() : value_definition(nullptr), is_definition(false) {}
HloInstruction* value_definition;
bool is_definition;
std::string ToString() const {
return absl::StrCat(
"is_definition: ", std::to_string(is_definition),
", value_definition: ",
value_definition ? value_definition->name() : "nullptr");
}
};
typedef HloInstructionMap<InstructionInfo> InstructionMap;
typedef std::pair<HloInstruction*, InstructionInfo> InstructionEntry;
typedef absl::flat_hash_map<const HloComputation*, InstructionMap>
ComputationMap;
InstructionMap& operator[](const HloComputation* computation) {
if (computation_map_.find(computation) == computation_map_.end()) {
computation_vector_.push_back(computation);
}
return computation_map_[computation];
}
const InstructionMap& operator[](const HloComputation* computation) const {
ComputationMap::const_iterator p = computation_map_.find(computation);
CHECK(p != computation_map_.end());
return p->second;
}
absl::InlinedVector<const HloComputation*, 5>::const_iterator begin() const {
return computation_vector_.begin();
}
absl::InlinedVector<const HloComputation*, 5>::const_iterator end() const {
return computation_vector_.end();
}
int64_t size() const {
CHECK_EQ(computation_vector_.size(), computation_map_.size());
return computation_vector_.size();
}
bool empty() const { return size() == 0; }
const HloComputation* Computation(int64_t index) const {
return computation_vector_[index];
}
bool contains(HloInstruction* instr) const {
CHECK_NE(instr, nullptr);
auto* computation = instr->parent();
auto p = computation_map_.find(computation);
if (p == computation_map_.end()) {
return false;
}
auto instr_map = (*p).second;
return instr_map.find(instr) != instr_map.end();
}
std::string ToString() const {
std::string result;
for (const auto* computation : computation_vector_) {
StrAppend(&result, "computation: ", computation->name(), "\n");
for (const auto& entry : computation_map_.at(computation)) {
StrAppend(&result, " entry: ", entry.first->name(), ", ",
entry.second.ToString(), "\n");
}
}
return result;
}
private:
ComputationMap computation_map_;
absl::InlinedVector<const HloComputation*, 5> computation_vector_;
};
namespace {
// Describes the relative runtime order of two live ranges, plus whether one
// range's definition may be "intercepted" (clobbered) between the other's
// definition and use. Orders are a bitmask so unions can be computed.
class Relation {
 public:
  enum RuntimeOrder {
    // Indicates the order cannot be decided or the ranges do not overlap.
    kNoOverlap = 0,
    kSameInstr = 1,
    kBeforeStart = 2,
    kBeforeStartOrSameInstr = kBeforeStart | kSameInstr,
    kAfterEnd = 4,
    kAfterEndOrSameInstr = kAfterEnd | kSameInstr,
    kBeforeStartOrAfterEnd = kBeforeStart | kAfterEnd,
    kBeforeOrAfterOrOverlap = kBeforeStart | kAfterEnd | kSameInstr,
  };
  Relation() : intercept_def_use_(false) {}
  explicit Relation(RuntimeOrder order, bool intercept_def_use = false)
      : intercept_def_use_(intercept_def_use) {
    orders_.push_back(order);
  }
  Relation(const Relation& that)
      : intercept_def_use_(that.intercept_def_use_), orders_(that.orders_) {}
  bool operator==(const Relation& that) const {
    return intercept_def_use_ == that.intercept_def_use_ &&
           absl::c_equal(orders_, that.orders_);
  }
  // The following accessors require a single-source relation (one order).
  bool UseImpliesInterception() const {
    CHECK_EQ(orders_.size(), 1);
    return UseImpliesInterception(orders_[0]);
  }
  bool DefinitionImpliesInterception() const {
    CHECK_EQ(orders_.size(), 1);
    return DefinitionImpliesInterception(orders_[0]);
  }
  bool InterceptDefUse() const { return intercept_def_use_; }
  void UpdateInterception(bool value) {
    CHECK_EQ(orders_.size(), 1);
    intercept_def_use_ = value;
  }
  Relation::RuntimeOrder GetRuntimeOrder() const {
    if (orders_.empty()) {
      return Relation::kNoOverlap;
    }
    CHECK_EQ(orders_.size(), 1);
    return orders_[0];
  }
  bool RuntimeOrderOverlap() const {
    return absl::c_any_of(orders_, ImpliesOverlap);
  }
  bool RuntimeOrderIsUnordered() const {
    return orders_.size() == 1 && orders_[0] == kBeforeStartOrAfterEnd;
  }
  bool RuntimeOrderIsNoOverlap() const {
    return orders_.empty() || (orders_.size() == 1 && orders_[0] == kNoOverlap);
  }
  bool RuntimeOrderIsRunBefore() const {
    return orders_.size() == 1 && orders_[0] == kBeforeStart;
  }
  bool RuntimeOrderIsRunAfter() const {
    return orders_.size() == 1 && orders_[0] == kAfterEnd;
  }
  std::string ToString() const {
    return absl::StrCat("Interception = ", intercept_def_use_, ";",
                        absl::StrJoin(orders_, ","));
  }
  static bool DefinitionImpliesInterception(RuntimeOrder definition) {
    return (definition == kAfterEnd || definition == kBeforeStartOrAfterEnd);
  }
  static bool UseImpliesInterception(RuntimeOrder use) {
    return (use == kBeforeStart || use == kBeforeStartOrAfterEnd);
  }
  // Merges `rel` (from the same source instruction) by unioning the order
  // bitmasks into a single order.
  void UnionRelationFromSameSource(const Relation& rel) {
    CHECK_LE(orders_.size(), 1);
    CHECK_EQ(rel.orders_.size(), 1);
    if (orders_.empty()) {
      orders_.push_back(rel.orders_[0]);
    } else {
      orders_[0] = Union(orders_[0], rel.orders_[0]);
    }
    intercept_def_use_ = intercept_def_use_ || rel.intercept_def_use_;
  }
  // Merges `rel` (from a different source): keeps distinct orders unless one
  // subsumes the other.
  void UnionRelationFromDifferentSource(const Relation& rel) {
    if (rel.orders_.empty()) {
      return;
    }
    CHECK_EQ(rel.orders_.size(), 1);
    intercept_def_use_ = intercept_def_use_ || rel.intercept_def_use_;
    for (auto& local_order : orders_) {
      if (OverwriteIfSubsume(rel.orders_[0], &local_order)) {
        return;
      }
    }
    orders_.push_back(rel.orders_[0]);
  }
  // Returns the order as seen from the opposite operand, e.g. "before" seen
  // from the other side becomes "after". Symmetric orders map to themselves.
  static Relation::RuntimeOrder ReverseRuntimeOrder(RuntimeOrder order) {
    switch (order) {
      case kBeforeStart:
        return kAfterEnd;
      case kBeforeStartOrSameInstr:
        return kAfterEndOrSameInstr;
      case kAfterEnd:
        return kBeforeStart;
      case kAfterEndOrSameInstr:
        return kBeforeStartOrSameInstr;
      // kNoOverlap, kSameInstr, kBeforeStartOrAfterEnd and
      // kBeforeOrAfterOrOverlap are their own reverses. A default case also
      // prevents flowing off the end of this value-returning function (UB).
      default:
        return order;
    }
  }

 private:
  // Whether a use may be intercepted by an interfering definition.
  bool intercept_def_use_;
  // One RuntimeOrder per distinct relation source (see the Union methods).
  absl::InlinedVector<RuntimeOrder, 4> orders_;
  static RuntimeOrder Union(RuntimeOrder o1, RuntimeOrder o2) {
    return static_cast<Relation::RuntimeOrder>(o1 | o2);
  }
  static bool ImpliesOverlap(RuntimeOrder o) {
    return o >= RuntimeOrder::kBeforeStartOrAfterEnd;
  }
  // Returns true if o1 already covers every possibility in o2.
  static bool Subsume(RuntimeOrder o1, RuntimeOrder o2) {
    return Union(o1, o2) == o1;
  }
  // If one order subsumes the other, stores the larger one in *o1 and
  // returns true; otherwise leaves *o1 alone and returns false.
  static bool OverwriteIfSubsume(RuntimeOrder o2, RuntimeOrder* o1) {
    // Null-check must precede the dereference (the previous version
    // dereferenced *o1 before CHECK_NE(o1, nullptr)).
    CHECK_NE(o1, nullptr);
    if (*o1 == o2) {
      return true;
    }
    if (Subsume(o2, *o1)) {
      *o1 = o2;
      return true;
    } else if (Subsume(*o1, o2)) {
      return true;
    }
    return false;
  }
};
// Computes runtime-order relations between live ranges using an HloOrdering,
// optionally forcing an order between otherwise-unordered instructions by
// recording control dependencies (added later via
// AddControlDependenceForUnorderedOps). Results are memoized in
// saved_relations_.
class ComputeRelativeLocation {
 public:
  typedef LiveRangeRegions::InstructionEntry InstructionEntry;
  explicit ComputeRelativeLocation(HloOrdering* ordering)
      : ordering_(ordering) {
    VLOG(3) << "New analysis";
  }
  // Relation of entry1 (a def or use in live range 1) relative to entry2;
  // `instr2_can_modify` says whether entry2's instruction may write the
  // buffer, enabling interception detection.
  Relation Compute(const InstructionEntry& entry1,
                   const InstructionEntry& entry2, bool instr2_can_modify) {
    auto def = entry1.second.value_definition;
    auto use = entry1.first;
    Relation::RuntimeOrder order =
        ComputeRuntimeOrdering(entry2.first, entry1.first);
    // Same instruction but one side is a definition and the other a use:
    // the definition logically happens first.
    if (order == Relation::kSameInstr &&
        entry1.second.is_definition != entry2.second.is_definition) {
      if (entry1.second.is_definition) {
        order = Relation::kBeforeStart;
      } else {
        order = Relation::kAfterEnd;
      }
    }
    bool intercept = AlwaysForceInterception(entry2.first);
    if (def == nullptr || !instr2_can_modify) {
      return Relation(order, intercept);
    }
    // Parameter defined, used at the root: conservatively intercepted.
    if (def->opcode() == HloOpcode::kParameter &&
        use == use->parent()->root_instruction()) {
      VLOG(3) << "Setting interception due to parameter/root relation";
      return Relation(order, true);
    }
    // A while's body instruction cannot intercept a def-use pair that runs
    // entirely after (or entirely before) the while itself.
    if (use->parent() == def->parent() &&
        ComputeRuntimeOrdering(use, entry2.first) == Relation::kAfterEnd &&
        def->opcode() == HloOpcode::kWhile &&
        entry2.first->parent() == def->while_body()) {
      return Relation(order, false);
    }
    if (use->parent() == def->parent() &&
        ComputeRuntimeOrdering(def, entry2.first) == Relation::kBeforeStart &&
        use->opcode() == HloOpcode::kWhile &&
        entry2.first->parent() == use->while_body()) {
      return Relation(order, false);
    }
    // Multi-output defs inside conditional branches are conservatively
    // treated as intercepting.
    if (use->parent() == def->parent() &&
        def->parent()->IsConditionalBranchComputation() &&
        def == entry2.first && def->shape().IsTuple()) {
      VLOG(3) << "Setting interception for multi-output instruction inside "
                 "conditional branch: "
              << def->name();
      return Relation(order, true);
    }
    // General case: the use runs before entry2 and entry2 may run after the
    // definition, so entry2 can clobber the value between def and use.
    if (Relation::UseImpliesInterception(order)) {
      auto order2 = ComputeRuntimeOrdering(entry2.first, def);
      if (Relation::DefinitionImpliesInterception(order2)) {
        VLOG(3) << "Setting interception for " << def->ToString()
                << " with use: " << entry1.first->ToString();
        intercept = true;
      }
    }
    return Relation(order, intercept);
  }
  // Relation of live range 1 relative to live range 2, unioned over all
  // instruction pairs; unordered pairs may be forced into an order (recorded
  // in ctrl_deps_) when that is safe.
  Relation Compute(const LiveRangeRegions& range1,
                   const LiveRangeRegions& range2) {
    Relation dir_src_dest;
    for (const auto* computation1 : range1) {
      for (const auto* computation2 : range2) {
        for (auto instr_entry2 : range2[computation2]) {
          // Only compare against computations that dominate in the call
          // graph.
          if (!ordering_->call_graph().Dominates(computation1, computation2)) {
            continue;
          }
          VLOG(3) << "Locationing " << instr_entry2.first->ToString();
          bool instr2_can_modify =
              InstructionCanIntercept(instr_entry2, range1);
          Relation instr2_relation;
          std::vector<InstructionEntry> unordered_ops;
          bool unordered_intercept = false;
          for (auto instr_entry1 : range1[computation1]) {
            auto rel = Compute(instr_entry1, instr_entry2, instr2_can_modify);
            VLOG(3) << "New relation with " << instr_entry1.first->name()
                    << ": " << rel.ToString();
            if (!rel.RuntimeOrderIsUnordered()) {
              instr2_relation.UnionRelationFromSameSource(rel);
            } else {
              // Defer unordered pairs; ForceRuntimeOrder may still order
              // them via control dependencies.
              unordered_ops.push_back(instr_entry1);
              unordered_intercept |= rel.InterceptDefUse();
            }
            VLOG(3) << "instr2 relation: " << instr2_relation.ToString();
          }
          if (!ForceRuntimeOrder(unordered_ops, instr_entry2,
                                 instr2_relation.GetRuntimeOrder())) {
            VLOG(3) << "Unable to force ordering of unordered ops";
            instr2_relation.UnionRelationFromSameSource(Relation(
                Relation::kBeforeStartOrAfterEnd, unordered_intercept));
          }
          dir_src_dest.UnionRelationFromDifferentSource(instr2_relation);
          VLOG(3) << "Resulting relation: " << dir_src_dest.ToString();
        }
      }
    }
    return dir_src_dest;
  }
  // Materializes the control dependencies recorded in ctrl_deps_ into the
  // HLO graph. Requires a PredecessorHloOrdering so reachability can be
  // incrementally updated; returns false otherwise.
  bool AddControlDependenceForUnorderedOps() {
    if (ctrl_deps_.empty()) {
      return true;
    }
    PredecessorHloOrdering* ordering =
        dynamic_cast<PredecessorHloOrdering*>(ordering_);
    if (ordering == nullptr) {
      return false;
    }
    for (const auto& comp_it : ctrl_deps_) {
      HloComputation* parent = comp_it.first;
      HloReachabilityMap& reachability_map = ordering->reachability_map(parent);
      for (const auto& instr_it : comp_it.second) {
        HloInstruction* entry1 = instr_it.first;
        for (HloInstruction* entry2 : instr_it.second) {
          VLOG(3) << "Add control dependence between " << entry2->name()
                  << " vs " << entry1->name();
          TF_CHECK_OK(entry2->AddControlDependencyTo(entry1));
        }
        reachability_map.UpdateReachabilityThroughInstruction(entry1);
        for (HloInstruction* entry2 : instr_it.second) {
          DCHECK(ordering_->GetExecutionConstraint(entry1, entry2) ==
                 HloOrdering::ExecutionConstraint::kRunAfter);
        }
      }
    }
    return true;
  }

 private:
  enum ComputeStatus {
    kFullyComputed,
    kPartiallyComputed,
    kNotComputed,
  };
  typedef std::pair<ComputeStatus, Relation::RuntimeOrder> SavedRelation;
  // Attempts to force every op in `unordered_ops` into `desired_relation`
  // with entry2 by recording control dependencies. Returns false when that
  // is not safe (different computations, root involved, or a copy whose
  // ordering the rewrite depends on).
  bool ForceRuntimeOrder(absl::Span<const InstructionEntry> unordered_ops,
                         const InstructionEntry entry2,
                         Relation::RuntimeOrder desired_relation) {
    if (unordered_ops.empty()) {
      return true;
    }
    if (desired_relation != Relation::kBeforeStart &&
        desired_relation != Relation::kAfterEnd) {
      return false;
    }
    // True if `instr` updates an in-place operand that is either `op` or
    // (when op == nullptr) any copy.
    auto ModifiesNonCopy = [](HloInstruction* instr, const HloInstruction* op) {
      auto in_place = HloDataflowAnalysis::GetInPlaceInputOutputPairs(instr);
      if (in_place.empty()) {
        return false;
      }
      return absl::c_any_of(
          in_place, [&](const std::pair<HloOperandIndex, ShapeIndex>&
                            operand_and_output_index) {
            auto* op2 =
                instr->operand(operand_and_output_index.first.operand_number);
            return (op == nullptr) ? (op2->opcode() == HloOpcode::kCopy)
                                   : (op2 == op);
          });
    };
    for (const InstructionEntry& entry1 : unordered_ops) {
      if (entry1.first->parent() != entry2.first->parent()) {
        return false;
      }
      HloInstruction* pred = (desired_relation == Relation::kBeforeStart)
                                 ? entry2.first
                                 : entry1.first;
      HloInstruction* succ = (desired_relation == Relation::kBeforeStart)
                                 ? entry1.first
                                 : entry2.first;
      if (pred == pred->parent()->root_instruction()) {
        return false;
      }
      if (succ->opcode() == HloOpcode::kCopy &&
          ModifiesNonCopy(pred, succ->operand(0))) {
        VLOG(3) << "Failed to force unordered op ordering due to copy ordering "
                << " between " << pred->name() << " vs " << succ->name();
        return false;
      }
    }
    for (const InstructionEntry& entry1 : unordered_ops) {
      Save(entry2.first, entry1.first, desired_relation,
           true);
    }
    return true;
  }
  // Instructions whose side effects force conservative interception
  // regardless of buffer analysis (async ops, collective-permute).
  static bool AlwaysForceInterception(HloInstruction* instr) {
    if (HloDataflowAnalysis::IsAsynchronousOperationStart(instr->opcode()) ||
        HloDataflowAnalysis::IsAsynchronousOperationDone(instr->opcode())) {
      return true;
    }
    switch (instr->opcode()) {
      case HloOpcode::kCollectivePermute:
        return true;
      default:
        return false;
    }
  }
  // Whether `entry`'s instruction can write a buffer belonging to `region`.
  bool InstructionCanIntercept(const InstructionEntry& entry,
                               const LiveRangeRegions& region) {
    auto instr = entry.first;
    if (!entry.second.is_definition) {
      // A pure use can still write through in-place operands.
      for (const auto& operand_and_output_index :
           HloDataflowAnalysis::GetInPlaceInputOutputPairs(instr)) {
        const HloOperandIndex& operand_index = operand_and_output_index.first;
        if (region.contains(
                instr->mutable_operand(operand_index.operand_number))) {
          return true;
        }
      }
      return false;
    }
    switch (instr->opcode()) {
      case HloOpcode::kCopy: {
        // A same-shape copy of a value in the region reads but does not
        // clobber it.
        HloInstruction* operand = instr->mutable_operand(0);
        if (operand->opcode() == HloOpcode::kGetTupleElement) {
          operand = operand->mutable_operand(0);
        }
        if (region.contains(operand) &&
            ShapeUtil::Equal(instr->shape(), instr->operand(0)->shape())) {
          return false;
        }
        return true;
      }
      // Pass-through / control-flow ops do not write buffers themselves.
      case HloOpcode::kParameter:
      case HloOpcode::kTuple:
      case HloOpcode::kGetTupleElement:
      case HloOpcode::kWhile:
      case HloOpcode::kCall:
      case HloOpcode::kConditional:
        return false;
      default:
        return true;
    }
    return true;
  }
  // Memoization lookup: fully computed if stored as (op2 -> op1); partially
  // computed (reversed) if stored the other way.
  SavedRelation AlreadyComputed(HloInstruction* op1, HloInstruction* op2) {
    auto p2 = saved_relations_.find(op2);
    if (p2 != saved_relations_.end()) {
      auto p1 = (*p2).second.find(op1);
      if (p1 != (*p2).second.end()) {
        return SavedRelation(kFullyComputed, (*p1).second);
      }
    }
    p2 = saved_relations_.find(op1);
    if (p2 != saved_relations_.end()) {
      auto p1 = (*p2).second.find(op2);
      if (p1 != (*p2).second.end()) {
        return SavedRelation(kPartiallyComputed,
                             Relation::ReverseRuntimeOrder((*p1).second));
      }
    }
    return SavedRelation(kNotComputed, Relation::kNoOverlap);
  }
  // Memoizes `relation` between entry1 and entry2; when the pair was
  // originally unordered, also records the implied control dependency in
  // ctrl_deps_ (deduplicating against already-known orders).
  Relation::RuntimeOrder Save(HloInstruction* entry1, HloInstruction* entry2,
                              const Relation::RuntimeOrder relation,
                              bool is_unordered_originally = false) {
    CHECK_EQ(AlreadyComputed(entry1, entry2).first, kNotComputed);
    CHECK_NE(relation, Relation::kBeforeStartOrAfterEnd);
    saved_relations_[entry2][entry1] = relation;
    if (is_unordered_originally) {
      CHECK(relation == Relation::kBeforeStart ||
            relation == Relation::kAfterEnd)
          << relation;
      HloInstruction* pred =
          (relation == Relation::kBeforeStart) ? entry1 : entry2;
      HloInstruction* succ =
          (relation == Relation::kBeforeStart) ? entry2 : entry1;
      VLOG(3) << "Save unordered relation: " << pred->name() << " vs "
              << succ->name();
      CHECK_EQ(succ->parent(), pred->parent());
      auto& dep_vec = ctrl_deps_[succ->parent()][succ];
      for (HloInstruction*& op : dep_vec) {
        auto rel = AlreadyComputed(pred, op);
        if (rel.first != kNotComputed) {
          // An existing dependency already covers (or is covered by) the
          // new one; strengthen it in place if needed and stop.
          if (rel.second == Relation::kAfterEnd) {
            op = pred;
          } else {
            CHECK(rel.second == Relation::kBeforeStart);
          }
          return relation;
        }
      }
      VLOG(2) << "Forcing unordered: " << pred->name() << " vs "
              << succ->name();
      dep_vec.push_back(pred);
    }
    return relation;
  }
  // Runtime order of instr1 relative to instr2, memoized, falling back to
  // the HloOrdering constraint and previously forced control dependencies.
  Relation::RuntimeOrder ComputeRuntimeOrdering(HloInstruction* instr1,
                                                HloInstruction* instr2) {
    auto saved_relation = AlreadyComputed(instr1, instr2);
    if (saved_relation.first != kNotComputed) {
      VLOG(3) << "Already computed between " << instr1->name() << " vs "
              << instr2->name();
      return saved_relation.second;
    }
    auto constraint = ordering_->GetExecutionConstraint(instr1, instr2);
    // NOTE(review): this switch covers every ExecutionConstraint value but
    // has no return after it — flowing off the end would be UB if a new
    // enumerator were added without updating this switch.
    switch (constraint) {
      case HloOrdering::ExecutionConstraint::kIsSame:
        return Save(instr1, instr2, Relation::kSameInstr);
      case HloOrdering::ExecutionConstraint::kRunBeforeEnd:
        return Save(instr1, instr2, Relation::kBeforeStartOrSameInstr);
      case HloOrdering::ExecutionConstraint::kRunBeforeStart:
        return Save(instr1, instr2, Relation::kBeforeStart);
      case HloOrdering::ExecutionConstraint::kRunAfter:
        return Save(instr1, instr2, Relation::kAfterEnd);
      case HloOrdering::ExecutionConstraint::kRunExclusiveBefore:
      case HloOrdering::ExecutionConstraint::kRunExclusiveAfter:
        return Save(instr1, instr2, Relation::kNoOverlap);
      case HloOrdering::ExecutionConstraint::kUnordered: {
        if (instr1->parent() != instr2->parent()) {
          return Relation::kBeforeStartOrAfterEnd;
        }
        auto ControlDependenceBefore = [&](HloInstruction* op1,
                                           HloInstruction* op2) {
          auto constraint = ComputeRuntimeOrdering(op1, op2);
          if (constraint == Relation::kBeforeStart ||
              constraint == Relation::kSameInstr ||
              constraint == Relation::kBeforeStartOrSameInstr) {
            return true;
          } else {
            return false;
          }
        };
        if (!ctrl_deps_.empty()) {
          // NOTE(review): this copies the whole per-computation dependency
          // map (and ctrl_deps_[...] default-inserts); a const find-based
          // lookup would avoid both — verify before changing.
          auto ctrl_deps = ctrl_deps_[instr1->parent()];
          if (absl::c_any_of(ctrl_deps[instr2], [&](HloInstruction* pred2) {
                return ControlDependenceBefore(instr1, pred2);
              })) {
            VLOG(2) << "control-dependent: " << instr1->name() << " vs "
                    << instr2->name();
            return Save(instr1, instr2, Relation::kBeforeStart);
          } else if (absl::c_any_of(
                         ctrl_deps[instr1], [&](HloInstruction* pred1) {
                           return ControlDependenceBefore(instr2, pred1);
                         })) {
            VLOG(2) << "control-dependent: " << instr2->name() << " vs "
                    << instr1->name();
            return Save(instr1, instr2, Relation::kAfterEnd);
          }
        }
        return Relation::kBeforeStartOrAfterEnd;
      }
    }
  }
  HloOrdering* ordering_;
  // Memoized pairwise runtime orders, keyed (second instr -> first instr).
  absl::flat_hash_map<
      HloInstruction*,
      absl::flat_hash_map<HloInstruction*, Relation::RuntimeOrder>>
      saved_relations_;
  // Forced orderings per computation: successor -> list of predecessors,
  // turned into real control edges by AddControlDependenceForUnorderedOps.
  absl::flat_hash_map<
      HloComputation*,
      absl::flat_hash_map<HloInstruction*, std::vector<HloInstruction*>>>
      ctrl_deps_;
};
}
class CopyRemover {
public:
  // A node in a circular doubly-linked list holding the values of one HLO
  // buffer in liveness order.
  struct ValueNode {
    explicit ValueNode(const HloValue* v) : value(v) {}
    // The HloValue this node represents.
    const HloValue* value;
    // Pointers into the value's uses; updated as copies are elided (see
    // RemoveCopyValue).
    std::vector<const HloUse*> uses;
    // Circular-list neighbors; nullptr only while the list is being built.
    ValueNode* prev = nullptr;
    ValueNode* next = nullptr;
  };
  // Builds per-buffer circular value lists (ordered by definition order) and
  // the copy map used by TryElideCopy. When `check_live_range_ordering` is
  // set, debug-checks that values sharing a buffer have strictly ordered
  // live ranges on the included `execution_threads`.
  CopyRemover(const HloModule& module, const HloAliasAnalysis& alias_analysis,
              HloOrdering* ordering, bool check_live_range_ordering,
              const absl::flat_hash_set<absl::string_view>& execution_threads)
      : dataflow_(alias_analysis.dataflow_analysis()), ordering_(ordering) {
    // Dense post-order ids, used as a deterministic tiebreaker when the
    // ordering cannot decide which value is defined first.
    absl::flat_hash_map<int, int64_t> instruction_ids;
    int64_t id = 0;
    for (HloComputation* computation : module.MakeComputationPostOrder()) {
      for (HloInstruction* instruction :
           computation->MakeInstructionPostOrder()) {
        instruction_ids[instruction->unique_id()] = id++;
      }
    }
    absl::flat_hash_map<const HloValue*, ValueNode*> value_to_node;
    for (const HloBuffer& buffer : alias_analysis.buffers()) {
      // Fusion-internal buffers are not candidates for copy removal.
      if (buffer.values().at(0)->defining_instruction()->IsFused()) {
        continue;
      }
      if (check_live_range_ordering) {
        // Skip values defined on threads excluded from this pass run.
        auto should_skip_value = [&execution_threads](const HloValue* value) {
          return value->defining_instruction()->parent() != nullptr &&
                 !HloInstruction::IsThreadIncluded(value->defining_instruction()
                                                       ->parent()
                                                       ->execution_thread(),
                                                   execution_threads);
        };
        // Debug-check pairwise strict live-range ordering within the buffer.
        for (const HloValue* value_a : buffer.values()) {
          if (value_a->shape().IsToken()) {
            continue;
          }
          if (should_skip_value(value_a)) {
            continue;
          }
          for (const HloValue* value_b : buffer.values()) {
            if (!should_skip_value(value_b) && value_a != value_b) {
              DCHECK(ordering_->LiveRangeStrictlyBefore(
                         *value_a, *value_b, dataflow_,
                         true) ||
                     ordering_->LiveRangeStrictlyBefore(
                         *value_b, *value_a, dataflow_,
                         true))
                  << value_a->ToString() << " and " << value_b->ToString()
                  << " are not ordered";
            }
          }
        }
      }
      // Sort the buffer's values into definition order; fall back to the
      // post-order id when IsDefinedBefore is inconclusive either way.
      std::vector<const HloValue*> values = buffer.values();
      absl::c_sort(values, [this, &instruction_ids](const HloValue* a,
                                                    const HloValue* b) {
        if (a == b) {
          return false;
        }
        const bool a_has_smaller_id =
            instruction_ids.at(a->defining_instruction()->unique_id()) <
            instruction_ids.at(b->defining_instruction()->unique_id());
        if (a_has_smaller_id) {
          if (ordering_->IsDefinedBefore(*a, *b)) {
            return true;
          }
          if (ordering_->IsDefinedBefore(*b, *a)) {
            return false;
          }
        } else {
          if (ordering_->IsDefinedBefore(*b, *a)) {
            return false;
          }
          if (ordering_->IsDefinedBefore(*a, *b)) {
            return true;
          }
        }
        return a_has_smaller_id;
      });
      AddValueList(values, &value_to_node);
    }
    CreateCopyMap(module, value_to_node);
    XLA_VLOG_LINES(3, ToString());
    TF_DCHECK_OK(Verify());
  }
void AddValueList(
absl::Span<const HloValue* const> values,
absl::flat_hash_map<const HloValue*, ValueNode*>* value_to_node) {
ValueNode* tail = nullptr;
ValueNode* head = nullptr;
for (const HloValue* value : values) {
auto new_node = new ValueNode(value);
(*value_to_node)[value] = new_node;
new_node->uses.reserve(value->GetUses().size());
for (const HloUse& use : value->GetUses()) {
new_node->uses.push_back(&use);
}
if (tail == nullptr) {
head = new_node;
} else {
tail->next = new_node;
new_node->prev = tail;
}
tail = new_node;
}
tail->next = head;
head->prev = tail;
value_lists_.insert(head);
}
void CreateCopyMap(
const HloModule& module,
const absl::flat_hash_map<const HloValue*, ValueNode*>& value_to_node) {
for (HloComputation* computation : module.MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCopy) {
const HloValueSet& src_value_set =
dataflow_.GetValueSet(instruction->operand(0));
if (src_value_set.values().size() == 1) {
CopyNodes& copy_node = copy_map_[instruction];
copy_node.dest =
value_to_node.at(&dataflow_.GetUniqueValueAt(instruction));
copy_node.src = value_to_node.at(&src_value_set.GetUniqueValue());
}
}
}
}
}
~CopyRemover() {
for (const ValueNode* head : value_lists_) {
const ValueNode* p = head;
do {
const ValueNode* tmp = p->next;
delete p;
p = tmp;
} while (p != head);
}
}
  // Debug-only consistency check (run under TF_DCHECK_OK): verifies the
  // prev/next links of every circular list and that copy_map_ entries agree
  // with the src/dest nodes they reference.
  absl::Status Verify() const {
    for (const ValueNode* head : value_lists_) {
      const ValueNode* p = head;
      do {
        // Doubly-linked invariants.
        TF_RET_CHECK(p->prev->next == p);
        TF_RET_CHECK(p->next->prev == p);
        const HloInstruction* def = p->value->defining_instruction();
        // A removable copy's destination entry must point back at this node.
        if (def->opcode() == HloOpcode::kCopy && ContainsKey(copy_map_, def)) {
          TF_RET_CHECK(copy_map_.at(def).dest == p);
        }
        // Likewise, any removable copy that uses this value must record this
        // node as its source.
        for (const HloUse* use : p->uses) {
          if (use->instruction->opcode() == HloOpcode::kCopy &&
              ContainsKey(copy_map_, use->instruction)) {
            TF_RET_CHECK(copy_map_.at(use->instruction).src == p);
          }
        }
        p = p->next;
      } while (p != head);
    }
    return absl::OkStatus();
  }
LiveRangeRegions ComputeLiveRangeRegions(const ValueNode* head) {
LiveRangeRegions live_range;
auto VisitValueNode = [&](const ValueNode* node) {
HloInstruction* def_op = node->value->instruction();
HloComputation* def_parent = def_op->parent();
live_range[def_parent][def_op].is_definition = true;
for (const auto& use : node->uses) {
auto* use_op = use->instruction;
HloComputation* use_parent = use_op->parent();
live_range[use_parent][use_op].value_definition = def_op;
}
};
ForEachValueInRange(head, VisitValueNode);
return live_range;
}
  // Attempts to prove that `copy` can be removed: the source and destination
  // buffer value lists are merged (speculatively) when the combined live
  // ranges do not interfere. Returns true on success, after splicing the
  // lists and deleting the copy's value node. `region_analysis_limit` caps
  // (and on return reports) the cost spent on region-based analysis.
  bool TryElideCopy(const HloInstruction* copy,
                    int64_t* region_analysis_limit) {
    VLOG(2) << "Trying to remove " << copy->name();
    CHECK_NE(region_analysis_limit, nullptr);
    // Only copies registered in copy_map_ (unambiguous source) can go.
    if (!ContainsKey(copy_map_, copy)) {
      VLOG(2) << copy->name() << " is not removable";
      return false;
    }
    if (!ShapeUtil::Equal(copy->shape(), copy->operand(0)->shape())) {
      VLOG(2) << copy->name() << " is not removable (shape mismatch)";
      return false;
    }
    const CopyNodes& copy_node = copy_map_.at(copy);
    DCHECK(copy_node.src != nullptr);
    DCHECK(copy_node.dest != nullptr);
    // Estimate the live-range sizes to decide whether region-based analysis
    // fits within the configured budget.
    int64_t live_range_size1 = 0, live_range_size2 = 0;
    ForEachValueInRange(copy_node.src, [&](const ValueNode* node) {
      live_range_size1 += 1 + node->uses.size();
    });
    ForEachValueInRange(copy_node.dest, [&](const ValueNode* node) {
      live_range_size2 += 1 + node->uses.size();
    });
    // Broadcast sources are excluded from region analysis; a negative limit
    // means "unlimited".
    bool use_region_analysis =
        copy->operand(0)->opcode() != HloOpcode::kBroadcast &&
        (*region_analysis_limit < 0 ||
         live_range_size1 * live_range_size2 <= *region_analysis_limit);
    *region_analysis_limit = 0;
    VLOG(3) << copy->name() << " copies value "
            << copy_node.src->value->ToShortString();
    VLOG(3) << "Source buffer values: " << ValueListToString(copy_node.src);
    VLOG(3) << "Dest buffer values: " << ValueListToString(copy_node.dest);
    // True if every value from `src` backwards lives strictly before every
    // value from `dest` forwards (ordering-based check).
    auto CheckLiveRangeBefore = [&](ValueNode* src, ValueNode* dest) {
      for (ValueNode* next_dest = dest; next_dest != nullptr;
           next_dest = Next(*next_dest)) {
        for (ValueNode* prev_src = src; prev_src != nullptr;
             prev_src = Prev(*prev_src)) {
          if (!LiveRangeBefore(*prev_src, *next_dest)) {
            VLOG(2) << "Live range of " << prev_src->value->ToShortString()
                    << " is not before " << next_dest->value->ToShortString();
            return false;
          }
        }
      }
      return true;
    };
    // Region-based fallback: returns true (interference) if skipped due to
    // budget, or if ValuesInterfere reports a conflict.
    auto CheckLiveRangeInterference = [&](ValueNode* src, ValueNode* dest,
                                          const CombineLiveRangeOption option) {
      CHECK_NE(src, nullptr);
      CHECK_NE(dest, nullptr);
      if (!use_region_analysis) {
        VLOG(2) << "Configured to not use region-based analysis.";
        return true;
      }
      *region_analysis_limit += live_range_size1 * live_range_size2;
      if (ValuesInterfere(src, dest, option)) {
        VLOG(2) << "Region-based interference is true.";
        return true;
      }
      VLOG(2) << "Region-based interference is false.";
      return false;
    };
    // Three topologies are handled: the copy's value directly follows its
    // source, the copy defines the first value of its buffer, or the copy
    // reads the last value of its source buffer. Middle-to-middle copies
    // are never elided.
    if (copy_node.src->next == copy_node.dest) {
      VLOG(2) << copy->name() << " source and destination buffers are same.";
    } else if (IsHead(*copy_node.dest)) {
      VLOG(2) << copy->name() << " defines the first value in its buffer";
      bool live_range_before =
          CheckLiveRangeBefore(copy_node.src, Next(*copy_node.dest)) &&
          CheckLiveRangeBefore(copy_node.dest->prev, Next(*copy_node.src));
      VLOG(2) << "LiveRangeBefore result: " << live_range_before;
      if (!live_range_before &&
          CheckLiveRangeInterference(copy_node.src, copy_node.dest,
                                     kMergeFirstDestInSource)) {
        return false;
      }
      VLOG(2) << "Splice dest after source.";
      SpliceAfter(copy_node.dest, copy_node.src);
    } else if (IsTail(*copy_node.src)) {
      VLOG(2) << copy->name() << " copies the last value ("
              << copy_node.src->value->ToShortString() << ") in its buffer";
      bool live_range_before =
          CheckLiveRangeBefore(Prev(*copy_node.dest), copy_node.src->next) &&
          CheckLiveRangeBefore(copy_node.src, Next(*copy_node.dest));
      VLOG(2) << "LiveRangeBefore result: " << live_range_before;
      if (!live_range_before &&
          CheckLiveRangeInterference(copy_node.src, copy_node.dest,
                                     kMergeLastSourceInDest)) {
        VLOG(2) << "Region-based analysis concludes interference.";
        return false;
      }
      VLOG(2) << "Splice src after prev of dest.";
      SpliceAfter(copy_node.src->next, Prev(*copy_node.dest));
    } else {
      VLOG(2) << copy->name()
              << " copies value in middle of source buffer to value in middle "
                 "of destination buffer";
      return false;
    }
    RemoveCopyValue(copy_node.dest);
    XLA_VLOG_LINES(4, ToString());
    TF_DCHECK_OK(Verify());
    return true;
  }
// Removes `copy_value_node` -- a value defined by a kCopy instruction -- from
// its buffer's circular value list, transferring its uses onto the copy's
// operand (the previous node). The node is deleted and the copy is dropped
// from copy_map_. Callers must have already decided the copy is elidable.
void RemoveCopyValue(ValueNode* copy_value_node) {
  // Only values defined by kCopy instructions may be removed here.
  CHECK_EQ(copy_value_node->value->defining_instruction()->opcode(),
           HloOpcode::kCopy);
  ValueNode* operand_node = copy_value_node->prev;
  CHECK(operand_node != copy_value_node);
  VLOG(2) << "Removing copy " << operand_node->value->ToShortString()
          << " => " << copy_value_node->value->ToShortString();
  // Unlink the copy node from the list.
  operand_node->next = copy_value_node->next;
  copy_value_node->next->prev = operand_node;
  // The operand no longer has the copy instruction as a use; erase exactly
  // that use from the operand's use list.
  auto it = absl::c_find_if(operand_node->uses, [copy_value_node](
                                                    const HloUse* use) {
    return use->instruction == copy_value_node->value->defining_instruction();
  });
  CHECK(it != operand_node->uses.end());
  operand_node->uses.erase(it);
  // All uses of the copy's value become uses of the operand's value. If a
  // use is itself a tracked candidate copy, repoint its recorded source.
  for (const HloUse* copy_use : copy_value_node->uses) {
    operand_node->uses.push_back(copy_use);
    if (copy_use->instruction->opcode() == HloOpcode::kCopy &&
        ContainsKey(copy_map_, copy_use->instruction)) {
      copy_map_.at(copy_use->instruction).src = operand_node;
    }
  }
  copy_map_.erase(copy_value_node->value->defining_instruction());
  delete copy_value_node;
}
// Returns true if the live range of value `a` ends before the live range of
// value `b` begins, per the module ordering.
bool LiveRangeBefore(const ValueNode& a, const ValueNode& b) {
  // A value with no uses is live only at its definition point, so ordering
  // of the definitions suffices.
  if (a.uses.empty()) {
    VLOG(2) << "Empty uses for " << *a.value;
    return ordering_->IsDefinedBefore(*a.value, *b.value);
  }
  VLOG(3) << "Checking live ranges before: " << ValueListToString(&a)
          << " vs " << ValueListToString(&b);
  // If `a` is the root of the computation defining `b`, then `a` is still
  // live when `b` is defined; its live range cannot end first.
  if (a.value->IsRootOf(b.value->defining_instruction()->parent())) {
    VLOG(3) << "Value is root of the same computation";
    return false;
  }
  // Last bool arg disables the assume-use-before-def-in-same-instruction
  // shortcut -- NOTE(review): confirm the parameter name at the declaration.
  return ordering_->UsesBeforeValueDefinition(
      a.uses, *b.value, dataflow_,
      false);
}
bool IsTail(const ValueNode& node) const {
return ContainsKey(value_lists_, node.next);
}
bool IsHead(const ValueNode& node) const {
return ContainsKey(value_lists_, &node);
}
// Returns the successor of `node` in its list, or nullptr at the tail
// (the raw `next` pointer of a tail wraps around to the head).
ValueNode* Next(const ValueNode& node) const {
  return IsTail(node) ? nullptr : node.next;
}
// Returns the predecessor of `node` in its list, or nullptr at the head
// (the raw `prev` pointer of a head wraps around to the tail).
ValueNode* Prev(const ValueNode& node) const {
  return IsHead(node) ? nullptr : node.prev;
}
// Splices the entire value list headed by `head` into another list, directly
// after `insert_after`. `head` must currently be a list head; its list is
// absorbed, so it is removed from value_lists_.
void SpliceAfter(ValueNode* head, ValueNode* insert_after) {
  DCHECK(IsHead(*head));
  value_lists_.erase(head);
  // In the circular list, head->prev is the tail of head's list.
  ValueNode* tail = head->prev;
  tail->next = insert_after->next;
  insert_after->next->prev = tail;
  insert_after->next = head;
  head->prev = insert_after;
}
// Identifies which elision case is being analyzed when two buffers' live
// ranges would be combined (see TryElideCopy / ValuesInterfere).
enum CombineLiveRangeOption {
  // The first value of the destination buffer is merged into the source.
  kMergeFirstDestInSource = 1,
  // The last value of the source buffer is merged into the destination.
  kMergeLastSourceInDest = 2
};
// Region-based interference check: returns true if the live ranges of the
// buffers containing `src` and `dest` interfere, i.e. the copy between them
// must be kept. `merge_location` says which end of which buffer would be
// merged if the copy were elided.
bool ValuesInterfere(const ValueNode* src, const ValueNode* dest,
                     CombineLiveRangeOption merge_location) {
  // Compute the live-range regions of both buffers.
  auto src_live_range = ComputeLiveRangeRegions(src);
  auto dest_live_range = ComputeLiveRangeRegions(dest);
  VLOG(5) << "src value: " << src->value->ToString();
  VLOG(5) << "src live range:\n" << src_live_range.ToString();
  VLOG(5) << "dest value: " << dest->value->ToString();
  VLOG(5) << "dest live range:\n" << dest_live_range.ToString();
  ComputeRelativeLocation relative_location_analysis(ordering_);
  // rel1: where dest sits relative to src; rel2: the reverse direction.
  auto rel1 =
      relative_location_analysis.Compute(src_live_range, dest_live_range);
  VLOG(3) << "Location of dest in relation to src: " << rel1.ToString()
          << " with interception set to " << rel1.InterceptDefUse();
  auto rel2 =
      relative_location_analysis.Compute(dest_live_range, src_live_range);
  VLOG(3) << "Location of src in relation to dest: " << rel2.ToString()
          << " with interception set to " << rel2.InterceptDefUse();
  // Overlap in both directions: definite interference.
  if (rel1.RuntimeOrderOverlap() && rel2.RuntimeOrderOverlap()) {
    VLOG(3) << "Both relations are overlap.";
    return true;
  }
  if (rel1.RuntimeOrderOverlap() || rel2.RuntimeOrderOverlap()) {
    VLOG(3) << "At least one relation is overlap.";
    if (rel1.RuntimeOrderOverlap()) {
      VLOG(3) << "rel1 is overlap, with interception = "
              << rel1.InterceptDefUse();
      // One-sided overlap interferes only if a def-use pair is intercepted;
      // the opposite direction's interception is tolerated exactly for the
      // merge case it corresponds to.
      if (rel1.InterceptDefUse() ||
          (merge_location != kMergeFirstDestInSource &&
           rel2.InterceptDefUse())) {
        return true;
      }
    } else {
      VLOG(3) << "rel2 is overlap, with interception = "
              << rel2.InterceptDefUse();
      if (rel2.InterceptDefUse() ||
          (merge_location != kMergeLastSourceInDest &&
           rel1.InterceptDefUse())) {
        return true;
      }
    }
  }
  // No interference only if remaining unordered ops can be sequenced by
  // adding control dependencies; otherwise conservatively interfere.
  if (relative_location_analysis.AddControlDependenceForUnorderedOps()) {
    return false;
  } else {
    return true;
  }
}
void ForEachValueInRange(const ValueNode* element,
absl::FunctionRef<void(const ValueNode*)> visitor) {
const ValueNode* head = element;
for (const ValueNode* p = head; p != nullptr; p = Next(*p)) {
visitor(p);
}
while (!IsHead(*head)) {
head = Prev(*head);
}
for (const ValueNode* p = head; p != element; p = Next(*p)) {
visitor(p);
}
}
// Renders the buffer list containing `element` as "{v0, v1, ...}" for logs.
std::string ValueListToString(const ValueNode* element) {
  std::string out = "{";
  bool first = true;
  auto append_node = [&](const ValueNode* node) {
    if (first) {
      first = false;
      StrAppend(&out, node->value->ToShortString());
    } else {
      StrAppend(&out, ", ", node->value->ToShortString());
    }
  };
  ForEachValueInRange(element, append_node);
  StrAppend(&out, "}");
  return out;
}
// Returns a human-readable dump of every buffer's value chain and every
// remaining candidate copy, for debugging.
std::string ToString() const {
  std::string out = absl::StrCat("CopyRemover:\n");
  StrAppend(&out, " Def-use chains in each buffer:\n");
  for (const ValueNode* head : value_lists_) {
    StrAppend(&out, " Buffer defined by ", head->value->ToShortString(),
              ":\n");
    // Walk the circular list exactly once, starting at the head.
    const ValueNode* p = head;
    do {
      StrAppend(&out, " ", p->value->ToShortString(), ", uses: ",
                absl::StrJoin(p->uses, "; ",
                              [](std::string* s, const HloUse* use) {
                                StrAppend(s, use->ToString());
                              }),
                "\n");
      p = p->next;
    } while (p != head);
  }
  StrAppend(&out, " Potentially removable copies:\n");
  for (const auto& pair : copy_map_) {
    const HloInstruction* copy = pair.first;
    const CopyNodes& copy_info = pair.second;
    StrAppend(&out, " ", copy->name(), " : ",
              copy_info.src->value->ToShortString(), " => ",
              copy_info.dest->value->ToShortString(), "\n");
  }
  return out;
}
 private:
  // Dataflow analysis for the module under transformation.
  const HloDataflowAnalysis& dataflow_;
  // Ordering used to compare live ranges. Not owned.
  HloOrdering* ordering_;
  // Heads of the per-buffer circular value lists (see IsHead/SpliceAfter).
  absl::flat_hash_set<const ValueNode*> value_lists_;
  // Source and destination value nodes of a potentially removable copy.
  struct CopyNodes {
    ValueNode* src = nullptr;
    ValueNode* dest = nullptr;
  };
  // Maps each candidate kCopy instruction to its source/destination nodes.
  absl::flat_hash_map<const HloInstruction*, CopyNodes> copy_map_;
};
}
// Adds deep copies at the root of every branch computation of `conditional`
// for the output indices that require them, so branch roots do not alias.
absl::Status CopyInsertion::AddCopiesForConditional(
    const HloAliasAnalysis& alias_analysis, HloInstruction* conditional) {
  VLOG(2) << "Adding copies for kConditional instruction "
          << conditional->name();
  TF_RET_CHECK(conditional->opcode() == HloOpcode::kConditional);
  // Determine which output indices need a copy; done if none do.
  ShapeTree<bool> indices_to_copy(conditional->shape());
  if (!IndicesToCopyForConditional(alias_analysis.dataflow_analysis(),
                                   conditional, &indices_to_copy)) {
    VLOG(2) << "No copies necessary for kConditional instruction "
            << conditional->name();
    return absl::OkStatus();
  }
  for (HloComputation* branch : conditional->branch_computations()) {
    HloInstruction* branch_root = branch->root_instruction();
    // Snapshot users first: the list is mutated while rerouting below.
    std::vector<HloInstruction*> original_users = branch_root->users();
    TF_ASSIGN_OR_RETURN(
        HloInstruction * deep_copy,
        branch->DeepCopyInstruction(branch_root, &indices_to_copy));
    for (HloInstruction* user : original_users) {
      TF_RETURN_IF_ERROR(branch_root->ReplaceUseWith(user, deep_copy));
    }
    branch->set_root_instruction(deep_copy);
  }
  return absl::OkStatus();
}
// Inserts copies around instructions that mutate buffers in place (kWhile,
// kConditional, and generic in-place ops), plus copies for aliased
// input/output pairs, so later passes see no buffer interference.
absl::Status CopyInsertion::AddCopiesToResolveInterference(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
                      HloAliasAnalysis::Run(module, can_share_buffer_));
  for (HloComputation* computation :
       module->MakeNonfusionComputations(execution_threads)) {
    // Async computations are reached through the async-start's wrapped
    // instruction below; skip them here.
    if (computation->IsAsyncComputation()) {
      continue;
    }
    for (HloInstruction* instruction :
         computation->MakeInstructionPostOrder()) {
      if (instruction->opcode() == HloOpcode::kWhile) {
        TF_RETURN_IF_ERROR(AddCopiesForWhile(*alias_analysis, instruction));
      } else if (instruction->opcode() == HloOpcode::kConditional) {
        TF_RETURN_IF_ERROR(
            AddCopiesForConditional(*alias_analysis, instruction));
      } else {
        // Copy each operand that aliases an output, at most once per
        // operand number.
        absl::flat_hash_set<int64_t> copied_operands;
        for (const auto& operand_and_output_index :
             HloDataflowAnalysis::GetInPlaceInputOutputPairs(
                 // For async-start, inspect the wrapped instruction.
                 instruction->opcode() == HloOpcode::kAsyncStart
                     ? instruction->async_wrapped_instruction()
                     : instruction)) {
          const HloOperandIndex& operand_index = operand_and_output_index.first;
          if (copied_operands.contains(operand_index.operand_number)) {
            continue;
          }
          bool can_share_buffer = false;
          if (can_share_buffer_ != nullptr) {
            auto maybe_can_share_buffer = can_share_buffer_(
                instruction, instruction->operand(operand_index.operand_number),
                operand_index.operand_index);
            if (maybe_can_share_buffer.has_value()) {
              can_share_buffer = maybe_can_share_buffer.value();
            }
          }
          // Skip the copy when buffer sharing is allowed, the instruction
          // declares disjoint read/write regions, and this instruction is
          // the operand's only user.
          if (can_share_buffer &&
              HasDisjointReadWriteRegionsAttr(instruction) &&
              absl::c_all_of(
                  instruction->operand(operand_index.operand_number)->users(),
                  [&instruction](const HloInstruction* user) {
                    return user == instruction;
                  })) {
            continue;
          }
          copied_operands.insert(operand_index.operand_number);
          TF_RETURN_IF_ERROR(AddCopiesForInPlaceOperation(
              *alias_analysis, instruction, operand_index.operand_number));
        }
      }
    }
  }
  TF_RETURN_IF_ERROR(
      AddCopiesForAliasedInputOutputs(module, execution_threads));
  return absl::OkStatus();
}
// Convenience overload: builds the call graph on the fly, then delegates to
// the three-argument AddSpecialCaseCopies.
absl::Status CopyInsertion::AddSpecialCaseCopies(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  auto call_graph = CallGraph::Build(module);
  return AddSpecialCaseCopies(*call_graph, execution_threads, module);
}
// Adds copies required for correctness beyond interference resolution:
// read-only values sharing a buffer with other values, uses that cannot
// share a buffer with a same-instruction definition, and computation roots
// whose buffers are ambiguous, replicated, or constant/parameter-valued.
absl::Status CopyInsertion::AddSpecialCaseCopies(
    const CallGraph& call_graph,
    const absl::flat_hash_set<absl::string_view>& execution_threads,
    HloModule* module) {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
                      HloAliasAnalysis::Run(module, can_share_buffer_));
  // Accumulates, per instruction, the shape indices at which a copy must be
  // inserted; all copies are materialized in one pass at the end.
  HloInstructionMap<ShapeTree<bool>> instructions_to_copy;
  auto add_index_to_copy = [&instructions_to_copy](HloInstruction* instruction,
                                                   const ShapeIndex& index) {
    auto it = instructions_to_copy.find(instruction);
    if (it == instructions_to_copy.end()) {
      auto it_added = instructions_to_copy.emplace(
          std::piecewise_construct, std::forward_as_tuple(instruction),
          std::forward_as_tuple(instruction->shape(), false));
      it = it_added.first;
    }
    *it->second.mutable_element(index) = true;
  };
  for (const HloValue* value : alias_analysis->dataflow_analysis().values()) {
    HloBuffer& buffer = alias_analysis->GetBufferContainingValue(*value);
    // A read-only value must not share a buffer with any other value.
    if (buffer.values().size() > 1 && ValueIsReadOnly(*value)) {
      VLOG(2) << "Value " << value->ToShortString()
              << " is read only, but its buffer contains more than one value. "
                 "Copying.";
      add_index_to_copy(value->defining_instruction(), value->defining_index());
    }
    // If a use of `value` happens at the instruction defining another value
    // in the same buffer and buffer sharing is not legal there, copy the
    // used operand.
    for (const HloValue* value2 : buffer.values()) {
      if (value2 == value) {
        continue;
      }
      HloPosition position = value2->defining_position();
      for (const HloUse& use : value->GetUses()) {
        if (use.instruction == position.instruction) {
          VLOG(3) << "Same instruction: " << position.instruction->ToString();
          if (!alias_analysis->dataflow_analysis()
                   .CanShareOperandBufferWithUser(
                       use.instruction->mutable_operand(
                           use.operand_number),
                       use.operand_index,
                       position.instruction,
                       position.index)) {
            VLOG(2) << "Adding back copy: "
                    << use.instruction->operand(use.operand_number)->ToString()
                    << "@" << use.operand_index.ToString()
                    << " instr: " << position.instruction->ToString() << "@"
                    << position.index;
            add_index_to_copy(
                use.instruction->mutable_operand(use.operand_number),
                use.operand_index);
          }
        }
      }
    }
  }
  for (HloComputation* computation : module->computations(execution_threads)) {
    const CallGraphNode& node = call_graph.GetNode(computation);
    // Embedded (e.g. map/fusion) computations are not subject to root
    // copy policies.
    if (node.context() == CallContext::kEmbedded) {
      continue;
    }
    TF_RET_CHECK(node.context() == CallContext::kControlFlow);
    SpecialCaseCopyPolicy policy =
        GetSpecialCaseCopyPolicy(node, module, computation);
    HloInstruction* root = computation->root_instruction();
    // Tracks buffers already seen at earlier root indices, to detect
    // replicated buffers in the root shape.
    absl::flat_hash_map<const HloBuffer*, ShapeIndex> seen;
    ShapeUtil::ForEachSubshape(
        root->shape(), [&](const Shape& , const ShapeIndex& index) {
          std::vector<const HloBuffer*> buffers_at_index =
              alias_analysis->ComputeBuffersAt(root, index);
          bool buffer_seen_before = false;
          for (const HloBuffer* buffer : buffers_at_index) {
            buffer_seen_before |= !seen.emplace(buffer, index).second;
          }
          // Special case: two entry outputs alias the same parameter --
          // copy the previously-seen index instead of this one.
          if (buffer_seen_before && policy.copy_root_replicated_buffers &&
              computation == module->entry_computation() &&
              module->input_output_alias_config().OutputHasAlias(index) &&
              buffers_at_index.size() == 1) {
            std::optional<HloInputOutputAliasConfig::Alias> alias =
                module->input_output_alias_config().GetAliasedParameter(index);
            CHECK(alias) << "Alias does not exist";
            const ShapeIndex& other_index = seen[buffers_at_index[0]];
            VLOG(2) << "Output indices " << index.ToString() << " and "
                    << other_index.ToString() << " are both aliased to "
                    << alias->parameter_number << " copying " << other_index;
            add_index_to_copy(root, other_index);
            return;
          }
          if (buffers_at_index.size() > 1 ||
              (buffer_seen_before && policy.copy_root_replicated_buffers)) {
            VLOG(2) << "Index " << index << " of computation "
                    << computation->name() << " (" << root->name()
                    << ") has ambiguous or non-distinct buffer. Copying.";
            add_index_to_copy(root, index);
          }
        });
    // Copy root indices holding constant/parameter values when the policy
    // requires it.
    for (const auto& pair :
         alias_analysis->dataflow_analysis().GetInstructionValueSet(root)) {
      const ShapeIndex& index = pair.first;
      const HloValueSet& value_set = pair.second;
      for (const HloValue* value : value_set.values()) {
        if (ShouldCopyRootValue(*value, policy)) {
          VLOG(2) << "Root of (" << root->name() << ") of computation("
                  << computation->name()
                  << ") has constant or parameter value at index " << index
                  << ". Copying.";
          add_index_to_copy(root, index);
        }
      }
    }
  }
  // Materialize all accumulated copies via deep copy, rerouting users and
  // fixing up roots.
  for (const auto& pair : instructions_to_copy) {
    HloInstruction* instruction = pair.first;
    const ShapeTree<bool>& indices_to_copy = pair.second;
    ShapeTree<HloInstruction*> copies_added(indices_to_copy.shape());
    std::vector<HloInstruction*> users = instruction->users();
    TF_ASSIGN_OR_RETURN(HloInstruction * deep_copy,
                        instruction->parent()->DeepCopyInstruction(
                            instruction, &indices_to_copy, &copies_added));
    for (HloInstruction* user : users) {
      TF_RETURN_IF_ERROR(instruction->ReplaceUseWith(user, deep_copy));
    }
    if (instruction == instruction->parent()->root_instruction()) {
      instruction->parent()->set_root_instruction(deep_copy);
    }
  }
  return absl::OkStatus();
}
// Counts the kCopy instructions across the module's computations on the
// given execution threads.
static int64_t GetNumExistingCopies(
    const HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  int64_t copy_count = 0;
  for (HloComputation* computation : module->computations(execution_threads)) {
    for (HloInstruction* instruction : computation->instructions()) {
      copy_count += (instruction->opcode() == HloOpcode::kCopy) ? 1 : 0;
    }
  }
  return copy_count;
}
// Iterates to a fixpoint, eliding every kCopy instruction whose removal the
// CopyRemover proves safe. Region-based live-range analysis is budgeted via
// `allowance` so pathological modules do not blow up compile time.
//
// Fix: the TryElideCopy call previously read `®ion_analysis_cost_now` --
// an HTML-entity mojibake of `&region_analysis_cost_now` -- which does not
// compile. Restored the address-of operator.
absl::Status CopyInsertion::RemoveUnnecessaryCopies(
    HloModule* module, bool check_live_range_ordering,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  XLA_VLOG_LINES(
      4, module->ToString(HloPrintOptions().set_syntax_sugar_async_ops(false)));
  // Use the schedule-derived ordering when available; otherwise fall back
  // to dependency ordering.
  std::unique_ptr<HloOrdering> ordering;
  if (module->has_schedule()) {
    ordering = std::make_unique<SequentialHloOrdering>(module->schedule());
  } else {
    ordering = std::make_unique<DependencyHloOrdering>(module);
  }
  TF_ASSIGN_OR_RETURN(std::unique_ptr<HloAliasAnalysis> alias_analysis,
                      HloAliasAnalysis::Run(module, can_share_buffer_));
  CopyRemover copy_remover(*module, *alias_analysis, ordering.get(),
                           check_live_range_ordering, execution_threads);
  if (VLOG_IS_ON(3)) {
    LOG(INFO) << "Removing unnecessary copies in " << module->name();
    LOG(INFO) << "Buffer values, in dependency order: ";
    for (const HloBuffer& buffer : alias_analysis->buffers()) {
      LOG(INFO) << " HloBuffer " << buffer.id();
    }
  }
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  int64_t num_existing_copies = GetNumExistingCopies(module, execution_threads);
  bool changed = true;
  int64_t num_iterations = -1;
  VLOG(6) << "Copy Insertion analyzing module with instruction count = "
          << module->instruction_count();
  BoundNonLinearCompilerAnalysis allowance(module, name(), 10);
  while (changed) {
    // Each iteration must elide at least one copy, so the loop terminates
    // within num_existing_copies iterations.
    CHECK_LE(++num_iterations, num_existing_copies);
    changed = false;
    VLOG(2) << "Running fixpoint iteration " << num_iterations
            << " of copy elision";
    for (HloComputation* computation :
         module->computations(execution_threads)) {
      VLOG(2) << "computation:" << computation->name();
      for (HloInstruction* instruction : computation->instructions()) {
        if (instruction->opcode() != HloOpcode::kCopy) continue;
        // Region-based analysis budget for this copy: zero when disabled,
        // otherwise bounded by the remaining global allowance.
        int64_t region_analysis_cost_now =
            (use_region_based_live_range_analysis_ == 0)
                ? 0
                : std::min(allowance.analysis_allowance(),
                           use_region_based_live_range_analysis_);
        if (copy_remover.TryElideCopy(instruction,
                                      &region_analysis_cost_now)) {
          changed = true;
          TF_RETURN_IF_ERROR(StripControlDependenciesFrom(instruction));
          TF_RETURN_IF_ERROR(
              instruction->ReplaceAllUsesWith(instruction->mutable_operand(0)));
          VLOG(6) << "succeeded in eliminating copy.";
        }
        // Charge the actual analysis cost against the global allowance.
        if (allowance.ContinueAnalysis() && region_analysis_cost_now > 0) {
          VLOG(6) << "Copy Insertion analyzing module cost: "
                  << region_analysis_cost_now;
          VLOG(6) << "instruction:" << instruction->ToString();
          allowance.DeductCost(region_analysis_cost_now);
          VLOG(6) << "allowance:" << allowance.analysis_allowance();
        }
      }
    }
  }
  return absl::OkStatus();
}
// Pass entry point: resolves interference with copies, elides unnecessary
// ones, then adds special-case copies; tuple simplification and DCE are
// interleaved to keep the module minimal. Requires a flattened call graph.
absl::StatusOr<bool> CopyInsertion::Run(
    HloModule* module,
    const absl::flat_hash_set<absl::string_view>& execution_threads) {
  std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
  if (!call_graph->IsFlattened()) {
    return FailedPrecondition(
        "Call graph must be flattened before copy insertion.");
  }
  int64_t num_copies_before = GetNumExistingCopies(module, execution_threads);
  TF_RETURN_IF_ERROR(AddCopiesToResolveInterference(module, execution_threads));
  // Clean up before elision so the copy remover sees a minimal module.
  TupleSimplifier tuple_simplifier;
  HloDCE dce;
  TF_RETURN_IF_ERROR(tuple_simplifier.Run(module, execution_threads).status());
  TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());
  DumpHloModuleDuringPassIfEnabled(
      name(), "after adding copies to resolve interference", *module);
  TF_RETURN_IF_ERROR(RemoveUnnecessaryCopies(module,
                                             /*check_live_range_ordering=*/true,
                                             execution_threads));
  DumpHloModuleDuringPassIfEnabled(name(), "after removing unnecessary copies",
                                   *module);
  TF_RETURN_IF_ERROR(
      AddSpecialCaseCopies(*call_graph, execution_threads, module));
  DumpHloModuleDuringPassIfEnabled(name(), "after adding special-case copies",
                                   *module);
  TF_RETURN_IF_ERROR(tuple_simplifier.Run(module, execution_threads).status());
  TF_RETURN_IF_ERROR(dce.Run(module, execution_threads).status());
  VLOG(1) << "Num copies before copy-insertion: " << num_copies_before;
  VLOG(1) << "Num copies after copy-insertion: "
          << GetNumExistingCopies(module, execution_threads);
  // Conservatively reports that the module changed.
  return true;
}
} | #include "xla/service/copy_insertion.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace {
using ::testing::NotNull;
using ::testing::UnorderedElementsAre;
// Tallies the kCopy instructions in a single computation.
int64_t CountCopies(const HloComputation& computation) {
  int64_t total = 0;
  for (const auto& instruction : computation.instructions()) {
    if (instruction->opcode() == HloOpcode::kCopy) {
      ++total;
    }
  }
  return total;
}
// Tallies the kCopy instructions across every computation in the module.
int64_t CountCopies(const HloModule& module) {
  int64_t total = 0;
  for (const auto& computation : module.computations()) {
    total += CountCopies(*computation);
  }
  return total;
}
// Counts control edges in a computation by summing each instruction's
// control-successor count.
int64_t CountControlEdges(const HloComputation& computation) {
  int64_t total = 0;
  for (const auto& instruction : computation.instructions()) {
    total += instruction->control_successors().size();
  }
  return total;
}
// Counts control edges across every computation in the module.
int64_t CountControlEdges(const HloModule& module) {
  int64_t total = 0;
  for (const auto& computation : module.computations()) {
    total += CountControlEdges(*computation);
  }
  return total;
}
// Base fixture for copy-insertion tests.
//
// Fix: the VLOG messages read "Before/After copy inser:" -- a truncated
// typo; spelled out "copy insertion".
class CopyInsertionTest : public HloTestBase {
 protected:
  // Runs the CopyInsertion pass on `module`, asserting that it succeeds.
  void InsertCopies(HloModule* module) {
    CopyInsertion copy_insertion;
    VLOG(3) << "Before copy insertion: " << module->ToString();
    ASSERT_IS_OK(copy_insertion.Run(module).status());
    VLOG(2) << "After copy insertion: " << module->ToString();
  }
  // Convenience scalar shape used by several tests.
  const Shape scalar_shape_ = ShapeUtil::MakeShape(F32, {});
};
// A parameter flowing directly into the root tuple must be copied, since
// parameters cannot alias the output buffer.
TEST_F(CopyInsertionTest, SingleParameter) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "x"));
  HloInstruction* root_tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({param}));
  EXPECT_THAT(param->users(), UnorderedElementsAre(root_tuple));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(param)));
}
// A constant flowing directly into the root tuple must be copied; exactly
// one copy is expected.
TEST_F(CopyInsertionTest, SingleConstant) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  HloInstruction* root_tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({one}));
  EXPECT_THAT(one->users(), UnorderedElementsAre(root_tuple));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 1);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(one)));
}
// Verifies that pre-existing layout-changing copies are kept: of the three
// copies built (two with a reversed layout plus a root copy), two remain
// after the pass and the root copy is elided.
TEST_F(CopyInsertionTest, ExistingCopiesNotRemoved) {
  auto module = CreateNewVerifiedModule();
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* constant =
      builder.AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}})));
  // Build a copy shape with the layout reversed relative to the constant.
  auto minor_to_major = LayoutUtil::MinorToMajor(constant->shape());
  Layout reversed_layout =
      LayoutUtil::MakeLayoutFromMajorToMinor(minor_to_major);
  Shape copy_shape = constant->shape();
  *copy_shape.mutable_layout() = reversed_layout;
  HloInstruction* copy_1 = builder.AddInstruction(
      HloInstruction::CreateUnary(copy_shape, HloOpcode::kCopy, constant));
  HloInstruction* copy_2 = builder.AddInstruction(
      HloInstruction::CreateUnary(copy_shape, HloOpcode::kCopy, constant));
  HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
      constant->shape(), HloOpcode::kAdd, copy_1, copy_2));
  builder.AddInstruction(
      HloInstruction::CreateUnary(add->shape(), HloOpcode::kCopy, add));
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(CountCopies(*module), 3);
  InsertCopies(module.get());
  // The copy of `add` at the root is removed; the layout-changing copies
  // of the constant stay.
  EXPECT_EQ(CountCopies(*module), 2);
  EXPECT_EQ(module->entry_computation()->root_instruction(), add);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Add(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
// With a mix of constants and parameters feeding the root tuple, only the
// values passed through unchanged (constant2, x) need copies; the add
// result does not.
TEST_F(CopyInsertionTest, MultipleConstantsAndParameters) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  HloInstruction* constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  HloInstruction* x = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {}), "x"));
  HloInstruction* y = builder.AddInstruction(
      HloInstruction::CreateParameter(1, ShapeUtil::MakeShape(F32, {}), "y"));
  HloInstruction* add = builder.AddInstruction(HloInstruction::CreateBinary(
      ShapeUtil::MakeShape(F32, {}), HloOpcode::kAdd, constant1, y));
  builder.AddInstruction(HloInstruction::CreateTuple({constant2, x, add}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 2);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::Copy(constant2), op::Copy(x), op::Add(constant1, y)));
}
// A bitcast of a parameter aliases the parameter's buffer, so the root
// (the bitcast) must be copied.
TEST_F(CopyInsertionTest, BitcastParameter) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {4}), "x"));
  HloInstruction* reshaped = builder.AddInstruction(
      HloInstruction::CreateBitcast(ShapeUtil::MakeShape(F32, {2, 2}), param));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_THAT(param->users(), UnorderedElementsAre(reshaped));
  HloInstruction* prior_root = module->entry_computation()->root_instruction();
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 1);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(prior_root));
}
// A bitcast of a constant aliases the constant's buffer, so the root (the
// bitcast) must be copied.
TEST_F(CopyInsertionTest, BitcastConstant) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* literal_instr =
      builder.AddInstruction(HloInstruction::CreateConstant(
          LiteralUtil::CreateR1<float>({1.0, 42.0})));
  HloInstruction* reshaped = builder.AddInstruction(HloInstruction::CreateBitcast(
      ShapeUtil::MakeShape(F32, {2}), literal_instr));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_THAT(literal_instr->users(), UnorderedElementsAre(reshaped));
  HloInstruction* prior_root = module->entry_computation()->root_instruction();
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 1);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Copy(prior_root));
}
// A bitcast of a parameter placed inside the root tuple is copied at the
// tuple element.
TEST_F(CopyInsertionTest, BitcastTupleElementParameter) {
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, ShapeUtil::MakeShape(F32, {4}), "x"));
  HloInstruction* reshaped = builder.AddInstruction(
      HloInstruction::CreateBitcast(ShapeUtil::MakeShape(F32, {2, 2}), param));
  builder.AddInstruction(HloInstruction::CreateTuple({reshaped}));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_THAT(param->users(), UnorderedElementsAre(reshaped));
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 1);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(reshaped)));
}
// A nested tuple-shaped parameter as the root is deep-copied: one copy per
// leaf (three leaves here), reassembled into a fresh nested tuple.
TEST_F(CopyInsertionTest, NestedTupleParameter) {
  auto builder = HloComputation::Builder(TestName());
  builder.AddInstruction(HloInstruction::CreateParameter(
      0,
      ShapeUtil::MakeTupleShape(
          {ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {}),
                                      ShapeUtil::MakeShape(S32, {1, 2, 3})}),
           ShapeUtil::MakeShape(F32, {42})}),
      "param0"));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(HloOpcode::kParameter,
            module->entry_computation()->root_instruction()->opcode());
  HloInstruction* old_root = module->entry_computation()->root_instruction();
  InsertCopies(module.get());
  // One copy per leaf of the nested tuple shape.
  EXPECT_EQ(CountCopies(*module), 3);
  HloInstruction* new_root = module->entry_computation()->root_instruction();
  EXPECT_NE(old_root, new_root);
  EXPECT_THAT(
      new_root,
      op::Tuple(
          op::Tuple(
              op::Copy(op::GetTupleElement(op::GetTupleElement(old_root))),
              op::Copy(op::GetTupleElement(op::GetTupleElement(old_root)))),
          op::Copy(op::GetTupleElement(old_root))));
}
// A get-tuple-element of a nested tuple parameter as the root is
// deep-copied: one copy per leaf of the extracted sub-tuple (two leaves).
TEST_F(CopyInsertionTest, ElementOfNestedTupleParameter) {
  auto builder = HloComputation::Builder(TestName());
  auto param = builder.AddInstruction(HloInstruction::CreateParameter(
      0,
      ShapeUtil::MakeTupleShape(
          {ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {}),
                                      ShapeUtil::MakeShape(S32, {1, 2, 3})}),
           ShapeUtil::MakeShape(F32, {42})}),
      "param0"));
  // Extract the nested (f32, s32[1,2,3]) sub-tuple as the root.
  auto gte = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::GetSubshape(param->shape(), {0}), param, 0));
  auto module = CreateNewVerifiedModule();
  module->AddEntryComputation(builder.Build());
  EXPECT_EQ(gte, module->entry_computation()->root_instruction());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 2);
  EXPECT_THAT(
      module->entry_computation()->root_instruction(),
      op::Tuple(op::Copy(op::GetTupleElement(op::GetTupleElement(param))),
                op::Copy(op::GetTupleElement(op::GetTupleElement(param)))));
}
// Fixture for copy-insertion tests around kWhile instructions. Provides
// builders for condition/body computations with differing dependence
// patterns between loop-state elements.
class WhileCopyInsertionTest : public CopyInsertionTest {
 protected:
  WhileCopyInsertionTest() : module_(CreateNewVerifiedModule()) {}
  // Builds the loop condition: returns (loop_state[0] < 10), where element 0
  // of the loop state is the induction variable.
  std::unique_ptr<HloComputation> BuildConditionComputation(
      const Shape& loop_state_shape) {
    auto builder = HloComputation::Builder(TestName() + ".Condition");
    auto limit_const = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(10)));
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            limit_const->shape(), loop_state, 0));
    builder.AddInstruction(HloInstruction::CreateCompare(
        condition_result_shape_, induction_variable, limit_const,
        ComparisonDirection::kLt));
    return builder.Build();
  }
  // Builds a loop body where the data element depends on the induction
  // variable: returns (iv + 1, data + broadcast(convert(iv))).
  std::unique_ptr<HloComputation> BuildDependentBodyComputation() {
    auto builder = HloComputation::Builder(TestName() + ".Body");
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            induction_variable_shape_, loop_state, 0));
    auto inc = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
    auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
        induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
    auto data = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
    // Convert the (integer) induction variable to f32 and broadcast it to
    // the data shape before adding.
    Shape f32_scalar_shape = ShapeUtil::MakeShape(F32, {});
    auto convert = builder.AddInstruction(
        HloInstruction::CreateConvert(f32_scalar_shape, induction_variable));
    auto update = builder.AddInstruction(
        HloInstruction::CreateBroadcast(data_shape_, convert, {}));
    auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
        data_shape_, HloOpcode::kAdd, data, update));
    builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
    return builder.Build();
  }
  // Builds a three-element loop body that increments the induction variable
  // and passes both data elements through unchanged:
  // returns (iv + 1, data1, data2).
  std::unique_ptr<HloComputation> BuildDependentBodyComputation2() {
    auto builder = HloComputation::Builder(TestName() + ".Body");
    const Shape& loop_state_shape = ShapeUtil::MakeTupleShape(
        {induction_variable_shape_, data_shape_, data_shape_});
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            induction_variable_shape_, loop_state, 0));
    auto inc = builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
    auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
        induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
    HloInstruction* data1 = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
    HloInstruction* data2 = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 2));
    builder.AddInstruction(HloInstruction::CreateTuple({add0, data1, data2}));
    return builder.Build();
  }
  // Builds a loop body where the induction variable is read-only (returned
  // unchanged) while the data element depends on it:
  // returns (iv, data + broadcast(convert(iv))).
  std::unique_ptr<HloComputation> BuildDependentBodyOneReadOnlyComputation() {
    auto builder = HloComputation::Builder(TestName() + ".Body");
    auto loop_state = builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape_, "loop_state"));
    auto induction_variable =
        builder.AddInstruction(HloInstruction::CreateGetTupleElement(
            induction_variable_shape_, loop_state, 0));
    auto data = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
    Shape f32_scalar_shape = ShapeUtil::MakeShape(F32, {});
    auto convert = builder.AddInstruction(
        HloInstruction::CreateConvert(f32_scalar_shape, induction_variable));
    auto update = builder.AddInstruction(
        HloInstruction::CreateBroadcast(data_shape_, convert, {}));
    auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
        data_shape_, HloOpcode::kAdd, data, update));
    builder.AddInstruction(
        HloInstruction::CreateTuple({induction_variable, add1}));
    return builder.Build();
  }
// Builds a loop body whose two state elements are updated independently:
// iv' = iv + 1 and data' = data + ones. When `nested` is true the data
// element lives inside an inner tuple ((iv, (data, data)) state shape).
std::unique_ptr<HloComputation> BuildIndependentBodyComputation(
    bool nested = false) {
  auto builder = HloComputation::Builder(TestName() + ".Body");
  const Shape& loop_state_shape =
      nested ? nested_loop_state_shape_ : loop_state_shape_;
  auto loop_state = builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
  auto induction_variable =
      builder.AddInstruction(HloInstruction::CreateGetTupleElement(
          induction_variable_shape_, loop_state, 0));
  auto inc = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
      induction_variable->shape(), HloOpcode::kAdd, induction_variable, inc));
  HloInstruction* data = nullptr;
  if (nested) {
    // Unwrap the inner tuple to reach the data element.
    data = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
        nested_tuple_shape_, loop_state, 1));
    data = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, data, 0));
  } else {
    data = builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(data_shape_, loop_state, 1));
  }
  auto update = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
          {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
  auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
      data_shape_, HloOpcode::kAdd, data, update));
  if (nested) {
    // Re-wrap the updated data in an inner tuple (duplicated element).
    auto nested_tuple =
        builder.AddInstruction(HloInstruction::CreateTuple({add1, add1}));
    builder.AddInstruction(HloInstruction::CreateTuple({add0, nested_tuple}));
  } else {
    builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
  }
  return builder.Build();
}
// Builds a loop body over nested state (iv, (data0, data1)):
// iv' = iv + 1, data0' = data0 + ones, data1' = reverse(data1).
std::unique_ptr<HloComputation> BuildNestedBodyComputation() {
  auto builder = HloComputation::Builder(TestName() + ".Body");
  auto loop_state = builder.AddInstruction(HloInstruction::CreateParameter(
      0, nested_loop_state_shape_, "loop_state"));
  auto gte0 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      induction_variable_shape_, loop_state, 0));
  auto inc = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)));
  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
      gte0->shape(), HloOpcode::kAdd, gte0, inc));
  // Unpack the inner tuple and update each element differently.
  auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      nested_tuple_shape_, loop_state, 1));
  auto gte10 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(data_shape_, gte1, 0));
  auto update10 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
          {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
  auto add10 = builder.AddInstruction(HloInstruction::CreateBinary(
      data_shape_, HloOpcode::kAdd, gte10, update10));
  auto gte11 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(data_shape_, gte1, 1));
  auto rev11 = builder.AddInstruction(
      HloInstruction::CreateReverse(data_shape_, gte11, {0}));
  auto inner_tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({add10, rev11}));
  builder.AddInstruction(HloInstruction::CreateTuple({add0, inner_tuple}));
  return builder.Build();
}
// Creates the entry computation: a while instruction over the given
// condition/body with init state (0, zeros) — or (0, (zeros, zeros)) when
// `nested` — and registers it as the module's entry. Returns the while.
HloInstruction* BuildWhileInstruction(HloComputation* condition,
                                      HloComputation* body,
                                      bool nested = false) {
  auto builder = HloComputation::Builder(TestName() + ".While");
  auto induction_var_init = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  auto data_init = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
          {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
  if (nested) {
    // Nested variant: the data element is an inner two-element tuple.
    auto inner_init = builder.AddInstruction(
        HloInstruction::CreateTuple({data_init, data_init}));
    auto loop_state_init = builder.AddInstruction(
        HloInstruction::CreateTuple({induction_var_init, inner_init}));
    auto while_hlo = builder.AddInstruction(HloInstruction::CreateWhile(
        loop_state_init->shape(), condition, body, loop_state_init));
    module_->AddEntryComputation(builder.Build());
    return while_hlo;
  }
  auto loop_state_init = builder.AddInstruction(
      HloInstruction::CreateTuple({induction_var_init, data_init}));
  auto while_hlo = builder.AddInstruction(HloInstruction::CreateWhile(
      loop_state_shape_, condition, body, loop_state_init));
  module_->AddEntryComputation(builder.Build());
  return while_hlo;
}
// Builds a while loop whose data init element is a constant vector.
HloInstruction* BuildWhileInstruction_InitPointsToConstant() {
  auto builder = HloComputation::Builder(TestName() + ".While");
  auto data_init = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
          {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f})));
  return BuildWhileInstructionWithCustomInit(loop_state_shape_, data_init,
                                             &builder);
}
// Builds a while loop whose data init element is an entry parameter.
HloInstruction* BuildWhileInstruction_InitPointsToParameter() {
  auto builder = HloComputation::Builder(TestName() + ".While");
  auto data_init = builder.AddInstruction(
      HloInstruction::CreateParameter(0, data_shape_, "data_init"));
  return BuildWhileInstructionWithCustomInit(loop_state_shape_, data_init,
                                             &builder);
}
// Builds a while loop whose init contains the *same* broadcast twice
// (non-distinct buffers), forcing copy insertion to disambiguate them.
HloInstruction* BuildWhileInstruction_InitPointsToNonDistinct() {
  auto builder = HloComputation::Builder(TestName() + ".While");
  auto one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto one_vec = builder.AddInstruction(
      HloInstruction::CreateBroadcast(data_shape_, one, {}));
  // Both inner-tuple elements alias the same broadcast instruction.
  auto data_init =
      builder.AddInstruction(HloInstruction::CreateTuple({one_vec, one_vec}));
  return BuildWhileInstructionWithCustomInit(nested_loop_state_shape_,
                                             data_init, &builder);
}
// Builds a while loop whose data init (a broadcast) is also consumed by an
// `add` that is later combined with the while's output — so the init buffer
// interferes with the loop and must be copied.
HloInstruction* BuildWhileInstruction_InitPointsToInterfering() {
  auto builder = HloComputation::Builder(TestName() + ".While");
  auto one = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto data_init = builder.AddInstruction(
      HloInstruction::CreateBroadcast(data_shape_, one, {}));
  auto one_vec = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(
          {1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f, 1.f})));
  // Second use of data_init that lives past the while: add = data_init + 1.
  auto add = builder.AddInstruction(HloInstruction::CreateBinary(
      data_shape_, HloOpcode::kAdd, data_init, one_vec));
  auto xla_while = BuildWhileInstructionWithCustomInit(loop_state_shape_,
                                                       data_init, &builder);
  // Combine `add` with the while result and rebuild the root tuple so the
  // interference is observable after the loop.
  auto gte = xla_while->parent()->AddInstruction(
      HloInstruction::CreateGetTupleElement(
          ShapeUtil::GetSubshape(xla_while->shape(), {1}), xla_while, 1));
  auto sub = xla_while->parent()->AddInstruction(HloInstruction::CreateBinary(
      data_shape_, HloOpcode::kSubtract, add, gte));
  auto gte0 = xla_while->parent()->AddInstruction(
      HloInstruction::CreateGetTupleElement(
          ShapeUtil::GetSubshape(xla_while->shape(), {0}), xla_while, 0));
  auto tuple = xla_while->parent()->AddInstruction(
      HloInstruction::CreateTuple({gte0, sub}));
  xla_while->parent()->set_root_instruction(tuple);
  return xla_while;
}
// Shared helper: wraps `data_init` with a zero induction variable, builds
// condition/body (independent-update body; nested iff the state shape is
// the nested one), creates the while, and installs the entry computation.
HloInstruction* BuildWhileInstructionWithCustomInit(
    const Shape& loop_state_shape, HloInstruction* data_init,
    HloComputation::Builder* builder) {
  // Nested-ness is inferred from the requested loop state shape.
  const bool nested =
      ShapeUtil::Equal(loop_state_shape, nested_loop_state_shape_);
  auto induction_var_init = builder->AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0)));
  auto condition = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape));
  auto body = module_->AddEmbeddedComputation(
      BuildIndependentBodyComputation(nested));
  auto loop_state_init = builder->AddInstruction(
      HloInstruction::CreateTuple({induction_var_init, data_init}));
  auto while_hlo = builder->AddInstruction(HloInstruction::CreateWhile(
      loop_state_shape, condition, body, loop_state_init));
  module_->AddEntryComputation(builder->Build());
  return while_hlo;
}
// Module under test plus the shapes shared by the builder helpers above.
std::unique_ptr<HloModule> module_;
// Scalar s32 loop counter.
Shape induction_variable_shape_ = ShapeUtil::MakeShape(S32, {});
// f32[8] data element updated by the loop bodies.
Shape data_shape_ = ShapeUtil::MakeShape(F32, {8});
// Flat loop state: (iv, data).
Shape loop_state_shape_ =
    ShapeUtil::MakeTupleShape({induction_variable_shape_, data_shape_});
// Inner tuple used by the nested-state variants: (data, data).
Shape nested_tuple_shape_ =
    ShapeUtil::MakeTupleShape({data_shape_, data_shape_});
// Nested loop state: (iv, (data, data)).
Shape nested_loop_state_shape_ = ShapeUtil::MakeTupleShape(
    {induction_variable_shape_, nested_tuple_shape_});
// Loop conditions return a scalar predicate.
Shape condition_result_shape_ = ShapeUtil::MakeShape(PRED, {});
};
// Independently-updated tuple elements need no copies inside the body;
// only the loop init gets copies.
TEST_F(WhileCopyInsertionTest, IndependentTupleElements) {
  auto condition = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape_));
  auto body =
      module_->AddEmbeddedComputation(BuildIndependentBodyComputation());
  auto while_hlo = BuildWhileInstruction(condition, body);
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*body), 0);
  EXPECT_EQ(CountControlEdges(*module_), 0);
  // Both init elements are copied before entering the loop.
  EXPECT_THAT(while_hlo->operand(0),
              op::Tuple(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
// Wraps a while loop inside an outer while body, feeding the inner loop
// through materialized get-tuple-elements of the outer parameter. Only
// checks that copy insertion completes (no structural expectations).
TEST_F(WhileCopyInsertionTest, WhileFeedingWhileThruParameterWithCopies) {
  const std::string& hlo_string = R"(
HloModule DependentTupleElements

%DependentTupleElements.Body (loop_state.1: (s32[], f32[8])) -> (s32[], f32[8]) {
  %loop_state.1 = (s32[], f32[8]{0}) parameter(0)
  %get-tuple-element.1 = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=0
  %constant.1 = s32[] constant(1)
  %add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
  %get-tuple-element.2 = f32[8]{0} get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=1
  %convert = f32[] convert(s32[] %get-tuple-element.1)
  %broadcast = f32[8]{0} broadcast(f32[] %convert), dimensions={}
  %add.1 = f32[8]{0} add(f32[8]{0} %get-tuple-element.2, f32[8]{0} %broadcast)
  ROOT %tuple = (s32[], f32[8]{0}) tuple(s32[] %add, f32[8]{0} %add.1)
}

%DependentTupleElements.Condition (loop_state: (s32[], f32[8])) -> pred[] {
  %loop_state = (s32[], f32[8]{0}) parameter(0)
  %get-tuple-element = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state), index=0
  %constant = s32[] constant(10)
  ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}

ENTRY %DependentTupleElements.While () -> (s32[], f32[8]) {
  %constant.2 = s32[] constant(0)
  %constant.3 = f32[8]{0} constant({0, 0, 0, 0, 0, 0, 0, 0})
  %tuple.1 = (s32[], f32[8]{0}) tuple(s32[] %constant.2, f32[8]{0} %constant.3)
  ROOT %while.1 = (s32[], f32[8]{0}) while((s32[], f32[8]{0}) %tuple.1), condition=%DependentTupleElements.Condition, body=%DependentTupleElements.Body
}
)";
  // NOTE(review): this local intentionally shadows the fixture's `module_`
  // member — the parsed module, not the fixture one, is used below.
  auto module_ = ParseAndReturnVerifiedModule(hlo_string).value();
  auto while_hlo = module_->entry_computation()->root_instruction();
  // Clone condition/body to build an outer while around the parsed one.
  HloComputation* outer_while_condition =
      module_->AddEmbeddedComputation(while_hlo->while_condition()->Clone());
  HloComputation* outer_while_body =
      module_->AddEmbeddedComputation(while_hlo->while_body()->Clone());
  HloInstruction* outer_while =
      while_hlo->parent()->AddInstruction(HloInstruction::CreateWhile(
          while_hlo->shape(), outer_while_condition, outer_while_body,
          while_hlo->mutable_operand(0)));
  HloInstruction* outer_param = outer_while_body->parameter_instruction(0);
  // Materialize one GTE per tuple element so the inner while's init is a
  // freshly built tuple rather than the outer parameter itself.
  std::vector<HloInstruction*> materialized_gtes;
  for (int i = 0; i < outer_param->shape().tuple_shapes_size(); ++i) {
    materialized_gtes.push_back(
        outer_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
            outer_param->shape().tuple_shapes(i), outer_param, i)));
  }
  HloInstruction* dual_init = outer_while_body->AddInstruction(
      HloInstruction::CreateTuple(materialized_gtes));
  HloInstruction* dual_while =
      outer_while_body->AddInstruction(HloInstruction::CreateWhile(
          while_hlo->shape(), while_hlo->while_condition(),
          while_hlo->while_body(), dual_init));
  TF_CHECK_OK(outer_while_body->ReplaceInstruction(
      outer_while_body->root_instruction(), dual_while));
  TF_CHECK_OK(while_hlo->parent()->ReplaceInstruction(while_hlo, outer_while));
  // Smoke test: InsertCopies must not crash or produce an invalid module.
  InsertCopies(module_.get());
}
// Same nesting as the test above, but the inner while consumes the outer
// body's parameter directly (no materialized GTE tuple). Smoke test only.
TEST_F(WhileCopyInsertionTest, WhileFeedingWhileThruParameterNoCopies) {
  const std::string& hlo_string = R"(
HloModule DependentTupleElements

%DependentTupleElements.Body (loop_state.1: (s32[], f32[8])) -> (s32[], f32[8]) {
  %loop_state.1 = (s32[], f32[8]{0}) parameter(0)
  %get-tuple-element.1 = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=0
  %constant.1 = s32[] constant(1)
  %add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
  %get-tuple-element.2 = f32[8]{0} get-tuple-element((s32[], f32[8]{0}) %loop_state.1), index=1
  %convert = f32[] convert(s32[] %get-tuple-element.1)
  %broadcast = f32[8]{0} broadcast(f32[] %convert), dimensions={}
  %add.1 = f32[8]{0} add(f32[8]{0} %get-tuple-element.2, f32[8]{0} %broadcast)
  ROOT %tuple = (s32[], f32[8]{0}) tuple(s32[] %add, f32[8]{0} %add.1)
}

%DependentTupleElements.Condition (loop_state: (s32[], f32[8])) -> pred[] {
  %loop_state = (s32[], f32[8]{0}) parameter(0)
  %get-tuple-element = s32[] get-tuple-element((s32[], f32[8]{0}) %loop_state), index=0
  %constant = s32[] constant(10)
  ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}

ENTRY %DependentTupleElements.While () -> (s32[], f32[8]) {
  %constant.2 = s32[] constant(0)
  %constant.3 = f32[8]{0} constant({0, 0, 0, 0, 0, 0, 0, 0})
  %tuple.1 = (s32[], f32[8]{0}) tuple(s32[] %constant.2, f32[8]{0} %constant.3)
  ROOT %while.1 = (s32[], f32[8]{0}) while((s32[], f32[8]{0}) %tuple.1), condition=%DependentTupleElements.Condition, body=%DependentTupleElements.Body
}
)";
  // NOTE(review): local shadows the fixture's `module_` member on purpose.
  auto module_ = ParseAndReturnVerifiedModule(hlo_string).value();
  auto while_hlo = module_->entry_computation()->root_instruction();
  HloComputation* outer_while_condition =
      module_->AddEmbeddedComputation(while_hlo->while_condition()->Clone());
  HloComputation* outer_while_body =
      module_->AddEmbeddedComputation(while_hlo->while_body()->Clone());
  HloInstruction* outer_while =
      while_hlo->parent()->AddInstruction(HloInstruction::CreateWhile(
          while_hlo->shape(), outer_while_condition, outer_while_body,
          while_hlo->mutable_operand(0)));
  HloInstruction* outer_param = outer_while_body->parameter_instruction(0);
  // The inner while's init is the outer parameter itself.
  HloInstruction* dual_while =
      outer_while_body->AddInstruction(HloInstruction::CreateWhile(
          while_hlo->shape(), while_hlo->while_condition(),
          while_hlo->while_body(), outer_param));
  TF_CHECK_OK(outer_while_body->ReplaceInstruction(
      outer_while_body->root_instruction(), dual_while));
  TF_CHECK_OK(while_hlo->parent()->ReplaceInstruction(while_hlo, outer_while));
  // Smoke test: InsertCopies must not crash or produce an invalid module.
  InsertCopies(module_.get());
}
// Same while-feeding-while construction as above but with a 20-element
// loop state, stressing copy insertion on wide tuples. Smoke test only.
TEST_F(WhileCopyInsertionTest, WhileFeedingWhileThruParameterBig) {
  const std::string& hlo_string = R"(
HloModule DependentTupleElements

%DependentTupleElements.Body (loop_state.1: (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0})) -> (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) {
  %loop_state.1 = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) parameter(0)
  %get-tuple-element.1 = s32[] get-tuple-element((s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) %loop_state.1), index=0
  %constant.1 = s32[] constant(1)
  %add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
  %get-tuple-element.2 = f32[8]{0} get-tuple-element((s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) %loop_state.1), index=1
  %convert = f32[] convert(s32[] %get-tuple-element.1)
  %broadcast = f32[8]{0} broadcast(f32[] %convert), dimensions={}
  %add.1 = f32[8]{0} add(f32[8]{0} %get-tuple-element.2, f32[8]{0} %broadcast)
  ROOT %tuple = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) tuple(s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1, s32[] %add, f32[8]{0} %add.1)
}

%DependentTupleElements.Condition (loop_state: (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0})) -> pred[] {
  %loop_state = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) parameter(0)
  %get-tuple-element = s32[] get-tuple-element((s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) %loop_state), index=0
  %constant = s32[] constant(10)
  ROOT %compare = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}

ENTRY %DependentTupleElements.While () -> (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) {
  %constant.2 = s32[] constant(0)
  %constant.3 = f32[8]{0} constant({0, 0, 0, 0, 0, 0, 0, 0})
  %tuple.1 = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) tuple(s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3, s32[] %constant.2, f32[8]{0} %constant.3)
  ROOT %while.1 = (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) while( (s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}, s32[], f32[8]{0}) %tuple.1), condition=%DependentTupleElements.Condition, body=%DependentTupleElements.Body
}
)";
  // NOTE(review): local shadows the fixture's `module_` member on purpose.
  auto module_ = ParseAndReturnVerifiedModule(hlo_string).value();
  auto while_hlo = module_->entry_computation()->root_instruction();
  HloComputation* outer_while_condition =
      module_->AddEmbeddedComputation(while_hlo->while_condition()->Clone());
  HloComputation* outer_while_body =
      module_->AddEmbeddedComputation(while_hlo->while_body()->Clone());
  HloInstruction* outer_while =
      while_hlo->parent()->AddInstruction(HloInstruction::CreateWhile(
          while_hlo->shape(), outer_while_condition, outer_while_body,
          while_hlo->mutable_operand(0)));
  HloInstruction* outer_param = outer_while_body->parameter_instruction(0);
  // Materialize GTEs for every element of the wide state tuple.
  std::vector<HloInstruction*> materialized_gtes;
  for (int i = 0; i < outer_param->shape().tuple_shapes_size(); ++i) {
    materialized_gtes.push_back(
        outer_while_body->AddInstruction(HloInstruction::CreateGetTupleElement(
            outer_param->shape().tuple_shapes(i), outer_param, i)));
  }
  HloInstruction* dual_init = outer_while_body->AddInstruction(
      HloInstruction::CreateTuple(materialized_gtes));
  HloInstruction* dual_while =
      outer_while_body->AddInstruction(HloInstruction::CreateWhile(
          while_hlo->shape(), while_hlo->while_condition(),
          while_hlo->while_body(), dual_init));
  TF_CHECK_OK(outer_while_body->ReplaceInstruction(
      outer_while_body->root_instruction(), dual_while));
  TF_CHECK_OK(while_hlo->parent()->ReplaceInstruction(while_hlo, outer_while));
  // Smoke test: InsertCopies must not crash or produce an invalid module.
  InsertCopies(module_.get());
}
// When the data update reads the induction variable, exactly one copy is
// inserted in the body (on the induction-variable read).
TEST_F(WhileCopyInsertionTest, DependentTupleElements) {
  auto condition = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape_));
  auto body = module_->AddEmbeddedComputation(BuildDependentBodyComputation());
  auto while_hlo = BuildWhileInstruction(condition, body);
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*body), 1);
  EXPECT_EQ(CountControlEdges(*body), 0);
  EXPECT_THAT(
      body->root_instruction(),
      op::Tuple(op::Add(), op::Add(op::GetTupleElement(), op::Broadcast())));
  auto add = body->root_instruction()->operand(0);
  auto bcast = body->root_instruction()->operand(1)->operand(1);
  ASSERT_EQ(add->opcode(), HloOpcode::kAdd);
  ASSERT_EQ(bcast->opcode(), HloOpcode::kBroadcast);
  // The body copy feeds both the iv increment and the broadcast's convert.
  EXPECT_THAT(while_hlo->while_body()->root_instruction(),
              op::Tuple(op::Add(op::Copy(), op::Constant()),
                        op::Add(op::GetTupleElement(),
                                op::Broadcast(op::Convert(op::Copy())))));
  EXPECT_THAT(while_hlo->operand(0),
              op::Tuple(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
// A read-only induction variable requires no copies in the loop body.
TEST_F(WhileCopyInsertionTest, DependentTupleElements_OneReadOnly) {
  auto condition = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape_));
  auto body = module_->AddEmbeddedComputation(
      BuildDependentBodyOneReadOnlyComputation());
  BuildWhileInstruction(condition, body);
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*body), 0);
  EXPECT_EQ(CountControlEdges(*body), 0);
}
// Two while loops share the same init tuple built from entry parameters:
// each loop must receive its own (distinct) copy of the data element.
TEST_F(WhileCopyInsertionTest,
       DependentTupleElements_OneReadOnly_TwoLoops_EntryParams) {
  auto condition1 = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape_));
  auto condition2 = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape_));
  auto body1 = module_->AddEmbeddedComputation(
      BuildDependentBodyOneReadOnlyComputation());
  auto body2 = module_->AddEmbeddedComputation(
      BuildDependentBodyOneReadOnlyComputation());
  auto builder = HloComputation::Builder(TestName() + ".While");
  auto iter_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, induction_variable_shape_, "iter"))
;
  auto data_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, data_shape_, "data"));
  // Single init tuple shared by both whiles.
  auto loop_init = builder.AddInstruction(
      HloInstruction::CreateTuple({iter_param, data_param}));
  auto while_hlo1 = builder.AddInstruction(HloInstruction::CreateWhile(
      loop_state_shape_, condition1, body1, loop_init));
  auto while_hlo2 = builder.AddInstruction(HloInstruction::CreateWhile(
      loop_state_shape_, condition2, body2, loop_init));
  auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::GetSubshape(while_hlo1->shape(), {0}), while_hlo1, 0));
  auto gte2 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::GetSubshape(while_hlo2->shape(), {0}), while_hlo2, 0));
  builder.AddInstruction(
      HloInstruction::CreateBinary(gte1->shape(), HloOpcode::kAdd, gte1, gte2));
  auto entry = module_->AddEntryComputation(builder.Build());
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*body1), 0);
  EXPECT_EQ(CountCopies(*body2), 0);
  EXPECT_EQ(CountControlEdges(*body1), 0);
  EXPECT_EQ(CountControlEdges(*body2), 0);
  EXPECT_EQ(CountCopies(*entry), 2);
  // Each while init carries its own copy of the data element...
  EXPECT_EQ(while_hlo1->operand(0)->operand(1)->opcode(), HloOpcode::kCopy);
  EXPECT_EQ(while_hlo2->operand(0)->operand(1)->opcode(), HloOpcode::kCopy);
  // ...and the two copies are distinct instructions.
  EXPECT_NE(while_hlo1->operand(0)->operand(1),
            while_hlo2->operand(0)->operand(1));
}
// Same two-loop sharing scenario, but the init elements are computed
// (convert/exp of parameters) rather than raw entry parameters.
TEST_F(WhileCopyInsertionTest,
       DependentTupleElements_OneReadOnly_TwoLoops_NonParams) {
  auto condition1 = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape_));
  auto condition2 = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape_));
  auto body1 = module_->AddEmbeddedComputation(
      BuildDependentBodyOneReadOnlyComputation());
  auto body2 = module_->AddEmbeddedComputation(
      BuildDependentBodyOneReadOnlyComputation());
  auto builder = HloComputation::Builder(TestName() + ".While");
  auto iter_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, induction_variable_shape_, "iter"));
  auto data_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, data_shape_, "data"));
  // iv init: s32 -> f32 -> exp -> back to s32.
  Shape f32_scalar_shape = ShapeUtil::MakeShape(F32, {});
  auto convert = builder.AddInstruction(
      HloInstruction::CreateConvert(f32_scalar_shape, iter_param));
  auto iter_value = builder.AddInstruction(
      HloInstruction::CreateUnary(convert->shape(), HloOpcode::kExp, convert));
  auto convert2 = builder.AddInstruction(
      HloInstruction::CreateConvert(induction_variable_shape_, iter_value));
  // data init: elementwise exp of the data parameter.
  auto data_value = builder.AddInstruction(HloInstruction::CreateUnary(
      data_param->shape(), HloOpcode::kExp, data_param));
  auto loop_init = builder.AddInstruction(
      HloInstruction::CreateTuple({convert2, data_value}));
  auto while_hlo1 = builder.AddInstruction(HloInstruction::CreateWhile(
      loop_state_shape_, condition1, body1, loop_init));
  auto while_hlo2 = builder.AddInstruction(HloInstruction::CreateWhile(
      loop_state_shape_, condition2, body2, loop_init));
  auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::GetSubshape(while_hlo1->shape(), {0}), while_hlo1, 0));
  auto gte2 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::GetSubshape(while_hlo2->shape(), {0}), while_hlo2, 0));
  builder.AddInstruction(
      HloInstruction::CreateBinary(gte1->shape(), HloOpcode::kAdd, gte1, gte2));
  auto entry = module_->AddEntryComputation(builder.Build());
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*entry), 2);
  // Only the data element needs copying; the converted iv does not.
  EXPECT_THAT(while_hlo1->operand(0),
              op::Tuple(op::Convert(op::Exp()), op::Copy(op::Exp())));
  EXPECT_THAT(while_hlo2->operand(0),
              op::Tuple(op::Convert(op::Exp()), op::Copy(op::Exp())));
}
// Nested-tuple loop state: only the reversed element needs a body copy.
// The copy may land on either side of the reverse, so both placements
// are accepted.
TEST_F(WhileCopyInsertionTest, NestedTupleElements) {
  auto condition = module_->AddEmbeddedComputation(
      BuildConditionComputation(nested_loop_state_shape_));
  auto body = module_->AddEmbeddedComputation(BuildNestedBodyComputation());
  BuildWhileInstruction(condition, body, true);
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*body), 1);
  EXPECT_EQ(CountCopies(*module_), 4);
  if (body->root_instruction()->operand(1)->operand(1)->opcode() ==
      HloOpcode::kCopy) {
    EXPECT_THAT(
        body->root_instruction(),
        op::Tuple(op::Add(), op::Tuple(op::Add(), op::Copy(op::Reverse()))));
  } else {
    EXPECT_THAT(
        body->root_instruction(),
        op::Tuple(op::Add(), op::Tuple(op::Add(), op::Reverse(op::Copy()))));
  }
}
// Constant init: both init elements are copied; the body needs none.
TEST_F(WhileCopyInsertionTest, InitPointsToConstant) {
  auto while_hlo = BuildWhileInstruction_InitPointsToConstant();
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*while_hlo->while_body()), 0);
  EXPECT_EQ(CountCopies(*module_), 2);
  EXPECT_THAT(while_hlo->operand(0),
              op::Tuple(op::Copy(op::Constant()), op::Copy(op::Constant())));
}
// Parameter init: same as constant init — two entry copies, no body copies.
TEST_F(WhileCopyInsertionTest, InitPointsToParameter) {
  auto while_hlo = BuildWhileInstruction_InitPointsToParameter();
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*while_hlo->while_body()), 0);
  EXPECT_EQ(CountCopies(*module_), 2);
  EXPECT_THAT(while_hlo->operand(0),
              op::Tuple(op::Copy(op::Constant()), op::Copy(op::Parameter())));
}
// Non-distinct init (same broadcast used twice): copy insertion must split
// the aliased buffers. Which of the two elements receives the copy is
// unspecified, so both placements are accepted in entry and body.
TEST_F(WhileCopyInsertionTest, InitPointsToNonDistinct) {
  auto while_hlo = BuildWhileInstruction_InitPointsToNonDistinct();
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*module_->entry_computation()), 2);
  if (while_hlo->operand(0)->operand(1)->operand(0)->opcode() ==
      HloOpcode::kCopy) {
    EXPECT_THAT(
        while_hlo->operand(0),
        op::Tuple(op::Copy(op::Constant()),
                  op::Tuple(op::Copy(op::Broadcast()), op::Broadcast())));
  } else {
    EXPECT_THAT(
        while_hlo->operand(0),
        op::Tuple(op::Copy(op::Constant()),
                  op::Tuple(op::Broadcast(), op::Copy(op::Broadcast()))));
  }
  // The body's duplicated add1 also needs exactly one disambiguating copy.
  EXPECT_EQ(CountCopies(*while_hlo->while_body()), 1);
  if (while_hlo->while_body()
          ->root_instruction()
          ->operand(1)
          ->operand(0)
          ->opcode() == HloOpcode::kCopy) {
    EXPECT_THAT(
        while_hlo->while_body()->root_instruction(),
        op::Tuple(op::Add(), op::Tuple(op::Copy(op::Add()), op::Add())));
  } else {
    EXPECT_THAT(
        while_hlo->while_body()->root_instruction(),
        op::Tuple(op::Add(), op::Tuple(op::Add(), op::Copy(op::Add()))));
  }
}
// Interfering init (broadcast also used after the loop): the init is
// copied at entry; the body itself stays copy-free.
TEST_F(WhileCopyInsertionTest, InitPointsToInterfering) {
  auto while_hlo = BuildWhileInstruction_InitPointsToInterfering();
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*module_), 2);
  EXPECT_EQ(CountCopies(*while_hlo->while_body()), 0);
  EXPECT_THAT(while_hlo->operand(0),
              op::Tuple(op::Copy(op::Constant()), op::Copy(op::Broadcast())));
}
// One init tuple, repeating the same data parameter twice, feeds two while
// loops whose bodies leave the data read-only: only the induction variable
// needs a copy per loop; the parameter elements can be shared.
TEST_F(WhileCopyInsertionTest, InitPointsToNonDistinctUsedByTwoWhileLoops) {
  const Shape& loop_state_shape = ShapeUtil::MakeTupleShape(
      {induction_variable_shape_, data_shape_, data_shape_});
  auto condition1 = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape));
  auto condition2 = module_->AddEmbeddedComputation(
      BuildConditionComputation(loop_state_shape));
  auto body1 =
      module_->AddEmbeddedComputation(BuildDependentBodyComputation2());
  auto body2 =
      module_->AddEmbeddedComputation(BuildDependentBodyComputation2());
  auto builder = HloComputation::Builder(TestName() + ".While");
  auto iter_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, induction_variable_shape_, "iter"));
  auto data_param = builder.AddInstruction(
      HloInstruction::CreateParameter(1, data_shape_, "data"));
  // data_param appears twice — non-distinct elements in the init tuple.
  auto loop_init = builder.AddInstruction(
      HloInstruction::CreateTuple({iter_param, data_param, data_param}));
  auto while_hlo1 = builder.AddInstruction(HloInstruction::CreateWhile(
      loop_state_shape, condition1, body1, loop_init));
  auto while_hlo2 = builder.AddInstruction(HloInstruction::CreateWhile(
      loop_state_shape, condition2, body2, loop_init));
  auto gte1 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::GetSubshape(while_hlo1->shape(), {0}), while_hlo1, 0));
  auto gte2 = builder.AddInstruction(HloInstruction::CreateGetTupleElement(
      ShapeUtil::GetSubshape(while_hlo1->shape(), {0}), while_hlo2, 0));
  builder.AddInstruction(
      HloInstruction::CreateBinary(gte1->shape(), HloOpcode::kAdd, gte1, gte2));
  module_->AddEntryComputation(builder.Build());
  InsertCopies(module_.get());
  EXPECT_EQ(CountCopies(*body1), 0);
  EXPECT_EQ(CountCopies(*body2), 0);
  EXPECT_EQ(CountCopies(*module_->entry_computation()), 2);
  // Only the iv element is copied; the data parameters are shared directly.
  EXPECT_THAT(while_hlo1->operand(0),
              op::Tuple(op::Copy(), op::Parameter(), op::Parameter()));
  EXPECT_THAT(while_hlo2->operand(0),
              op::Tuple(op::Copy(), op::Parameter(), op::Parameter()));
}
// A body that swaps its two tuple elements ("swizzle") forces copies of
// both elements inside the body (plus control edges ordering them) and
// copies of both init elements at entry.
TEST_F(CopyInsertionTest, SwizzlingWhile) {
  auto module = CreateNewVerifiedModule();
  const Shape loop_state_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  // Body: returns (element_1, element_0) — a pure swap.
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_1, body_element_0}));
  HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
  // Condition ignores its parameter and computes !false.
  auto cond_builder = HloComputation::Builder("condition")
;
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "param"));
  auto cond_constant = cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  cond_builder.AddInstruction(HloInstruction::CreateUnary(
      cond_constant->shape(), HloOpcode::kNot, cond_constant));
  HloComputation* condition =
      module->AddEmbeddedComputation(cond_builder.Build());
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(loop_state_shape, condition, body, tuple));
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 6);
  // The swap requires double-copies in the body, sequenced by control edges.
  EXPECT_EQ(CountCopies(*body), 4);
  EXPECT_EQ(CountControlEdges(*body), 2);
  EXPECT_THAT(body->root_instruction(),
              op::Tuple(op::Copy(op::Copy()), op::Copy(op::Copy())));
  EXPECT_EQ(CountCopies(*module->entry_computation()), 2);
  EXPECT_THAT(xla_while->operand(0), op::Tuple(op::Copy(), op::Copy()))
;
}
// The entry computation swizzles its tuple parameter: output {0} comes
// from input {1} and vice versa, while aliasing pins output {i} to
// input {i}. Copy insertion must therefore insert four copies.
TEST_F(CopyInsertionTest, CrossingParameters) {
  auto module = CreateNewVerifiedModule();
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* tuple_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "0"));
  HloInstruction* element_0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 0));
  HloInstruction* element_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 1));
  // Root crosses the elements.
  builder.AddInstruction(HloInstruction::CreateTuple({element_1, element_0}));
  module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias({1}, 0, {1}));
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 4);
}
// Identity pass-through of an aliased tuple parameter: with output {i}
// aliased to input {i} and the root reproducing the parameter in order,
// no copies are required at all.
TEST_F(CopyInsertionTest, ParametersAliasing) {
  auto module = CreateNewVerifiedModule();
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* tuple_param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  HloInstruction* element_0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 0));
  HloInstruction* element_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, tuple_param, 1));
  builder.AddInstruction(
      HloInstruction::CreateTuple({element_0, element_1}));
  module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias({1}, 0, {1}));
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 0);
}
// Same identity pass-through as ParametersAliasing but with no aliasing
// configured: each parameter element flowing to the root must be copied.
TEST_F(CopyInsertionTest, ParameterWithNoAliasing) {
  auto module = CreateNewVerifiedModule();
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  HloInstruction* element_0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
  HloInstruction* element_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
  builder.AddInstruction(
      HloInstruction::CreateTuple({element_0, element_1}));
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  // Both root elements are now copies of the parameter's elements.
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(op::GetTupleElement(param, 0)),
                        op::Copy(op::GetTupleElement(param, 1))));
  EXPECT_EQ(CountCopies(*module), 2);
}
// Only element {0} of the parameter is aliased with the output. The
// aliased element passes through uncopied; the unaliased element {1}
// still needs a copy on its way to the root.
TEST_F(CopyInsertionTest, ParameterWithPartialAliasing) {
  auto module = CreateNewVerifiedModule();
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  HloInstruction* element_0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
  HloInstruction* element_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
  builder.AddInstruction(
      HloInstruction::CreateTuple({element_0, element_1}));
  module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
  InsertCopies(module.get());
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::GetTupleElement(param, 0),
                        op::Copy(op::GetTupleElement(param, 1))));
  EXPECT_EQ(CountCopies(*module), 1);
}
// Element {0} of the parameter aliases the output, but both root
// elements are produced by independent negates rather than the
// parameter itself, so no copies are required.
TEST_F(CopyInsertionTest, ParameterAndParallelOpsWithPartialAliasing) {
  auto module = CreateNewVerifiedModule();
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  HloInstruction* element_0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
  HloInstruction* element_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
  HloInstruction* negate_0 = builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, element_0));
  HloInstruction* negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, element_1));
  builder.AddInstruction(HloInstruction::CreateTuple({negate_0, negate_1}));
  module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 0);
}
// Like the parallel-ops case, but the ops feed each other (add of the
// two negates). The root values are still freshly computed, so partial
// aliasing of element {0} requires no copies.
TEST_F(CopyInsertionTest, ParameterAndOpsWithPartialAliasing) {
  auto module = CreateNewVerifiedModule();
  const Shape tuple_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  auto builder = HloComputation::Builder(TestName());
  HloInstruction* param = builder.AddInstruction(
      HloInstruction::CreateParameter(0, tuple_shape, "p0"));
  HloInstruction* element_0 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 0));
  HloInstruction* element_1 = builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, param, 1));
  HloInstruction* negate_0 = builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, element_0));
  HloInstruction* negate_1 = builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, element_1));
  HloInstruction* sum = builder.AddInstruction(HloInstruction::CreateBinary(
      scalar_shape_, HloOpcode::kAdd, negate_0, negate_1));
  builder.AddInstruction(HloInstruction::CreateTuple({sum, negate_1}));
  module->AddEntryComputation(builder.Build());
  ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 0);
}
// Like SwizzlingWhile, but one swapped element passes through a negate.
// The copy counts match the pure-swizzle case: the op does not remove the
// interference created by swapping the loop-state elements.
TEST_F(CopyInsertionTest, SwizzlingWhileWithOneOp) {
  auto module = CreateNewVerifiedModule();
  const Shape loop_state_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  // Body: root = tuple(negate(element_1), element_0).
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  auto negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
      scalar_shape_, HloOpcode::kNegate, body_element_1));
  body_builder.AddInstruction(
      HloInstruction::CreateTuple({negate, body_element_0}));
  HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
  // Condition: root = not(false). Only analyzed, never executed.
  auto cond_builder = HloComputation::Builder("condition");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "param"));
  auto cond_constant = cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  cond_builder.AddInstruction(HloInstruction::CreateUnary(
      cond_constant->shape(), HloOpcode::kNot, cond_constant));
  HloComputation* condition =
      module->AddEmbeddedComputation(cond_builder.Build());
  // Entry: while((1.0, 2.0)).
  auto builder = HloComputation::Builder(TestName());
  auto constant1 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto constant2 = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0)));
  auto tuple = builder.AddInstruction(
      HloInstruction::CreateTuple({constant1, constant2}));
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(loop_state_shape, condition, body, tuple));
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  // 2 copies at the while init, 4 in the body, ordered by 2 control edges.
  EXPECT_EQ(CountCopies(*module), 6);
  EXPECT_EQ(CountCopies(*body), 4);
  EXPECT_EQ(CountControlEdges(*body), 2);
  EXPECT_THAT(
      body->root_instruction(),
      op::Tuple(op::Copy(op::Negate(op::Copy())), op::Copy(op::Copy())));
  EXPECT_EQ(CountCopies(*module->entry_computation()), 2);
  EXPECT_THAT(xla_while->operand(0), op::Tuple(op::Copy(), op::Copy()));
}
// Swizzling while whose init tuple uses the *same* constant for both
// elements. Here the two copies land on the init tuple's operands and the
// body needs no copies at all (swapping identical values is a no-op from
// the analysis' point of view).
TEST_F(CopyInsertionTest, SwizzlingWhileSharedInput) {
  auto module = CreateNewVerifiedModule();
  const Shape loop_state_shape =
      ShapeUtil::MakeTupleShape({scalar_shape_, scalar_shape_});
  // Body: root = tuple(element_1, element_0) -- swaps the elements.
  auto body_builder = HloComputation::Builder("body");
  auto body_param = body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "param"));
  auto body_element_0 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 0));
  auto body_element_1 = body_builder.AddInstruction(
      HloInstruction::CreateGetTupleElement(scalar_shape_, body_param, 1));
  body_builder.AddInstruction(
      HloInstruction::CreateTuple({body_element_1, body_element_0}));
  HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
  // Condition: root = not(false). Only analyzed, never executed.
  auto cond_builder = HloComputation::Builder("condition");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "param"));
  auto cond_constant = cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  cond_builder.AddInstruction(HloInstruction::CreateUnary(
      cond_constant->shape(), HloOpcode::kNot, cond_constant));
  HloComputation* condition =
      module->AddEmbeddedComputation(cond_builder.Build());
  // Entry: while((c, c)) with a shared constant operand.
  auto builder = HloComputation::Builder(TestName());
  auto constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
  auto tuple =
      builder.AddInstruction(HloInstruction::CreateTuple({constant, constant}));
  builder.AddInstruction(
      HloInstruction::CreateWhile(loop_state_shape, condition, body, tuple));
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 2);
  EXPECT_EQ(CountCopies(*body), 0);
  EXPECT_EQ(CountCopies(*module->entry_computation()), 2);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(), op::Copy()));
}
// A chain of kNumWhiles sequential while loops threading elements 1..3
// from one loop into the next, while element 0 is a shared parameter.
// Each body negates element 2 and reverses element 3, leaving 0 and 1
// untouched. Expect one copy per while body plus 4 in the entry.
TEST_F(CopyInsertionTest, SequentialWhiles) {
  const Shape element_shape = ShapeUtil::MakeShape(F32, {42});
  const Shape loop_state_shape = ShapeUtil::MakeTupleShape(
      {element_shape, element_shape, element_shape, element_shape});
  auto module = CreateNewVerifiedModule();
  auto builder = HloComputation::Builder(TestName());
  auto param_0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, element_shape, "param_0"));
  auto param_1 = builder.AddInstruction(
      HloInstruction::CreateParameter(1, element_shape, "param_1"));
  auto param_2 = builder.AddInstruction(
      HloInstruction::CreateParameter(2, element_shape, "param_2"));
  auto param_3 = builder.AddInstruction(
      HloInstruction::CreateParameter(3, element_shape, "param_3"));
  const int kNumWhiles = 3;
  // Elements 1..3 of each while feed the init of the next while.
  HloInstruction* prev_element_1 = param_1;
  HloInstruction* prev_element_2 = param_2;
  HloInstruction* prev_element_3 = param_3;
  std::vector<const HloInstruction*> whiles;
  for (int i = 0; i < kNumWhiles; ++i) {
    // Body: root = tuple(e0, e1, negate(e2), reverse(e3)).
    auto body_builder = HloComputation::Builder("body");
    auto body_param = body_builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape, "param"));
    auto body_element_0 = body_builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(element_shape, body_param, 0));
    auto body_element_1 = body_builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(element_shape, body_param, 1));
    auto body_element_2 = body_builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(element_shape, body_param, 2));
    auto body_element_3 = body_builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(element_shape, body_param, 3));
    auto negate = body_builder.AddInstruction(HloInstruction::CreateUnary(
        element_shape, HloOpcode::kNegate, body_element_2));
    auto reverse = body_builder.AddInstruction(
        HloInstruction::CreateReverse(element_shape, body_element_3, {0}));
    body_builder.AddInstruction(HloInstruction::CreateTuple(
        {body_element_0, body_element_1, negate, reverse}));
    HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
    // Condition: root = not(false). Only analyzed, never executed.
    auto cond_builder = HloComputation::Builder("condition");
    cond_builder.AddInstruction(
        HloInstruction::CreateParameter(0, loop_state_shape, "param"));
    auto cond_constant = cond_builder.AddInstruction(
        HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
    cond_builder.AddInstruction(HloInstruction::CreateUnary(
        cond_constant->shape(), HloOpcode::kNot, cond_constant));
    HloComputation* condition =
        module->AddEmbeddedComputation(cond_builder.Build());
    auto while_init = builder.AddInstruction(HloInstruction::CreateTuple(
        {param_0, prev_element_1, prev_element_2, prev_element_3}));
    auto xla_while = builder.AddInstruction(HloInstruction::CreateWhile(
        loop_state_shape, condition, body, while_init));
    whiles.push_back(xla_while);
    if (i != kNumWhiles - 1) {
      prev_element_1 = builder.AddInstruction(
          HloInstruction::CreateGetTupleElement(element_shape, xla_while, 1));
      prev_element_2 = builder.AddInstruction(
          HloInstruction::CreateGetTupleElement(element_shape, xla_while, 2));
      prev_element_3 = builder.AddInstruction(
          HloInstruction::CreateGetTupleElement(element_shape, xla_while, 3));
    }
  }
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  // 4 copies in the entry computation plus one per while body.
  EXPECT_EQ(CountCopies(*module), 4 + kNumWhiles);
  for (const HloInstruction* xla_while : whiles) {
    EXPECT_EQ(CountCopies(*xla_while->while_body()), 1);
  }
  EXPECT_THAT(whiles[0]->operand(0), op::Tuple(op::Parameter(), op::Parameter(),
                                               op::Copy(), op::Copy()));
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Tuple(op::Copy(), op::Copy(), op::GetTupleElement(),
                        op::GetTupleElement()));
}
// A while body whose root is a constant (not derived from its parameter).
// The constant root must be copied inside the body, and the while init
// (a parameter) must be copied in the entry computation.
TEST_F(CopyInsertionTest, WhileBodyWithConstantRoot) {
  auto module = CreateNewVerifiedModule();
  auto builder = HloComputation::Builder(TestName());
  auto param_0 = builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param_0"));
  // Body: parameter is ignored; root is the constant 123.0.
  auto body_builder = HloComputation::Builder("body");
  body_builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param"));
  body_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(123.0)));
  HloComputation* body = module->AddEmbeddedComputation(body_builder.Build());
  // Condition: parameter is ignored; root is the constant false.
  auto cond_builder = HloComputation::Builder("condition");
  cond_builder.AddInstruction(
      HloInstruction::CreateParameter(0, scalar_shape_, "param"));
  cond_builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  HloComputation* condition =
      module->AddEmbeddedComputation(cond_builder.Build());
  auto xla_while = builder.AddInstruction(
      HloInstruction::CreateWhile(scalar_shape_, condition, body, param_0));
  module->AddEntryComputation(builder.Build());
  InsertCopies(module.get());
  EXPECT_EQ(CountCopies(*module), 2);
  EXPECT_THAT(xla_while->operand(0), op::Copy(op::Parameter()));
  EXPECT_THAT(body->root_instruction(), op::Copy(op::Constant()));
  // The condition's constant root needs no copy.
  EXPECT_THAT(condition->root_instruction(), op::Constant());
}
// Tokens carry no data, so copy insertion must never insert a copy for a
// token value -- this loop threads a token through its state and still
// requires zero copies.
TEST_F(CopyInsertionTest, TokensShouldNotBeCopied) {
  std::string module_string = R"(
HloModule TokensShouldNotBeCopied
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
  %param.1 = (s32[], token[]) parameter(0)
  %get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
  %constant.1 = s32[] constant(1)
  %add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
  %get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
  %after-all = token[] after-all(token[] %get-tuple-element.2)
  ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
  %param = (s32[], token[]) parameter(0)
  %get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
  %constant = s32[] constant(42)
  ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %TokensShouldNotBeCopied () -> s32[] {
  %one = s32[] constant(1)
  %negative_one = s32[] negate(%one)
  %init_token = token[] after-all()
  %init_tuple = (s32[], token[]) tuple(s32[] %negative_one, token[] %init_token)
  %while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
  ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(module_string));
  InsertCopies(module.get());
  // No copies at all: the s32 element is freshly computed each iteration
  // and tokens are exempt from copying.
  EXPECT_EQ(CountCopies(*module), 0);
}
// Builds a while-condition computation over `shape` whose root is
// not(false). These benchmark loops are only run through copy insertion,
// never executed, so the condition value is irrelevant.
std::unique_ptr<HloComputation> MakeTrivialCondition(const Shape& shape) {
  auto builder = HloComputation::Builder("trivial_condition");
  builder.AddInstruction(
      HloInstruction::CreateParameter(0, shape, "loop_state"));
  HloInstruction* false_constant = builder.AddInstruction(
      HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(false)));
  builder.AddInstruction(HloInstruction::CreateUnary(
      false_constant->shape(), HloOpcode::kNot, false_constant));
  return builder.Build();
}
// Builds the three-element f32[42] loop body used by the while
// benchmarks: root = tuple(e0, reverse(e1), e1 + e2).
std::unique_ptr<HloComputation> MakeBenchmarkWhileBody() {
  auto builder = HloComputation::Builder("benchmark_loop_body");
  const Shape element_shape = ShapeUtil::MakeShape(F32, {42});
  const Shape loop_state_shape =
      ShapeUtil::MakeTupleShape({element_shape, element_shape, element_shape});
  HloInstruction* loop_state = builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
  std::vector<HloInstruction*> elements;
  for (int i = 0; i < 3; ++i) {
    elements.push_back(builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(element_shape, loop_state, i)));
  }
  HloInstruction* reversed = builder.AddInstruction(
      HloInstruction::CreateReverse(element_shape, elements[1], {0}));
  HloInstruction* sum = builder.AddInstruction(HloInstruction::CreateBinary(
      element_shape, HloOpcode::kAdd, elements[1], elements[2]));
  builder.AddInstruction(
      HloInstruction::CreateTuple({elements[0], reversed, sum}));
  return builder.Build();
}
// Benchmark copy insertion on a chain of `num_whiles` sequential while
// loops. Only CopyInsertion::Run is timed; module construction and the
// sanity assertion run with the timer paused.
void BM_SequentialWhiles(::testing::benchmark::State& state) {
  const int num_whiles = state.range(0);
  for (auto s : state) {
    state.PauseTiming();
    HloModuleConfig config;
    config.set_debug_options(GetDebugOptionsFromFlags());
    HloModule module("BM_SequentialWhiles", config);
    auto builder = HloComputation::Builder("BM_SequentialWhiles");
    HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
        0, ShapeUtil::MakeShape(F32, {42}), "x"));
    HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
        1, ShapeUtil::MakeShape(F32, {42}), "y"));
    HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
        2, ShapeUtil::MakeShape(F32, {42}), "z"));
    HloInstruction* init =
        builder.AddInstruction(HloInstruction::CreateTuple({x, y, z}));
    // Each while consumes the previous while's result.
    HloInstruction* prev_loop_state = init;
    for (int w = 0; w < num_whiles; ++w) {
      HloComputation* condition =
          module.AddEmbeddedComputation(MakeTrivialCondition(init->shape()));
      HloComputation* body =
          module.AddEmbeddedComputation(MakeBenchmarkWhileBody());
      prev_loop_state = builder.AddInstruction(HloInstruction::CreateWhile(
          init->shape(), condition, body, prev_loop_state));
    }
    module.AddEntryComputation(builder.Build());
    CopyInsertion copy_insertion;
    state.ResumeTiming();
    ASSERT_IS_OK(copy_insertion.Run(&module).status());
    state.PauseTiming();
    // Sanity check: 3 copies for the init plus one per while.
    ASSERT_EQ(CountCopies(module), 3 + num_whiles);
    state.ResumeTiming();
  }
}
// Benchmark copy insertion on `num_whiles` independent while loops that
// all consume the same init tuple; their first elements are summed.
// Only CopyInsertion::Run is timed.
void BM_ParallelWhiles(::testing::benchmark::State& state) {
  const int num_whiles = state.range(0);
  for (auto s : state) {
    state.PauseTiming();
    HloModuleConfig config;
    config.set_debug_options(GetDebugOptionsFromFlags());
    // Fixed: the module was mistakenly named "BM_SequentialWhiles"
    // (copy/paste from the benchmark above).
    HloModule module("BM_ParallelWhiles", config);
    auto builder = HloComputation::Builder("BM_ParallelWhiles");
    HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter(
        0, ShapeUtil::MakeShape(F32, {42}), "x"));
    HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter(
        1, ShapeUtil::MakeShape(F32, {42}), "y"));
    HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter(
        2, ShapeUtil::MakeShape(F32, {42}), "z"));
    HloInstruction* init =
        builder.AddInstruction(HloInstruction::CreateTuple({x, y, z}));
    HloInstruction* sum = nullptr;
    for (int w = 0; w < num_whiles; ++w) {
      HloComputation* condition =
          module.AddEmbeddedComputation(MakeTrivialCondition(init->shape()));
      HloComputation* body =
          module.AddEmbeddedComputation(MakeBenchmarkWhileBody());
      // Every while shares the same init tuple; accumulate element 0 of
      // each result into `sum`.
      HloInstruction* xla_while = builder.AddInstruction(
          HloInstruction::CreateWhile(init->shape(), condition, body, init));
      if (sum == nullptr) {
        sum = builder.AddInstruction(
            HloInstruction::CreateGetTupleElement(x->shape(), xla_while, 0));
      } else {
        HloInstruction* element_0 = builder.AddInstruction(
            HloInstruction::CreateGetTupleElement(x->shape(), xla_while, 0));
        sum = builder.AddInstruction(HloInstruction::CreateBinary(
            x->shape(), HloOpcode::kAdd, sum, element_0));
      }
    }
    module.AddEntryComputation(builder.Build());
    CopyInsertion copy_insertion;
    state.ResumeTiming();
    ASSERT_IS_OK(copy_insertion.Run(&module).status());
    state.PauseTiming();
    ASSERT_EQ(CountCopies(module), 3 * num_whiles);
    // Fixed: resume the timer before the iteration ends. Ending an
    // iteration paused makes the next PauseTiming() fire while the timer
    // is already stopped, which google-benchmark treats as an error
    // (BM_SequentialWhiles above already does this).
    state.ResumeTiming();
  }
}
// Builds a loop body over `num_tuple_inputs` scalar f32 elements that
// forwards each element unchanged: root = tuple(gte(p,0), ..., gte(p,n-1)).
std::unique_ptr<HloComputation> MakeBenchmarkWhileBody(
    const int num_tuple_inputs) {
  auto builder = HloComputation::Builder("benchmark_loop_body");
  const Shape element_shape = ShapeUtil::MakeShape(F32, {});
  const Shape loop_state_shape = ShapeUtil::MakeTupleShape(
      std::vector<Shape>(num_tuple_inputs, element_shape));
  HloInstruction* loop_state = builder.AddInstruction(
      HloInstruction::CreateParameter(0, loop_state_shape, "loop_state"));
  std::vector<HloInstruction*> elements;
  elements.reserve(num_tuple_inputs);
  for (int i = 0; i < num_tuple_inputs; ++i) {
    elements.push_back(builder.AddInstruction(
        HloInstruction::CreateGetTupleElement(element_shape, loop_state, i)));
  }
  builder.AddInstruction(HloInstruction::CreateTuple(elements));
  return builder.Build();
}
// Benchmark copy insertion on a single while loop whose state is a tuple
// of `num_tuple_inputs` scalars. Only CopyInsertion::Run is timed.
void BM_ManyElementTuple(::testing::benchmark::State& state) {
  const int num_tuple_inputs = state.range(0);
  HloModuleConfig config;
  config.set_debug_options(GetDebugOptionsFromFlags());
  CopyInsertion copy_insertion;
  const Shape element_shape = ShapeUtil::MakeShape(F32, {});
  std::vector<HloInstruction*> tuple_params(num_tuple_inputs);
  for (auto s : state) {
    state.PauseTiming();
    // Fixed: the builder was mistakenly named "BM_ParallelWhiles"
    // (copy/paste from the benchmark above).
    auto builder = HloComputation::Builder("BM_ManyElementTuple");
    HloModule module("BM_ManyElementTuple", config);
    for (int j = 0; j < num_tuple_inputs; ++j) {
      tuple_params[j] = builder.AddInstruction(
          HloInstruction::CreateParameter(j, element_shape, ""));
    }
    HloInstruction* init =
        builder.AddInstruction(HloInstruction::CreateTuple(tuple_params));
    HloComputation* condition =
        module.AddEmbeddedComputation(MakeTrivialCondition(init->shape()));
    HloComputation* body =
        module.AddEmbeddedComputation(MakeBenchmarkWhileBody(num_tuple_inputs));
    HloInstruction* xla_while = builder.AddInstruction(
        HloInstruction::CreateWhile(init->shape(), condition, body, init));
    builder.AddInstruction(HloInstruction::CreateGetTupleElement(
        ShapeUtil::MakeShape(F32, {}), xla_while, 0));
    module.AddEntryComputation(builder.Build());
    state.ResumeTiming();
    ASSERT_IS_OK(copy_insertion.Run(&module).status());
  }
}
// Register the copy-insertion benchmarks over a range of problem sizes.
BENCHMARK(BM_SequentialWhiles)->Arg(512)->Arg(1024)->Arg(2048)->Arg(4096);
BENCHMARK(BM_ParallelWhiles)->Arg(512)->Arg(1024)->Arg(2048)->Arg(4096);
BENCHMARK(BM_ManyElementTuple)->Arg(1024)->Arg(12288);
// Smoke test: copy insertion must succeed on a module with nested whiles
// (a while inside a while body) without crashing or producing an invalid
// module. No copy counts are asserted.
TEST_F(CopyInsertionTest, SimpleControlFlowTest) {
  const std::string& hlo_string = R"(
HloModule TestModule
if-body.v5 {
  constant.3 = s32[] constant(-1)
  p.1 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
  get-tuple-element.18 = (s32[], s32[], s32[]) get-tuple-element(p.1), index=1
  get-tuple-element.65 = s32[] get-tuple-element(get-tuple-element.18), index=0
  get-tuple-element.66 = s32[] get-tuple-element(get-tuple-element.18), index=1
  add.3 = s32[] add(get-tuple-element.65, get-tuple-element.66)
  tuple.33 = (s32[]) tuple(add.3)
  ROOT tuple.34 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(constant.3, get-tuple-element.18, tuple.33)
}
if-condition.v4 {
  p.2 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
  get-tuple-element.67 = s32[] get-tuple-element(p.2), index=0
  constant.4 = s32[] constant(0)
  ROOT equal-to = pred[] compare(get-tuple-element.67, constant.4), direction=EQ
}
_functionalize_body_1__.v28 {
  arg_tuple.4 = (s32[], s32[], s32[], s32[]) parameter(0)
  get-tuple-element.68 = s32[] get-tuple-element(arg_tuple.4), index=0
  constant.7 = s32[] constant(1)
  add.4 = s32[] add(get-tuple-element.68, constant.7)
  get-tuple-element.69 = s32[] get-tuple-element(arg_tuple.4), index=1
  get-tuple-element.70 = s32[] get-tuple-element(arg_tuple.4), index=2
  less-than-or-equal-to = pred[] compare(get-tuple-element.69, get-tuple-element.70), direction=LE
  constant.8 = s32[] constant(0)
  select = s32[] select(less-than-or-equal-to, constant.8, constant.7)
  get-tuple-element.71 = s32[] get-tuple-element(arg_tuple.4), index=3
  tuple.35 = (s32[], s32[], s32[]) tuple(get-tuple-element.69, get-tuple-element.71, get-tuple-element.70)
  tuple.36 = (s32[]) tuple(constant.8)
  tuple.37 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(select, tuple.35, tuple.36)
  while = (s32[], (s32[], s32[], s32[]), (s32[])) while(tuple.37), condition=if-condition.v4, body=if-body.v5
  get-tuple-element.72 = (s32[]) get-tuple-element(while), index=2
  get-tuple-element.73 = s32[] get-tuple-element(get-tuple-element.72), index=0
  ROOT tuple.38 = (s32[], s32[], s32[], s32[]) tuple(add.4, get-tuple-element.69, get-tuple-element.70, get-tuple-element.73)
}
cond_wrapper.v3.1 {
  inputs.1 = (s32[], s32[], s32[], s32[]) parameter(0)
  get-tuple-element.75 = s32[] get-tuple-element(inputs.1), index=0
  constant.11 = s32[] constant(7)
  ROOT less-than.2 = pred[] compare(get-tuple-element.75, constant.11), direction=LT
}
_functionalize_body_2__.v25 {
  arg_tuple.5 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
  get-tuple-element.76 = s32[] get-tuple-element(arg_tuple.5), index=0
  get-tuple-element.77 = s32[] get-tuple-element(arg_tuple.5), index=2
  get-tuple-element.78 = s32[] get-tuple-element(arg_tuple.5), index=3
  get-tuple-element.79 = s32[] get-tuple-element(arg_tuple.5), index=4
  tuple.39 = (s32[], s32[], s32[], s32[]) tuple(get-tuple-element.76, get-tuple-element.77, get-tuple-element.78, get-tuple-element.79)
  while.2 = (s32[], s32[], s32[], s32[]) while(tuple.39), condition=cond_wrapper.v3.1, body=_functionalize_body_1__.v28
  get-tuple-element.80 = s32[] get-tuple-element(while.2), index=0
  get-tuple-element.81 = s32[] get-tuple-element(arg_tuple.5), index=1
  constant.12 = s32[] constant(1)
  add.5 = s32[] add(get-tuple-element.81, constant.12)
  get-tuple-element.82 = s32[] get-tuple-element(while.2), index=3
  ROOT tuple.40 = (s32[], s32[], s32[], s32[], s32[]) tuple(get-tuple-element.80, add.5, get-tuple-element.77, get-tuple-element.78, get-tuple-element.82)
}
cond_wrapper.v3.2 {
  inputs.2 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
  get-tuple-element.83 = s32[] get-tuple-element(inputs.2), index=1
  constant.13 = s32[] constant(5)
  ROOT less-than.3 = pred[] compare(get-tuple-element.83, constant.13), direction=LT
}
ENTRY TestComputation {
  arg_tuple.6 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
  ROOT while.3 = (s32[], s32[], s32[], s32[], s32[]) while(arg_tuple.6), condition=cond_wrapper.v3.2, body=_functionalize_body_2__.v25
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  InsertCopies(module.get());
}
// Smoke test: like SimpleControlFlowTest, but with two back-to-back inner
// whiles (while.1 consumes while) inside the middle loop body. Copy
// insertion must succeed without crashing; no copy counts are asserted.
TEST_F(CopyInsertionTest, ControlFlowTest) {
  const std::string& hlo_string = R"(
HloModule TestModule
if-body.v5 {
  constant.3 = s32[] constant(-1)
  p.1 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
  get-tuple-element.18 = (s32[], s32[], s32[]) get-tuple-element(p.1), index=1
  get-tuple-element.65 = s32[] get-tuple-element(get-tuple-element.18), index=0
  get-tuple-element.66 = s32[] get-tuple-element(get-tuple-element.18), index=1
  add.3 = s32[] add(get-tuple-element.65, get-tuple-element.66)
  tuple.33 = (s32[]) tuple(add.3)
  ROOT tuple.34 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(constant.3, get-tuple-element.18, tuple.33)
}
if-condition.v4 {
  p.2 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
  get-tuple-element.67 = s32[] get-tuple-element(p.2), index=0
  constant.4 = s32[] constant(0)
  ROOT equal-to = pred[] compare(get-tuple-element.67, constant.4), direction=EQ
}
if-body.v5.1 {
  constant.5 = s32[] constant(-1)
  p.3 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
  get-tuple-element.68 = (s32[], s32[], s32[]) get-tuple-element(p.3), index=1
  get-tuple-element.70 = s32[] get-tuple-element(get-tuple-element.68), index=2
  multiply.1 = s32[] multiply(get-tuple-element.70, get-tuple-element.70)
  tuple.35 = (s32[]) tuple(multiply.1)
  ROOT tuple.36 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(constant.5, get-tuple-element.68, tuple.35)
}
if-condition.v4.1 {
  p.4 = (s32[], (s32[], s32[], s32[]), (s32[])) parameter(0)
  get-tuple-element.71 = s32[] get-tuple-element(p.4), index=0
  constant.6 = s32[] constant(1)
  ROOT equal-to.1 = pred[] compare(get-tuple-element.71, constant.6), direction=EQ
}
_functionalize_body_1__.v28 {
  arg_tuple.4 = (s32[], s32[], s32[], s32[]) parameter(0)
  get-tuple-element.72 = s32[] get-tuple-element(arg_tuple.4), index=0
  constant.7 = s32[] constant(1)
  add.4 = s32[] add(get-tuple-element.72, constant.7)
  get-tuple-element.73 = s32[] get-tuple-element(arg_tuple.4), index=1
  get-tuple-element.74 = s32[] get-tuple-element(arg_tuple.4), index=2
  less-than-or-equal-to = pred[] compare(get-tuple-element.73, get-tuple-element.74), direction=LE
  constant.8 = s32[] constant(0)
  select = s32[] select(less-than-or-equal-to, constant.8, constant.7)
  get-tuple-element.75 = s32[] get-tuple-element(arg_tuple.4), index=3
  tuple.37 = (s32[], s32[], s32[]) tuple(get-tuple-element.73, get-tuple-element.75, get-tuple-element.74)
  tuple.38 = (s32[]) tuple(constant.8)
  tuple.39 = (s32[], (s32[], s32[], s32[]), (s32[])) tuple(select, tuple.37, tuple.38)
  while = (s32[], (s32[], s32[], s32[]), (s32[])) while(tuple.39), condition=if-condition.v4, body=if-body.v5
  while.1 = (s32[], (s32[], s32[], s32[]), (s32[])) while(while), condition=if-condition.v4.1, body=if-body.v5.1
  get-tuple-element.76 = (s32[]) get-tuple-element(while.1), index=2
  get-tuple-element.77 = s32[] get-tuple-element(get-tuple-element.76), index=0
  ROOT tuple.40 = (s32[], s32[], s32[], s32[]) tuple(add.4, get-tuple-element.73, get-tuple-element.74, get-tuple-element.77)
}
cond_wrapper.v3.1 {
  inputs.1 = (s32[], s32[], s32[], s32[]) parameter(0)
  get-tuple-element.78 = s32[] get-tuple-element(inputs.1), index=0
  constant.11 = s32[] constant(7)
  ROOT less-than.2 = pred[] compare(get-tuple-element.78, constant.11), direction=LT
}
_functionalize_body_2__.v25 {
  arg_tuple.5 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
  get-tuple-element.79 = s32[] get-tuple-element(arg_tuple.5), index=0
  get-tuple-element.80 = s32[] get-tuple-element(arg_tuple.5), index=2
  get-tuple-element.81 = s32[] get-tuple-element(arg_tuple.5), index=3
  get-tuple-element.82 = s32[] get-tuple-element(arg_tuple.5), index=4
  tuple.41 = (s32[], s32[], s32[], s32[]) tuple(get-tuple-element.79, get-tuple-element.80, get-tuple-element.81, get-tuple-element.82)
  while.2 = (s32[], s32[], s32[], s32[]) while(tuple.41), condition=cond_wrapper.v3.1, body=_functionalize_body_1__.v28
  get-tuple-element.83 = s32[] get-tuple-element(while.2), index=0
  get-tuple-element.84 = s32[] get-tuple-element(arg_tuple.5), index=1
  constant.12 = s32[] constant(1)
  add.5 = s32[] add(get-tuple-element.84, constant.12)
  get-tuple-element.85 = s32[] get-tuple-element(while.2), index=3
  ROOT tuple.42 = (s32[], s32[], s32[], s32[], s32[]) tuple(get-tuple-element.83, add.5, get-tuple-element.80, get-tuple-element.81, get-tuple-element.85)
}
cond_wrapper.v3.2 {
  inputs.2 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
  get-tuple-element.86 = s32[] get-tuple-element(inputs.2), index=1
  constant.13 = s32[] constant(5)
  ROOT less-than.3 = pred[] compare(get-tuple-element.86, constant.13), direction=LT
}
ENTRY TestComputation {
  arg_tuple.6 = (s32[], s32[], s32[], s32[], s32[]) parameter(0)
  ROOT while.3 = (s32[], s32[], s32[], s32[], s32[]) while(arg_tuple.6), condition=cond_wrapper.v3.2, body=_functionalize_body_2__.v25
}
)";
  auto module = ParseAndReturnVerifiedModule(hlo_string).value();
  InsertCopies(module.get());
}
// A while loop whose body is itself a while loop over the same pred[] value.
// Copy insertion should need only a single copy: the one protecting the entry
// parameter that feeds the outer loop.
TEST_F(CopyInsertionTest, NestedWhiles) {
  constexpr absl::string_view hlo_text = R"(
HloModule TestModule
cond.inner {
ROOT param.cond.inner = pred[] parameter(0)
}
body.inner {
param.body.inner = pred[] parameter(0)
ROOT not = pred[] not(param.body.inner)
}
cond.outer {
ROOT param.cond.outer = pred[] parameter(0)
}
body.outer {
param.cond.outer = pred[] parameter(0)
ROOT while = pred[] while(param.cond.outer), condition=cond.inner, body=body.inner
}
ENTRY TestComputation {
entry_param = pred[] parameter(0)
ROOT while = pred[] while(entry_param), condition=cond.outer, body=body.outer
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
                          ParseAndReturnVerifiedModule(hlo_text));
  InsertCopies(hlo_module.get());
  // Exactly one copy in the whole module, placed on the outer while's input.
  EXPECT_EQ(1, CountCopies(*hlo_module));
  EXPECT_THAT(hlo_module->entry_computation()->root_instruction(),
              op::While(op::Copy(op::Parameter())));
}
// Nested whiles where the outer body's ROOT is its own parameter (the inner
// while's value only escapes through an outfeed). Expects a single copy,
// placed on the inner while's operand, while the entry root stays copy-free.
TEST_F(CopyInsertionTest, NestedWhilesWithParamRoot) {
const std::string& hlo_string = R"(
HloModule TestModule
cond.inner {
ROOT param.cond.inner = pred[] parameter(0)
}
body.inner {
param.body.inner = pred[] parameter(0)
ROOT not = pred[] not(param.body.inner)
}
cond.outer {
ROOT param.cond.outer = pred[] parameter(0)
}
body.outer {
ROOT param.cond.outer = pred[] parameter(0)
while = pred[] while(param.cond.outer), condition=cond.inner, body=body.inner
after-all = token[] after-all()
outfeed = token[] outfeed(while, after-all)
}
ENTRY TestComputation {
entry_param = pred[] parameter(0)
while = pred[] while(entry_param), condition=cond.outer, body=body.outer
ROOT not = pred[] not(while)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
// Only one copy module-wide; the entry root sees the while uncopied.
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Not(op::While(op::Parameter())));
// The single copy sits on the inner while's operand, feeding the outfeed.
HloInstruction* outfeed = FindInstruction(module.get(), "outfeed");
EXPECT_THAT(outfeed, op::Outfeed(op::While(op::Copy(op::Parameter(0))),
op::AfterAll()));
}
// Variant of the previous test with tuple-shaped loop state: the outer body
// rebuilds its result from the parameter's elements while the inner while
// consumes the parameter directly. Expects both tuple elements feeding the
// inner while to be copied.
TEST_F(CopyInsertionTest, NestedWhilesWithParamRoot2) {
const std::string& hlo_string = R"(
HloModule TestModule
cond.inner {
param.cond.inner = (pred[], pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param.cond.inner), index=0
}
body.inner {
param.body.inner = (pred[], pred[]) parameter(0)
gte.0 = pred[] get-tuple-element(param.body.inner), index=0
gte.1 = pred[] get-tuple-element(param.body.inner), index=1
and = pred[] and(gte.0, gte.1)
not = pred[] not(gte.1)
ROOT root = (pred[], pred[]) tuple(and, not)
}
cond.outer {
param.cond.outer = (pred[], pred[]) parameter(0)
ROOT gte = pred[] get-tuple-element(param.cond.outer), index=0
}
body.outer {
param.body.outer = (pred[], pred[]) parameter(0)
gte.0 = pred[] get-tuple-element(param.body.outer), index=0
gte.1 = pred[] get-tuple-element(param.body.outer), index=1
while.inner = (pred[], pred[]) while(param.body.outer), condition=cond.inner, body=body.inner
gte.2 = pred[] get-tuple-element(while.inner), index=0
after-all = token[] after-all()
outfeed = token[] outfeed(gte.2, after-all)
ROOT root = (pred[], pred[]) tuple(gte.0, gte.1)
}
ENTRY TestComputation {
entry_param.1 = pred[] parameter(0)
entry_param.2 = pred[] parameter(1)
tuple = (pred[], pred[]) tuple(entry_param.1, entry_param.2)
while.outer = (pred[], pred[]) while(tuple), condition=cond.outer, body=body.outer
gte = pred[] get-tuple-element(while.outer), index=0
ROOT not = pred[] not(gte)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
// The inner while must receive a freshly copied tuple of both elements.
HloInstruction* while_inner = FindInstruction(module.get(), "while.inner");
EXPECT_THAT(
while_inner,
op::While(op::Tuple(op::Copy(op::GetTupleElement(op::Parameter(0))),
op::Copy(op::GetTupleElement(op::Parameter(0))))));
}
// A conditional nested inside a while body, where both branches return a
// tuple re-using their parameter. Expects exactly three copies after copy
// insertion.
TEST_F(CopyInsertionTest, NestedWhileAndConditional2) {
const std::string& hlo_string = R"(
HloModule TestModule
on_true
{
v1 = f32[2] parameter(0)
v2 = f32[2] add(v1,v1)
ROOT t1 = (f32[2], f32[2]) tuple(v1,v2)
}
on_false
{
v1 = f32[2] parameter(0)
v2 = f32[2] multiply(v1,v1)
ROOT t2 = (f32[2], f32[2]) tuple(v1,v2)
}
cond.outer {
param.1 = (pred[], f32[2], f32[2]) parameter(0)
ROOT param.cond.outer = pred[] get-tuple-element(param.1), index=0
}
body.outer {
param.1 = (pred[], f32[2], f32[2]) parameter(0)
pred.1 = pred[] get-tuple-element(param.1), index=0
arg_tuple.11 = f32[2] get-tuple-element(param.1), index=1
if = (f32[2], f32[2]) conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
e1 = f32[2] get-tuple-element(if), index=0
e2 = f32[2] get-tuple-element(if), index=1
ROOT res = (pred[], f32[2], f32[2]) tuple(pred.1,e1, e2)
}
ENTRY TestComputation {
entry_param.1 = pred[] parameter(0)
float_param = f32[2] parameter(1)
entry_param = (pred[], f32[2], f32[2]) tuple(entry_param.1, float_param, float_param)
ROOT while = (pred[], f32[2], f32[2]) while(entry_param), condition=cond.outer, body=body.outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 3);
}
// Simpler conditional-in-while case where each branch returns a plain f32[2]
// value rather than a tuple. Expects exactly two copies.
TEST_F(CopyInsertionTest, NestedWhileAndConditional) {
const std::string& hlo_string = R"(
HloModule TestModule
on_true
{
v1 = f32[2] parameter(0)
ROOT v2 = f32[2] add(v1,v1)
}
on_false
{
v1 = f32[2] parameter(0)
ROOT v2 = f32[2] multiply(v1,v1)
}
cond.outer {
param.1 = (pred[], f32[2]) parameter(0)
ROOT param.cond.outer = pred[] get-tuple-element(param.1), index=0
}
body.outer {
param.1 = (pred[], f32[2]) parameter(0)
pred.1 = pred[] get-tuple-element(param.1), index=0
arg_tuple.11 = f32[2] get-tuple-element(param.1), index=1
if = f32[2] conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
ROOT res = (pred[], f32[2]) tuple(pred.1,if)
}
ENTRY TestComputation {
entry_param.1 = pred[] parameter(0)
float_param = f32[2] parameter(1)
entry_param = (pred[], f32[2]) tuple(entry_param.1, float_param)
ROOT while = (pred[], f32[2]) while(entry_param), condition=cond.outer, body=body.outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
// Dump the post-pass module for debugging at higher verbosity.
VLOG(2) << module->ToString() << "\n";
EXPECT_EQ(CountCopies(*module), 2);
}
// With input/output aliasing configured (output {1} aliases param 0, output
// {3} aliases param 1), copy insertion should be able to remove every copy.
// The name suggests the pass needs a fixpoint iteration to reach zero copies
// here — NOTE(review): inferred from the test name, confirm against the pass.
TEST_F(CopyInsertionTest, FixpointComputationRequired) {
const std::string& hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[3,3,96,1] parameter(0)
param1 = f32[] parameter(1)
broadcast = f32[3,3,96,1] broadcast(f32[] param1), dimensions={}
ROOT %add.0 = f32[3,3,96,1] add(f32[3,3,96,1] param0, f32[3,3,96,1] broadcast)
}
ENTRY entry_computation {
arg0 = f32[3,3,96,1] parameter(0)
arg1 = f32[] parameter(1)
fusion = f32[3,3,96,1] fusion(f32[3,3,96,1] arg0, f32[] arg1),
kind=kLoop, calls=fused_computation
negate = f32[] negate(f32[] arg1)
ROOT tuple = (f32[3,3,96,1], f32[3,3,96,1], f32[], f32[]) tuple(
f32[3,3,96,1] fusion,
f32[3,3,96,1] arg0,
f32[] negate,
f32[] arg1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
// Alias output tuple element {1} with parameter 0 ...
ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
{1},
0,
{}));
// ... and output tuple element {3} with parameter 1.
ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
{3},
1,
{}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
// The root tuple returns both a bitcast of the parameter and the parameter
// itself, while output {1} is declared to alias parameter 0. Copy insertion
// must break the resulting conflict with exactly one copy.
TEST_F(CopyInsertionTest, NoAliasCheckViolation) {
  constexpr absl::string_view hlo_text = R"(
HloModule cluster
ENTRY Entry {
%arg = f32[8,28,28,1] parameter(0)
%bitcast.2 = f32[8,1,28,28] bitcast(f32[8,28,28,1] %arg)
ROOT %tuple.1 = (f32[8,1,28,28], f32[8,28,28,1]) tuple(f32[8,1,28,28] %bitcast.2, f32[8,28,28,1] %arg)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> parsed,
                          ParseAndReturnVerifiedModule(hlo_text));
  // Declare output tuple element {1} as aliasing parameter 0 at index {}.
  ASSERT_IS_OK(parsed->input_output_alias_config().SetUpAlias({1}, 0, {}));
  InsertCopies(parsed.get());
  EXPECT_EQ(1, CountCopies(*parsed));
}
// A dynamic-update-slice whose operand (the negate) has no other use can be
// updated in place, so copy insertion should add no copies at all.
TEST_F(CopyInsertionTest, DynamicUpdateSliceNoCopy) {
  constexpr absl::string_view hlo_text = R"(
HloModule Module
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(negate, broadcast.6, constant.3, constant.3, constant.3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> parsed,
                          ParseAndReturnVerifiedModule(hlo_text));
  InsertCopies(parsed.get());
  EXPECT_EQ(0, CountCopies(*parsed));
}
// Same in-place dynamic-update-slice pattern as above, but wrapped in a kLoop
// fusion; still expects zero copies since the fused operand has no other use.
TEST_F(CopyInsertionTest, FusedDynamicUpdateSliceNoCopy) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
ROOT fusion = f32[1280,1,128] fusion(negate), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
// Here the dynamic-update-slice operand (negate) is also consumed by an add,
// so updating in place would clobber a live value: exactly one copy expected.
TEST_F(CopyInsertionTest, DynamicUpdateSliceCopy) {
absl::string_view hlo_string = R"(
HloModule Module
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
add = f32[1280,1,128] add(negate, negate)
dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(negate, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple = (f32[1280,1,128], f32[1280,1,128]) tuple(add, dynamic-update-slice.5)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
// A dynamic-update-slice directly over an entry parameter: the parameter
// cannot be mutated in place, so exactly one copy must be inserted.
TEST_F(CopyInsertionTest, DynamicUpdateSliceParameterShareCopy) {
  constexpr absl::string_view hlo_text = R"(
HloModule Module
ENTRY main {
param = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param, broadcast.6, constant.3, constant.3, constant.3)
}
)";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> parsed,
                          ParseAndReturnVerifiedModule(hlo_text));
  InsertCopies(parsed.get());
  EXPECT_EQ(1, CountCopies(*parsed));
}
// Fused dynamic-update-slice whose operand (negate) is also returned in the
// root tuple; the live use forces exactly one copy.
TEST_F(CopyInsertionTest, FusedDynamicUpdateSliceCopy) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
add = f32[1280,1,128] add(negate, negate)
fusion = f32[1280,1,128] fusion(negate), kind=kLoop, calls=fused_computation
ROOT tuple = (f32[1280,1,128], f32[1280,1,128]) tuple(negate, fusion)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
// Two chained dynamic-update-slices over a value extracted from the tuple
// parameter. Only the head of the chain needs protection: one copy total.
TEST_F(CopyInsertionTest, ChainDynamicUpdateSliceCopy) {
absl::string_view hlo_string = R"(
HloModule Module
ENTRY main {
state = (s32[], f32[1280,1,128]{2,1,0}) parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128]{2,1,0} broadcast(constant.1), dimensions={}
get-tuple-element.4 = f32[1280,1,128]{2,1,0} get-tuple-element(state), index=1
get-tuple-element.3 = s32[] get-tuple-element(state), index=0
constant.2 = s32[] constant(128)
add.5 = s32[] add(get-tuple-element.3, constant.2)
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[1280,1,128]{2,1,0} dynamic-update-slice(get-tuple-element.4, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.9 = f32[1280,1,128]{2,1,0} dynamic-update-slice(dynamic-update-slice.5, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.85 = (s32[], f32[1280,1,128]{2,1,0}) tuple(add.5, dynamic-update-slice.9)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
// Two fusions both updating buffers derived from the same negate: fusion2
// reads negate (via a slice) while consuming fusion1's in-place result, so
// exactly one copy is required to keep negate live.
TEST_F(CopyInsertionTest, FusedDynamicUpdateSliceCopy2) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation.1 {
param0 = f32[1280,1,128] parameter(0)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, broadcast.6, constant.3, constant.3, constant.3)
}
fused_computation.2 {
param0 = f32[1280,1,128] parameter(0)
param1 = f32[1280,1,128] parameter(1)
slice = f32[128,1,128] slice(param1), slice={[0:128], [0:1], [0:128]}
constant.3 = s32[] constant(0)
ROOT dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param0, slice, constant.3, constant.3, constant.3)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate = f32[1280,1,128] negate(param)
add = f32[1280,1,128] add(negate, negate)
fusion1 = f32[1280,1,128] fusion(negate), kind=kLoop, calls=fused_computation.1
ROOT fusion2 = f32[1280,1,128] fusion(fusion1, negate), kind=kLoop, calls=fused_computation.2
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
// Multi-output fusion with two in-place dynamic-update-slices. Both updated
// operands (negate1, negate2) are still read after the fusion via add1/add2,
// so two copies are expected.
TEST_F(CopyInsertionTest, MultiOutputFusedDynamicUpdateSliceCopy) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
param1 = f32[1280,1,128] parameter(1)
param2 = f32[1280,1,128] parameter(2)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
add.1 = f32[1280,1,128] add(param0, param0)
dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param1, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(param2, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.1 = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add.1, dynamic-update-slice.5, dynamic-update-slice.6)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate0 = f32[1280,1,128] negate(param)
negate1 = f32[1280,1,128] negate(param)
negate2 = f32[1280,1,128] negate(param)
fusion = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) fusion(negate0, negate1, negate2), kind=kLoop, calls=fused_computation
gte0 = f32[1280,1,128] get-tuple-element(fusion), index=0
gte1 = f32[1280,1,128] get-tuple-element(fusion), index=1
gte2 = f32[1280,1,128] get-tuple-element(fusion), index=2
add0 = f32[1280,1,128] add(negate0, gte0)
add1 = f32[1280,1,128] add(negate1, gte1)
add2 = f32[1280,1,128] add(negate2, gte2)
ROOT tuple = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 2);
}
// Same multi-output fusion as above, but negate1 is NOT read after the fusion
// (add1 uses gte1 with itself), so only negate2's update needs a copy: one.
TEST_F(CopyInsertionTest, MultiOutputFusedDynamicUpdateSliceNoCopy) {
absl::string_view hlo_string = R"(
HloModule Module
fused_computation {
param0 = f32[1280,1,128] parameter(0)
param1 = f32[1280,1,128] parameter(1)
param2 = f32[1280,1,128] parameter(2)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
add.1 = f32[1280,1,128] add(param0, param0)
dynamic-update-slice.5 = f32[1280,1,128] dynamic-update-slice(param1, broadcast.6, constant.3, constant.3, constant.3)
dynamic-update-slice.6 = f32[1280,1,128] dynamic-update-slice(param2, broadcast.6, constant.3, constant.3, constant.3)
ROOT tuple.1 = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add.1, dynamic-update-slice.5, dynamic-update-slice.6)
}
ENTRY main {
param = f32[1280,1,128] parameter(0)
negate0 = f32[1280,1,128] negate(param)
negate1 = f32[1280,1,128] negate(param)
negate2 = f32[1280,1,128] negate(param)
fusion = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) fusion(negate0, negate1, negate2), kind=kLoop, calls=fused_computation
gte0 = f32[1280,1,128] get-tuple-element(fusion), index=0
gte1 = f32[1280,1,128] get-tuple-element(fusion), index=1
gte2 = f32[1280,1,128] get-tuple-element(fusion), index=2
add0 = f32[1280,1,128] add(negate0, gte0)
add1 = f32[1280,1,128] add(gte1, gte1)
add2 = f32[1280,1,128] add(negate2, gte2)
ROOT tuple = (f32[1280,1,128], f32[1280,1,128], f32[1280,1,128]) tuple(add0, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
}
// A scatter whose destination operand (iota.2) is also its updates operand.
// The in-place destination must be copied so the shared read stays valid:
// exactly one copy, on the first scatter operand.
TEST_F(CopyInsertionTest, ScatterSharedOperand) {
absl::string_view hlo_string = R"(
HloModule Module
update_s32 {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
fused_computation {
iota.1 = s32[73729]{0} iota(), iota_dimension=0
ROOT indices.1 = s32[73729]{0} reverse(iota.1), dimensions={0}
}
ENTRY main {
iota.2 = s32[73729]{0} iota(), iota_dimension=0
fusion = s32[73729]{0} fusion(), kind=kLoop, calls=fused_computation
ROOT scatter = s32[73729]{0} scatter(iota.2, fusion, iota.2), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=update_s32
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Scatter(op::Copy(op::Iota()), op::Fusion(), op::Iota()));
}
// A horizontally-fused computation whose outputs are bitcast back to the
// input shapes, with input/output aliasing: output {0} aliases param 0 and
// output {1} aliases param 3. With the aliasing honored, no copies remain.
TEST_F(CopyInsertionTest, HorizontalLoopFusionNoCopy) {
const std::string& hlo_string = R"(
HloModule test
fused_computation {
p0 = f32[10,20] parameter(0)
p1 = f32[10,20] parameter(1)
p2 = f32[10,10] parameter(2)
p3 = f32[10,10] parameter(3)
add0 = f32[10, 20] add(p0, p1)
sub0 = f32[10, 10] subtract(p2, p3)
reshape0 = f32[200] reshape(add0)
reshape1 = f32[100] reshape(sub0)
concat0 = f32[300] concatenate(reshape0, reshape1), dimensions={0}
slice0 = f32[200] slice(concat0), slice={[0:200]}
slice1 = f32[100] slice(concat0), slice={[200:300]}
ROOT tuple = (f32[200], f32[100]) tuple(slice0, slice1)
}
ENTRY test {
p0 = f32[10,20] parameter(0)
p1 = f32[10,20] parameter(1)
p2 = f32[10,10] parameter(2)
p3 = f32[10,10] parameter(3)
fusion = (f32[200], f32[100]) fusion(p0, p1, p2, p3), kind=kInput, calls=fused_computation
gte0 = f32[200] get-tuple-element(fusion), index=0
gte1 = f32[100] get-tuple-element(fusion), index=1
bitcast0 = f32[10,20] bitcast(gte0)
bitcast1 = f32[10,10] bitcast(gte1)
ROOT tuple = (f32[10,20], f32[10,10]) tuple(bitcast0, bitcast1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
// Output {0} aliases parameter 0 ...
ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
{0},
0,
{}));
// ... and output {1} aliases parameter 3.
ASSERT_IS_OK(module->input_output_alias_config().SetUpAlias(
{1},
3,
{}));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 0);
}
// Deeper nesting: a conditional inside a while body, where one branch itself
// contains another conditional. Expects four copies in total.
TEST_F(CopyInsertionTest, NestedWhileAndConditional3) {
const std::string& hlo_string = R"(
HloModule TestModule
on_true.1
{
ROOT v1 = f32[2] parameter(0)
}
on_false.1
{
v1 = f32[2] parameter(0)
ROOT v2 = f32[2] multiply(v1,v1)
}
on_true
{
v1 = f32[2] parameter(0)
v2 = f32[2] add(v1,v1)
v3 = (f32[2],f32[2]) tuple(v1,v2)
v4 = f32[2] get-tuple-element(v3), index=1
v5 = f32[2] multiply(v4,v2)
ROOT t1 = (f32[2], f32[2]) tuple(v5,v2)
}
on_false
{
v1 = f32[2] parameter(0)
v2 = f32[2] multiply(v1,v1)
pred.1 = pred[] constant(true)
v4 = f32[2] conditional(pred.1, v1, v2), true_computation=on_true.1, false_computation=on_false.1
v5 = f32[2] multiply(v4,v2)
ROOT t2 = (f32[2], f32[2]) tuple(v2,v5)
}
cond.outer {
param.1 = (pred[], f32[2], f32[2]) parameter(0)
ROOT param.cond.outer = pred[] get-tuple-element(param.1), index=0
}
body.outer {
param.1 = (pred[], f32[2], f32[2]) parameter(0)
pred.1 = pred[] get-tuple-element(param.1), index=0
arg_tuple.11 = f32[2] get-tuple-element(param.1), index=1
if = (f32[2], f32[2]) conditional(pred.1, arg_tuple.11, arg_tuple.11), true_computation=on_true, false_computation=on_false
e1 = f32[2] get-tuple-element(if), index=0
e2 = f32[2] get-tuple-element(if), index=1
ROOT res = (pred[], f32[2], f32[2]) tuple(pred.1,e1, e2)
}
ENTRY TestComputation {
entry_param.1 = pred[] parameter(0)
float_param = f32[2] parameter(1)
entry_param = (pred[], f32[2], f32[2]) tuple(entry_param.1, float_param, float_param)
ROOT while = (pred[], f32[2], f32[2]) while(entry_param), condition=cond.outer, body=body.outer
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 4);
}
// The conditional's result aliases a branch parameter while the entry also
// returns parameter.2 directly; branch 1 must keep a copy of its parameter
// element. The pass is run twice (InsertCopies, then an explicit
// CopyInsertion) — presumably to exercise re-running on an already processed
// module; TODO(review): confirm the double run is intentional.
TEST_F(CopyInsertionTest, ConditionalBranchMustCopy1) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0_comp.5.clone {
%parameter.0 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.0), index=0
%negate = s32[2]{0:T(128)} negate(s32[2]{0:T(128)} %get-tuple-element)
%copy = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %negate)
ROOT tuple.5 = (s32[2]{0:T(128)}) tuple(%copy)
}
branch_1_comp.12.clone {
%parameter.4 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element.5 = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.4), index=0
%copy.1 = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %get-tuple-element.5)
ROOT tuple.6 = (s32[2]{0:T(128)}) tuple(%copy.1)
}
ENTRY TestComputation {
%parameter.1 = s32[]{:T(128)} parameter(0), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.2 = s32[2]{0:T(128)} parameter(1), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.3 = s32[2]{0:T(128)} parameter(2), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.1 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.3)
%tuple.3 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.2)
%conditional.18 = (s32[2]{0:T(128)}) conditional(s32[]{:T(128)} %parameter.1, (s32[2]{0:T(128)}) %tuple.1, (s32[2]{0:T(128)}) %tuple.3), branch_computations={%branch_0_comp.5.clone, %branch_1_comp.12.clone}, metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%gte.1 = s32[2]{0:T(128)} get-tuple-element(conditional.18), index=0
ROOT tuple.4 = (s32[2]{0:T(128)},s32[2]{0:T(128)}) tuple(parameter.2, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
// Branch 1's root tuple must still wrap its operand in a copy.
auto conditional18 = FindInstruction(module.get(), "conditional.18");
CHECK_NE(conditional18, nullptr);
auto tuple6 = conditional18->branch_computation(1)->root_instruction();
CHECK_EQ(tuple6->opcode(), HloOpcode::kTuple);
auto copy1 = tuple6->operand(0);
CHECK_EQ(copy1->opcode(), HloOpcode::kCopy);
}
// Like ConditionalBranchMustCopy1, but branch 1 also performs an in-place
// dynamic-update-slice whose operand is later re-read by an add; the copy
// feeding the update must survive copy insertion.
TEST_F(CopyInsertionTest, ConditionalBranchMustCopy2) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0_comp.5.clone {
%parameter.0 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.0), index=0
%negate = s32[2]{0:T(128)} negate(s32[2]{0:T(128)} %get-tuple-element)
%copy = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %negate)
ROOT tuple.5 = (s32[2]{0:T(128)}) tuple(%copy)
}
branch_1_comp.12.clone {
%parameter.4 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element.5 = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.4), index=0
%copy.1 = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %get-tuple-element.5)
%constant.1 = s32[] constant(0)
%broadcast.6 = s32[2] broadcast(constant.1), dimensions={}
dynamic-update-slice.5 = s32[2]{0:T(128)} dynamic-update-slice(%copy.1, %broadcast.6, %constant.1)
%add.1 = s32[2]{0:T(128)} add(dynamic-update-slice.5, %copy.1)
ROOT tuple.6 = (s32[2]{0:T(128)}) tuple(%add.1)
}
ENTRY TestComputation {
%parameter.1 = s32[]{:T(128)} parameter(0), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.2 = s32[2]{0:T(128)} parameter(1), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.3 = s32[2]{0:T(128)} parameter(2), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.1 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.3)
%tuple.3 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.2)
%conditional.18 = (s32[2]{0:T(128)}) conditional(s32[]{:T(128)} %parameter.1, (s32[2]{0:T(128)}) %tuple.1, (s32[2]{0:T(128)}) %tuple.3), branch_computations={%branch_0_comp.5.clone, %branch_1_comp.12.clone}
%gte.1 = s32[2]{0:T(128)} get-tuple-element(conditional.18), index=0
ROOT tuple.4 = (s32[2]{0:T(128)},s32[2]{0:T(128)}) tuple(parameter.2, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status())
;
auto conditional18 = FindInstruction(module.get(), "conditional.18")
;
CHECK_NE(conditional18, nullptr);
// Walk branch 1's root: tuple(add(dus(copy, ...), ...)) — the copy feeding
// the dynamic-update-slice must still be present.
auto tuple6 = conditional18->branch_computation(1)->root_instruction();
CHECK_EQ(tuple6->opcode(), HloOpcode::kTuple);
auto add1 = tuple6->operand(0);
CHECK_EQ(add1->opcode(), HloOpcode::kAdd);
auto dus = add1->operand(0);
auto copy1 = dus->operand(0);
CHECK_EQ(copy1->opcode(), HloOpcode::kCopy);
}
// Same pattern as ConditionalBranchMustCopy1 but the conditional itself is
// the entry ROOT; branch 1 must still keep its copy. Runs the pass twice
// (InsertCopies then CopyInsertion) like MustCopy1 — presumably exercising a
// second run on an already processed module; TODO(review): confirm.
TEST_F(CopyInsertionTest, ConditionalBranchMustCopy3) {
const std::string& hlo_string = R"(
HloModule primitive_computation_cond.19
%branch_0_comp.5.clone (parameter.0: (s32[2])) -> (s32[2]) {
%parameter.0 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.0), index=0
%negate = s32[2]{0:T(128)} negate(s32[2]{0:T(128)} %get-tuple-element)
%copy = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %negate)
ROOT %tuple.5 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %copy)
}
%branch_1_comp.12.clone (parameter.4: (s32[2])) -> (s32[2]) {
%parameter.4 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element.5 = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.4), index=0
%copy.1 = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %get-tuple-element.5)
ROOT %tuple.6 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %copy.1)
}
ENTRY %primitive_computation_cond.19 (parameter.1: s32[], parameter.2: s32[2], parameter.3: s32[2]) -> (s32[2]) {
%parameter.1 = s32[]{:T(128)} parameter(0), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.3 = s32[2]{0:T(128)} parameter(2), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.1 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.3)
%parameter.2 = s32[2]{0:T(128)} parameter(1), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.3 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.2)
ROOT %conditional.18 = (s32[2]{0:T(128)}) conditional(s32[]{:T(128)} %parameter.1, (s32[2]{0:T(128)}) %tuple.1, (s32[2]{0:T(128)}) %tuple.3), branch_computations={%branch_0_comp.5.clone, %branch_1_comp.12.clone}, metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
// Branch 1's root tuple operand must remain a copy.
auto conditional18 = FindInstruction(module.get(), "conditional.18");
CHECK_NE(conditional18, nullptr);
auto tuple6 = conditional18->branch_computation(1)->root_instruction();
CHECK_EQ(tuple6->opcode(), HloOpcode::kTuple);
auto copy1 = tuple6->operand(0);
CHECK_EQ(copy1->opcode(), HloOpcode::kCopy);
}
// Negative counterpart of MustCopy1: the entry root only uses the
// conditional's output (gte.1 twice), so the branch-1 copy is removable —
// after the pass, branch 1's root is expected to be its bare parameter.
TEST_F(CopyInsertionTest, ConditionalBranchDoNotCopy1) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0_comp.5.clone {
%parameter.0 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.0), index=0
%negate = s32[2]{0:T(128)} negate(s32[2]{0:T(128)} %get-tuple-element)
%copy = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %negate)
ROOT tuple.5 = (s32[2]{0:T(128)}) tuple(%copy)
}
branch_1_comp.12.clone {
%parameter.4 = (s32[2]{0:T(128)}) parameter(0)
%get-tuple-element.5 = s32[2]{0:T(128)} get-tuple-element((s32[2]{0:T(128)}) %parameter.4), index=0
%copy.1 = s32[2]{0:T(128)} copy(s32[2]{0:T(128)} %get-tuple-element.5)
ROOT tuple.6 = (s32[2]{0:T(128)}) tuple(%copy.1)
}
ENTRY TestComputation {
%parameter.1 = s32[]{:T(128)} parameter(0), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.2 = s32[2]{0:T(128)} parameter(1), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%parameter.3 = s32[2]{0:T(128)} parameter(2), metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%tuple.1 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.3)
%tuple.3 = (s32[2]{0:T(128)}) tuple(s32[2]{0:T(128)} %parameter.2)
%conditional.18 = (s32[2]{0:T(128)}) conditional(s32[]{:T(128)} %parameter.1, (s32[2]{0:T(128)}) %tuple.1, (s32[2]{0:T(128)}) %tuple.3), branch_computations={%branch_0_comp.5.clone, %branch_1_comp.12.clone}, metadata={op_type="cond" op_name="cond[ linear=(False, False) ]"}
%gte.1 = s32[2]{0:T(128)} get-tuple-element(conditional.18), index=0
ROOT tuple.4 = (s32[2]{0:T(128)},s32[2]{0:T(128)}) tuple(gte.1, gte.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString() << "\n";
// The copy in branch 1 should have been elided: root is the parameter itself.
auto conditional18 = FindInstruction(module.get(), "conditional.18");
CHECK_NE(conditional18, nullptr);
auto tuple6 = conditional18->branch_computation(1)->root_instruction();
CHECK_EQ(tuple6->opcode(), HloOpcode::kParameter);
}
// A conditional where branch 0 returns the same negate twice (needs one copy
// to disambiguate the aliased tuple elements) while branch 1's multi-output
// fusion already produces distinct buffers (needs none).
TEST_F(CopyInsertionTest, ConditionalWithMultiOutputFusion) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0 {
param_0 = f64[] parameter(0)
negate.2 = f64[] negate(f64[] param_0)
ROOT tuple = (f64[], f64[]) tuple(f64[] negate.2, f64[] negate.2)
}
fused_computation {
param_0.1 = f64[] parameter(0)
abs.2 = f64[] abs(f64[] param_0.1)
negate.1 = f64[] negate(f64[] param_0.1)
ROOT %tuple.2 = (f64[], f64[]) tuple(f64[] negate.1, f64[] abs.2)
}
branch_1 {
param_0.2 = f64[] parameter(0)
ROOT fusion = (f64[], f64[]) fusion(f64[] param_0.2), kind=kLoop, calls=%fused_computation
}
ENTRY main {
pred.0 = s32[] parameter(0)
param_1 = f64[] parameter(1)
param_2 = f64[] parameter(2)
ROOT conditional.0 = (f64[], f64[]) conditional(s32[] pred.0, f64[] param_1, f64[] param_2), branch_computations={%branch_0, %branch_1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
EXPECT_EQ(CountCopies(*module->GetComputationWithName("branch_0")), 1);
EXPECT_EQ(CountCopies(*module->GetComputationWithName("branch_1")), 0);
EXPECT_EQ(CountCopies(*module->GetComputationWithName("main")), 0);
}
// Conditional whose branch 0 returns the same constant in both tuple slots
// (two copies needed to separate them) while branch 1's variadic reduce
// already yields distinct outputs (no copies).
TEST_F(CopyInsertionTest, ConditionalWithVariadicReduce) {
const std::string& hlo_string = R"(
HloModule TestModule
branch_0 {
empty_tuple.0 = () parameter(0)
c_0 = f64[] constant(0)
ROOT tuple.3 = (f64[], f64[]) tuple(c_0, c_0)
}
fused_computation {
param_0.1 = f64[] parameter(0)
abs.2 = f64[] abs(f64[] param_0.1)
negate.1 = f64[] negate(f64[] param_0.1)
ROOT %tuple.2 = (f64[], f64[]) tuple(f64[] negate.1, f64[] abs.2)
}
reduce_region {
param_0.0 = f64[] parameter(0)
param_2.0 = f64[] parameter(2)
add.1.0 = f64[] add(param_0.0, param_2.0)
param_1.0 = f64[] parameter(1)
param_3.0 = f64[] parameter(3)
multiply.1.0 = f64[] multiply(param_1.0, param_3.0)
ROOT tuple.0.0 = (f64[], f64[]) tuple(add.1.0, multiply.1.0)
}
branch_1 {
c_0 = f64[] constant(0)
param_0.1 = f64[128]{0} parameter(0)
ROOT reduce = (f64[], f64[]) reduce(param_0.1, param_0.1, c_0, c_0), dimensions={0}, to_apply=reduce_region
}
ENTRY main {
pred.0 = s32[] parameter(0)
empty_tuple = () tuple()
param_2 = f64[128] parameter(1), sharding={replicated}
ROOT conditional.0 = (f64[], f64[]) conditional(s32[] pred.0, () empty_tuple, f64[128] param_2), branch_computations={%branch_0, %branch_1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
EXPECT_EQ(CountCopies(*module->GetComputationWithName("branch_0")), 2);
EXPECT_EQ(CountCopies(*module->GetComputationWithName("branch_1")), 0);
EXPECT_EQ(CountCopies(*module->GetComputationWithName("main")), 0);
}
TEST_F(CopyInsertionTest, RootInstructionNotLast) {
const std::string& hlo_string = R"(
HloModule module, is_scheduled=true
body2 {
p_body2 = (f32[2]{0}) parameter(0)
p_body2.1 = f32[2]{0} get-tuple-element(p_body2), index=0
add.3 = f32[2]{0} add(p_body2.1, p_body2.1)
ROOT root2 = (f32[2]{0}) tuple(add.3)
}
condition2 {
p_cond2 = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
body {
p_body = (f32[2]{0}) parameter(0)
p_body.1 = f32[2]{0} get-tuple-element(p_body), index=0
ROOT root = (f32[2]{0}) tuple(p_body.1)
copy = f32[2]{0} copy(p_body.1)
tuple = (f32[2]{0}) tuple(copy)
while.1 = (f32[2]{0}) while(tuple), condition=condition2, body=body2
}
condition {
p_cond = (f32[2]{0}) parameter(0)
ROOT result = pred[] constant(true)
}
ENTRY entry {
const0 = f32[2]{0} constant({1, 2})
while_init = (f32[2]{0}) tuple(const0)
ROOT while.0 = (f32[2]{0}) while(while_init), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.RemoveUnnecessaryCopies(module.get()));
auto while_1 = FindInstruction(module.get(), "while.1");
EXPECT_THAT(while_1, op::While(op::Tuple(op::Copy())));
}
TEST_F(CopyInsertionTest, InPlaceCollectivePermuteCopy) {
absl::string_view hlo_string = R"(
HloModule hlo_runner_test_0.1
ENTRY hlo_runner_test_0.1 {
replica_id = u32[] replica-id()
broadcast.0 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] replica_id), dimensions={}
constant.1 = u32[] constant(1000)
broadcast.1 = u32[2,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
broadcast.2 = u32[4,8,128]{2,1,0:T(2,128)} broadcast(u32[] constant.1), dimensions={}
constant.2 = s32[] constant(0)
constant.3 = s32[] constant(1)
tuple.input = (u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.0, u32[2,8,128]{2,1,0:T(2,128)} broadcast.0)
tuple.output = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple(u32[2,8,128]{2,1,0:T(2,128)} broadcast.1, u32[4,8,128]{2,1,0:T(2,128)} broadcast.2)
tuple.2 = (s32[],s32[],s32[]) tuple(constant.2, constant.2, constant.2)
tuple.3 = (s32[],s32[],s32[]) tuple(constant.3, constant.2, constant.2)
tuple.4 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.3)
constant.4 = s32[] constant(2)
tuple.5 = (s32[],s32[],s32[]) tuple(constant.4, constant.2, constant.2)
tuple.6 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.5)
tuple.7 = ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple((s32[],s32[],s32[]) tuple.2, (s32[],s32[],s32[]) tuple.2)
tuple.8 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.7)
tuple.9 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.6)
tuple.10 = (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple(((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.4, ((s32[],s32[],s32[]), (s32[],s32[],s32[])) tuple.7)
collective-permute.0 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute((u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple.input, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.8, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.9), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
collective-permute.1 = (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) collective-permute((u32[2,8,128]{2,1,0:T(2,128)}, u32[2,8,128]{2,1,0:T(2,128)}) tuple.input, (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}) tuple.output, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.8, (((s32[],s32[],s32[]), (s32[],s32[],s32[])), ((s32[],s32[],s32[]), (s32[],s32[],s32[]))) tuple.10), source_target_pairs={{0,1},{1,2},{2,3},{3,0},{0,3},{3,2},{2,1},{1,0}}, slice_sizes={{1,8,128},{1,8,128},{2,8,128},{2,8,128}}
ROOT tuple = ((u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)}), (u32[2,8,128]{2,1,0:T(2,128)}, u32[4,8,128]{2,1,0:T(2,128)})) tuple(collective-permute.0, collective-permute.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
InsertCopies(module.get());
EXPECT_EQ(CountCopies(*module), 4);
}
TEST_F(CopyInsertionTest, KeepCopyOfBroadcast) {
absl::string_view hlo_string = R"(
HloModule Module
ENTRY main {
param = f32[128,1,128] parameter(0)
negate = f32[128,1,128] negate(param)
constant.1 = f32[] constant(0)
broadcast.6 = f32[128,1,128] broadcast(constant.1), dimensions={}
broadcast.7 = f32[128,1,128] broadcast(constant.1), dimensions={}
constant.3 = s32[] constant(0)
dynamic-update-slice.5 = f32[128,1,128] dynamic-update-slice(broadcast.6, broadcast.7, constant.3, constant.3, constant.3)
add1 = f32[128,1,128] add(dynamic-update-slice.5, dynamic-update-slice.5)
dynamic-update-slice.4 = f32[128,1,128] dynamic-update-slice(broadcast.6, broadcast.7, constant.3, constant.3, constant.3)
add2 = f32[128,1,128] add(dynamic-update-slice.4, dynamic-update-slice.4)
tuple = (f32[128,1,128], f32[128,1,128]) tuple(add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
EXPECT_EQ(CountCopies(*module), 2);
}
TEST_F(CopyInsertionTest, CustomCallAliasingCopyInsertedAliasedParam) {
const char* const kModuleString = R"(
HloModule xla_computation_f
ENTRY xla_computation_f {
parameter.1 = f32[2,3,4,5] parameter(0)
parameter.2 = f32[2,3,4,5] parameter(1)
ROOT custom-call = f32[2,3,4,5] custom-call(parameter.1, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
InsertCopies(module.get());
HloInstruction* custom_call = module->entry_computation()->root_instruction();
EXPECT_THAT(custom_call->operand(0), op::Copy(op::Parameter(0)));
}
TEST_F(CopyInsertionTest, CustomCallAliasingCopyInsertedAliasedReuse) {
const char* const kModuleString = R"(
HloModule xla_computation_f
ENTRY xla_computation_f {
parameter.1 = f32[2,3,4,5] parameter(0)
parameter.2 = f32[2,3,4,5] parameter(1)
add.1 = f32[2,3,4,5] add(parameter.1, parameter.2)
custom-call = f32[2,3,4,5] custom-call(add.1, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}
ROOT add.2 = f32[2,3,4,5] add(custom-call, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
InsertCopies(module.get());
HloInstruction* custom_call = FindInstruction(module.get(), "custom-call");
CHECK_NE(custom_call, nullptr);
EXPECT_THAT(custom_call->operand(0), op::Copy(op::Add()));
}
TEST_F(CopyInsertionTest, CustomCallAliasingCopyRemoved) {
const char* const kModuleString = R"(
HloModule xla_computation_f__1
ENTRY xla_computation_f {
parameter.1 = f32[2,3,4,5] parameter(0)
parameter.2 = f32[2,3,4,5] parameter(1)
add = f32[2,3,4,5] add(parameter.1, parameter.2)
ROOT custom-call = f32[2,3,4,5] custom-call(add, parameter.2), custom_call_target="dm_softmax", operand_layout_constraints={f32[2,3,4,5], f32[2,3,4,5]}, output_to_operand_aliasing={{}: (0, {})}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
InsertCopies(module.get());
HloInstruction* custom_call = module->entry_computation()->root_instruction();
EXPECT_THAT(custom_call->operand(0), op::Add());
}
TEST_F(CopyInsertionTest, ReverseInConditional) {
const char* const kModuleString = R"(
HloModule jit_f.0
%region_0.4 (Arg_.5: u8[300,451,3]) -> (u8[300,451,3]) {
%Arg_.5 = u8[300,451,3]{1,0,2:T(8,128)(4,1)} parameter(0)
ROOT %tuple = (u8[300,451,3]{1,0,2:T(8,128)(4,1)}) tuple(u8[300,451,3]{1,0,2:T(8,128)(4,1)} %Arg_.5)
}
%region_1.9 (Arg_.10: u8[300,451,3]) -> (u8[300,451,3]) {
%Arg_.10 = u8[300,451,3]{1,0,2:T(8,128)(4,1)} parameter(0)
%reverse = u8[300,451,3]{1,0,2:T(8,128)(4,1)} reverse(u8[300,451,3]{1,0,2:T(8,128)(4,1)} %Arg_.10), dimensions={0}
ROOT %tuple.1 = (u8[300,451,3]{1,0,2:T(8,128)(4,1)}) tuple(u8[300,451,3]{1,0,2:T(8,128)(4,1)} %reverse)
}
ENTRY %main.13 (Arg_0.1: pred[], Arg_1.2: u8[300,451,3]) -> u8[300,451,3] {
%Arg_0.1 = pred[]{:T(1024)} parameter(0)
%convert.3 = s32[]{:T(256)} convert(pred[]{:T(1024)} %Arg_0.1)
%Arg_1.2 = u8[300,451,3]{1,0,2:T(8,128)(4,1)} parameter(1)
%conditional.12.clone = (u8[300,451,3]{1,0,2:T(8,128)(4,1)}) conditional(s32[]{:T(256)} %convert.3, u8[300,451,3]{1,0,2:T(8,128)(4,1)} %Arg_1.2, u8[300,451,3]{1,0,2:T(8,128)(4,1)} %Arg_1.2), branch_computations={%region_0.4, %region_1.9}
ROOT %get-tuple-element = u8[300,451,3]{1,0,2:T(8,128)(4,1)} get-tuple-element((u8[300,451,3]{1,0,2:T(8,128)(4,1)}) %conditional.12.clone), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(2) << module->ToString();
HloInstruction* reverse = FindInstruction(module.get(), "reverse");
EXPECT_THAT(reverse->operand(0), op::Copy());
}
TEST_F(CopyInsertionTest, InputOutputAliasCopy) {
const char* const kModuleString = R"(
HloModule main_tf2xla.11, input_output_alias={ {0}: (0, {1}, may-alias) }
ENTRY %main_tf2xla.11 (arg_tuple.1: (f32[], f32[])) -> (f32[], f32[]) {
ROOT %arg_tuple.1 = (f32[]{:T(256)}, f32[]{:T(256)}) parameter(0), parameter_replication={false,false}, sharding={{replicated}, {replicated}}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(2) << module->ToString();
}
TEST_F(CopyInsertionTest, AddControlDependencyForInputOutputAlias) {
const char* const kModuleString = R"(
HloModule test, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias) }
ENTRY test {
x = f32[3] parameter(0)
y = f32[3] parameter(1)
add = f32[3] add(x, y)
mul = f32[3] multiply(x, y)
ROOT result = (f32[3], f32[3]) tuple(add, mul)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
EXPECT_EQ(CountCopies(*module), 1);
EXPECT_EQ(CountControlEdges(*module), 2);
HloInstruction* add_instr = FindInstruction(module.get(), HloOpcode::kAdd);
HloInstruction* mul_instr =
FindInstruction(module.get(), HloOpcode::kMultiply);
HloInstruction* copy_instr = FindInstruction(module.get(), HloOpcode::kCopy);
EXPECT_TRUE(add_instr->control_predecessors()[0] == mul_instr);
EXPECT_TRUE(copy_instr->control_predecessors()[0] == add_instr);
}
TEST_F(CopyInsertionTest, AsyncCallDUSNoCopy) {
const char* const kModuleString = R"(
HloModule async_call
%called_computation {
%out_param = s32[1024]{0} parameter(1)
%input = s32[1024]{0} parameter(0)
%size = s32[] constant(256)
%index = s32[] custom-call(), custom_call_target="Baz"
%start = s32[] multiply(s32[] %size, s32[] %index)
%input2 = s32[256]{0} dynamic-slice(s32[1024]{0} %input, s32[] %start), dynamic_slice_sizes={256}
%output = s32[256]{0} add(s32[256]{0} %input2, s32[256]{0} %input2)
ROOT %output2 = s32[1024]{0} dynamic-update-slice(s32[1024]{0} %out_param, s32[256]{0} %output, s32[] %start)
}, execution_thread="foobar"
%async_wrapped {
%async_param = s32[1024]{0} parameter(0)
%async_param.1 = s32[1024]{0} parameter(1)
ROOT %call = s32[1024]{0} call(s32[1024]{0} %async_param, s32[1024]{0} %async_param.1), to_apply=%called_computation
}, execution_thread="foobar"
ENTRY %main {
%input.1 = s32[1024]{0} parameter(0)
%buf = s32[1024]{0} custom-call(), custom_call_target="AllocateBuffer"
%async-start = ((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) async-start(s32[1024]{0} %input.1, s32[1024]{0} %buf), async_execution_thread="foobar", calls=%async_wrapped
ROOT %async-done = s32[1024]{0} async-done(((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) %async-start), async_execution_thread="foobar", calls=%async_wrapped
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get(), {"foobar"}).status());
VLOG(2) << module->ToString();
EXPECT_EQ(CountCopies(*module), 0);
}
TEST_F(CopyInsertionTest, AsyncCallDUSCopy) {
const char* const kModuleString = R"(
HloModule async_call
%called_computation {
%out_param = s32[1024]{0} parameter(1)
%input = s32[1024]{0} parameter(0)
%size = s32[] constant(256)
%index = s32[] custom-call(), custom_call_target="Baz"
%start = s32[] multiply(s32[] %size, s32[] %index)
%input2 = s32[256]{0} dynamic-slice(s32[1024]{0} %input, s32[] %start), dynamic_slice_sizes={256}
%output = s32[256]{0} add(s32[256]{0} %input2, s32[256]{0} %input2)
ROOT %output2 = s32[1024]{0} dynamic-update-slice(s32[1024]{0} %out_param, s32[256]{0} %output, s32[] %start)
}, execution_thread="foobar"
%async_wrapped {
%async_param = s32[1024]{0} parameter(0)
%async_param.1 = s32[1024]{0} parameter(1)
ROOT %call = s32[1024]{0} call(s32[1024]{0} %async_param, s32[1024]{0} %async_param.1), to_apply=%called_computation
}, execution_thread="foobar"
ENTRY %main {
%input.1 = s32[1024]{0} parameter(0)
%input.2 = s32[1024]{0} parameter(1)
%async-start = ((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) async-start(s32[1024]{0} %input.1, s32[1024]{0} %input.2), async_execution_thread="foobar", calls=%async_wrapped
ROOT %async-done = s32[1024]{0} async-done(((s32[1024]{0}, s32[1024]{0}), s32[1024]{0}, u32[]) %async-start), async_execution_thread="foobar", calls=%async_wrapped
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get(), {"foobar"}).status());
VLOG(2) << module->ToString();
EXPECT_EQ(CountCopies(*module), 1);
}
TEST_F(CopyInsertionTest,
RegionAnalysisDoesNotAddUnnecessaryCopyOfInputTupleElements) {
const char* const kModuleString = R"(
HloModule while_aliasing, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias) }
add {
param_0 = f32[1,128] parameter(0)
param_1 = f32[1,128] parameter(1)
ROOT add = f32[1,128] add(param_0, param_1)
}
condition {
input_tuple = (f32[1,128], f32[1,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
input_tuple = (f32[1,128], f32[1,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[1,128] get-tuple-element(input_tuple), index=1
cond = pred[] get-tuple-element(input_tuple), index=2
add = f32[1,128] add(param_0, param_1)
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
ROOT output_tuple = (f32[1,128], f32[1,128], pred[]) tuple(add, splat_c0, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[1,128] parameter(1)
param_2 = pred[] parameter(2)
tuple = (f32[1,128], f32[1,128], pred[]) tuple(param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
auto add = FindInstruction(module.get(), "add.1");
EXPECT_NE(add, nullptr);
EXPECT_EQ(add->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_EQ(add->operand(1)->opcode(), HloOpcode::kGetTupleElement);
}
TEST_F(CopyInsertionTest,
RegionAnalysisDoesNotAddCopyForNonUpdateParameterOfDynamicSliceUpdate) {
const char* const kModuleString = R"(
HloModule while_aliasing, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias), {2}: (2, {}, may-alias), {3}: (3, {}, may-alias) }
fused_computation {
param_0 = f32[4,2,128,512]{3,2,1,0} parameter(0)
param_1 = f32[2,128,512]{2,1,0} parameter(1)
bitcast.1 = f32[1,2,128,512]{3,2,1,0} bitcast(param_1)
param_2 = s32[] parameter(2)
constant.1 = s32[] constant(0)
compare.1 = pred[] compare(param_2, constant.1), direction=LT
constant.2 = s32[] constant(4)
add.1 = s32[] add(param_2, constant.2)
select.1 = s32[] select(compare.1, add.1, param_2)
ROOT dynamic-update-slice.73 = f32[4,2,128,512]{3,2,1,0} dynamic-update-slice(param_0, bitcast.1, select.1, constant.1, constant.1, constant.1)
}
condition {
input_tuple = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=3
}
body {
input_tuple = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) parameter(0)
get-tuple-element.0 = s32[] get-tuple-element(input_tuple), index=0
get-tuple-element.1 = f32[4,2,128,512]{3,2,1,0} get-tuple-element(input_tuple), index=2
get-tuple-element.2 = f32[2,128,512]{2,1,0} get-tuple-element(input_tuple), index=1
fusion = f32[4,2,128,512]{3,2,1,0} fusion(get-tuple-element.1, get-tuple-element.2, get-tuple-element.0), kind=kLoop, calls=fused_computation
cond = pred[] get-tuple-element(input_tuple), index=3
c0 = f32[] constant(0)
fusion.1 = f32[2,128,512]{2,1,0} broadcast(c0), dimensions={}
ROOT output_tuple = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) tuple(get-tuple-element.0, fusion.1, fusion, cond)
}
ENTRY main {
param_0 = f32[2,128,512] parameter(0)
param_1 = f32[4,2,128,512] parameter(1)
param_2 = pred[] parameter(2)
param_3 = s32[] parameter(3)
tuple = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) tuple(param_3, param_0, param_1, param_2)
ROOT while = (s32[], f32[2,128,512], f32[4,2,128,512], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
auto fusion = FindInstruction(module.get(), "fusion");
EXPECT_NE(fusion, nullptr);
EXPECT_EQ(fusion->operand(1)->opcode(), HloOpcode::kGetTupleElement);
}
TEST_F(CopyInsertionTest, RegionAnalysisNoCopyOfAddOutputInsideWhileBody) {
const char* const kModuleString = R"(
HloModule while_aliasing
condition {
input_tuple = (f32[1,128], f32[1,128], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=2
}
body {
input_tuple = (f32[1,128], f32[1,128], pred[]) parameter(0)
param_0 = f32[1,128] get-tuple-element(input_tuple), index=0
param_1 = f32[1,128] get-tuple-element(input_tuple), index=1
cond = pred[] get-tuple-element(input_tuple), index=2
c0 = f32[] constant(0)
splat_c0 = f32[1,128] broadcast(c0), dimensions={}
add = f32[1,128] add(splat_c0, param_1)
add_1 = f32[1,128] add(splat_c0, splat_c0)
ROOT output_tuple = (f32[1,128], f32[1,128], pred[]) tuple(add, add_1, cond)
}
ENTRY main {
param_0 = f32[1,128] parameter(0)
param_1 = f32[1,128] parameter(1)
param_2 = pred[] parameter(2)
tuple = (f32[1,128], f32[1,128], pred[]) tuple(param_0, param_1, param_2)
ROOT while = (f32[1,128], f32[1,128], pred[]) while(tuple), condition=condition, body=body
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(3) << module->ToString();
auto root = FindInstruction(module.get(), "tuple.3");
EXPECT_NE(root, nullptr);
EXPECT_EQ(root->operand(0)->opcode(), HloOpcode::kAdd);
EXPECT_EQ(root->operand(1)->opcode(), HloOpcode::kAdd);
EXPECT_EQ(root->operand(2)->opcode(), HloOpcode::kGetTupleElement);
}
TEST_F(CopyInsertionTest, DontInsertCopiesInAsyncComputation) {
constexpr absl::string_view kModuleString = R"(
HloModule test
%async_computation {
%param_0 = f32[10,32,512]{2,1,0:T(8,128)S(5)} parameter(0)
%param_1 = f32[1,32,512]{2,1,0:T(8,128)} parameter(1)
%param_2 = s32[]{:T(128)} parameter(2)
%param_3 = s32[]{:T(128)} parameter(3)
%param_4 = s32[]{:T(128)} parameter(4)
ROOT %dynamic-update-slice.1 = f32[10,32,512]{2,1,0:T(8,128)S(5)}
dynamic-update-slice(%param_0, %param_1, %param_2, %param_3, %param_4)
}
ENTRY %main {
%param.1 = (s32[]{:T(128)}, f32[32,512]{1,0:T(8,128)},
f32[10,32,512]{2,1,0:T(8,128)S(5)}) parameter(0)
%get-tuple-element.132 = f32[10,32,512]{2,1,0:T(8,128)S(5)} get-tuple-element(
%param.1), index=2
%get-tuple-element.131 = f32[32,512]{1,0:T(8,128)} get-tuple-element(
%param.1), index=1
%cosine.0 = f32[32,512]{1,0:T(8,128)} cosine(%get-tuple-element.131)
%reshape.6 = f32[1,32,512]{2,1,0:T(8,128)} reshape(%cosine.0)
%get-tuple-element.130 = s32[]{:T(128)} get-tuple-element(%param.1), index=0
%constant.49 = s32[]{:T(128)} constant(0)
%compare.13 = pred[]{:T(512)} compare(
%get-tuple-element.130, %constant.49), direction=LT
%constant.50 = s32[]{:T(128)} constant(10)
%add.22 = s32[]{:T(128)} add(%get-tuple-element.130, %constant.50)
%select.6 = s32[]{:T(128)} select(
%compare.13, %add.22, %get-tuple-element.130)
%dynamic-update-slice-start = (
(f32[10,32,512]{2,1,0:T(8,128)S(5)}, f32[1,32,512]{2,1,0:T(8,128)},
s32[]{:T(128)}, s32[]{:T(128)}, s32[]{:T(128)}),
f32[10,32,512]{2,1,0:T(8,128)S(5)}, u32[]) async-start(
%get-tuple-element.132, %reshape.6, %select.6,
%constant.49, %constant.49), calls=%async_computation
ROOT %dynamic-update-slice-done = f32[10,32,512]{2,1,0:T(8,128)S(5)}
async-done(%dynamic-update-slice-start), calls=%async_computation
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion;
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
LOG(INFO) << module->ToString();
auto* async_computation = module->GetComputationWithName("async_computation");
ASSERT_THAT(async_computation, NotNull());
EXPECT_EQ(CountCopies(*async_computation), 0);
auto* main_computation = module->GetComputationWithName("main");
ASSERT_THAT(main_computation, NotNull());
EXPECT_EQ(CountCopies(*main_computation), 1);
}
TEST_F(CopyInsertionTest, AsyncDUSInLoop) {
constexpr absl::string_view kModuleString = R"(
HloModule module
async_wrapped {
async_param.1 = s32[1024]{0} parameter(0)
async_param.2 = s32[256]{0} parameter(1)
async_param.3 = s32[] parameter(2)
ROOT dus = s32[1024]{0} dynamic-update-slice(async_param.1, async_param.2, async_param.3)
}
condition {
input_tuple = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) parameter(0)
ROOT cond = pred[] get-tuple-element(input_tuple), index=3
}
body {
input_tuple = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) parameter(0)
input.1 = s32[1024]{0} get-tuple-element(input_tuple), index=0
input.2 = s32[256]{0} get-tuple-element(input_tuple), index=1
input.3 = s32[] get-tuple-element(input_tuple), index=2
input.4 = pred[] get-tuple-element(input_tuple), index=3
async-start = ((s32[1024]{0}, s32[256]{0}, s32[]), s32[1024]{0}, u32[]) async-start(input.1, input.2, input.3), calls=%async_wrapped
async-done = s32[1024]{0} async-done(async-start), calls=async_wrapped
ROOT tuple = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) tuple(async-done, input.2, input.3, input.4)
}
ENTRY main {
input.1 = s32[256]{0} parameter(0)
input.2 = s32[] parameter(1)
input.3 = pred[] parameter(2)
broadcast = s32[1024]{0} broadcast(input.2), dimensions={}
while_tuple = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) tuple(broadcast, input.1, input.2, input.3)
while = (s32[1024]{0}, s32[256]{0}, s32[], pred[]) while(while_tuple), condition=condition, body=body
ROOT gte = s32[1024]{0} get-tuple-element(while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module,
ParseAndReturnVerifiedModule(kModuleString));
CopyInsertion copy_insertion(nullptr,
-1);
ASSERT_IS_OK(copy_insertion.Run(module.get()).status());
VLOG(2) << module->ToString();
EXPECT_EQ(CountCopies(*module), 0);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/copy_insertion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/copy_insertion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
f478e72d-13d7-41bc-b552-c5d252f61f35 | cpp | google/arolla | decision_forest_operator | arolla/decision_forest/expr_operator/decision_forest_operator.cc | arolla/decision_forest/expr_operator/decision_forest_operator_test.cc | #include "arolla/decision_forest/expr_operator/decision_forest_operator.h"
#include <algorithm>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/expr/basic_expr_operator.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/qtype/array_like/array_like_qtype.h"
#include "arolla/qtype/qtype.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/status_macros_backport.h"
namespace arolla {
namespace {
std::vector<int> GetRequiredInputIds(
const absl::flat_hash_map<int, QTypePtr>& required_types) {
std::vector<int> result;
result.reserve(required_types.size());
for (const auto& [id, _] : required_types) {
result.push_back(id);
}
return result;
}
}
DecisionForestOperator::DecisionForestOperator(
DecisionForestPtr forest, std::vector<TreeFilter> tree_filters)
: DecisionForestOperator(GetRequiredInputIds(forest->GetRequiredQTypes()),
forest, std::move(tree_filters)) {}
DecisionForestOperator::DecisionForestOperator(
DecisionForestPtr forest, std::vector<TreeFilter> tree_filters,
const absl::flat_hash_map<int, QTypePtr>& required_types)
: DecisionForestOperator(GetRequiredInputIds(required_types),
std::move(forest), std::move(tree_filters)) {}
DecisionForestOperator::DecisionForestOperator(
std::vector<int> required_input_ids, DecisionForestPtr forest,
std::vector<TreeFilter> tree_filters)
: BasicExprOperator(
"anonymous.decision_forest_operator",
expr::ExprOperatorSignature::MakeVariadicArgs(),
"Evaluates decision forest stored in the operator state.",
FingerprintHasher("::arolla::DecisionForestOperator")
.Combine(forest->fingerprint())
.CombineSpan(tree_filters)
.Finish()),
forest_(std::move(forest)),
tree_filters_(std::move(tree_filters)),
required_input_ids_(std::move(required_input_ids)) {
std::sort(required_input_ids_.begin(), required_input_ids_.end());
}
absl::StatusOr<QTypePtr> DecisionForestOperator::GetOutputQType(
absl::Span<const QTypePtr> input_qtypes) const {
int last_forest_input_id =
required_input_ids_.empty() ? -1 : required_input_ids_.back();
if (last_forest_input_id >= static_cast<int>(input_qtypes.size())) {
return absl::InvalidArgumentError(absl::StrFormat(
"not enough arguments for the decision forest: expected at least %d, "
"got %d",
last_forest_input_id + 1, input_qtypes.size()));
}
bool batched = !input_qtypes.empty() && !required_input_ids_.empty() &&
IsArrayLikeQType(input_qtypes[required_input_ids_[0]]);
for (int id : required_input_ids_) {
if (IsArrayLikeQType(input_qtypes[id]) != batched) {
DCHECK(!required_input_ids_.empty());
return absl::InvalidArgumentError(absl::StrFormat(
"either all forest inputs must be scalars or all forest inputs "
"must be arrays, but arg[%d] is %s and arg[%d] is %s",
required_input_ids_[0], input_qtypes[required_input_ids_[0]]->name(),
id, input_qtypes[id]->name()));
}
}
QTypePtr output_type;
if (batched) {
DCHECK(!required_input_ids_.empty());
ASSIGN_OR_RETURN(const ArrayLikeQType* array_type,
ToArrayLikeQType(input_qtypes[required_input_ids_[0]]));
ASSIGN_OR_RETURN(output_type,
array_type->WithValueQType(GetQType<float>()));
} else {
output_type = GetQType<float>();
}
return MakeTupleQType(
std::vector<QTypePtr>(tree_filters_.size(), output_type));
}
} | #include "arolla/decision_forest/expr_operator/decision_forest_operator.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <utility>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/status_matchers.h"
#include "absl/status/statusor.h"
#include "arolla/decision_forest/decision_forest.h"
#include "arolla/decision_forest/split_conditions/interval_split_condition.h"
#include "arolla/decision_forest/split_conditions/set_of_values_split_condition.h"
#include "arolla/dense_array/qtype/types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/tuple_qtype.h"
namespace arolla {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::absl_testing::StatusIs;
using ::testing::HasSubstr;
constexpr float inf = std::numeric_limits<float>::infinity();
constexpr auto S = DecisionTreeNodeId::SplitNodeId;
constexpr auto A = DecisionTreeNodeId::AdjustmentId;
absl::StatusOr<DecisionForestPtr> CreateForest() {
std::vector<DecisionTree> trees(2);
trees[0].adjustments = {0.5, 1.5, 2.5, 3.5};
trees[0].tag.submodel_id = 0;
trees[0].split_nodes = {
{S(1), S(2), IntervalSplit(0, 1.5, inf)},
{A(0), A(1), SetOfValuesSplit<int64_t>(1, {5}, false)},
{A(2), A(3), IntervalSplit(0, -inf, 10)}};
trees[1].adjustments = {5};
trees[1].tag.submodel_id = 1;
return DecisionForest::FromTrees(std::move(trees));
}
TEST(DecisionForestOperatorTest, GetOutputQType) {
ASSERT_OK_AND_ASSIGN(const DecisionForestPtr forest, CreateForest());
{
auto forest_op = std::make_shared<DecisionForestOperator>(
forest, std::vector<TreeFilter>{});
EXPECT_THAT(forest_op->GetOutputQType({GetQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not enough arguments for the decision "
"forest: expected at least 2, got 1")));
EXPECT_THAT(
forest_op->GetOutputQType(
{GetQType<float>(), GetDenseArrayQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("either all forest inputs must be scalars or all "
"forest inputs must be arrays, but arg[0] is "
"FLOAT32 and arg[1] is DENSE_ARRAY_FLOAT32")));
EXPECT_THAT(
forest_op->GetOutputQType({GetQType<float>(), GetQType<float>()}),
IsOkAndHolds(MakeTupleQType({})));
}
{
auto forest_op = std::make_shared<DecisionForestOperator>(
forest, std::vector<TreeFilter>{TreeFilter{.submodels = {0}},
TreeFilter{.submodels = {1, 2}}});
EXPECT_THAT(forest_op->GetOutputQType({GetQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("not enough arguments for the decision "
"forest: expected at least 2, got 1")));
EXPECT_THAT(
forest_op->GetOutputQType(
{GetQType<float>(), GetDenseArrayQType<float>()}),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("either all forest inputs must be scalars or all "
"forest inputs must be arrays, but arg[0] is "
"FLOAT32 and arg[1] is DENSE_ARRAY_FLOAT32")));
EXPECT_THAT(
forest_op->GetOutputQType({GetQType<float>(), GetQType<float>()}),
IsOkAndHolds(MakeTupleQType({GetQType<float>(), GetQType<float>()})));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/expr_operator/decision_forest_operator.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/decision_forest/expr_operator/decision_forest_operator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
51570f0d-4ef4-48ce-8d82-5c34931cc933 | cpp | google/quiche | array_output_buffer | quiche/http2/core/array_output_buffer.cc | quiche/http2/core/array_output_buffer_test.cc | #include "quiche/http2/core/array_output_buffer.h"
#include <cstdint>
namespace spdy {
void ArrayOutputBuffer::Next(char** data, int* size) {
*data = current_;
*size = capacity_ > 0 ? capacity_ : 0;
}
void ArrayOutputBuffer::AdvanceWritePtr(int64_t count) {
current_ += count;
capacity_ -= count;
}
uint64_t ArrayOutputBuffer::BytesFree() const { return capacity_; }
} | #include "quiche/http2/core/array_output_buffer.h"
#include <cstdint>
#include <cstring>
#include "quiche/common/platform/api/quiche_test.h"
namespace spdy {
namespace test {
TEST(ArrayOutputBufferTest, InitializedFromArray) {
char array[100];
ArrayOutputBuffer buffer(array, sizeof(array));
EXPECT_EQ(sizeof(array), buffer.BytesFree());
EXPECT_EQ(0u, buffer.Size());
EXPECT_EQ(array, buffer.Begin());
}
TEST(ArrayOutputBufferTest, WriteAndReset) {
char array[100];
ArrayOutputBuffer buffer(array, sizeof(array));
char* dst;
int size;
buffer.Next(&dst, &size);
ASSERT_GT(size, 1);
ASSERT_NE(nullptr, dst);
const int64_t written = size / 2;
memset(dst, 'x', written);
buffer.AdvanceWritePtr(written);
EXPECT_EQ(static_cast<uint64_t>(size) - written, buffer.BytesFree());
EXPECT_EQ(static_cast<uint64_t>(written), buffer.Size());
buffer.Reset();
EXPECT_EQ(sizeof(array), buffer.BytesFree());
EXPECT_EQ(0u, buffer.Size());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/array_output_buffer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/core/array_output_buffer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
f5f375a3-9c49-4b88-aed3-8a4b684c5d55 | cpp | tensorflow/tensorflow | sparse_tensor_slice_dataset_op | tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc | tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op_test.cc | #include <numeric>
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/util/sparse/sparse_tensor.h"
namespace tensorflow {
namespace data {
namespace {
template <typename T>
class Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx,
const sparse::SparseTensor& sparse_tensor)
: DatasetBase(DatasetContext(ctx)),
sparse_tensor_(sparse_tensor),
dtypes_({DT_INT64, sparse_tensor.dtype(), DT_INT64}),
shapes_({{-1, sparse_tensor.dims() - 1},
{-1},
{sparse_tensor.dims() - 1}}) {}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(typename Iterator::Params{
this, strings::StrCat(prefix, "::SparseTensorSlice")});
}
const DataTypeVector& output_dtypes() const override { return dtypes_; }
const std::vector<PartialTensorShape>& output_shapes() const override {
return shapes_;
}
string DebugString() const override {
return "SparseTensorSliceDatasetOp::Dataset";
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
return sparse_tensor_.shape()[0];
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
return absl::OkStatus();
}
Status CheckExternalState() const override { return absl::OkStatus(); }
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* indices_node;
TF_RETURN_IF_ERROR(b->AddTensor(sparse_tensor_.indices(), &indices_node));
Node* value_node;
TF_RETURN_IF_ERROR(b->AddTensor(sparse_tensor_.values(), &value_node));
Node* dense_shape_node;
std::vector<int64_t> dense_shape;
dense_shape.reserve(sparse_tensor_.shape().size());
for (int i = 0; i < sparse_tensor_.shape().size(); i++)
dense_shape.emplace_back(sparse_tensor_.shape()[i]);
TF_RETURN_IF_ERROR(b->AddVector(dense_shape, &dense_shape_node));
AttrValue val_dtype;
b->BuildAttrValue(sparse_tensor_.dtype(), &val_dtype);
TF_RETURN_IF_ERROR(
b->AddDataset(this, {indices_node, value_node, dense_shape_node},
{{"Tvalues", val_dtype}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset<T>> {
public:
explicit Iterator(const typename Iterator::Params& params)
: DatasetIterator<Dataset<T>>(params),
num_elements_(params.dataset->sparse_tensor_.shape()[0]),
dense_shape_(DT_INT64, {params.dataset->sparse_tensor_.dims() - 1}),
group_iterable_(params.dataset->sparse_tensor_.group({0})),
iter_(group_iterable_.begin()) {
for (size_t i = 0; i < dense_shape_.NumElements(); ++i) {
dense_shape_.vec<int64_t>()(i) =
params.dataset->sparse_tensor_.shape()[i + 1];
}
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (i_ == num_elements_) {
*end_of_sequence = true;
return absl::OkStatus();
}
out_tensors->clear();
out_tensors->reserve(3);
const int rank = Iterator::dataset()->sparse_tensor_.dims();
if (i_ > next_non_empty_i_ && iter_ != group_iterable_.end()) {
sparse::Group group = *iter_;
const auto indices = group.indices();
const auto values = group.values<T>();
const int64_t num_entries = values.size();
next_non_empty_i_ = indices(0, 0);
next_indices_ = Tensor(DT_INT64, {num_entries, rank - 1});
next_values_ = Tensor(DataTypeToEnum<T>::value, {num_entries});
auto next_indices_t = next_indices_.matrix<int64_t>();
auto next_values_t = next_values_.vec<T>();
for (int64_t i = 0; i < num_entries; ++i) {
for (int d = 1; d < rank; ++d) {
next_indices_t(i, d - 1) = indices(i, d);
}
next_values_t(i) = values(i);
}
++iter_;
}
if (i_ == next_non_empty_i_) {
out_tensors->push_back(std::move(next_indices_));
out_tensors->push_back(std::move(next_values_));
out_tensors->push_back(dense_shape_);
next_non_empty_i_ = kNextNonEmptyUnknown;
} else {
DCHECK(i_ < next_non_empty_i_ || iter_ == group_iterable_.end());
out_tensors->push_back(Tensor(DT_INT64, TensorShape({0, rank - 1})));
out_tensors->push_back(Tensor(DataTypeToEnum<T>::value, {0}));
out_tensors->push_back(dense_shape_);
}
++i_;
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeSourceNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(Iterator::prefix(), "i", i_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(Iterator::prefix(), "iter_loc", iter_.loc()));
TF_RETURN_IF_ERROR(writer->WriteScalar(
Iterator::prefix(), "next_non_empty_i_", next_non_empty_i_));
if (i_ <= next_non_empty_i_) {
TF_RETURN_IF_ERROR(writer->WriteTensor(Iterator::prefix(),
"next_indices_", next_indices_));
TF_RETURN_IF_ERROR(writer->WriteTensor(Iterator::prefix(),
"next_values_", next_values_));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(reader->ReadScalar(Iterator::prefix(), "i", &i_));
int64_t iter_loc;
TF_RETURN_IF_ERROR(
reader->ReadScalar(Iterator::prefix(), "iter_loc", &iter_loc));
iter_ = group_iterable_.at(iter_loc);
TF_RETURN_IF_ERROR(reader->ReadScalar(
Iterator::prefix(), "next_non_empty_i_", &next_non_empty_i_));
if (i_ <= next_non_empty_i_) {
TF_RETURN_IF_ERROR(reader->ReadTensor(Iterator::prefix(),
"next_indices_", &next_indices_));
TF_RETURN_IF_ERROR(reader->ReadTensor(Iterator::prefix(),
"next_values_", &next_values_));
}
return absl::OkStatus();
}
private:
const int64_t num_elements_;
Tensor dense_shape_;
mutex mu_;
sparse::GroupIterable group_iterable_ TF_GUARDED_BY(mu_);
sparse::GroupIterable::IteratorStep iter_ TF_GUARDED_BY(mu_);
int64_t i_ TF_GUARDED_BY(mu_) = 0;
const int64_t kNextNonEmptyUnknown = -1;
int64_t next_non_empty_i_ TF_GUARDED_BY(mu_) = kNextNonEmptyUnknown;
Tensor next_indices_ TF_GUARDED_BY(mu_);
Tensor next_values_ TF_GUARDED_BY(mu_);
};
const sparse::SparseTensor sparse_tensor_;
const DataTypeVector dtypes_;
const std::vector<PartialTensorShape> shapes_;
};
template <typename T>
class SparseTensorSliceDatasetOp : public DatasetOpKernel {
public:
explicit SparseTensorSliceDatasetOp(OpKernelConstruction* ctx)
: DatasetOpKernel(ctx) {}
void MakeDataset(OpKernelContext* ctx, DatasetBase** output) override {
const Tensor* indices;
OP_REQUIRES_OK(ctx, ctx->input("indices", &indices));
const Tensor* values;
OP_REQUIRES_OK(ctx, ctx->input("values", &values));
const Tensor* dense_shape;
OP_REQUIRES_OK(ctx, ctx->input("dense_shape", &dense_shape));
OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(indices->shape()),
errors::InvalidArgument("Input indices must be a matrix. Got: ",
indices->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(values->shape()),
errors::InvalidArgument("Input values must be a vector. Got: ",
values->shape().DebugString()));
OP_REQUIRES(ctx, TensorShapeUtils::IsVector(dense_shape->shape()),
errors::InvalidArgument("Input shape must be a vector. Got: ",
dense_shape->shape().DebugString()));
OP_REQUIRES(
ctx, values->shape().dim_size(0) == indices->shape().dim_size(0),
errors::InvalidArgument(
"Number of values must match first dimension of indices. ", "Got ",
values->shape().dim_size(0),
" values, indices shape: ", indices->shape().DebugString()));
OP_REQUIRES(
ctx, dense_shape->shape().dim_size(0) == indices->shape().dim_size(1),
errors::InvalidArgument(
"Number of dimensions must match second dimension of indices. ",
"Got ", dense_shape->shape().dim_size(0),
" dimensions, indices shape: ", indices->shape().DebugString()));
OP_REQUIRES(ctx, dense_shape->NumElements() > 0,
errors::InvalidArgument(
"The shape argument requires at least one element."));
int64_t previous_batch_index = -1;
for (int64_t i = 0; i < indices->dim_size(0); ++i) {
int64_t next_batch_index = indices->matrix<int64_t>()(i, 0);
OP_REQUIRES(
ctx, next_batch_index >= previous_batch_index,
errors::Unimplemented("The SparseTensor must be ordered in the batch "
"dimension; handling arbitrarily ordered input "
"is not currently supported."));
previous_batch_index = next_batch_index;
}
absl::InlinedVector<int64_t, 8UL> std_order(dense_shape->NumElements(), 0);
TensorShape shape;
OP_REQUIRES_OK(ctx, TensorShape::BuildTensorShape(
dense_shape->vec<int64_t>(), &shape));
sparse::SparseTensor tensor;
OP_REQUIRES_OK(ctx, sparse::SparseTensor::Create(*indices, *values, shape,
std_order, &tensor));
*output = new Dataset<T>(ctx, std::move(tensor));
}
private:
};
#define REGISTER_DATASET_KERNEL(type) \
REGISTER_KERNEL_BUILDER(Name("SparseTensorSliceDataset") \
.Device(DEVICE_CPU) \
.TypeConstraint<type>("Tvalues"), \
SparseTensorSliceDatasetOp<type>);
TF_CALL_DATASET_TYPES(REGISTER_DATASET_KERNEL);
#undef REGISTER_DATASET_KERNEL
}
}
} | #include <string>
#include <utility>
#include "tensorflow/core/data/dataset_test_base.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/serialization_utils.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "sparse_tensor_slice_dataset";
constexpr char kDatasetType[] = "SparseTensorSlice";
class SparseTensorSliceDatasetParams : public DatasetParams {
public:
SparseTensorSliceDatasetParams(Tensor indices, Tensor values,
Tensor dense_shape, DataType tvalues,
string node_name)
: DatasetParams({tvalues}, {PartialTensorShape({})},
std::move(node_name)),
indices_(std::move(indices)),
values_(std::move(values)),
dense_shape_(std::move(dense_shape)),
tvalues_(tvalues) {
iterator_prefix_ = "Iterator";
}
std::vector<Tensor> GetInputTensors() const override {
return {indices_, values_, dense_shape_};
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->emplace_back("indices");
input_names->emplace_back("values");
input_names->emplace_back("dense_shape");
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
attr_vector->clear();
attr_vector->emplace_back("Tvalues", tvalues_);
return absl::OkStatus();
}
string dataset_type() const override { return kDatasetType; }
private:
Tensor indices_;
Tensor values_;
Tensor dense_shape_;
DataType tvalues_;
};
class SparseTensorSliceDatasetOpTest : public DatasetOpsTestBase {};
SparseTensorSliceDatasetParams TwoDimsSparseTensorSliceDatasetParams() {
return SparseTensorSliceDatasetParams(
CreateTensor<int64_t>({2, 2}, {0, 0, 1, 1}),
CreateTensor<int32>({2}, {888, 999}),
CreateTensor<int64_t>({2}, {2, 2}),
DT_INT32,
kNodeName);
}
SparseTensorSliceDatasetParams ThreeDimsSparseTensorSliceDatasetParams() {
return SparseTensorSliceDatasetParams(
CreateTensor<int64_t>({2, 3}, {0, 0, 0, 1, 1, 1}),
CreateTensor<double>({2}, {888.0, 999.0}),
CreateTensor<int64_t>({3}, {2, 2, 2}),
DT_DOUBLE,
kNodeName);
}
SparseTensorSliceDatasetParams FourDimsSparseTensorSliceDatasetParams() {
return SparseTensorSliceDatasetParams(
CreateTensor<int64_t>({2, 4}, {0, 0, 0, 0, 1, 1, 1, 1}),
CreateTensor<tstring>({2}, {"a", "b"}),
CreateTensor<int64_t>({4}, {3, 2, 2, 2}),
DT_STRING,
kNodeName);
}
SparseTensorSliceDatasetParams FiveDimsSparseTensorSliceDatasetParams() {
return SparseTensorSliceDatasetParams(
CreateTensor<int64_t>({2, 5}, {0, 0, 0, 0, 0, 1, 1, 1, 1, 1}),
CreateTensor<int32>({2}, {888, 999}),
CreateTensor<int64_t>({5}, {3, 2, 2, 2, 2}),
DT_INT32,
kNodeName);
}
template <typename T>
struct GetNextTestCase {
T dataset_params;
std::vector<std::vector<Tensor>> expected_outputs;
};
std::vector<GetNextTestCase<SparseTensorSliceDatasetParams>>
GetNextTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{{ CreateTensor<int64_t>({1, 1}, {0}),
CreateTensor<int32>({1}, {888}),
CreateTensor<int64_t>({1}, {2})},
{ CreateTensor<int64_t>({1, 1}, {1}),
CreateTensor<int32>({1}, {999}),
CreateTensor<int64_t>({1}, {2})}}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{{ CreateTensor<int64_t>({1, 2}, {0, 0}),
CreateTensor<double>({1}, {888.0}),
CreateTensor<int64_t>({2}, {2, 2})},
{{ CreateTensor<int64_t>({1, 2}, {1, 1})},
{ CreateTensor<double>({1}, {999.0})},
{ CreateTensor<int64_t>({2}, {2, 2})}}}},
{FourDimsSparseTensorSliceDatasetParams(),
{{ CreateTensor<int64_t>({1, 3}, {0, 0, 0}),
CreateTensor<tstring>({1}, {"a"}),
CreateTensor<int64_t>({3}, {2, 2, 2})},
{ CreateTensor<int64_t>({1, 3}, {1, 1, 1}),
CreateTensor<tstring>({1}, {"b"}),
CreateTensor<int64_t>({3}, {2, 2, 2})},
{ CreateTensor<int64_t>({0, 3}, {}),
CreateTensor<tstring>({0}, {}),
CreateTensor<int64_t>({3}, {2, 2, 2})}}},
{FiveDimsSparseTensorSliceDatasetParams(),
{
{ CreateTensor<int64_t>({1, 4}, {0, 0, 0, 0}),
CreateTensor<int32>({1}, {888}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})},
{ CreateTensor<int64_t>({1, 4}, {1, 1, 1, 1}),
CreateTensor<int32>({1}, {999}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})},
{ CreateTensor<int64_t>({0, 4}, {}),
CreateTensor<int32>({0}, {}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})}}}};
}
class ParameterizedGetNextTest
: public SparseTensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
GetNextTestCase<SparseTensorSliceDatasetParams>> {};
TEST_P(ParameterizedGetNextTest, GetNext) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
auto expected_outputs_it = test_case.expected_outputs.begin();
while (!end_of_sequence) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
if (!end_of_sequence) {
TF_EXPECT_OK(ExpectEqual(out_tensors[0], expected_outputs_it->at(0)));
TF_EXPECT_OK(ExpectEqual(out_tensors[1], expected_outputs_it->at(1)));
TF_EXPECT_OK(ExpectEqual(out_tensors[2], expected_outputs_it->at(2)));
expected_outputs_it++;
}
}
EXPECT_EQ(expected_outputs_it, test_case.expected_outputs.end());
}
INSTANTIATE_TEST_CASE_P(SparseTensorSliceDatasetOpTest,
ParameterizedGetNextTest,
::testing::ValuesIn(GetNextTestCases()));
TEST_F(SparseTensorSliceDatasetOpTest, DatasetTypeString) {
auto dataset_params = TwoDimsSparseTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(name_utils::OpName(kDatasetType)));
}
TEST_F(SparseTensorSliceDatasetOpTest, DatasetNodeName) {
auto dataset_params = TwoDimsSparseTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
std::vector<DatasetOutputDtypesTestCase<SparseTensorSliceDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_INT32, DT_INT64}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_DOUBLE, DT_INT64}},
{FourDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_STRING, DT_INT64}},
{FiveDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_INT32, DT_INT64}}};
}
DATASET_OUTPUT_DTYPES_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<SparseTensorSliceDatasetParams>>
DatasetOutputShapesTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 1}),
PartialTensorShape({1}),
PartialTensorShape({1})}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 2}),
PartialTensorShape({1}),
PartialTensorShape({2})}},
{FourDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 3}),
PartialTensorShape({1}),
PartialTensorShape({3})}},
{FiveDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 4}),
PartialTensorShape({1}),
PartialTensorShape({4})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<SparseTensorSliceDatasetParams>>
CardinalityTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
2},
{ThreeDimsSparseTensorSliceDatasetParams(),
2},
{FourDimsSparseTensorSliceDatasetParams(),
3},
{FiveDimsSparseTensorSliceDatasetParams(),
3}};
}
DATASET_CARDINALITY_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<SparseTensorSliceDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_INT32, DT_INT64}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_DOUBLE, DT_INT64}},
{FourDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_STRING, DT_INT64}},
{FiveDimsSparseTensorSliceDatasetParams(),
{DT_INT64, DT_INT32, DT_INT64}}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<SparseTensorSliceDatasetParams>>
IteratorOutputShapesTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 1}),
PartialTensorShape({1}),
PartialTensorShape({1})}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 2}),
PartialTensorShape({1}),
PartialTensorShape({2})}},
{FourDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 3}),
PartialTensorShape({1}),
PartialTensorShape({3})}},
{FiveDimsSparseTensorSliceDatasetParams(),
{PartialTensorShape({1, 4}),
PartialTensorShape({1}),
PartialTensorShape({4})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(SparseTensorSliceDatasetOpTest,
SparseTensorSliceDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(SparseTensorSliceDatasetOpTest, IteratorPrefix) {
auto dataset_params = TwoDimsSparseTensorSliceDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
kDatasetType, dataset_params.iterator_prefix())));
}
template <typename T>
struct IteratorSaveAndRestoreTestCase {
T dataset_params;
std::vector<int> breakpoints;
std::vector<std::vector<Tensor>> expected_outputs;
};
std::vector<IteratorSaveAndRestoreTestCase<SparseTensorSliceDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{TwoDimsSparseTensorSliceDatasetParams(),
{0, 1, 2},
{{ CreateTensor<int64_t>({1, 1}, {0}),
CreateTensor<int32>({1}, {888}),
CreateTensor<int64_t>({1}, {2})},
{ CreateTensor<int64_t>({1, 1}, {1}),
CreateTensor<int32>({1}, {999}),
CreateTensor<int64_t>({1}, {2})}}},
{ThreeDimsSparseTensorSliceDatasetParams(),
{0, 1, 2},
{{ CreateTensor<int64_t>({1, 2}, {0, 0}),
CreateTensor<double>({1}, {888.0}),
CreateTensor<int64_t>({2}, {2, 2})},
{{ CreateTensor<int64_t>({1, 2}, {1, 1})},
{ CreateTensor<double>({1}, {999.0})},
{ CreateTensor<int64_t>({2}, {2, 2})}}}},
{FourDimsSparseTensorSliceDatasetParams(),
{0, 1, 3},
{{ CreateTensor<int64_t>({1, 3}, {0, 0, 0}),
CreateTensor<tstring>({1}, {"a"}),
CreateTensor<int64_t>({3}, {2, 2, 2})},
{ CreateTensor<int64_t>({1, 3}, {1, 1, 1}),
CreateTensor<tstring>({1}, {"b"}),
CreateTensor<int64_t>({3}, {2, 2, 2})},
{ CreateTensor<int64_t>({0, 3}, {}),
CreateTensor<tstring>({0}, {}),
CreateTensor<int64_t>({3}, {2, 2, 2})}}},
{FiveDimsSparseTensorSliceDatasetParams(),
{0, 1, 2},
{{ CreateTensor<int64_t>({1, 4}, {0, 0, 0, 0}),
CreateTensor<int32>({1}, {888}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})},
{ CreateTensor<int64_t>({1, 4}, {1, 1, 1, 1}),
CreateTensor<int32>({1}, {999}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})},
{ CreateTensor<int64_t>({0, 4}, {}),
CreateTensor<int32>({0}, {}),
CreateTensor<int64_t>({4}, {2, 2, 2, 2})}}}};
}
class ParameterizedIteratorSaveAndRestoreTest
: public SparseTensorSliceDatasetOpTest,
public ::testing::WithParamInterface<
IteratorSaveAndRestoreTestCase<SparseTensorSliceDatasetParams>> {};
TEST_P(ParameterizedIteratorSaveAndRestoreTest, IteratorSaveAndRestore) {
auto test_case = GetParam();
TF_ASSERT_OK(Initialize(test_case.dataset_params));
std::unique_ptr<SerializationContext> serialization_ctx;
TF_ASSERT_OK(CreateSerializationContext(&serialization_ctx));
int cur_iteration = 0;
bool end_of_sequence = false;
int64_t num_slices = dataset_->Cardinality();
std::vector<Tensor> out_tensors;
for (int breakpoint : test_case.breakpoints) {
while (cur_iteration < breakpoint) {
TF_EXPECT_OK(iterator_->GetNext(iterator_ctx_.get(), &out_tensors,
&end_of_sequence));
cur_iteration++;
}
if (breakpoint == 0) {
EXPECT_FALSE(end_of_sequence);
} else if (breakpoint <= num_slices) {
for (int i = 0; i < out_tensors.size(); ++i) {
TF_EXPECT_OK(ExpectEqual(
out_tensors[0], test_case.expected_outputs[cur_iteration - 1][0]));
TF_EXPECT_OK(ExpectEqual(
out_tensors[1], test_case.expected_outputs[cur_iteration - 1][1]));
TF_EXPECT_OK(ExpectEqual(
out_tensors[2], test_case.expected_outputs[cur_iteration - 1][2]));
}
} else {
EXPECT_TRUE(end_of_sequence);
}
VariantTensorDataWriter writer;
TF_ASSERT_OK(iterator_->Save(serialization_ctx.get(), &writer));
std::vector<const VariantTensorData*> data;
writer.GetData(&data);
VariantTensorDataReader reader(data);
TF_EXPECT_OK(RestoreIterator(iterator_ctx_.get(), &reader,
test_case.dataset_params.iterator_prefix(),
*dataset_, &iterator_));
}
}
INSTANTIATE_TEST_CASE_P(SparseTensorSliceDatasetOpTest,
ParameterizedIteratorSaveAndRestoreTest,
::testing::ValuesIn(IteratorSaveAndRestoreTestCases()));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/sparse_tensor_slice_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8c0d2933-3526-4cfe-b392-f834ea6bc8ef | cpp | google/cel-cpp | regex_match_step | eval/eval/regex_match_step.cc | eval/eval/regex_match_step_test.cc | #include "eval/eval/regex_match_step.h"
#include <cstdint>
#include <cstdio>
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "common/casting.h"
#include "common/value.h"
#include "eval/eval/attribute_trail.h"
#include "eval/eval/direct_expression_step.h"
#include "eval/eval/evaluator_core.h"
#include "eval/eval/expression_step_base.h"
#include "internal/status_macros.h"
#include "re2/re2.h"
namespace google::api::expr::runtime {
namespace {
using ::cel::BoolValue;
using ::cel::Cast;
using ::cel::ErrorValue;
using ::cel::InstanceOf;
using ::cel::StringValue;
using ::cel::UnknownValue;
using ::cel::Value;
inline constexpr int kNumRegexMatchArguments = 1;
inline constexpr size_t kRegexMatchStepSubject = 0;
struct MatchesVisitor final {
const RE2& re;
bool operator()(const absl::Cord& value) const {
if (auto flat = value.TryFlat(); flat.has_value()) {
return RE2::PartialMatch(*flat, re);
}
return RE2::PartialMatch(static_cast<std::string>(value), re);
}
bool operator()(absl::string_view value) const {
return RE2::PartialMatch(value, re);
}
};
class RegexMatchStep final : public ExpressionStepBase {
public:
RegexMatchStep(int64_t expr_id, std::shared_ptr<const RE2> re2)
: ExpressionStepBase(expr_id, true),
re2_(std::move(re2)) {}
absl::Status Evaluate(ExecutionFrame* frame) const override {
if (!frame->value_stack().HasEnough(kNumRegexMatchArguments)) {
return absl::Status(absl::StatusCode::kInternal,
"Insufficient arguments supplied for regular "
"expression match");
}
auto input_args = frame->value_stack().GetSpan(kNumRegexMatchArguments);
const auto& subject = input_args[kRegexMatchStepSubject];
if (!subject->Is<cel::StringValue>()) {
return absl::Status(absl::StatusCode::kInternal,
"First argument for regular "
"expression match must be a string");
}
bool match = subject.GetString().NativeValue(MatchesVisitor{*re2_});
frame->value_stack().Pop(kNumRegexMatchArguments);
frame->value_stack().Push(frame->value_factory().CreateBoolValue(match));
return absl::OkStatus();
}
private:
const std::shared_ptr<const RE2> re2_;
};
class RegexMatchDirectStep final : public DirectExpressionStep {
public:
RegexMatchDirectStep(int64_t expr_id,
std::unique_ptr<DirectExpressionStep> subject,
std::shared_ptr<const RE2> re2)
: DirectExpressionStep(expr_id),
subject_(std::move(subject)),
re2_(std::move(re2)) {}
absl::Status Evaluate(ExecutionFrameBase& frame, Value& result,
AttributeTrail& attribute) const override {
AttributeTrail subject_attr;
CEL_RETURN_IF_ERROR(subject_->Evaluate(frame, result, subject_attr));
if (InstanceOf<ErrorValue>(result) ||
cel::InstanceOf<UnknownValue>(result)) {
return absl::OkStatus();
}
if (!InstanceOf<StringValue>(result)) {
return absl::Status(absl::StatusCode::kInternal,
"First argument for regular "
"expression match must be a string");
}
bool match = Cast<StringValue>(result).NativeValue(MatchesVisitor{*re2_});
result = BoolValue(match);
return absl::OkStatus();
}
private:
std::unique_ptr<DirectExpressionStep> subject_;
const std::shared_ptr<const RE2> re2_;
};
}
std::unique_ptr<DirectExpressionStep> CreateDirectRegexMatchStep(
int64_t expr_id, std::unique_ptr<DirectExpressionStep> subject,
std::shared_ptr<const RE2> re2) {
return std::make_unique<RegexMatchDirectStep>(expr_id, std::move(subject),
std::move(re2));
}
absl::StatusOr<std::unique_ptr<ExpressionStep>> CreateRegexMatchStep(
std::shared_ptr<const RE2> re2, int64_t expr_id) {
return std::make_unique<RegexMatchStep>(expr_id, std::move(re2));
}
} | #include "eval/eval/regex_match_step.h"
#include "google/api/expr/v1alpha1/checked.pb.h"
#include "google/api/expr/v1alpha1/syntax.pb.h"
#include "google/protobuf/arena.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "eval/public/activation.h"
#include "eval/public/builtin_func_registrar.h"
#include "eval/public/cel_expr_builder_factory.h"
#include "eval/public/cel_options.h"
#include "internal/testing.h"
#include "parser/parser.h"
namespace google::api::expr::runtime {
namespace {
using ::absl_testing::StatusIs;
using google::api::expr::v1alpha1::CheckedExpr;
using google::api::expr::v1alpha1::Reference;
using ::testing::Eq;
using ::testing::HasSubstr;
Reference MakeMatchesStringOverload() {
Reference reference;
reference.add_overload_id("matches_string");
return reference;
}
TEST(RegexMatchStep, Precompiled) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(auto parsed_expr, parser::Parse("foo.matches('hello')"));
CheckedExpr checked_expr;
*checked_expr.mutable_expr() = parsed_expr.expr();
*checked_expr.mutable_source_info() = parsed_expr.source_info();
checked_expr.mutable_reference_map()->insert(
{checked_expr.expr().id(), MakeMatchesStringOverload()});
InterpreterOptions options;
options.enable_regex_precompilation = true;
auto expr_builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(expr_builder->GetRegistry(), options));
ASSERT_OK_AND_ASSIGN(auto expr,
expr_builder->CreateExpression(&checked_expr));
activation.InsertValue("foo", CelValue::CreateStringView("hello world!"));
ASSERT_OK_AND_ASSIGN(auto result, expr->Evaluate(activation, &arena));
EXPECT_TRUE(result.IsBool());
EXPECT_TRUE(result.BoolOrDie());
}
TEST(RegexMatchStep, PrecompiledInvalidRegex) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(auto parsed_expr, parser::Parse("foo.matches('(')"));
CheckedExpr checked_expr;
*checked_expr.mutable_expr() = parsed_expr.expr();
*checked_expr.mutable_source_info() = parsed_expr.source_info();
checked_expr.mutable_reference_map()->insert(
{checked_expr.expr().id(), MakeMatchesStringOverload()});
InterpreterOptions options;
options.enable_regex_precompilation = true;
auto expr_builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(expr_builder->GetRegistry(), options));
EXPECT_THAT(expr_builder->CreateExpression(&checked_expr),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("invalid_argument")));
}
TEST(RegexMatchStep, PrecompiledInvalidProgramTooLarge) {
google::protobuf::Arena arena;
Activation activation;
ASSERT_OK_AND_ASSIGN(auto parsed_expr, parser::Parse("foo.matches('hello')"));
CheckedExpr checked_expr;
*checked_expr.mutable_expr() = parsed_expr.expr();
*checked_expr.mutable_source_info() = parsed_expr.source_info();
checked_expr.mutable_reference_map()->insert(
{checked_expr.expr().id(), MakeMatchesStringOverload()});
InterpreterOptions options;
options.regex_max_program_size = 1;
options.enable_regex_precompilation = true;
auto expr_builder = CreateCelExpressionBuilder(options);
ASSERT_OK(RegisterBuiltinFunctions(expr_builder->GetRegistry(), options));
EXPECT_THAT(expr_builder->CreateExpression(&checked_expr),
StatusIs(absl::StatusCode::kInvalidArgument,
Eq("exceeded RE2 max program size")));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/regex_match_step.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/eval/regex_match_step_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
b1461e3e-bfdf-4425-90cb-2c455821fae2 | cpp | tensorflow/tensorflow | simple_delegate | tensorflow/lite/delegates/utils/simple_delegate.cc | tensorflow/lite/delegates/utils/simple_delegate_test.cc | #include "tensorflow/lite/delegates/utils/simple_delegate.h"
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <memory>
#include <string>
#include <vector>
#include "tensorflow/lite/array.h"
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/delegates/utils.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/logger.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
namespace {
TfLiteRegistration GetDelegateKernelRegistration(
SimpleDelegateInterface* delegate) {
TfLiteRegistration kernel_registration{};
kernel_registration.profiling_string = nullptr;
kernel_registration.builtin_code = kTfLiteBuiltinDelegate;
kernel_registration.custom_name = delegate->Name();
kernel_registration.version = 1;
kernel_registration.free = [](TfLiteContext* context, void* buffer) -> void {
delete reinterpret_cast<SimpleDelegateKernelInterface*>(buffer);
};
kernel_registration.init = [](TfLiteContext* context, const char* buffer,
size_t length) -> void* {
const TfLiteDelegateParams* params =
reinterpret_cast<const TfLiteDelegateParams*>(buffer);
if (params == nullptr) {
TF_LITE_KERNEL_LOG(context, "NULL TfLiteDelegateParams passed.");
return nullptr;
}
auto* delegate =
reinterpret_cast<SimpleDelegateInterface*>(params->delegate->data_);
std::unique_ptr<SimpleDelegateKernelInterface> delegate_kernel(
delegate->CreateDelegateKernelInterface());
if (delegate_kernel->Init(context, params) != kTfLiteOk) {
return nullptr;
}
return delegate_kernel.release();
};
kernel_registration.prepare = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
if (node->user_data == nullptr) {
TF_LITE_KERNEL_LOG(context, "Delegate kernel was not initialized");
return kTfLiteError;
}
SimpleDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleDelegateKernelInterface*>(node->user_data);
return delegate_kernel->Prepare(context, node);
};
kernel_registration.invoke = [](TfLiteContext* context,
TfLiteNode* node) -> TfLiteStatus {
SimpleDelegateKernelInterface* delegate_kernel =
reinterpret_cast<SimpleDelegateKernelInterface*>(node->user_data);
TFLITE_DCHECK(delegate_kernel != nullptr);
return delegate_kernel->Eval(context, node);
};
return kernel_registration;
}
TfLiteStatus DelegatePrepare(TfLiteContext* context,
TfLiteDelegate* base_delegate) {
auto* delegate =
reinterpret_cast<SimpleDelegateInterface*>(base_delegate->data_);
auto delegate_options = delegate->DelegateOptions();
if (delegate_options.max_delegated_partitions <= 0)
delegate_options.max_delegated_partitions = std::numeric_limits<int>::max();
TF_LITE_ENSURE_STATUS(delegate->Initialize(context));
delegates::IsNodeSupportedFn node_supported_fn =
[=](TfLiteContext* context, TfLiteNode* node,
TfLiteRegistration* registration,
std::string* unsupported_details) -> bool {
return delegate->IsNodeSupportedByDelegate(registration, node, context);
};
delegates::GraphPartitionHelper helper(context, node_supported_fn);
TF_LITE_ENSURE_STATUS(helper.Partition(nullptr));
std::vector<int> supported_nodes = helper.GetNodesOfFirstNLargestPartitions(
delegate_options.max_delegated_partitions,
delegate_options.min_nodes_per_partition);
TFLITE_LOG_PROD_ONCE(tflite::TFLITE_LOG_INFO,
"%s delegate: %d nodes delegated out of %d nodes with "
"%d partitions.\n",
delegate->Name(), supported_nodes.size(),
helper.num_total_nodes(), helper.num_partitions());
TfLiteRegistration delegate_kernel_registration =
GetDelegateKernelRegistration(delegate);
return context->ReplaceNodeSubsetsWithDelegateKernels(
context, delegate_kernel_registration,
BuildTfLiteArray(supported_nodes).get(), base_delegate);
}
}
TfLiteDelegate* TfLiteDelegateFactory::CreateSimpleDelegate(
std::unique_ptr<SimpleDelegateInterface> simple_delegate, int64_t flag) {
if (simple_delegate == nullptr) {
return nullptr;
}
auto delegate = new TfLiteDelegate{};
delegate->Prepare = &DelegatePrepare;
delegate->flags = flag;
delegate->data_ = simple_delegate.release();
delegate->CopyFromBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) -> TfLiteStatus {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
return simple_delegate->CopyFromBufferHandle(context, buffer_handle,
tensor);
};
delegate->CopyToBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle buffer_handle,
TfLiteTensor* tensor) -> TfLiteStatus {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
return simple_delegate->CopyToBufferHandle(context, buffer_handle, tensor);
};
delegate->FreeBufferHandle = [](TfLiteContext* context,
TfLiteDelegate* delegate,
TfLiteBufferHandle* buffer_handle) {
auto* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
simple_delegate->FreeBufferHandle(context, buffer_handle);
};
return delegate;
}
void TfLiteDelegateFactory::DeleteSimpleDelegate(TfLiteDelegate* delegate) {
if (!delegate) return;
SimpleDelegateInterface* simple_delegate =
reinterpret_cast<SimpleDelegateInterface*>(delegate->data_);
delete simple_delegate;
delete delegate;
}
} | #include <stdlib.h>
#include <memory>
#include <utility>
#include <gtest/gtest.h>
#include "tensorflow/lite/builtin_ops.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/core/kernels/builtin_op_kernels.h"
#include "tensorflow/lite/delegates/utils/dummy_delegate/dummy_delegate.h"
#include "tensorflow/lite/interpreter.h"
namespace tflite {
namespace {
class TestDelegate : public ::testing::Test {
protected:
void SetUp() override {
interpreter_ = std::make_unique<Interpreter>();
interpreter_->AddTensors(5);
interpreter_->SetInputs({0, 1});
interpreter_->SetOutputs({3, 4});
TfLiteQuantizationParams quant;
interpreter_->SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(1, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(2, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(3, kTfLiteFloat32, "", {3},
quant);
interpreter_->SetTensorParametersReadWrite(4, kTfLiteFloat32, "", {3},
quant);
TfLiteRegistration* reg = ops::builtin::Register_ADD();
void* builtin_data_1 = malloc(sizeof(int));
void* builtin_data_2 = malloc(sizeof(int));
void* builtin_data_3 = malloc(sizeof(int));
interpreter_->AddNodeWithParameters({0, 0}, {2}, nullptr, 0, builtin_data_1,
reg);
interpreter_->AddNodeWithParameters({1, 1}, {3}, nullptr, 0, builtin_data_2,
reg);
interpreter_->AddNodeWithParameters({2, 1}, {4}, nullptr, 0, builtin_data_3,
reg);
}
void TearDown() override { interpreter_.reset(); }
protected:
std::unique_ptr<Interpreter> interpreter_;
};
TEST_F(TestDelegate, BasicDelegate) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
interpreter_->ModifyGraphWithDelegate(std::move(delegate));
ASSERT_EQ(interpreter_->execution_plan().size(), 1);
int node = interpreter_->execution_plan()[0];
const auto* node_and_reg = interpreter_->node_and_registration(node);
EXPECT_STREQ("DummyDelegate", node_and_reg->second.custom_name);
EXPECT_EQ(1, node_and_reg->second.version);
const TfLiteDelegateParams* params = static_cast<const TfLiteDelegateParams*>(
node_and_reg->first.builtin_data);
ASSERT_EQ(params->nodes_to_replace->size, 3);
EXPECT_EQ(params->nodes_to_replace->data[0], 0);
EXPECT_EQ(params->nodes_to_replace->data[1], 1);
EXPECT_EQ(params->nodes_to_replace->data[2], 2);
ASSERT_EQ(params->input_tensors->size, 2);
EXPECT_EQ(params->input_tensors->data[0], 0);
EXPECT_EQ(params->input_tensors->data[1], 1);
ASSERT_EQ(params->output_tensors->size, 2);
EXPECT_EQ(params->output_tensors->data[0], 3);
EXPECT_EQ(params->output_tensors->data[1], 4);
}
TEST_F(TestDelegate, NoNodesToDelegate) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinSub;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
interpreter_->ModifyGraphWithDelegate(std::move(delegate));
ASSERT_EQ(interpreter_->execution_plan().size(), 3);
}
TEST_F(TestDelegate, DelegateFailedPrepare) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_prepare = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteDelegateError,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
}
TEST_F(TestDelegate, DelegateFailedInvoke) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_invoke = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteOk,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
ASSERT_EQ(kTfLiteError, interpreter_->Invoke());
}
TEST_F(TestDelegate, DelegateFailedInit) {
DummyDelegateOptions options = TfLiteDummyDelegateOptionsDefault();
options.allowed_builtin_code = kTfLiteBuiltinAdd;
options.error_during_init = true;
auto delegate = TfLiteDummyDelegateCreateUnique(&options);
ASSERT_EQ(kTfLiteDelegateError,
interpreter_->ModifyGraphWithDelegate(std::move(delegate)));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/simple_delegate.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/delegates/utils/simple_delegate_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
1c360f4d-582c-463d-a680-29eb0516227f | cpp | google/arolla | iterator | arolla/util/iterator.h | arolla/util/iterator_test.cc | #ifndef AROLLA_UTIL_ITERATOR_H_
#define AROLLA_UTIL_ITERATOR_H_
#include <iterator>
namespace arolla {
template <typename Array>
class ConstArrayIterator {
public:
using iterator_category = std::random_access_iterator_tag;
using value_type = typename Array::value_type;
using pointer = const value_type*;
using reference = decltype(std::declval<const Array>()[0]);
using size_type = typename Array::size_type;
using difference_type = typename Array::difference_type;
ConstArrayIterator() : arr_(nullptr), pos_(0) {}
ConstArrayIterator(const Array* arr, size_type pos) : arr_(arr), pos_(pos) {}
reference operator*() const { return (*arr_)[pos_]; }
pointer operator->() const { return &**this; }
reference operator[](size_type n) const { return *(*this + n); }
ConstArrayIterator& operator+=(difference_type n) {
pos_ += n;
return *this;
}
ConstArrayIterator& operator-=(difference_type n) { return *this += -n; }
ConstArrayIterator& operator++() { return *this += 1; }
ConstArrayIterator& operator--() { return *this -= 1; }
ConstArrayIterator operator++(int) {
ConstArrayIterator t = *this;
++*this;
return t;
}
ConstArrayIterator operator--(int) {
ConstArrayIterator t = *this;
--*this;
return t;
}
friend ConstArrayIterator operator+(ConstArrayIterator v, difference_type n) {
return v += n;
}
friend ConstArrayIterator operator+(difference_type n, ConstArrayIterator v) {
return v += n;
}
friend ConstArrayIterator operator-(ConstArrayIterator v, difference_type n) {
return v -= n;
}
friend difference_type operator-(const ConstArrayIterator& a,
const ConstArrayIterator& b) {
return static_cast<difference_type>(a.pos_) -
static_cast<difference_type>(b.pos_);
}
friend bool operator==(ConstArrayIterator a, ConstArrayIterator b) {
return a.pos_ == b.pos_;
}
friend bool operator!=(ConstArrayIterator a, ConstArrayIterator b) {
return !(a == b);
}
friend bool operator<(ConstArrayIterator a, ConstArrayIterator b) {
return b - a > 0;
}
friend bool operator>(ConstArrayIterator a, ConstArrayIterator b) {
return b < a;
}
friend bool operator<=(ConstArrayIterator a, ConstArrayIterator b) {
return !(a > b);
}
friend bool operator>=(ConstArrayIterator a, ConstArrayIterator b) {
return !(a < b);
}
private:
const Array* arr_;
size_type pos_;
};
}
#endif | #include "arolla/util/iterator.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
using ::testing::ElementsAre;
namespace arolla {
namespace {
class FloatGeneratorArray {
public:
using value_type = float;
using size_type = int64_t;
using difference_type = int64_t;
using const_iterator = ConstArrayIterator<FloatGeneratorArray>;
explicit FloatGeneratorArray(int64_t size) : size_(size) {}
float operator[](int64_t i) const { return i * 10.0f; }
const_iterator begin() const { return const_iterator{this, 0}; }
const_iterator end() const { return const_iterator{this, size_}; }
private:
int64_t size_;
};
TEST(Iterator, IteratorOverFloatGeneratorArray) {
FloatGeneratorArray array(10);
auto iter1 = array.begin();
EXPECT_EQ(*(iter1++), 0.0f);
EXPECT_EQ(*iter1, 10.0f);
EXPECT_EQ(*(++iter1), 20.0f);
EXPECT_EQ(*(iter1--), 20.0f);
EXPECT_EQ(*(--iter1), 0.0f);
EXPECT_EQ(iter1[5], 50.0f);
EXPECT_EQ(iter1, array.begin());
EXPECT_NE(iter1, array.end());
EXPECT_LT(array.begin(), array.end());
EXPECT_GT(array.end(), array.begin());
EXPECT_EQ(*(iter1 += 9), 90.0f);
EXPECT_EQ(*(iter1 -= 2), 70.0f);
EXPECT_EQ(iter1, array.begin() + 7);
EXPECT_EQ(iter1, 7 + array.begin());
EXPECT_EQ(iter1, array.end() - 3);
EXPECT_EQ(iter1 - array.begin(), 7);
EXPECT_LE(array.begin() + 10, array.end());
EXPECT_GE(array.end(), array.begin() + 10);
EXPECT_THAT(array, ElementsAre(0.0f, 10.0f, 20.0f, 30.0f, 40.0f, 50.0f, 60.0f,
70.0f, 80.0f, 90.0f));
}
TEST(Iterator, Algorithms) {
FloatGeneratorArray array(5);
std::vector<float> copy1(array.begin(), array.end());
EXPECT_THAT(copy1, ElementsAre(0.f, 10.f, 20.f, 30.f, 40.f));
std::vector<float> copy2;
std::copy_if(array.begin(), array.end(), std::back_inserter(copy2),
[](float value) { return value > 12.f; });
EXPECT_THAT(copy2, ElementsAre(20.f, 30.f, 40.f));
std::vector<float> copy3;
for (float val : array) {
copy3.push_back(val);
}
EXPECT_THAT(copy3, ElementsAre(0.f, 10.f, 20.f, 30.f, 40.f));
}
TEST(Iterator, IteratorOverStdVector) {
using StringContainer = std::vector<std::string>;
StringContainer strings{"one", "two", "three"};
ConstArrayIterator<StringContainer> my_iter(&strings, 0);
EXPECT_EQ(*my_iter, "one");
EXPECT_EQ(my_iter[2], "three");
EXPECT_EQ(my_iter->size(), 3);
auto strings_copy = strings;
EXPECT_TRUE(std::equal(strings_copy.begin(), strings_copy.end(), my_iter));
EXPECT_THAT(strings, ElementsAre("one", "two", "three"));
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/iterator.h | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/util/iterator_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
2635ad77-cef5-4bfd-a7c1-071b30efa586 | cpp | google/quiche | quic_utils | quiche/quic/core/quic_utils.cc | quiche/quic/core/quic_utils_test.cc | #include "quiche/quic/core/quic_utils.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <limits>
#include <string>
#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "openssl/sha.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/core/quic_versions.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_mem_slice.h"
#include "quiche/common/quiche_endian.h"
namespace quic {
namespace {
#if defined(__x86_64__) && \
((defined(__GNUC__) && \
(__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8))) || \
defined(__clang__))
#define QUIC_UTIL_HAS_UINT128 1
#endif
#ifdef QUIC_UTIL_HAS_UINT128
absl::uint128 IncrementalHashFast(absl::uint128 uhash, absl::string_view data) {
static const absl::uint128 kPrime =
(static_cast<absl::uint128>(16777216) << 64) + 315;
auto hi = absl::Uint128High64(uhash);
auto lo = absl::Uint128Low64(uhash);
absl::uint128 xhash = (static_cast<absl::uint128>(hi) << 64) + lo;
const uint8_t* octets = reinterpret_cast<const uint8_t*>(data.data());
for (size_t i = 0; i < data.length(); ++i) {
xhash = (xhash ^ static_cast<uint32_t>(octets[i])) * kPrime;
}
return absl::MakeUint128(absl::Uint128High64(xhash),
absl::Uint128Low64(xhash));
}
#endif
#ifndef QUIC_UTIL_HAS_UINT128
absl::uint128 IncrementalHashSlow(absl::uint128 hash, absl::string_view data) {
static const absl::uint128 kPrime = absl::MakeUint128(16777216, 315);
const uint8_t* octets = reinterpret_cast<const uint8_t*>(data.data());
for (size_t i = 0; i < data.length(); ++i) {
hash = hash ^ absl::MakeUint128(0, octets[i]);
hash = hash * kPrime;
}
return hash;
}
#endif
absl::uint128 IncrementalHash(absl::uint128 hash, absl::string_view data) {
#ifdef QUIC_UTIL_HAS_UINT128
return IncrementalHashFast(hash, data);
#else
return IncrementalHashSlow(hash, data);
#endif
}
}
uint64_t QuicUtils::FNV1a_64_Hash(absl::string_view data) {
static const uint64_t kOffset = UINT64_C(14695981039346656037);
static const uint64_t kPrime = UINT64_C(1099511628211);
const uint8_t* octets = reinterpret_cast<const uint8_t*>(data.data());
uint64_t hash = kOffset;
for (size_t i = 0; i < data.length(); ++i) {
hash = hash ^ octets[i];
hash = hash * kPrime;
}
return hash;
}
absl::uint128 QuicUtils::FNV1a_128_Hash(absl::string_view data) {
return FNV1a_128_Hash_Three(data, absl::string_view(), absl::string_view());
}
absl::uint128 QuicUtils::FNV1a_128_Hash_Two(absl::string_view data1,
absl::string_view data2) {
return FNV1a_128_Hash_Three(data1, data2, absl::string_view());
}
absl::uint128 QuicUtils::FNV1a_128_Hash_Three(absl::string_view data1,
absl::string_view data2,
absl::string_view data3) {
const absl::uint128 kOffset = absl::MakeUint128(
UINT64_C(7809847782465536322), UINT64_C(7113472399480571277));
absl::uint128 hash = IncrementalHash(kOffset, data1);
if (data2.empty()) {
return hash;
}
hash = IncrementalHash(hash, data2);
if (data3.empty()) {
return hash;
}
return IncrementalHash(hash, data3);
}
void QuicUtils::SerializeUint128Short(absl::uint128 v, uint8_t* out) {
const uint64_t lo = absl::Uint128Low64(v);
const uint64_t hi = absl::Uint128High64(v);
memcpy(out, &lo, sizeof(lo));
memcpy(out + sizeof(lo), &hi, sizeof(hi) / 2);
}
#define RETURN_STRING_LITERAL(x) \
case x: \
return #x;
std::string QuicUtils::AddressChangeTypeToString(AddressChangeType type) {
switch (type) {
RETURN_STRING_LITERAL(NO_CHANGE);
RETURN_STRING_LITERAL(PORT_CHANGE);
RETURN_STRING_LITERAL(IPV4_SUBNET_CHANGE);
RETURN_STRING_LITERAL(IPV4_TO_IPV6_CHANGE);
RETURN_STRING_LITERAL(IPV6_TO_IPV4_CHANGE);
RETURN_STRING_LITERAL(IPV6_TO_IPV6_CHANGE);
RETURN_STRING_LITERAL(IPV4_TO_IPV4_CHANGE);
}
return "INVALID_ADDRESS_CHANGE_TYPE";
}
const char* QuicUtils::SentPacketStateToString(SentPacketState state) {
switch (state) {
RETURN_STRING_LITERAL(OUTSTANDING);
RETURN_STRING_LITERAL(NEVER_SENT);
RETURN_STRING_LITERAL(ACKED);
RETURN_STRING_LITERAL(UNACKABLE);
RETURN_STRING_LITERAL(NEUTERED);
RETURN_STRING_LITERAL(HANDSHAKE_RETRANSMITTED);
RETURN_STRING_LITERAL(LOST);
RETURN_STRING_LITERAL(PTO_RETRANSMITTED);
RETURN_STRING_LITERAL(NOT_CONTRIBUTING_RTT);
}
return "INVALID_SENT_PACKET_STATE";
}
const char* QuicUtils::QuicLongHeaderTypetoString(QuicLongHeaderType type) {
switch (type) {
RETURN_STRING_LITERAL(VERSION_NEGOTIATION);
RETURN_STRING_LITERAL(INITIAL);
RETURN_STRING_LITERAL(RETRY);
RETURN_STRING_LITERAL(HANDSHAKE);
RETURN_STRING_LITERAL(ZERO_RTT_PROTECTED);
default:
return "INVALID_PACKET_TYPE";
}
}
const char* QuicUtils::AckResultToString(AckResult result) {
switch (result) {
RETURN_STRING_LITERAL(PACKETS_NEWLY_ACKED);
RETURN_STRING_LITERAL(NO_PACKETS_NEWLY_ACKED);
RETURN_STRING_LITERAL(UNSENT_PACKETS_ACKED);
RETURN_STRING_LITERAL(UNACKABLE_PACKETS_ACKED);
RETURN_STRING_LITERAL(PACKETS_ACKED_IN_WRONG_PACKET_NUMBER_SPACE);
}
return "INVALID_ACK_RESULT";
}
AddressChangeType QuicUtils::DetermineAddressChangeType(
const QuicSocketAddress& old_address,
const QuicSocketAddress& new_address) {
if (!old_address.IsInitialized() || !new_address.IsInitialized() ||
old_address == new_address) {
return NO_CHANGE;
}
if (old_address.host() == new_address.host()) {
return PORT_CHANGE;
}
bool old_ip_is_ipv4 = old_address.host().IsIPv4() ? true : false;
bool migrating_ip_is_ipv4 = new_address.host().IsIPv4() ? true : false;
if (old_ip_is_ipv4 && !migrating_ip_is_ipv4) {
return IPV4_TO_IPV6_CHANGE;
}
if (!old_ip_is_ipv4) {
return migrating_ip_is_ipv4 ? IPV6_TO_IPV4_CHANGE : IPV6_TO_IPV6_CHANGE;
}
const int kSubnetMaskLength = 24;
if (old_address.host().InSameSubnet(new_address.host(), kSubnetMaskLength)) {
return IPV4_SUBNET_CHANGE;
}
return IPV4_TO_IPV4_CHANGE;
}
bool QuicUtils::IsAckable(SentPacketState state) {
return state != NEVER_SENT && state != ACKED && state != UNACKABLE;
}
bool QuicUtils::IsRetransmittableFrame(QuicFrameType type) {
switch (type) {
case ACK_FRAME:
case PADDING_FRAME:
case STOP_WAITING_FRAME:
case MTU_DISCOVERY_FRAME:
case PATH_CHALLENGE_FRAME:
case PATH_RESPONSE_FRAME:
return false;
default:
return true;
}
}
bool QuicUtils::IsHandshakeFrame(const QuicFrame& frame,
QuicTransportVersion transport_version) {
if (!QuicVersionUsesCryptoFrames(transport_version)) {
return frame.type == STREAM_FRAME &&
frame.stream_frame.stream_id == GetCryptoStreamId(transport_version);
} else {
return frame.type == CRYPTO_FRAME;
}
}
bool QuicUtils::ContainsFrameType(const QuicFrames& frames,
QuicFrameType type) {
for (const QuicFrame& frame : frames) {
if (frame.type == type) {
return true;
}
}
return false;
}
SentPacketState QuicUtils::RetransmissionTypeToPacketState(
TransmissionType retransmission_type) {
switch (retransmission_type) {
case ALL_ZERO_RTT_RETRANSMISSION:
return UNACKABLE;
case HANDSHAKE_RETRANSMISSION:
return HANDSHAKE_RETRANSMITTED;
case LOSS_RETRANSMISSION:
return LOST;
case PTO_RETRANSMISSION:
return PTO_RETRANSMITTED;
case PATH_RETRANSMISSION:
return NOT_CONTRIBUTING_RTT;
case ALL_INITIAL_RETRANSMISSION:
return UNACKABLE;
default:
QUIC_BUG(quic_bug_10839_2)
<< retransmission_type << " is not a retransmission_type";
return UNACKABLE;
}
}
bool QuicUtils::IsIetfPacketHeader(uint8_t first_byte) {
return (first_byte & FLAGS_LONG_HEADER) || (first_byte & FLAGS_FIXED_BIT) ||
!(first_byte & FLAGS_DEMULTIPLEXING_BIT);
}
bool QuicUtils::IsIetfPacketShortHeader(uint8_t first_byte) {
return IsIetfPacketHeader(first_byte) && !(first_byte & FLAGS_LONG_HEADER);
}
QuicStreamId QuicUtils::GetInvalidStreamId(QuicTransportVersion version) {
return VersionHasIetfQuicFrames(version)
? std::numeric_limits<QuicStreamId>::max()
: 0;
}
QuicStreamId QuicUtils::GetCryptoStreamId(QuicTransportVersion version) {
QUIC_BUG_IF(quic_bug_12982_1, QuicVersionUsesCryptoFrames(version))
<< "CRYPTO data aren't in stream frames; they have no stream ID.";
return QuicVersionUsesCryptoFrames(version) ? GetInvalidStreamId(version) : 1;
}
bool QuicUtils::IsCryptoStreamId(QuicTransportVersion version,
QuicStreamId stream_id) {
if (QuicVersionUsesCryptoFrames(version)) {
return false;
}
return stream_id == GetCryptoStreamId(version);
}
QuicStreamId QuicUtils::GetHeadersStreamId(QuicTransportVersion version) {
QUICHE_DCHECK(!VersionUsesHttp3(version));
return GetFirstBidirectionalStreamId(version, Perspective::IS_CLIENT);
}
bool QuicUtils::IsClientInitiatedStreamId(QuicTransportVersion version,
QuicStreamId id) {
if (id == GetInvalidStreamId(version)) {
return false;
}
return VersionHasIetfQuicFrames(version) ? id % 2 == 0 : id % 2 != 0;
}
bool QuicUtils::IsServerInitiatedStreamId(QuicTransportVersion version,
QuicStreamId id) {
if (id == GetInvalidStreamId(version)) {
return false;
}
return VersionHasIetfQuicFrames(version) ? id % 2 != 0 : id % 2 == 0;
}
bool QuicUtils::IsOutgoingStreamId(ParsedQuicVersion version, QuicStreamId id,
Perspective perspective) {
const bool perspective_is_server = perspective == Perspective::IS_SERVER;
const bool stream_is_server =
QuicUtils::IsServerInitiatedStreamId(version.transport_version, id);
return perspective_is_server == stream_is_server;
}
bool QuicUtils::IsBidirectionalStreamId(QuicStreamId id,
ParsedQuicVersion version) {
QUICHE_DCHECK(version.HasIetfQuicFrames());
return id % 4 < 2;
}
StreamType QuicUtils::GetStreamType(QuicStreamId id, Perspective perspective,
bool peer_initiated,
ParsedQuicVersion version) {
QUICHE_DCHECK(version.HasIetfQuicFrames());
if (IsBidirectionalStreamId(id, version)) {
return BIDIRECTIONAL;
}
if (peer_initiated) {
if (perspective == Perspective::IS_SERVER) {
QUICHE_DCHECK_EQ(2u, id % 4);
} else {
QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective);
QUICHE_DCHECK_EQ(3u, id % 4);
}
return READ_UNIDIRECTIONAL;
}
if (perspective == Perspective::IS_SERVER) {
QUICHE_DCHECK_EQ(3u, id % 4);
} else {
QUICHE_DCHECK_EQ(Perspective::IS_CLIENT, perspective);
QUICHE_DCHECK_EQ(2u, id % 4);
}
return WRITE_UNIDIRECTIONAL;
}
QuicStreamId QuicUtils::StreamIdDelta(QuicTransportVersion version) {
return VersionHasIetfQuicFrames(version) ? 4 : 2;
}
QuicStreamId QuicUtils::GetFirstBidirectionalStreamId(
QuicTransportVersion version, Perspective perspective) {
if (VersionHasIetfQuicFrames(version)) {
return perspective == Perspective::IS_CLIENT ? 0 : 1;
} else if (QuicVersionUsesCryptoFrames(version)) {
return perspective == Perspective::IS_CLIENT ? 1 : 2;
}
return perspective == Perspective::IS_CLIENT ? 3 : 2;
}
QuicStreamId QuicUtils::GetFirstUnidirectionalStreamId(
QuicTransportVersion version, Perspective perspective) {
if (VersionHasIetfQuicFrames(version)) {
return perspective == Perspective::IS_CLIENT ? 2 : 3;
} else if (QuicVersionUsesCryptoFrames(version)) {
return perspective == Perspective::IS_CLIENT ? 1 : 2;
}
return perspective == Perspective::IS_CLIENT ? 3 : 2;
}
QuicStreamId QuicUtils::GetMaxClientInitiatedBidirectionalStreamId(
QuicTransportVersion version) {
if (VersionHasIetfQuicFrames(version)) {
return std::numeric_limits<QuicStreamId>::max() - 3;
}
return std::numeric_limits<QuicStreamId>::max();
}
QuicConnectionId QuicUtils::CreateRandomConnectionId() {
return CreateRandomConnectionId(kQuicDefaultConnectionIdLength,
QuicRandom::GetInstance());
}
QuicConnectionId QuicUtils::CreateRandomConnectionId(QuicRandom* random) {
return CreateRandomConnectionId(kQuicDefaultConnectionIdLength, random);
}
QuicConnectionId QuicUtils::CreateRandomConnectionId(
uint8_t connection_id_length) {
return CreateRandomConnectionId(connection_id_length,
QuicRandom::GetInstance());
}
QuicConnectionId QuicUtils::CreateRandomConnectionId(
uint8_t connection_id_length, QuicRandom* random) {
QuicConnectionId connection_id;
connection_id.set_length(connection_id_length);
if (connection_id.length() > 0) {
random->RandBytes(connection_id.mutable_data(), connection_id.length());
}
return connection_id;
}
QuicConnectionId QuicUtils::CreateZeroConnectionId(
QuicTransportVersion version) {
if (!VersionAllowsVariableLengthConnectionIds(version)) {
char connection_id_bytes[8] = {0, 0, 0, 0, 0, 0, 0, 0};
return QuicConnectionId(static_cast<char*>(connection_id_bytes),
ABSL_ARRAYSIZE(connection_id_bytes));
}
return EmptyQuicConnectionId();
}
bool QuicUtils::IsConnectionIdLengthValidForVersion(
size_t connection_id_length, QuicTransportVersion transport_version) {
if (connection_id_length >
static_cast<size_t>(std::numeric_limits<uint8_t>::max())) {
return false;
}
if (transport_version == QUIC_VERSION_UNSUPPORTED ||
transport_version == QUIC_VERSION_RESERVED_FOR_NEGOTIATION) {
return true;
}
const uint8_t connection_id_length8 =
static_cast<uint8_t>(connection_id_length);
if (!VersionAllowsVariableLengthConnectionIds(transport_version)) {
return connection_id_length8 == kQuicDefaultConnectionIdLength;
}
return connection_id_length8 <= kQuicMaxConnectionIdWithLengthPrefixLength;
}
bool QuicUtils::IsConnectionIdValidForVersion(
QuicConnectionId connection_id, QuicTransportVersion transport_version) {
return IsConnectionIdLengthValidForVersion(connection_id.length(),
transport_version);
}
StatelessResetToken QuicUtils::GenerateStatelessResetToken(
QuicConnectionId connection_id) {
static_assert(sizeof(absl::uint128) == sizeof(StatelessResetToken),
"bad size");
static_assert(alignof(absl::uint128) >= alignof(StatelessResetToken),
"bad alignment");
absl::uint128 hash = FNV1a_128_Hash(
absl::string_view(connection_id.data(), connection_id.length()));
return *reinterpret_cast<StatelessResetToken*>(&hash);
}
QuicStreamCount QuicUtils::GetMaxStreamCount() {
return (kMaxQuicStreamCount >> 2) + 1;
}
PacketNumberSpace QuicUtils::GetPacketNumberSpace(
EncryptionLevel encryption_level) {
switch (encryption_level) {
case ENCRYPTION_INITIAL:
return INITIAL_DATA;
case ENCRYPTION_HANDSHAKE:
return HANDSHAKE_DATA;
case ENCRYPTION_ZERO_RTT:
case ENCRYPTION_FORWARD_SECURE:
return APPLICATION_DATA;
default:
QUIC_BUG(quic_bug_10839_3)
<< "Try to get packet number space of encryption level: "
<< encryption_level;
return NUM_PACKET_NUMBER_SPACES;
}
}
EncryptionLevel QuicUtils::GetEncryptionLevelToSendAckofSpace(
PacketNumberSpace packet_number_space) {
switch (packet_number_space) {
case INITIAL_DATA:
return ENCRYPTION_INITIAL;
case HANDSHAKE_DATA:
return ENCRYPTION_HANDSHAKE;
case APPLICATION_DATA:
return ENCRYPTION_FORWARD_SECURE;
default:
QUICHE_DCHECK(false);
return NUM_ENCRYPTION_LEVELS;
}
}
bool QuicUtils::IsProbingFrame(QuicFrameType type) {
switch (type) {
case PATH_CHALLENGE_FRAME:
case PATH_RESPONSE_FRAME:
case NEW_CONNECTION_ID_FRAME:
case PADDING_FRAME:
return true;
default:
return false;
}
}
bool QuicUtils::IsAckElicitingFrame(QuicFrameType type) {
switch (type) {
case PADDING_FRAME:
case STOP_WAITING_FRAME:
case ACK_FRAME:
case CONNECTION_CLOSE_FRAME:
return false;
default:
return true;
}
}
bool QuicUtils::AreStatelessResetTokensEqual(
const StatelessResetToken& token1, const StatelessResetToken& token2) {
char byte = 0;
for (size_t i = 0; i < kStatelessResetTokenLength; i++) {
byte |= (token1[i] ^ token2[i]);
}
return byte == 0;
}
bool IsValidWebTransportSessionId(WebTransportSessionId id,
ParsedQuicVersion version) {
QUICHE_DCHECK(version.UsesHttp3());
return (id <= std::numeric_limits<QuicStreamId>::max()) &&
QuicUtils::IsBidirectionalStreamId(id, version) &&
QuicUtils::IsClientInitiatedStreamId(version.transport_version, id);
}
QuicByteCount MemSliceSpanTotalSize(absl::Span<quiche::QuicheMemSlice> span) {
QuicByteCount total = 0;
for (const quiche::QuicheMemSlice& slice : span) {
total += slice.length();
}
return total;
}
absl::string_view PosixBasename(absl::string_view path) {
constexpr char kPathSeparator = '/';
size_t pos = path.find_last_of(kPathSeparator);
if (pos == absl::string_view::npos) {
return path;
}
if (pos == 0) {
return absl::ClippedSubstr(path, 1);
}
return absl::ClippedSubstr(path, pos + 1);
}
std::string RawSha256(absl::string_view input) {
std::string raw_hash;
raw_hash.resize(SHA256_DIGEST_LENGTH);
SHA256(reinterpret_cast<const uint8_t*>(input.data()), input.size(),
reinterpret_cast<uint8_t*>(&raw_hash[0]));
return raw_hash;
}
#undef RETURN_STRING_LITERAL
} | #include "quiche/quic/core/quic_utils.h"
#include <string>
#include <vector>
#include "absl/base/macros.h"
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_connection_id.h"
#include "quiche/quic/core/quic_types.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
namespace quic {
namespace test {
namespace {
class QuicUtilsTest : public QuicTest {};
TEST_F(QuicUtilsTest, DetermineAddressChangeType) {
const std::string kIPv4String1 = "1.2.3.4";
const std::string kIPv4String2 = "1.2.3.5";
const std::string kIPv4String3 = "1.1.3.5";
const std::string kIPv6String1 = "2001:700:300:1800::f";
const std::string kIPv6String2 = "2001:700:300:1800:1:1:1:f";
QuicSocketAddress old_address;
QuicSocketAddress new_address;
QuicIpAddress address;
EXPECT_EQ(NO_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
ASSERT_TRUE(address.FromString(kIPv4String1));
old_address = QuicSocketAddress(address, 1234);
EXPECT_EQ(NO_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
new_address = QuicSocketAddress(address, 1234);
EXPECT_EQ(NO_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
new_address = QuicSocketAddress(address, 5678);
EXPECT_EQ(PORT_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
ASSERT_TRUE(address.FromString(kIPv6String1));
old_address = QuicSocketAddress(address, 1234);
new_address = QuicSocketAddress(address, 5678);
EXPECT_EQ(PORT_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
ASSERT_TRUE(address.FromString(kIPv4String1));
old_address = QuicSocketAddress(address, 1234);
ASSERT_TRUE(address.FromString(kIPv6String1));
new_address = QuicSocketAddress(address, 1234);
EXPECT_EQ(IPV4_TO_IPV6_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
old_address = QuicSocketAddress(address, 1234);
ASSERT_TRUE(address.FromString(kIPv4String1));
new_address = QuicSocketAddress(address, 1234);
EXPECT_EQ(IPV6_TO_IPV4_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
ASSERT_TRUE(address.FromString(kIPv6String2));
new_address = QuicSocketAddress(address, 1234);
EXPECT_EQ(IPV6_TO_IPV6_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
ASSERT_TRUE(address.FromString(kIPv4String1));
old_address = QuicSocketAddress(address, 1234);
ASSERT_TRUE(address.FromString(kIPv4String2));
new_address = QuicSocketAddress(address, 1234);
EXPECT_EQ(IPV4_SUBNET_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
ASSERT_TRUE(address.FromString(kIPv4String3));
new_address = QuicSocketAddress(address, 1234);
EXPECT_EQ(IPV4_TO_IPV4_CHANGE,
QuicUtils::DetermineAddressChangeType(old_address, new_address));
}
absl::uint128 IncrementalHashReference(const void* data, size_t len) {
absl::uint128 hash = absl::MakeUint128(UINT64_C(7809847782465536322),
UINT64_C(7113472399480571277));
const absl::uint128 kPrime = absl::MakeUint128(16777216, 315);
const uint8_t* octets = reinterpret_cast<const uint8_t*>(data);
for (size_t i = 0; i < len; ++i) {
hash = hash ^ absl::MakeUint128(0, octets[i]);
hash = hash * kPrime;
}
return hash;
}
TEST_F(QuicUtilsTest, ReferenceTest) {
std::vector<uint8_t> data(32);
for (size_t i = 0; i < data.size(); ++i) {
data[i] = i % 255;
}
EXPECT_EQ(IncrementalHashReference(data.data(), data.size()),
QuicUtils::FNV1a_128_Hash(absl::string_view(
reinterpret_cast<const char*>(data.data()), data.size())));
}
TEST_F(QuicUtilsTest, IsUnackable) {
for (size_t i = FIRST_PACKET_STATE; i <= LAST_PACKET_STATE; ++i) {
if (i == NEVER_SENT || i == ACKED || i == UNACKABLE) {
EXPECT_FALSE(QuicUtils::IsAckable(static_cast<SentPacketState>(i)));
} else {
EXPECT_TRUE(QuicUtils::IsAckable(static_cast<SentPacketState>(i)));
}
}
}
TEST_F(QuicUtilsTest, RetransmissionTypeToPacketState) {
for (size_t i = FIRST_TRANSMISSION_TYPE; i <= LAST_TRANSMISSION_TYPE; ++i) {
if (i == NOT_RETRANSMISSION) {
continue;
}
SentPacketState state = QuicUtils::RetransmissionTypeToPacketState(
static_cast<TransmissionType>(i));
if (i == HANDSHAKE_RETRANSMISSION) {
EXPECT_EQ(HANDSHAKE_RETRANSMITTED, state);
} else if (i == LOSS_RETRANSMISSION) {
EXPECT_EQ(LOST, state);
} else if (i == ALL_ZERO_RTT_RETRANSMISSION) {
EXPECT_EQ(UNACKABLE, state);
} else if (i == PTO_RETRANSMISSION) {
EXPECT_EQ(PTO_RETRANSMITTED, state);
} else if (i == PATH_RETRANSMISSION) {
EXPECT_EQ(NOT_CONTRIBUTING_RTT, state);
} else if (i == ALL_INITIAL_RETRANSMISSION) {
EXPECT_EQ(UNACKABLE, state);
} else {
QUICHE_DCHECK(false)
<< "No corresponding packet state according to transmission type: "
<< i;
}
}
}
TEST_F(QuicUtilsTest, IsIetfPacketHeader) {
uint8_t first_byte = 0;
EXPECT_TRUE(QuicUtils::IsIetfPacketHeader(first_byte));
EXPECT_TRUE(QuicUtils::IsIetfPacketShortHeader(first_byte));
first_byte |= (FLAGS_LONG_HEADER | FLAGS_DEMULTIPLEXING_BIT);
EXPECT_TRUE(QuicUtils::IsIetfPacketHeader(first_byte));
EXPECT_FALSE(QuicUtils::IsIetfPacketShortHeader(first_byte));
first_byte = 0;
first_byte |= FLAGS_LONG_HEADER;
EXPECT_TRUE(QuicUtils::IsIetfPacketHeader(first_byte));
EXPECT_FALSE(QuicUtils::IsIetfPacketShortHeader(first_byte));
first_byte = 0;
first_byte |= PACKET_PUBLIC_FLAGS_8BYTE_CONNECTION_ID;
EXPECT_FALSE(QuicUtils::IsIetfPacketHeader(first_byte));
EXPECT_FALSE(QuicUtils::IsIetfPacketShortHeader(first_byte));
}
TEST_F(QuicUtilsTest, RandomConnectionId) {
MockRandom random(33);
QuicConnectionId connection_id = QuicUtils::CreateRandomConnectionId(&random);
EXPECT_EQ(connection_id.length(), sizeof(uint64_t));
char connection_id_bytes[sizeof(uint64_t)];
random.RandBytes(connection_id_bytes, ABSL_ARRAYSIZE(connection_id_bytes));
EXPECT_EQ(connection_id,
QuicConnectionId(static_cast<char*>(connection_id_bytes),
ABSL_ARRAYSIZE(connection_id_bytes)));
EXPECT_NE(connection_id, EmptyQuicConnectionId());
EXPECT_NE(connection_id, TestConnectionId());
EXPECT_NE(connection_id, TestConnectionId(1));
EXPECT_NE(connection_id, TestConnectionIdNineBytesLong(1));
EXPECT_EQ(QuicUtils::CreateRandomConnectionId().length(),
kQuicDefaultConnectionIdLength);
}
TEST_F(QuicUtilsTest, RandomConnectionIdVariableLength) {
MockRandom random(1337);
const uint8_t connection_id_length = 9;
QuicConnectionId connection_id =
QuicUtils::CreateRandomConnectionId(connection_id_length, &random);
EXPECT_EQ(connection_id.length(), connection_id_length);
char connection_id_bytes[connection_id_length];
random.RandBytes(connection_id_bytes, ABSL_ARRAYSIZE(connection_id_bytes));
EXPECT_EQ(connection_id,
QuicConnectionId(static_cast<char*>(connection_id_bytes),
ABSL_ARRAYSIZE(connection_id_bytes)));
EXPECT_NE(connection_id, EmptyQuicConnectionId());
EXPECT_NE(connection_id, TestConnectionId());
EXPECT_NE(connection_id, TestConnectionId(1));
EXPECT_NE(connection_id, TestConnectionIdNineBytesLong(1));
EXPECT_EQ(QuicUtils::CreateRandomConnectionId(connection_id_length).length(),
connection_id_length);
}
TEST_F(QuicUtilsTest, VariableLengthConnectionId) {
EXPECT_FALSE(VersionAllowsVariableLengthConnectionIds(QUIC_VERSION_46));
EXPECT_TRUE(QuicUtils::IsConnectionIdValidForVersion(
QuicUtils::CreateZeroConnectionId(QUIC_VERSION_46), QUIC_VERSION_46));
EXPECT_NE(QuicUtils::CreateZeroConnectionId(QUIC_VERSION_46),
EmptyQuicConnectionId());
EXPECT_FALSE(QuicUtils::IsConnectionIdValidForVersion(EmptyQuicConnectionId(),
QUIC_VERSION_46));
}
TEST_F(QuicUtilsTest, StatelessResetToken) {
QuicConnectionId connection_id1a = test::TestConnectionId(1);
QuicConnectionId connection_id1b = test::TestConnectionId(1);
QuicConnectionId connection_id2 = test::TestConnectionId(2);
StatelessResetToken token1a =
QuicUtils::GenerateStatelessResetToken(connection_id1a);
StatelessResetToken token1b =
QuicUtils::GenerateStatelessResetToken(connection_id1b);
StatelessResetToken token2 =
QuicUtils::GenerateStatelessResetToken(connection_id2);
EXPECT_EQ(token1a, token1b);
EXPECT_NE(token1a, token2);
EXPECT_TRUE(QuicUtils::AreStatelessResetTokensEqual(token1a, token1b));
EXPECT_FALSE(QuicUtils::AreStatelessResetTokensEqual(token1a, token2));
}
TEST_F(QuicUtilsTest, EcnCodepointToString) {
EXPECT_EQ(EcnCodepointToString(ECN_NOT_ECT), "Not-ECT");
EXPECT_EQ(EcnCodepointToString(ECN_ECT0), "ECT(0)");
EXPECT_EQ(EcnCodepointToString(ECN_ECT1), "ECT(1)");
EXPECT_EQ(EcnCodepointToString(ECN_CE), "CE");
}
TEST_F(QuicUtilsTest, PosixBasename) {
EXPECT_EQ("", PosixBasename("/hello/"));
EXPECT_EQ("hello", PosixBasename("/hello"));
EXPECT_EQ("world", PosixBasename("hello/world"));
EXPECT_EQ("", PosixBasename("hello/"));
EXPECT_EQ("world", PosixBasename("world"));
EXPECT_EQ("", PosixBasename("/"));
EXPECT_EQ("", PosixBasename(""));
EXPECT_EQ("C:\\hello", PosixBasename("C:\\hello"));
EXPECT_EQ("world", PosixBasename("C:\\hello/world"));
}
enum class TestEnumClassBit : uint8_t {
BIT_ZERO = 0,
BIT_ONE,
BIT_TWO,
};
enum TestEnumBit {
TEST_BIT_0 = 0,
TEST_BIT_1,
TEST_BIT_2,
};
TEST(QuicBitMaskTest, EnumClass) {
BitMask<TestEnumClassBit> mask(
{TestEnumClassBit::BIT_ZERO, TestEnumClassBit::BIT_TWO});
EXPECT_TRUE(mask.IsSet(TestEnumClassBit::BIT_ZERO));
EXPECT_FALSE(mask.IsSet(TestEnumClassBit::BIT_ONE));
EXPECT_TRUE(mask.IsSet(TestEnumClassBit::BIT_TWO));
mask.ClearAll();
EXPECT_FALSE(mask.IsSet(TestEnumClassBit::BIT_ZERO));
EXPECT_FALSE(mask.IsSet(TestEnumClassBit::BIT_ONE));
EXPECT_FALSE(mask.IsSet(TestEnumClassBit::BIT_TWO));
}
TEST(QuicBitMaskTest, Enum) {
BitMask<TestEnumBit> mask({TEST_BIT_1, TEST_BIT_2});
EXPECT_FALSE(mask.IsSet(TEST_BIT_0));
EXPECT_TRUE(mask.IsSet(TEST_BIT_1));
EXPECT_TRUE(mask.IsSet(TEST_BIT_2));
mask.ClearAll();
EXPECT_FALSE(mask.IsSet(TEST_BIT_0));
EXPECT_FALSE(mask.IsSet(TEST_BIT_1));
EXPECT_FALSE(mask.IsSet(TEST_BIT_2));
}
TEST(QuicBitMaskTest, Integer) {
BitMask<int> mask({1, 3});
EXPECT_EQ(mask.Max(), 3);
mask.Set(3);
mask.Set({5, 7, 9});
EXPECT_EQ(mask.Max(), 9);
EXPECT_FALSE(mask.IsSet(0));
EXPECT_TRUE(mask.IsSet(1));
EXPECT_FALSE(mask.IsSet(2));
EXPECT_TRUE(mask.IsSet(3));
EXPECT_FALSE(mask.IsSet(4));
EXPECT_TRUE(mask.IsSet(5));
EXPECT_FALSE(mask.IsSet(6));
EXPECT_TRUE(mask.IsSet(7));
EXPECT_FALSE(mask.IsSet(8));
EXPECT_TRUE(mask.IsSet(9));
}
TEST(QuicBitMaskTest, NumBits) {
EXPECT_EQ(64u, BitMask<int>::NumBits());
EXPECT_EQ(32u, (BitMask<int, uint32_t>::NumBits()));
}
TEST(QuicBitMaskTest, Constructor) {
BitMask<int> empty_mask;
for (size_t bit = 0; bit < empty_mask.NumBits(); ++bit) {
EXPECT_FALSE(empty_mask.IsSet(bit));
}
BitMask<int> mask({1, 3});
BitMask<int> mask2 = mask;
BitMask<int> mask3(mask2);
for (size_t bit = 0; bit < mask.NumBits(); ++bit) {
EXPECT_EQ(mask.IsSet(bit), mask2.IsSet(bit));
EXPECT_EQ(mask.IsSet(bit), mask3.IsSet(bit));
}
EXPECT_TRUE(std::is_trivially_copyable<BitMask<int>>::value);
}
TEST(QuicBitMaskTest, Any) {
BitMask<int> mask;
EXPECT_FALSE(mask.Any());
mask.Set(3);
EXPECT_TRUE(mask.Any());
mask.Set(2);
EXPECT_TRUE(mask.Any());
mask.ClearAll();
EXPECT_FALSE(mask.Any());
}
TEST(QuicBitMaskTest, And) {
using Mask = BitMask<int>;
EXPECT_EQ(Mask({1, 3, 6}) & Mask({3, 5, 6}), Mask({3, 6}));
EXPECT_EQ(Mask({1, 2, 4}) & Mask({3, 5}), Mask({}));
EXPECT_EQ(Mask({1, 2, 3, 4, 5}) & Mask({}), Mask({}));
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_utils.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/quic_utils_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
c0b6a6bc-27ba-43e0-95f9-2ca9a301d851 | cpp | abseil/abseil-cpp | format | absl/time/format.cc | absl/time/format_test.cc | #include <string.h>
#include <cctype>
#include <cstdint>
#include <utility>
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "absl/time/internal/cctz/include/cctz/time_zone.h"
#include "absl/time/time.h"
namespace cctz = absl::time_internal::cctz;
namespace absl {
ABSL_NAMESPACE_BEGIN
ABSL_DLL extern const char RFC3339_full[] = "%Y-%m-%d%ET%H:%M:%E*S%Ez";
ABSL_DLL extern const char RFC3339_sec[] = "%Y-%m-%d%ET%H:%M:%S%Ez";
ABSL_DLL extern const char RFC1123_full[] = "%a, %d %b %E4Y %H:%M:%S %z";
ABSL_DLL extern const char RFC1123_no_wday[] = "%d %b %E4Y %H:%M:%S %z";
namespace {
const char kInfiniteFutureStr[] = "infinite-future";
const char kInfinitePastStr[] = "infinite-past";
struct cctz_parts {
cctz::time_point<cctz::seconds> sec;
cctz::detail::femtoseconds fem;
};
inline cctz::time_point<cctz::seconds> unix_epoch() {
return std::chrono::time_point_cast<cctz::seconds>(
std::chrono::system_clock::from_time_t(0));
}
cctz_parts Split(absl::Time t) {
const auto d = time_internal::ToUnixDuration(t);
const int64_t rep_hi = time_internal::GetRepHi(d);
const int64_t rep_lo = time_internal::GetRepLo(d);
const auto sec = unix_epoch() + cctz::seconds(rep_hi);
const auto fem = cctz::detail::femtoseconds(rep_lo * (1000 * 1000 / 4));
return {sec, fem};
}
absl::Time Join(const cctz_parts& parts) {
const int64_t rep_hi = (parts.sec - unix_epoch()).count();
const uint32_t rep_lo =
static_cast<uint32_t>(parts.fem.count() / (1000 * 1000 / 4));
const auto d = time_internal::MakeDuration(rep_hi, rep_lo);
return time_internal::FromUnixDuration(d);
}
}
std::string FormatTime(absl::string_view format, absl::Time t,
absl::TimeZone tz) {
if (t == absl::InfiniteFuture()) return std::string(kInfiniteFutureStr);
if (t == absl::InfinitePast()) return std::string(kInfinitePastStr);
const auto parts = Split(t);
return cctz::detail::format(std::string(format), parts.sec, parts.fem,
cctz::time_zone(tz));
}
std::string FormatTime(absl::Time t, absl::TimeZone tz) {
return FormatTime(RFC3339_full, t, tz);
}
std::string FormatTime(absl::Time t) {
return absl::FormatTime(RFC3339_full, t, absl::LocalTimeZone());
}
bool ParseTime(absl::string_view format, absl::string_view input,
absl::Time* time, std::string* err) {
return absl::ParseTime(format, input, absl::UTCTimeZone(), time, err);
}
bool ParseTime(absl::string_view format, absl::string_view input,
absl::TimeZone tz, absl::Time* time, std::string* err) {
auto strip_leading_space = [](absl::string_view* sv) {
while (!sv->empty()) {
if (!std::isspace(sv->front())) return;
sv->remove_prefix(1);
}
};
struct Literal {
const char* name;
size_t size;
absl::Time value;
};
static Literal literals[] = {
{kInfiniteFutureStr, strlen(kInfiniteFutureStr), InfiniteFuture()},
{kInfinitePastStr, strlen(kInfinitePastStr), InfinitePast()},
};
strip_leading_space(&input);
for (const auto& lit : literals) {
if (absl::StartsWith(input, absl::string_view(lit.name, lit.size))) {
absl::string_view tail = input;
tail.remove_prefix(lit.size);
strip_leading_space(&tail);
if (tail.empty()) {
*time = lit.value;
return true;
}
}
}
std::string error;
cctz_parts parts;
const bool b =
cctz::detail::parse(std::string(format), std::string(input),
cctz::time_zone(tz), &parts.sec, &parts.fem, &error);
if (b) {
*time = Join(parts);
} else if (err != nullptr) {
*err = std::move(error);
}
return b;
}
bool AbslParseFlag(absl::string_view text, absl::Time* t, std::string* error) {
return absl::ParseTime(RFC3339_full, text, absl::UTCTimeZone(), t, error);
}
std::string AbslUnparseFlag(absl::Time t) {
return absl::FormatTime(RFC3339_full, t, absl::UTCTimeZone());
}
bool ParseFlag(const std::string& text, absl::Time* t, std::string* error) {
return absl::ParseTime(RFC3339_full, text, absl::UTCTimeZone(), t, error);
}
std::string UnparseFlag(absl::Time t) {
return absl::FormatTime(RFC3339_full, t, absl::UTCTimeZone());
}
ABSL_NAMESPACE_END
} | #include <cstdint>
#include <limits>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/time/internal/test_util.h"
#include "absl/time/time.h"
using testing::HasSubstr;
namespace {
void TestFormatSpecifier(absl::Time t, absl::TimeZone tz,
const std::string& fmt, const std::string& ans) {
EXPECT_EQ(ans, absl::FormatTime(fmt, t, tz));
EXPECT_EQ("xxx " + ans, absl::FormatTime("xxx " + fmt, t, tz));
EXPECT_EQ(ans + " yyy", absl::FormatTime(fmt + " yyy", t, tz));
EXPECT_EQ("xxx " + ans + " yyy",
absl::FormatTime("xxx " + fmt + " yyy", t, tz));
}
TEST(FormatTime, Basics) {
absl::TimeZone tz = absl::UTCTimeZone();
absl::Time t = absl::FromTimeT(0);
EXPECT_EQ("", absl::FormatTime("", t, tz));
EXPECT_EQ(" ", absl::FormatTime(" ", t, tz));
EXPECT_EQ(" ", absl::FormatTime(" ", t, tz));
EXPECT_EQ("xxx", absl::FormatTime("xxx", t, tz));
std::string big(128, 'x');
EXPECT_EQ(big, absl::FormatTime(big, t, tz));
std::string bigger(100000, 'x');
EXPECT_EQ(bigger, absl::FormatTime(bigger, t, tz));
t += absl::Hours(13) + absl::Minutes(4) + absl::Seconds(5);
t += absl::Milliseconds(6) + absl::Microseconds(7) + absl::Nanoseconds(8);
EXPECT_EQ("1970-01-01", absl::FormatTime("%Y-%m-%d", t, tz));
EXPECT_EQ("13:04:05", absl::FormatTime("%H:%M:%S", t, tz));
EXPECT_EQ("13:04:05.006", absl::FormatTime("%H:%M:%E3S", t, tz));
EXPECT_EQ("13:04:05.006007", absl::FormatTime("%H:%M:%E6S", t, tz));
EXPECT_EQ("13:04:05.006007008", absl::FormatTime("%H:%M:%E9S", t, tz));
}
TEST(FormatTime, LocaleSpecific) {
const absl::TimeZone tz = absl::UTCTimeZone();
absl::Time t = absl::FromTimeT(0);
TestFormatSpecifier(t, tz, "%a", "Thu");
TestFormatSpecifier(t, tz, "%A", "Thursday");
TestFormatSpecifier(t, tz, "%b", "Jan");
TestFormatSpecifier(t, tz, "%B", "January");
const std::string s =
absl::FormatTime("%c", absl::FromTimeT(0), absl::UTCTimeZone());
EXPECT_THAT(s, HasSubstr("1970"));
EXPECT_THAT(s, HasSubstr("00:00:00"));
TestFormatSpecifier(t, tz, "%p", "AM");
TestFormatSpecifier(t, tz, "%x", "01/01/70");
TestFormatSpecifier(t, tz, "%X", "00:00:00");
}
TEST(FormatTime, ExtendedSeconds) {
const absl::TimeZone tz = absl::UTCTimeZone();
absl::Time t = absl::FromTimeT(0) + absl::Seconds(5);
EXPECT_EQ("05", absl::FormatTime("%E*S", t, tz));
EXPECT_EQ("05.000000000000000", absl::FormatTime("%E15S", t, tz));
t += absl::Milliseconds(6) + absl::Microseconds(7) + absl::Nanoseconds(8);
EXPECT_EQ("05.006007008", absl::FormatTime("%E*S", t, tz));
EXPECT_EQ("05", absl::FormatTime("%E0S", t, tz));
EXPECT_EQ("05.006007008000000", absl::FormatTime("%E15S", t, tz));
t = absl::FromUnixMicros(-1);
EXPECT_EQ("1969-12-31 23:59:59.999999",
absl::FormatTime("%Y-%m-%d %H:%M:%E*S", t, tz));
t = absl::FromUnixMicros(1395024427333304);
EXPECT_EQ("2014-03-17 02:47:07.333304",
absl::FormatTime("%Y-%m-%d %H:%M:%E*S", t, tz));
t += absl::Microseconds(1);
EXPECT_EQ("2014-03-17 02:47:07.333305",
absl::FormatTime("%Y-%m-%d %H:%M:%E*S", t, tz));
}
TEST(FormatTime, RFC1123FormatPadsYear) {
absl::TimeZone tz = absl::UTCTimeZone();
absl::Time t = absl::FromCivil(absl::CivilSecond(77, 6, 28, 9, 8, 7), tz);
EXPECT_EQ("Mon, 28 Jun 0077 09:08:07 +0000",
absl::FormatTime(absl::RFC1123_full, t, tz));
EXPECT_EQ("28 Jun 0077 09:08:07 +0000",
absl::FormatTime(absl::RFC1123_no_wday, t, tz));
}
TEST(FormatTime, InfiniteTime) {
absl::TimeZone tz = absl::time_internal::LoadTimeZone("America/Los_Angeles");
EXPECT_EQ("infinite-future",
absl::FormatTime("%H:%M blah", absl::InfiniteFuture(), tz));
EXPECT_EQ("infinite-past",
absl::FormatTime("%H:%M blah", absl::InfinitePast(), tz));
}
TEST(ParseTime, Basics) {
absl::Time t = absl::FromTimeT(1234567890);
std::string err;
EXPECT_TRUE(absl::ParseTime("", "", &t, &err)) << err;
EXPECT_EQ(absl::UnixEpoch(), t);
EXPECT_TRUE(absl::ParseTime(" ", " ", &t, &err)) << err;
EXPECT_TRUE(absl::ParseTime(" ", " ", &t, &err)) << err;
EXPECT_TRUE(absl::ParseTime("x", "x", &t, &err)) << err;
EXPECT_TRUE(absl::ParseTime("xxx", "xxx", &t, &err)) << err;
EXPECT_TRUE(absl::ParseTime("%Y-%m-%d %H:%M:%S %z",
"2013-06-28 19:08:09 -0800", &t, &err))
<< err;
const auto ci = absl::FixedTimeZone(-8 * 60 * 60).At(t);
EXPECT_EQ(absl::CivilSecond(2013, 6, 28, 19, 8, 9), ci.cs);
EXPECT_EQ(absl::ZeroDuration(), ci.subsecond);
}
TEST(ParseTime, NullErrorString) {
absl::Time t;
EXPECT_FALSE(absl::ParseTime("%Q", "invalid format", &t, nullptr));
EXPECT_FALSE(absl::ParseTime("%H", "12 trailing data", &t, nullptr));
EXPECT_FALSE(
absl::ParseTime("%H out of range", "42 out of range", &t, nullptr));
}
TEST(ParseTime, WithTimeZone) {
const absl::TimeZone tz =
absl::time_internal::LoadTimeZone("America/Los_Angeles");
absl::Time t;
std::string e;
EXPECT_TRUE(
absl::ParseTime("%Y-%m-%d %H:%M:%S", "2013-06-28 19:08:09", tz, &t, &e))
<< e;
auto ci = tz.At(t);
EXPECT_EQ(absl::CivilSecond(2013, 6, 28, 19, 8, 9), ci.cs);
EXPECT_EQ(absl::ZeroDuration(), ci.subsecond);
EXPECT_TRUE(absl::ParseTime("%Y-%m-%d %H:%M:%S %z",
"2013-06-28 19:08:09 +0800", tz, &t, &e))
<< e;
ci = absl::FixedTimeZone(8 * 60 * 60).At(t);
EXPECT_EQ(absl::CivilSecond(2013, 6, 28, 19, 8, 9), ci.cs);
EXPECT_EQ(absl::ZeroDuration(), ci.subsecond);
}
TEST(ParseTime, ErrorCases) {
absl::Time t = absl::FromTimeT(0);
std::string err;
EXPECT_FALSE(absl::ParseTime("%S", "123", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Illegal trailing data"));
err.clear();
EXPECT_FALSE(absl::ParseTime("%Q", "x", &t, &err)) << err;
EXPECT_FALSE(err.empty());
EXPECT_FALSE(absl::ParseTime("%m-%d", "2-3 blah", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Illegal trailing data"));
EXPECT_FALSE(absl::ParseTime("%m-%d", "2-31", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Out-of-range"));
EXPECT_TRUE(absl::ParseTime("%z", "-0203", &t, &err)) << err;
EXPECT_FALSE(absl::ParseTime("%z", "- 2 3", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_TRUE(absl::ParseTime("%Ez", "-02:03", &t, &err)) << err;
EXPECT_FALSE(absl::ParseTime("%Ez", "- 2: 3", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%Ez", "+-08:00", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%Ez", "-+08:00", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%Y", "-0", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%E4Y", "-0", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%H", "-0", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%M", "-0", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%S", "-0", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%z", "+-000", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%Ez", "+-0:00", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%z", "-00-0", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Illegal trailing data"));
EXPECT_FALSE(absl::ParseTime("%Ez", "-00:-0", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Illegal trailing data"));
}
TEST(ParseTime, ExtendedSeconds) {
std::string err;
absl::Time t;
t = absl::UnixEpoch();
EXPECT_TRUE(absl::ParseTime("%E*S", "0.2147483647", &t, &err)) << err;
EXPECT_EQ(absl::UnixEpoch() + absl::Nanoseconds(214748364) +
absl::Nanoseconds(1) / 2,
t);
t = absl::UnixEpoch();
EXPECT_TRUE(absl::ParseTime("%E*S", "0.2147483648", &t, &err)) << err;
EXPECT_EQ(absl::UnixEpoch() + absl::Nanoseconds(214748364) +
absl::Nanoseconds(3) / 4,
t);
t = absl::UnixEpoch();
EXPECT_TRUE(absl::ParseTime(
"%E*S", "0.214748364801234567890123456789012345678901234567890123456789",
&t, &err))
<< err;
EXPECT_EQ(absl::UnixEpoch() + absl::Nanoseconds(214748364) +
absl::Nanoseconds(3) / 4,
t);
}
TEST(ParseTime, ExtendedOffsetErrors) {
std::string err;
absl::Time t;
EXPECT_FALSE(absl::ParseTime("%z", "-123", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Illegal trailing data"));
EXPECT_FALSE(absl::ParseTime("%z", "-1", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
EXPECT_FALSE(absl::ParseTime("%Ez", "-12:3", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Illegal trailing data"));
EXPECT_FALSE(absl::ParseTime("%Ez", "-123", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Illegal trailing data"));
EXPECT_FALSE(absl::ParseTime("%Ez", "-1", &t, &err)) << err;
EXPECT_THAT(err, HasSubstr("Failed to parse"));
}
TEST(ParseTime, InfiniteTime) {
absl::Time t;
std::string err;
EXPECT_TRUE(absl::ParseTime("%H:%M blah", "infinite-future", &t, &err));
EXPECT_EQ(absl::InfiniteFuture(), t);
EXPECT_TRUE(absl::ParseTime("%H:%M blah", " infinite-future", &t, &err));
EXPECT_EQ(absl::InfiniteFuture(), t);
EXPECT_TRUE(absl::ParseTime("%H:%M blah", "infinite-future ", &t, &err));
EXPECT_EQ(absl::InfiniteFuture(), t);
EXPECT_TRUE(absl::ParseTime("%H:%M blah", " infinite-future ", &t, &err));
EXPECT_EQ(absl::InfiniteFuture(), t);
EXPECT_TRUE(absl::ParseTime("%H:%M blah", "infinite-past", &t, &err));
EXPECT_EQ(absl::InfinitePast(), t);
EXPECT_TRUE(absl::ParseTime("%H:%M blah", " infinite-past", &t, &err));
EXPECT_EQ(absl::InfinitePast(), t);
EXPECT_TRUE(absl::ParseTime("%H:%M blah", "infinite-past ", &t, &err));
EXPECT_EQ(absl::InfinitePast(), t);
EXPECT_TRUE(absl::ParseTime("%H:%M blah", " infinite-past ", &t, &err));
EXPECT_EQ(absl::InfinitePast(), t);
absl::TimeZone tz = absl::UTCTimeZone();
EXPECT_TRUE(absl::ParseTime("infinite-future %H:%M", "infinite-future 03:04",
&t, &err));
EXPECT_NE(absl::InfiniteFuture(), t);
EXPECT_EQ(3, tz.At(t).cs.hour());
EXPECT_EQ(4, tz.At(t).cs.minute());
EXPECT_TRUE(
absl::ParseTime("infinite-past %H:%M", "infinite-past 03:04", &t, &err));
EXPECT_NE(absl::InfinitePast(), t);
EXPECT_EQ(3, tz.At(t).cs.hour());
EXPECT_EQ(4, tz.At(t).cs.minute());
EXPECT_FALSE(absl::ParseTime("infinite-future %H:%M", "03:04", &t, &err));
EXPECT_FALSE(absl::ParseTime("infinite-past %H:%M", "03:04", &t, &err));
}
TEST(ParseTime, FailsOnUnrepresentableTime) {
const absl::TimeZone utc = absl::UTCTimeZone();
absl::Time t;
EXPECT_FALSE(
absl::ParseTime("%Y-%m-%d", "-292277022657-01-27", utc, &t, nullptr));
EXPECT_TRUE(
absl::ParseTime("%Y-%m-%d", "-292277022657-01-28", utc, &t, nullptr));
EXPECT_TRUE(
absl::ParseTime("%Y-%m-%d", "292277026596-12-04", utc, &t, nullptr));
EXPECT_FALSE(
absl::ParseTime("%Y-%m-%d", "292277026596-12-05", utc, &t, nullptr));
}
TEST(FormatParse, RoundTrip) {
const absl::TimeZone lax =
absl::time_internal::LoadTimeZone("America/Los_Angeles");
const absl::Time in =
absl::FromCivil(absl::CivilSecond(1977, 6, 28, 9, 8, 7), lax);
const absl::Duration subseconds = absl::Nanoseconds(654321);
std::string err;
{
absl::Time out;
const std::string s =
absl::FormatTime(absl::RFC3339_full, in + subseconds, lax);
EXPECT_TRUE(absl::ParseTime(absl::RFC3339_full, s, &out, &err))
<< s << ": " << err;
EXPECT_EQ(in + subseconds, out);
}
{
absl::Time out;
const std::string s = absl::FormatTime(absl::RFC1123_full, in, lax);
EXPECT_TRUE(absl::ParseTime(absl::RFC1123_full, s, &out, &err))
<< s << ": " << err;
EXPECT_EQ(in, out);
}
#if !defined(_MSC_VER) && !defined(__EMSCRIPTEN__)
{
absl::Time out;
const std::string s = absl::FormatTime("%c", in, absl::UTCTimeZone());
EXPECT_TRUE(absl::ParseTime("%c", s, &out, &err)) << s << ": " << err;
EXPECT_EQ(in, out);
}
#endif
}
TEST(FormatParse, RoundTripDistantFuture) {
const absl::TimeZone tz = absl::UTCTimeZone();
const absl::Time in =
absl::FromUnixSeconds(std::numeric_limits<int64_t>::max());
std::string err;
absl::Time out;
const std::string s = absl::FormatTime(absl::RFC3339_full, in, tz);
EXPECT_TRUE(absl::ParseTime(absl::RFC3339_full, s, &out, &err))
<< s << ": " << err;
EXPECT_EQ(in, out);
}
TEST(FormatParse, RoundTripDistantPast) {
const absl::TimeZone tz = absl::UTCTimeZone();
const absl::Time in =
absl::FromUnixSeconds(std::numeric_limits<int64_t>::min());
std::string err;
absl::Time out;
const std::string s = absl::FormatTime(absl::RFC3339_full, in, tz);
EXPECT_TRUE(absl::ParseTime(absl::RFC3339_full, s, &out, &err))
<< s << ": " << err;
EXPECT_EQ(in, out);
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/time/format.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/time/format_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
d8c6f192-fbf5-4544-be2d-8bd71d36f15e | cpp | google/quiche | qbone_packet_processor | quiche/quic/qbone/qbone_packet_processor.cc | quiche/quic/qbone/qbone_packet_processor_test.cc | #include "quiche/quic/qbone/qbone_packet_processor.h"
#include <netinet/icmp6.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <cstdint>
#include <cstring>
#include <string>
#include "absl/base/optimization.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_ip_address.h"
#include "quiche/quic/platform/api/quic_ip_address_family.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/qbone/platform/icmp_packet.h"
#include "quiche/quic/qbone/platform/tcp_packet.h"
#include "quiche/common/quiche_endian.h"
namespace {
constexpr size_t kIPv6AddressSize = 16;
constexpr size_t kIPv6MinPacketSize = 1280;
constexpr size_t kIcmpTtl = 64;
constexpr size_t kICMPv6DestinationUnreachableDueToSourcePolicy = 5;
constexpr size_t kIPv6DestinationOffset = 8;
}
namespace quic {
const QuicIpAddress QbonePacketProcessor::kInvalidIpAddress =
QuicIpAddress::Any6();
QbonePacketProcessor::QbonePacketProcessor(QuicIpAddress self_ip,
QuicIpAddress client_ip,
size_t client_ip_subnet_length,
OutputInterface* output,
StatsInterface* stats)
: client_ip_(client_ip),
output_(output),
stats_(stats),
filter_(new Filter) {
memcpy(self_ip_.s6_addr, self_ip.ToPackedString().data(), kIPv6AddressSize);
QUICHE_DCHECK_LE(client_ip_subnet_length, kIPv6AddressSize * 8);
client_ip_subnet_length_ = client_ip_subnet_length;
QUICHE_DCHECK(IpAddressFamily::IP_V6 == self_ip.address_family());
QUICHE_DCHECK(IpAddressFamily::IP_V6 == client_ip.address_family());
QUICHE_DCHECK(self_ip != kInvalidIpAddress);
}
QbonePacketProcessor::OutputInterface::~OutputInterface() {}
QbonePacketProcessor::StatsInterface::~StatsInterface() {}
QbonePacketProcessor::Filter::~Filter() {}
QbonePacketProcessor::ProcessingResult
QbonePacketProcessor::Filter::FilterPacket(Direction direction,
absl::string_view full_packet,
absl::string_view payload,
icmp6_hdr* icmp_header) {
return ProcessingResult::OK;
}
void QbonePacketProcessor::ProcessPacket(std::string* packet,
Direction direction) {
uint8_t traffic_class = TrafficClassFromHeader(*packet);
if (ABSL_PREDICT_FALSE(!IsValid())) {
QUIC_BUG(quic_bug_11024_1)
<< "QuicPacketProcessor is invoked in an invalid state.";
stats_->OnPacketDroppedSilently(direction, traffic_class);
return;
}
stats_->RecordThroughput(packet->size(), direction, traffic_class);
uint8_t transport_protocol;
char* transport_data;
icmp6_hdr icmp_header;
memset(&icmp_header, 0, sizeof(icmp_header));
ProcessingResult result = ProcessIPv6HeaderAndFilter(
packet, direction, &transport_protocol, &transport_data, &icmp_header);
in6_addr dst;
memcpy(&dst, &packet->data()[kIPv6DestinationOffset], kIPv6AddressSize);
switch (result) {
case ProcessingResult::OK:
switch (direction) {
case Direction::FROM_OFF_NETWORK:
output_->SendPacketToNetwork(*packet);
break;
case Direction::FROM_NETWORK:
output_->SendPacketToClient(*packet);
break;
}
stats_->OnPacketForwarded(direction, traffic_class);
break;
case ProcessingResult::SILENT_DROP:
stats_->OnPacketDroppedSilently(direction, traffic_class);
break;
case ProcessingResult::ICMP:
if (icmp_header.icmp6_type == ICMP6_ECHO_REPLY) {
auto icmp_body = absl::string_view(*packet).substr(sizeof(ip6_hdr) +
sizeof(icmp6_hdr));
SendIcmpResponse(dst, &icmp_header, icmp_body, direction);
} else {
SendIcmpResponse(dst, &icmp_header, *packet, direction);
}
stats_->OnPacketDroppedWithIcmp(direction, traffic_class);
break;
case ProcessingResult::ICMP_AND_TCP_RESET:
SendIcmpResponse(dst, &icmp_header, *packet, direction);
stats_->OnPacketDroppedWithIcmp(direction, traffic_class);
SendTcpReset(*packet, direction);
stats_->OnPacketDroppedWithTcpReset(direction, traffic_class);
break;
case ProcessingResult::TCP_RESET:
SendTcpReset(*packet, direction);
stats_->OnPacketDroppedWithTcpReset(direction, traffic_class);
break;
}
}
QbonePacketProcessor::ProcessingResult
QbonePacketProcessor::ProcessIPv6HeaderAndFilter(std::string* packet,
Direction direction,
uint8_t* transport_protocol,
char** transport_data,
icmp6_hdr* icmp_header) {
ProcessingResult result = ProcessIPv6Header(
packet, direction, transport_protocol, transport_data, icmp_header);
if (result == ProcessingResult::OK) {
char* packet_data = &*packet->begin();
size_t header_size = *transport_data - packet_data;
if (packet_data >= *transport_data || header_size > packet->size() ||
header_size < kIPv6HeaderSize) {
QUIC_BUG(quic_bug_11024_2)
<< "Invalid pointers encountered in "
"QbonePacketProcessor::ProcessPacket. Dropping the packet";
return ProcessingResult::SILENT_DROP;
}
result = filter_->FilterPacket(
direction, *packet,
absl::string_view(*transport_data, packet->size() - header_size),
icmp_header);
}
if (result == ProcessingResult::ICMP) {
const uint8_t* header = reinterpret_cast<const uint8_t*>(packet->data());
constexpr size_t kIPv6NextHeaderOffset = 6;
constexpr size_t kIcmpMessageTypeOffset = kIPv6HeaderSize + 0;
constexpr size_t kIcmpMessageTypeMaxError = 127;
if (
packet->size() >= (kIPv6HeaderSize + kICMPv6HeaderSize) &&
header[kIPv6NextHeaderOffset] == IPPROTO_ICMPV6 &&
header[kIcmpMessageTypeOffset] < kIcmpMessageTypeMaxError) {
result = ProcessingResult::SILENT_DROP;
}
}
return result;
}
QbonePacketProcessor::ProcessingResult QbonePacketProcessor::ProcessIPv6Header(
std::string* packet, Direction direction, uint8_t* transport_protocol,
char** transport_data, icmp6_hdr* icmp_header) {
if (packet->size() < kIPv6HeaderSize) {
QUIC_DVLOG(1) << "Dropped malformed packet: IPv6 header too short";
return ProcessingResult::SILENT_DROP;
}
ip6_hdr* header = reinterpret_cast<ip6_hdr*>(&*packet->begin());
if (header->ip6_vfc >> 4 != 6) {
QUIC_DVLOG(1) << "Dropped malformed packet: IP version is not IPv6";
return ProcessingResult::SILENT_DROP;
}
const size_t declared_payload_size =
quiche::QuicheEndian::NetToHost16(header->ip6_plen);
const size_t actual_payload_size = packet->size() - kIPv6HeaderSize;
if (declared_payload_size != actual_payload_size) {
QUIC_DVLOG(1)
<< "Dropped malformed packet: incorrect packet length specified";
return ProcessingResult::SILENT_DROP;
}
QuicIpAddress address_to_check;
uint8_t address_reject_code;
bool ip_parse_result;
switch (direction) {
case Direction::FROM_OFF_NETWORK:
ip_parse_result = address_to_check.FromPackedString(
reinterpret_cast<const char*>(&header->ip6_src),
sizeof(header->ip6_src));
address_reject_code = kICMPv6DestinationUnreachableDueToSourcePolicy;
break;
case Direction::FROM_NETWORK:
ip_parse_result = address_to_check.FromPackedString(
reinterpret_cast<const char*>(&header->ip6_dst),
sizeof(header->ip6_src));
address_reject_code = ICMP6_DST_UNREACH_NOROUTE;
break;
}
QUICHE_DCHECK(ip_parse_result);
if (!client_ip_.InSameSubnet(address_to_check, client_ip_subnet_length_)) {
QUIC_DVLOG(1)
<< "Dropped packet: source/destination address is not client's";
icmp_header->icmp6_type = ICMP6_DST_UNREACH;
icmp_header->icmp6_code = address_reject_code;
return ProcessingResult::ICMP;
}
if (header->ip6_hops <= 1) {
icmp_header->icmp6_type = ICMP6_TIME_EXCEEDED;
icmp_header->icmp6_code = ICMP6_TIME_EXCEED_TRANSIT;
return ProcessingResult::ICMP;
}
header->ip6_hops--;
switch (header->ip6_nxt) {
case IPPROTO_TCP:
case IPPROTO_UDP:
case IPPROTO_ICMPV6:
*transport_protocol = header->ip6_nxt;
*transport_data = (&*packet->begin()) + kIPv6HeaderSize;
break;
default:
icmp_header->icmp6_type = ICMP6_PARAM_PROB;
icmp_header->icmp6_code = ICMP6_PARAMPROB_NEXTHEADER;
return ProcessingResult::ICMP;
}
return ProcessingResult::OK;
}
void QbonePacketProcessor::SendIcmpResponse(in6_addr dst,
icmp6_hdr* icmp_header,
absl::string_view payload,
Direction original_direction) {
CreateIcmpPacket(self_ip_, dst, *icmp_header, payload,
[this, original_direction](absl::string_view packet) {
SendResponse(original_direction, packet);
});
}
void QbonePacketProcessor::SendTcpReset(absl::string_view original_packet,
Direction original_direction) {
CreateTcpResetPacket(original_packet,
[this, original_direction](absl::string_view packet) {
SendResponse(original_direction, packet);
});
}
void QbonePacketProcessor::SendResponse(Direction original_direction,
absl::string_view packet) {
switch (original_direction) {
case Direction::FROM_OFF_NETWORK:
output_->SendPacketToClient(packet);
break;
case Direction::FROM_NETWORK:
output_->SendPacketToNetwork(packet);
break;
}
}
uint8_t QbonePacketProcessor::TrafficClassFromHeader(
absl::string_view ipv6_header) {
if (ipv6_header.length() < 2) {
return 0;
}
return ipv6_header[0] << 4 | ipv6_header[1] >> 4;
}
} | #include "quiche/quic/qbone/qbone_packet_processor.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/qbone/qbone_packet_processor_test_tools.h"
#include "quiche/common/quiche_text_utils.h"
namespace quic::test {
namespace {
using Direction = QbonePacketProcessor::Direction;
using ProcessingResult = QbonePacketProcessor::ProcessingResult;
using OutputInterface = QbonePacketProcessor::OutputInterface;
using ::testing::_;
using ::testing::Eq;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::WithArgs;
static const char kReferenceClientPacketData[] = {
0x60, 0x00, 0x00, 0x00,
0x00, 0x08,
17,
50,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x30, 0x39,
0x01, 0xbb,
0x00, 0x00,
0x00, 0x00,
};
static const char kReferenceClientPacketDataAF4[] = {
0x68, 0x00, 0x00, 0x00,
0x00, 0x08,
17,
50,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x30, 0x39,
0x01, 0xbb,
0x00, 0x00,
0x00, 0x00,
};
static const char kReferenceClientPacketDataAF3[] = {
0x66, 0x00, 0x00, 0x00,
0x00, 0x08,
17,
50,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x30, 0x39,
0x01, 0xbb,
0x00, 0x00,
0x00, 0x00,
};
static const char kReferenceClientPacketDataAF2[] = {
0x64, 0x00, 0x00, 0x00,
0x00, 0x08,
17,
50,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x30, 0x39,
0x01, 0xbb,
0x00, 0x00,
0x00, 0x00,
};
static const char kReferenceClientPacketDataAF1[] = {
0x62, 0x00, 0x00, 0x00,
0x00, 0x08,
17,
50,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x30, 0x39,
0x01, 0xbb,
0x00, 0x00,
0x00, 0x00,
};
static const char kReferenceNetworkPacketData[] = {
0x60, 0x00, 0x00, 0x00,
0x00, 0x08,
17,
50,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x01, 0xbb,
0x30, 0x39,
0x00, 0x00,
0x00, 0x00,
};
static const char kReferenceClientSubnetPacketData[] = {
0x60, 0x00, 0x00, 0x00,
0x00, 0x08,
17,
50,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x30, 0x39,
0x01, 0xbb,
0x00, 0x00,
0x00, 0x00,
};
static const char kReferenceEchoRequestData[] = {
0x60, 0x00, 0x00, 0x00,
0x00, 64,
58,
127,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfe, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x71, 0x62, 0x6f, 0x6e, 0x6f,
128,
0,
0x00, 0x00,
0xca, 0xfe,
0x00, 0x01,
0x67, 0x37, 0x8a, 0x63, 0x00, 0x00, 0x00, 0x00,
0x96, 0x58, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
};
static const char kReferenceEchoReplyData[] = {
0x60, 0x00, 0x00, 0x00,
0x00, 64,
58,
255,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0xfd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
129,
0,
0x66, 0xb6,
0xca, 0xfe,
0x00, 0x01,
0x67, 0x37, 0x8a, 0x63, 0x00, 0x00, 0x00, 0x00,
0x96, 0x58, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
};
static const absl::string_view kReferenceClientPacket(
kReferenceClientPacketData, ABSL_ARRAYSIZE(kReferenceClientPacketData));
static const absl::string_view kReferenceClientPacketAF4(
kReferenceClientPacketDataAF4,
ABSL_ARRAYSIZE(kReferenceClientPacketDataAF4));
static const absl::string_view kReferenceClientPacketAF3(
kReferenceClientPacketDataAF3,
ABSL_ARRAYSIZE(kReferenceClientPacketDataAF3));
static const absl::string_view kReferenceClientPacketAF2(
kReferenceClientPacketDataAF2,
ABSL_ARRAYSIZE(kReferenceClientPacketDataAF2));
static const absl::string_view kReferenceClientPacketAF1(
kReferenceClientPacketDataAF1,
ABSL_ARRAYSIZE(kReferenceClientPacketDataAF1));
static const absl::string_view kReferenceNetworkPacket(
kReferenceNetworkPacketData, ABSL_ARRAYSIZE(kReferenceNetworkPacketData));
static const absl::string_view kReferenceClientSubnetPacket(
kReferenceClientSubnetPacketData,
ABSL_ARRAYSIZE(kReferenceClientSubnetPacketData));
static const absl::string_view kReferenceEchoRequest(
kReferenceEchoRequestData, ABSL_ARRAYSIZE(kReferenceEchoRequestData));
MATCHER_P(IsIcmpMessage, icmp_type,
"Checks whether the argument is an ICMP message of supplied type") {
if (arg.size() < kTotalICMPv6HeaderSize) {
return false;
}
return arg[40] == icmp_type;
}
class MockPacketFilter : public QbonePacketProcessor::Filter {
public:
MOCK_METHOD(ProcessingResult, FilterPacket,
(Direction, absl::string_view, absl::string_view, icmp6_hdr*),
(override));
};
class QbonePacketProcessorTest : public QuicTest {
protected:
QbonePacketProcessorTest() {
QUICHE_CHECK(client_ip_.FromString("fd00:0:0:1::1"));
QUICHE_CHECK(self_ip_.FromString("fd00:0:0:4::1"));
QUICHE_CHECK(network_ip_.FromString("fd00:0:0:5::1"));
processor_ = std::make_unique<QbonePacketProcessor>(
self_ip_, client_ip_, 62, &output_,
&stats_);
EXPECT_CALL(stats_, RecordThroughput(_, _, _)).WillRepeatedly(Return());
}
void SendPacketFromClient(absl::string_view packet) {
std::string packet_buffer(packet.data(), packet.size());
processor_->ProcessPacket(&packet_buffer, Direction::FROM_OFF_NETWORK);
}
void SendPacketFromNetwork(absl::string_view packet) {
std::string packet_buffer(packet.data(), packet.size());
processor_->ProcessPacket(&packet_buffer, Direction::FROM_NETWORK);
}
QuicIpAddress client_ip_;
QuicIpAddress self_ip_;
QuicIpAddress network_ip_;
std::unique_ptr<QbonePacketProcessor> processor_;
testing::StrictMock<MockPacketProcessorOutput> output_;
testing::StrictMock<MockPacketProcessorStats> stats_;
};
TEST_F(QbonePacketProcessorTest, EmptyPacket) {
EXPECT_CALL(stats_, OnPacketDroppedSilently(Direction::FROM_OFF_NETWORK, _));
EXPECT_CALL(stats_, RecordThroughput(0, Direction::FROM_OFF_NETWORK, _));
SendPacketFromClient("");
EXPECT_CALL(stats_, OnPacketDroppedSilently(Direction::FROM_NETWORK, _));
EXPECT_CALL(stats_, RecordThroughput(0, Direction::FROM_NETWORK, _));
SendPacketFromNetwork("");
}
TEST_F(QbonePacketProcessorTest, RandomGarbage) {
EXPECT_CALL(stats_, OnPacketDroppedSilently(Direction::FROM_OFF_NETWORK, _));
SendPacketFromClient(std::string(1280, 'a'));
EXPECT_CALL(stats_, OnPacketDroppedSilently(Direction::FROM_NETWORK, _));
SendPacketFromNetwork(std::string(1280, 'a'));
}
TEST_F(QbonePacketProcessorTest, RandomGarbageWithCorrectLengthFields) {
std::string packet(40, 'a');
packet[4] = 0;
packet[5] = 0;
EXPECT_CALL(stats_, OnPacketDroppedWithIcmp(Direction::FROM_OFF_NETWORK, _));
EXPECT_CALL(output_, SendPacketToClient(IsIcmpMessage(ICMP6_DST_UNREACH)));
SendPacketFromClient(packet);
}
TEST_F(QbonePacketProcessorTest, GoodPacketFromClient) {
EXPECT_CALL(stats_, OnPacketForwarded(Direction::FROM_OFF_NETWORK, _));
EXPECT_CALL(output_, SendPacketToNetwork(_));
SendPacketFromClient(kReferenceClientPacket);
}
TEST_F(QbonePacketProcessorTest, GoodPacketFromClientSubnet) {
EXPECT_CALL(stats_, OnPacketForwarded(Direction::FROM_OFF_NETWORK, _));
EXPECT_CALL(output_, SendPacketToNetwork(_));
SendPacketFromClient(kReferenceClientSubnetPacket);
}
TEST_F(QbonePacketProcessorTest, GoodPacketFromNetwork) {
EXPECT_CALL(stats_, OnPacketForwarded(Direction::FROM_NETWORK, _));
EXPECT_CALL(output_, SendPacketToClient(_));
SendPacketFromNetwork(kReferenceNetworkPacket);
}
TEST_F(QbonePacketProcessorTest, GoodPacketFromNetworkWrongDirection) {
EXPECT_CALL(stats_, OnPacketDroppedWithIcmp(Direction::FROM_OFF_NETWORK, _));
EXPECT_CALL(output_, SendPacketToClient(IsIcmpMessage(ICMP6_DST_UNREACH)));
SendPacketFromClient(kReferenceNetworkPacket);
}
TEST_F(QbonePacketProcessorTest, TtlExpired) {
std::string packet(kReferenceNetworkPacket);
packet[7] = 1;
EXPECT_CALL(stats_, OnPacketDroppedWithIcmp(Direction::FROM_NETWORK, _));
EXPECT_CALL(output_, SendPacketToNetwork(IsIcmpMessage(ICMP6_TIME_EXCEEDED)));
SendPacketFromNetwork(packet);
}
TEST_F(QbonePacketProcessorTest, UnknownProtocol) {
std::string packet(kReferenceNetworkPacket);
packet[6] = IPPROTO_SCTP;
EXPECT_CALL(stats_, OnPacketDroppedWithIcmp(Direction::FROM_NETWORK, _));
EXPECT_CALL(output_, SendPacketToNetwork(IsIcmpMessage(ICMP6_PARAM_PROB)));
SendPacketFromNetwork(packet);
}
TEST_F(QbonePacketProcessorTest, FilterFromClient) {
auto filter = std::make_unique<MockPacketFilter>();
EXPECT_CALL(*filter, FilterPacket(_, _, _, _))
.WillRepeatedly(Return(ProcessingResult::SILENT_DROP));
processor_->set_filter(std::move(filter));
EXPECT_CALL(stats_, OnPacketDroppedSilently(Direction::FROM_OFF_NETWORK, _));
SendPacketFromClient(kReferenceClientPacket);
}
class TestFilter : public QbonePacketProcessor::Filter {
public:
TestFilter(QuicIpAddress client_ip, QuicIpAddress network_ip)
: client_ip_(client_ip), network_ip_(network_ip) {}
ProcessingResult FilterPacket(Direction direction,
absl::string_view full_packet,
absl::string_view payload,
icmp6_hdr* icmp_header) override {
EXPECT_EQ(kIPv6HeaderSize, full_packet.size() - payload.size());
EXPECT_EQ(IPPROTO_UDP, TransportProtocolFromHeader(full_packet));
EXPECT_EQ(client_ip_, SourceIpFromHeader(full_packet));
EXPECT_EQ(network_ip_, DestinationIpFromHeader(full_packet));
last_tos_ = QbonePacketProcessor::TrafficClassFromHeader(full_packet);
called_++;
return ProcessingResult::SILENT_DROP;
}
int called() const { return called_; }
uint8_t last_tos() const { return last_tos_; }
private:
int called_ = 0;
uint8_t last_tos_ = 0;
QuicIpAddress client_ip_;
QuicIpAddress network_ip_;
};
TEST_F(QbonePacketProcessorTest, FilterHelperFunctions) {
auto filter_owned = std::make_unique<TestFilter>(client_ip_, network_ip_);
TestFilter* filter = filter_owned.get();
processor_->set_filter(std::move(filter_owned));
EXPECT_CALL(stats_, OnPacketDroppedSilently(Direction::FROM_OFF_NETWORK, _));
SendPacketFromClient(kReferenceClientPacket);
ASSERT_EQ(1, filter->called());
}
TEST_F(QbonePacketProcessorTest, FilterHelperFunctionsTOS) {
auto filter_owned = std::make_unique<TestFilter>(client_ip_, network_ip_);
processor_->set_filter(std::move(filter_owned));
EXPECT_CALL(stats_, OnPacketDroppedSilently(Direction::FROM_OFF_NETWORK, _))
.Times(testing::AnyNumber());
EXPECT_CALL(stats_, RecordThroughput(kReferenceClientPacket.size(),
Direction::FROM_OFF_NETWORK, 0));
SendPacketFromClient(kReferenceClientPacket);
EXPECT_CALL(stats_, RecordThroughput(kReferenceClientPacketAF4.size(),
Direction::FROM_OFF_NETWORK, 0x80));
SendPacketFromClient(kReferenceClientPacketAF4);
EXPECT_CALL(stats_, RecordThroughput(kReferenceClientPacketAF3.size(),
Direction::FROM_OFF_NETWORK, 0x60));
SendPacketFromClient(kReferenceClientPacketAF3);
EXPECT_CALL(stats_, RecordThroughput(kReferenceClientPacketAF2.size(),
Direction::FROM_OFF_NETWORK, 0x40));
SendPacketFromClient(kReferenceClientPacketAF2);
EXPECT_CALL(stats_, RecordThroughput(kReferenceClientPacketAF1.size(),
Direction::FROM_OFF_NETWORK, 0x20));
SendPacketFromClient(kReferenceClientPacketAF1);
}
TEST_F(QbonePacketProcessorTest, Icmp6EchoResponseHasRightPayload) {
auto filter = std::make_unique<MockPacketFilter>();
EXPECT_CALL(*filter, FilterPacket(_, _, _, _))
.WillOnce(WithArgs<2, 3>(
Invoke([](absl::string_view payload, icmp6_hdr* icmp_header) {
icmp_header->icmp6_type = ICMP6_ECHO_REPLY;
icmp_header->icmp6_code = 0;
auto* request_header =
reinterpret_cast<const icmp6_hdr*>(payload.data());
icmp_header->icmp6_id = request_header->icmp6_id;
icmp_header->icmp6_seq = request_header->icmp6_seq;
return ProcessingResult::ICMP;
})));
processor_->set_filter(std::move(filter));
EXPECT_CALL(stats_, OnPacketDroppedWithIcmp(Direction::FROM_OFF_NETWORK, _));
EXPECT_CALL(output_, SendPacketToClient(_))
.WillOnce(Invoke([](absl::string_view packet) {
absl::string_view expected = absl::string_view(
kReferenceEchoReplyData, sizeof(kReferenceEchoReplyData));
EXPECT_THAT(packet, Eq(expected));
QUIC_LOG(INFO) << "ICMP response:\n"
<< quiche::QuicheTextUtils::HexDump(packet);
}));
SendPacketFromClient(kReferenceEchoRequest);
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/qbone_packet_processor.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/qbone_packet_processor_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
0c1e0df2-8f32-4754-b90e-7b6a758a3abc | cpp | tensorflow/tensorflow | all_gather_decomposer | third_party/xla/xla/service/all_gather_decomposer.cc | third_party/xla/xla/service/all_gather_decomposer_test.cc | #include "xla/service/all_gather_decomposer.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/collective_decomposer_utils.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
HloComputation* MakeBinaryAdd(PrimitiveType type, HloModule* module) {
HloComputation::Builder sum_b("add");
auto x = sum_b.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(type, {}), "x"));
auto y = sum_b.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(type, {}), "y"));
if (type == PRED) {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kOr, x, y));
} else {
sum_b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(type, {}), HloOpcode::kAdd, x, y));
}
HloComputation* reduction = module->AddEmbeddedComputation(sum_b.Build());
return reduction;
}
}
HloInstruction* AllGatherDecomposer::TranslateAllGatherToAllReducePerOperand(
CollectiveOpGroupMode group_mode, const HloAllGatherInstruction& ag,
const Shape& output_shape, HloInstruction* operand, HloComputation* comp,
int64_t ag_dim) {
std::vector<HloInstruction*> start_indices =
CreateStartIndicesForCollectiveDecomposition(
group_mode, ag.replica_groups(), operand->shape(), ag_dim, comp)
.value();
auto zero = comp->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(output_shape.element_type())));
zero = comp->AddInstruction(
HloInstruction::CreateBroadcast(output_shape, zero, {}));
auto dus = comp->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
zero->shape(), zero, operand, start_indices));
auto ar = comp->AddInstruction(HloInstruction::CreateAllReduce(
dus->shape(), {dus},
MakeBinaryAdd(dus->shape().element_type(), comp->parent()),
ag.device_list(),
ag.constrain_layout(), ag.channel_id(),
ag.use_global_device_ids()));
return ar;
}
absl::Status AllGatherDecomposer::DecomposeAllGather(
HloAllGatherInstruction* ag, HloComputation* comp) {
TF_ASSIGN_OR_RETURN(CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(ag->channel_id().has_value(),
ag->use_global_device_ids()));
if (ag->operand_count() > 1) {
std::vector<HloInstruction*> tuple_inputs;
for (int i = 0; i < ag->operand_count(); ++i) {
auto* input_operand = ag->mutable_operand(i);
const auto& output_shape = ag->shape().tuple_shapes(i);
auto* ar = TranslateAllGatherToAllReducePerOperand(
group_mode, *ag, output_shape, input_operand, comp,
ag->all_gather_dimension());
tuple_inputs.push_back(ar);
}
auto tup = comp->AddInstruction(HloInstruction::CreateTuple(tuple_inputs));
TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(tup));
} else {
auto* ar = TranslateAllGatherToAllReducePerOperand(
group_mode, *ag, ag->shape(), ag->mutable_operand(0), comp,
ag->all_gather_dimension());
TF_RETURN_IF_ERROR(ag->ReplaceAllUsesWith(ar));
}
TF_RETURN_IF_ERROR(comp->RemoveInstructionAndUnusedOperands(ag));
return absl::OkStatus();
}
absl::StatusOr<bool> AllGatherDecomposer::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (auto comp : module->MakeNonfusionComputations(execution_threads)) {
for (auto hlo : comp->MakeInstructionPostOrder()) {
if (hlo->opcode() != HloOpcode::kAllGather) {
continue;
}
auto ag = Cast<HloAllGatherInstruction>(hlo);
if (ShouldDecompose(*ag)) {
TF_RETURN_IF_ERROR(DecomposeAllGather(ag, comp));
changed = true;
}
}
}
return changed;
}
} | #include "xla/service/all_gather_decomposer.h"
#include <memory>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::testing::AllOf;
namespace op = xla::testing::opcode_matchers;
using AllGatherDecomposerTest = HloTestBase;
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGather) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAndPartitionAllGather) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0}}, channel_id=1,
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::PartitionId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTrivialGroup) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0), replica_groups={{0,1,2,3}},
dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroups) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0),
replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto id =
AllOf(op::Shape("u32[]"),
op::Reshape(op::DynamicSlice(op::Constant(), op::ReplicaId())));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0),
op::Constant(), op::Multiply(id, op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithSubgroupsGlobalIds) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
ROOT ag = f32[10,80] all-gather(param0),
replica_groups={{2,1,0,3}, {4,6,7,5}}, dimensions={1}, channel_id=1,
use_global_device_ids=true
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
auto global_id =
op::Add(op::Multiply(op::ReplicaId(), op::Constant()), op::PartitionId());
auto id = AllOf(op::Shape("u32[]"),
op::Reshape(op::DynamicSlice(op::Constant(), global_id)));
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0),
op::Constant(), op::Multiply(id, op::Constant()))));
}
TEST_F(AllGatherDecomposerTest, CrossReplicaAllGatherWithTuple) {
const std::string module_str = R"(
HloModule module
ENTRY entry {
param0 = f32[10,20] parameter(0)
param1 = f32[10,16] parameter(1)
ROOT ag = (f32[10,80], f32[10,64]) all-gather(param0, param1),
replica_groups={}, dimensions={1}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((module_str)));
AllGatherDecomposer decomposer;
TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(
module->entry_computation()->root_instruction(),
op::Tuple(
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(0), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant()))),
op::AllReduce(op::DynamicUpdateSlice(
op::Broadcast(op::Constant()), op::Parameter(1), op::Constant(),
op::Multiply(op::ReplicaId(), op::Constant())))));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_decomposer.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/all_gather_decomposer_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
324f4e92-107f-46ec-881e-dd70ba2de645 | cpp | google/tensorstore | grid_occupancy_map | tensorstore/driver/downsample/grid_occupancy_map.cc | tensorstore/driver/downsample/grid_occupancy_map_test.cc | #include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "tensorstore/array.h"
#include "tensorstore/box.h"
#include "tensorstore/contiguous_layout.h"
#include "tensorstore/data_type.h"
#include "tensorstore/index.h"
#include "tensorstore/index_interval.h"
#include "tensorstore/rank.h"
#include "tensorstore/util/iterate.h"
#include "tensorstore/util/span.h"
namespace tensorstore {
namespace internal_downsample {
GridOccupancyMap::GridOccupancyMap(GridOccupancyTracker&& tracker,
BoxView<> domain)
: partition_points(domain.rank()) {
const DimensionIndex rank = domain.rank();
span<Index> occupied_chunks = tracker.occupied_chunks;
{
absl::flat_hash_map<Index, Index> partition_map;
for (DimensionIndex dim = 0; dim < rank; ++dim) {
partition_map.clear();
IndexInterval bounds = domain[dim];
partition_map.emplace(bounds.inclusive_min(), 0);
partition_map.emplace(bounds.exclusive_max(), 0);
for (ptrdiff_t i = dim; i < occupied_chunks.size(); i += 2 * rank) {
Index begin = occupied_chunks[i];
Index end = begin + occupied_chunks[i + rank];
partition_map.emplace(begin, 0);
partition_map.emplace(end, 0);
}
auto& dim_partition_points = partition_points[dim];
dim_partition_points.reserve(partition_map.size());
for (const auto& p : partition_map) {
dim_partition_points.push_back(p.first);
}
std::sort(dim_partition_points.begin(), dim_partition_points.end());
for (size_t i = 0, size = dim_partition_points.size(); i < size; ++i) {
partition_map.at(dim_partition_points[i]) = i;
}
for (ptrdiff_t i = dim; i < occupied_chunks.size(); i += 2 * rank) {
Index& begin = occupied_chunks[i];
Index& end = occupied_chunks[i + rank];
end = partition_map.at(begin + end);
begin = partition_map.at(begin);
}
}
}
Index grid_cell[kMaxRank];
span<Index> grid_cell_span(&grid_cell[0], rank);
{
for (DimensionIndex dim = 0; dim < rank; ++dim) {
grid_cell[dim] = partition_points[dim].size() - 1;
}
occupied_chunk_mask =
AllocateArray<bool>(grid_cell_span, c_order, value_init);
}
for (ptrdiff_t i = 0; i < occupied_chunks.size(); i += 2 * rank) {
std::copy_n(&occupied_chunks[i], rank, &grid_cell[0]);
do {
occupied_chunk_mask(grid_cell_span) = true;
} while (internal::AdvanceIndices(rank, &grid_cell[0], &occupied_chunks[i],
&occupied_chunks[i + rank]));
}
}
bool GridOccupancyMap::GetGridCellDomain(
span<const Index> grid_cell, MutableBoxView<> grid_cell_domain) const {
assert(grid_cell.size() == grid_cell_domain.rank());
assert(grid_cell.size() == rank());
if (occupied_chunk_mask(grid_cell)) return false;
for (DimensionIndex dim = 0; dim < grid_cell.size(); ++dim) {
const Index partition_index = grid_cell[dim];
grid_cell_domain[dim] = IndexInterval::UncheckedHalfOpen(
partition_points[dim][partition_index],
partition_points[dim][partition_index + 1]);
}
return true;
}
void GridOccupancyMap::InitializeCellIterator(span<Index> grid_cell) const {
std::fill(grid_cell.begin(), grid_cell.end(), 0);
}
bool GridOccupancyMap::AdvanceCellIterator(span<Index> grid_cell) const {
assert(grid_cell.size() == occupied_chunk_mask.rank());
return internal::AdvanceIndices(grid_cell.size(), grid_cell.data(),
occupied_chunk_mask.shape().data());
}
}
} | #include "tensorstore/driver/downsample/grid_occupancy_map.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/box.h"
#include "tensorstore/index.h"
namespace {
using ::tensorstore::Box;
using ::tensorstore::BoxView;
using ::tensorstore::Index;
using ::tensorstore::MakeArray;
using ::tensorstore::internal_downsample::GridOccupancyMap;
using ::tensorstore::internal_downsample::GridOccupancyTracker;
std::vector<Box<>> GetUnoccupiedBoxes(const GridOccupancyMap& map) {
std::vector<Box<>> boxes;
std::vector<Index> grid_cell(map.rank());
map.InitializeCellIterator(grid_cell);
Box<> box(map.rank());
do {
if (map.GetGridCellDomain(grid_cell, box)) {
boxes.push_back(box);
}
} while (map.AdvanceCellIterator(grid_cell));
return boxes;
}
TEST(GridOccupancyMapTest, Rank1) {
GridOccupancyTracker tracker;
tracker.MarkOccupied(BoxView<1>({1}, {3}));
tracker.MarkOccupied(BoxView<1>({5}, {4}));
GridOccupancyMap map(std::move(tracker), BoxView<1>({-1}, {11}));
EXPECT_THAT(
map.partition_points,
::testing::ElementsAre(::testing::ElementsAre(-1, 1, 4, 5, 9, 10)));
EXPECT_EQ(map.occupied_chunk_mask, MakeArray<bool>({0, 1, 0, 1, 0}));
EXPECT_THAT(GetUnoccupiedBoxes(map),
::testing::ElementsAre(Box<>({-1}, {2}), Box<>({4}, {1}),
Box<>({9}, {1})));
}
TEST(GridOccupancyMapTest, Rank2) {
GridOccupancyTracker tracker;
tracker.MarkOccupied(BoxView<2>({0, 0}, {3, 2}));
tracker.MarkOccupied(BoxView<2>({3, 3}, {1, 3}));
tracker.MarkOccupied(BoxView<2>({0, 5}, {2, 3}));
GridOccupancyMap map(std::move(tracker), BoxView<2>({4, 10}));
EXPECT_THAT(
map.partition_points,
::testing::ElementsAre(::testing::ElementsAre(0, 2, 3, 4),
::testing::ElementsAre(0, 2, 3, 5, 6, 8, 10)));
EXPECT_EQ(map.occupied_chunk_mask, MakeArray<bool>({
{1, 0, 0, 1, 1, 0},
{1, 0, 0, 0, 0, 0},
{0, 0, 1, 1, 0, 0},
}));
EXPECT_THAT(
GetUnoccupiedBoxes(map),
::testing::ElementsAre(
Box<>({0, 2}, {2, 1}), Box<>({0, 3}, {2, 2}), Box<>({0, 8}, {2, 2}),
Box<>({2, 2}, {1, 1}), Box<>({2, 3}, {1, 2}), Box<>({2, 5}, {1, 1}),
Box<>({2, 6}, {1, 2}), Box<>({2, 8}, {1, 2}), Box<>({3, 0}, {1, 2}),
Box<>({3, 2}, {1, 1}), Box<>({3, 6}, {1, 2}), Box<>({3, 8}, {1, 2})));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/grid_occupancy_map.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/driver/downsample/grid_occupancy_map_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
cb21734b-9242-415e-ac25-360ddf2d0b25 | cpp | google/quiche | decode_buffer | quiche/http2/decoder/decode_buffer.cc | quiche/http2/decoder/decode_buffer_test.cc | #include "quiche/http2/decoder/decode_buffer.h"
namespace http2 {
uint8_t DecodeBuffer::DecodeUInt8() {
return static_cast<uint8_t>(DecodeChar());
}
uint16_t DecodeBuffer::DecodeUInt16() {
QUICHE_DCHECK_LE(2u, Remaining());
const uint8_t b1 = DecodeUInt8();
const uint8_t b2 = DecodeUInt8();
return b1 << 8 | b2;
}
uint32_t DecodeBuffer::DecodeUInt24() {
QUICHE_DCHECK_LE(3u, Remaining());
const uint8_t b1 = DecodeUInt8();
const uint8_t b2 = DecodeUInt8();
const uint8_t b3 = DecodeUInt8();
return b1 << 16 | b2 << 8 | b3;
}
uint32_t DecodeBuffer::DecodeUInt31() {
QUICHE_DCHECK_LE(4u, Remaining());
const uint8_t b1 = DecodeUInt8() & 0x7f;
const uint8_t b2 = DecodeUInt8();
const uint8_t b3 = DecodeUInt8();
const uint8_t b4 = DecodeUInt8();
return b1 << 24 | b2 << 16 | b3 << 8 | b4;
}
uint32_t DecodeBuffer::DecodeUInt32() {
QUICHE_DCHECK_LE(4u, Remaining());
const uint8_t b1 = DecodeUInt8();
const uint8_t b2 = DecodeUInt8();
const uint8_t b3 = DecodeUInt8();
const uint8_t b4 = DecodeUInt8();
return b1 << 24 | b2 << 16 | b3 << 8 | b4;
}
#ifndef NDEBUG
void DecodeBuffer::set_subset_of_base(DecodeBuffer* base,
const DecodeBufferSubset* subset) {
QUICHE_DCHECK_EQ(this, subset);
base->set_subset(subset);
}
void DecodeBuffer::clear_subset_of_base(DecodeBuffer* base,
const DecodeBufferSubset* subset) {
QUICHE_DCHECK_EQ(this, subset);
base->clear_subset(subset);
}
void DecodeBuffer::set_subset(const DecodeBufferSubset* subset) {
QUICHE_DCHECK(subset != nullptr);
QUICHE_DCHECK_EQ(subset_, nullptr) << "There is already a subset";
subset_ = subset;
}
void DecodeBuffer::clear_subset(const DecodeBufferSubset* subset) {
QUICHE_DCHECK(subset != nullptr);
QUICHE_DCHECK_EQ(subset_, subset);
subset_ = nullptr;
}
void DecodeBufferSubset::DebugSetup() {
start_base_offset_ = base_buffer_->Offset();
max_base_offset_ = start_base_offset_ + FullSize();
QUICHE_DCHECK_LE(max_base_offset_, base_buffer_->FullSize());
set_subset_of_base(base_buffer_, this);
}
void DecodeBufferSubset::DebugTearDown() {
QUICHE_DCHECK_EQ(start_base_offset_, base_buffer_->Offset())
<< "The base buffer was modified";
size_t offset = Offset();
QUICHE_DCHECK_LE(offset, FullSize());
QUICHE_DCHECK_LE(start_base_offset_ + offset, max_base_offset_);
QUICHE_DCHECK_LE(max_base_offset_, base_buffer_->FullSize());
clear_subset_of_base(base_buffer_, this);
}
#endif
} | #include "quiche/http2/decoder/decode_buffer.h"
#include <functional>
#include "quiche/http2/test_tools/http2_random.h"
#include "quiche/common/platform/api/quiche_logging.h"
#include "quiche/common/platform/api/quiche_test.h"
namespace http2 {
namespace test {
namespace {
enum class TestEnumClass32 {
kValue1 = 1,
kValue99 = 99,
kValue1M = 1000000,
};
enum class TestEnumClass8 {
kValue1 = 1,
kValue2 = 1,
kValue99 = 99,
kValue255 = 255,
};
enum TestEnum8 {
kMaskLo = 0x01,
kMaskHi = 0x80,
};
struct TestStruct {
uint8_t f1;
uint16_t f2;
uint32_t f3;
uint32_t f4;
uint32_t f5;
TestEnumClass32 f6;
TestEnumClass8 f7;
TestEnum8 f8;
};
class DecodeBufferTest : public quiche::test::QuicheTest {
protected:
Http2Random random_;
uint32_t decode_offset_;
};
TEST_F(DecodeBufferTest, DecodesFixedInts) {
const char data[] = "\x01\x12\x23\x34\x45\x56\x67\x78\x89\x9a";
DecodeBuffer b1(data, strlen(data));
EXPECT_EQ(1, b1.DecodeUInt8());
EXPECT_EQ(0x1223u, b1.DecodeUInt16());
EXPECT_EQ(0x344556u, b1.DecodeUInt24());
EXPECT_EQ(0x6778899Au, b1.DecodeUInt32());
}
TEST_F(DecodeBufferTest, HasNotCopiedInput) {
const char data[] = "ab";
DecodeBuffer b1(data, 2);
EXPECT_EQ(2u, b1.Remaining());
EXPECT_EQ(0u, b1.Offset());
EXPECT_FALSE(b1.Empty());
EXPECT_EQ(data, b1.cursor());
EXPECT_TRUE(b1.HasData());
b1.AdvanceCursor(1);
EXPECT_EQ(1u, b1.Remaining());
EXPECT_EQ(1u, b1.Offset());
EXPECT_FALSE(b1.Empty());
EXPECT_EQ(&data[1], b1.cursor());
EXPECT_TRUE(b1.HasData());
b1.AdvanceCursor(1);
EXPECT_EQ(0u, b1.Remaining());
EXPECT_EQ(2u, b1.Offset());
EXPECT_TRUE(b1.Empty());
EXPECT_EQ(&data[2], b1.cursor());
EXPECT_FALSE(b1.HasData());
DecodeBuffer b2(data, 0);
EXPECT_EQ(0u, b2.Remaining());
EXPECT_EQ(0u, b2.Offset());
EXPECT_TRUE(b2.Empty());
EXPECT_EQ(data, b2.cursor());
EXPECT_FALSE(b2.HasData());
}
TEST_F(DecodeBufferTest, DecodeBufferSubsetLimited) {
const char data[] = "abc";
DecodeBuffer base(data, 3);
base.AdvanceCursor(1);
DecodeBufferSubset subset(&base, 100);
EXPECT_EQ(2u, subset.FullSize());
}
TEST_F(DecodeBufferTest, DecodeBufferSubsetAdvancesCursor) {
const char data[] = "abc";
const size_t size = sizeof(data) - 1;
EXPECT_EQ(3u, size);
DecodeBuffer base(data, size);
{
DecodeBufferSubset subset(&base, size + 100);
EXPECT_EQ(size, subset.FullSize());
EXPECT_EQ(base.FullSize(), subset.FullSize());
EXPECT_EQ(0u, subset.Offset());
}
EXPECT_EQ(0u, base.Offset());
EXPECT_EQ(size, base.Remaining());
}
#if GTEST_HAS_DEATH_TEST && !defined(NDEBUG)
TEST(DecodeBufferDeathTest, NonNullBufferRequired) {
EXPECT_QUICHE_DEBUG_DEATH({ DecodeBuffer b(nullptr, 3); }, "nullptr");
}
TEST(DecodeBufferDeathTest, ModestBufferSizeRequired) {
EXPECT_QUICHE_DEBUG_DEATH(
{
constexpr size_t kLength = DecodeBuffer::kMaxDecodeBufferLength + 1;
auto data = std::make_unique<char[]>(kLength);
DecodeBuffer b(data.get(), kLength);
},
"Max.*Length");
}
TEST(DecodeBufferDeathTest, LimitedAdvance) {
{
const char data[] = "abc";
DecodeBuffer b(data, 3);
b.AdvanceCursor(3);
EXPECT_TRUE(b.Empty());
}
EXPECT_QUICHE_DEBUG_DEATH(
{
const char data[] = "abc";
DecodeBuffer b(data, 3);
b.AdvanceCursor(4);
},
"Remaining");
}
TEST(DecodeBufferDeathTest, DecodeUInt8PastEnd) {
const char data[] = {0x12, 0x23};
DecodeBuffer b(data, sizeof data);
EXPECT_EQ(2u, b.FullSize());
EXPECT_EQ(0x1223, b.DecodeUInt16());
EXPECT_QUICHE_DEBUG_DEATH({ b.DecodeUInt8(); }, "Remaining");
}
TEST(DecodeBufferDeathTest, DecodeUInt16OverEnd) {
const char data[] = {0x12, 0x23, 0x34};
DecodeBuffer b(data, sizeof data);
EXPECT_EQ(3u, b.FullSize());
EXPECT_EQ(0x1223, b.DecodeUInt16());
EXPECT_QUICHE_DEBUG_DEATH({ b.DecodeUInt16(); }, "Remaining");
}
TEST(DecodeBufferSubsetDeathTest, TwoSubsets) {
const char data[] = "abc";
DecodeBuffer base(data, 3);
DecodeBufferSubset subset1(&base, 1);
EXPECT_QUICHE_DEBUG_DEATH({ DecodeBufferSubset subset2(&base, 1); },
"There is already a subset");
}
TEST(DecodeBufferSubsetDeathTest, BaseCursorAdvanced) {
const char data[] = "abc";
DecodeBuffer base(data, 3);
base.AdvanceCursor(1);
EXPECT_QUICHE_DEBUG_DEATH(
{
DecodeBufferSubset subset1(&base, 2);
base.AdvanceCursor(1);
},
"Access via subset only when present");
}
#endif
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/decode_buffer.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/http2/decoder/decode_buffer_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
63728d0f-b84d-4e18-bb17-5fb065cf6c33 | cpp | tensorflow/tensorflow | concatenate_dataset_op | tensorflow/core/kernels/data/concatenate_dataset_op.cc | tensorflow/core/kernels/data/concatenate_dataset_op_test.cc | #include "tensorflow/core/kernels/data/concatenate_dataset_op.h"
#include <algorithm>
#include <cstddef>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/split_utils.h"
#include "tensorflow/core/framework/dataset.h"
#include "tensorflow/core/framework/tensor.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/mutex.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/thread_annotations.h"
namespace tensorflow {
namespace data {
constexpr const char* const ConcatenateDatasetOp::kDatasetType;
constexpr const char* const ConcatenateDatasetOp::kInputDataset;
constexpr const char* const ConcatenateDatasetOp::kAnotherDataset;
constexpr const char* const ConcatenateDatasetOp::kOutputTypes;
constexpr const char* const ConcatenateDatasetOp::kOutputShapes;
constexpr char kIndex[] = "i";
constexpr char kInputImplUninitialized[] = "input_impl_uninitialized";
constexpr char kElementCount[] = "element_count";
namespace {
absl::StatusOr<size_t> GetNextShuffledIndex(const IndexMapperFn& index_mapper,
size_t& element_count) {
absl::StatusOr<size_t> shuffled_index = absl::NotFoundError("default");
while (absl::IsNotFound(shuffled_index.status())) {
shuffled_index = index_mapper(element_count++);
if (absl::IsOutOfRange(shuffled_index.status())) {
return shuffled_index.status();
}
if (!absl::IsNotFound(shuffled_index.status()) && !shuffled_index.ok()) {
return shuffled_index.status();
}
}
return shuffled_index;
}
}
class ConcatenateDatasetOp::Dataset : public DatasetBase {
public:
explicit Dataset(OpKernelContext* ctx, const DatasetBase* input,
const DatasetBase* to_concatenate)
: DatasetBase(DatasetContext(ctx)),
input_(input),
to_concatenate_(to_concatenate),
input_cardinality_(input->Cardinality()),
to_concatenate_cardinality_(to_concatenate_->Cardinality()) {
input_->Ref();
to_concatenate_->Ref();
auto os_input = input->output_shapes();
auto os_concatenate = to_concatenate->output_shapes();
for (int i = 0; i < os_input.size(); i++) {
PartialTensorShape output_tensorshape({});
OP_REQUIRES_OK(ctx,
MostSpecificCompatibleShape(os_input[i], os_concatenate[i],
&output_tensorshape));
output_shapes_.push_back(output_tensorshape);
}
if (input_ != nullptr && !input_->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = input->RandomIndexingCompatible();
} else if (to_concatenate_ != nullptr &&
!to_concatenate_->RandomIndexingCompatible().ok()) {
random_indexing_compatible_ = to_concatenate_->RandomIndexingCompatible();
}
}
~Dataset() override {
input_->Unref();
to_concatenate_->Unref();
}
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
Status MakeSplitProviders(std::vector<std::unique_ptr<SplitProvider>>*
split_providers) const override {
TF_ASSIGN_OR_RETURN(*split_providers, GetSplitProviders(this));
return absl::OkStatus();
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return output_shapes_;
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
int64_t CardinalityInternal(CardinalityOptions options) const override {
int64_t input_cardinality = input_->Cardinality(options);
int64_t to_concatenate_cardinality = to_concatenate_->Cardinality(options);
if (input_cardinality == kInfiniteCardinality ||
to_concatenate_cardinality == kInfiniteCardinality) {
return kInfiniteCardinality;
}
if (input_cardinality == kUnknownCardinality ||
to_concatenate_cardinality == kUnknownCardinality) {
return kUnknownCardinality;
}
return input_cardinality + to_concatenate_cardinality;
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
inputs->push_back(to_concatenate_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(input_->CheckExternalState());
return to_concatenate_->CheckExternalState();
}
Status Get(OpKernelContext* ctx, int64 index,
std::vector<Tensor>* out_tensors) const override {
TF_RETURN_IF_ERROR(CheckRandomAccessCompatible(index));
if (index < input_cardinality_) {
TF_RETURN_IF_ERROR(input_->Get(ctx, index, out_tensors));
} else {
TF_RETURN_IF_ERROR(
to_concatenate_->Get(ctx, index - input_cardinality_, out_tensors));
}
return absl::OkStatus();
}
absl::Status RandomIndexingCompatible() const override {
return random_indexing_compatible_;
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph = nullptr;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph));
Node* to_concatenate_graph = nullptr;
TF_RETURN_IF_ERROR(
b->AddInputDataset(ctx, to_concatenate_, &to_concatenate_graph));
TF_RETURN_IF_ERROR(
b->AddDataset(this, {input_graph, to_concatenate_graph}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params), i_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
mutex_lock l(mu_);
input_impls_.resize(2);
TF_ASSIGN_OR_RETURN(input_contexts_,
CreateInputIteratorContexts(ctx, dataset()));
TF_RETURN_IF_ERROR(dataset()->input_->MakeIterator(
&input_contexts_[0], this, strings::StrCat(prefix(), "[0]"),
&input_impls_[0]));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
return absl::OkStatus();
}
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
mutex_lock l(mu_);
if (!input_impls_[0] && !input_impls_[1]) {
*end_of_sequence = true;
return absl::OkStatus();
}
if (ctx->index_mapper()) {
if (input_impls_[1] == nullptr) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[1], this, strings::StrCat(prefix(), "[1]"),
&input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
if (input_contexts_[0].index_mapper() == nullptr) {
IndexMapperFn left_index_mapper =
[index_mapper = ctx->index_mapper(),
left_cardinality = dataset()->input_cardinality_,
right_cardinality = dataset()->to_concatenate_cardinality_](
size_t to_idx) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t from_idx, index_mapper(to_idx));
if (from_idx >= left_cardinality + right_cardinality) {
return absl::OutOfRangeError("Running out of elements.");
}
if (from_idx >= left_cardinality) {
return absl::NotFoundError("Skipping this element.");
}
return from_idx;
};
IndexMapperFn right_index_mapper =
[index_mapper = ctx->index_mapper(),
left_cardinality = dataset()->input_cardinality_,
right_cardinality = dataset()->to_concatenate_cardinality_](
size_t to_idx) -> absl::StatusOr<size_t> {
TF_ASSIGN_OR_RETURN(size_t from_idx, index_mapper(to_idx));
if (from_idx >= left_cardinality + right_cardinality) {
return absl::OutOfRangeError("Running out of elements.");
}
if (from_idx < left_cardinality) {
return absl::NotFoundError("Skipping this element.");
}
return from_idx - left_cardinality;
};
input_contexts_[0].SetIndexMapper(left_index_mapper);
input_contexts_[1].SetIndexMapper(right_index_mapper);
}
absl::StatusOr<size_t> shuffled_index =
GetNextShuffledIndex(ctx->index_mapper(), element_count_);
if (absl::IsOutOfRange(shuffled_index.status())) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(shuffled_index.status());
bool temp_end_of_sequence = false;
absl::Status status = absl::OkStatus();
if (shuffled_index.value() < dataset()->input_cardinality_) {
status = input_impls_[0]->GetNext(&input_contexts_[0], out_tensors,
&temp_end_of_sequence);
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
} else {
status = input_impls_[1]->GetNext(&input_contexts_[1], out_tensors,
&temp_end_of_sequence);
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
TF_RETURN_IF_ERROR(status);
if (temp_end_of_sequence) {
*end_of_sequence = temp_end_of_sequence;
return absl::OkStatus();
}
return absl::OkStatus();
}
for (; i_ < 2; ++i_) {
TF_RETURN_IF_ERROR(input_impls_[i_]->GetNext(
&input_contexts_[i_], out_tensors, end_of_sequence));
ctx->MergeCheckpoint(input_contexts_[i_].checkpoint());
if (!*end_of_sequence) {
return absl::OkStatus();
}
if (i_ == 0) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[1], this, strings::StrCat(prefix(), "[1]"),
&input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
}
*end_of_sequence = true;
input_impls_[0].reset();
input_impls_[1].reset();
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeKnownRatioNode(std::move(args),
1);
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(prefix(), kIndex, i_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kElementCount, element_count_));
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrFormat("%s[%d]", kInputImplUninitialized, 0),
static_cast<int64_t>(!input_impls_[0])));
if (input_impls_[0]) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impls_[0]));
}
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), absl::StrFormat("%s[%d]", kInputImplUninitialized, 1),
static_cast<int64_t>(!input_impls_[1])));
if (input_impls_[1]) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impls_[1]));
}
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_uninitialized[2];
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), absl::StrFormat("%s[%d]", kInputImplUninitialized, 0),
&input_uninitialized[0]));
if (static_cast<bool>(input_uninitialized[0])) {
input_impls_[0].reset();
}
TF_RETURN_IF_ERROR(reader->ReadScalar(
prefix(), absl::StrFormat("%s[%d]", kInputImplUninitialized, 1),
&input_uninitialized[1]));
if (static_cast<bool>(input_uninitialized[1])) {
input_impls_[1].reset();
}
if (ctx->restored_element_count()) {
if (input_impls_.size() != 2) {
return absl::FailedPreconditionError(
"`Initialize` should be called before restoring from the "
"checkpoint.");
}
{
int64_t tmp_element_count;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kElementCount, &tmp_element_count));
if (tmp_element_count < 0) {
return absl::FailedPreconditionError(absl::StrFormat(
"element_count should be >= 0. Got %d", tmp_element_count));
}
element_count_ = static_cast<size_t>(tmp_element_count);
}
if (!static_cast<bool>(input_uninitialized[0])) {
if (!input_impls_[0]) {
return absl::FailedPreconditionError(
"Something went wrong internally. The first iterator should "
"exist because of `Initialize`.");
}
input_contexts_[0].set_restored_element_count(
*ctx->restored_element_count());
TF_RETURN_IF_ERROR(
RestoreInput(&input_contexts_[0], reader, input_impls_[0]));
ctx->MergeCheckpoint(input_contexts_[0].checkpoint());
}
if (!static_cast<bool>(input_uninitialized[1])) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[1], this, strings::StrCat(prefix(), "[1]"),
&input_impls_[1]));
input_contexts_[1].set_restored_element_count(
*ctx->restored_element_count());
TF_RETURN_IF_ERROR(
RestoreInput(&input_contexts_[1], reader, input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(reader->ReadScalar(prefix(), kIndex, &i_));
if (!TF_PREDICT_TRUE(i_ >= 0 && i_ <= 2))
return errors::InvalidArgument("i_ must be in range [0, 2].");
if (!static_cast<bool>(input_uninitialized[0])) {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impls_[0]));
}
if (!static_cast<bool>(input_uninitialized[1])) {
TF_RETURN_IF_ERROR(dataset()->to_concatenate_->MakeIterator(
&input_contexts_[1], this, strings::StrCat(prefix(), "[1]"),
&input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
TF_RETURN_IF_ERROR(
RestoreInput(&input_contexts_[1], reader, input_impls_[1]));
ctx->MergeCheckpoint(input_contexts_[1].checkpoint());
}
return absl::OkStatus();
}
private:
mutex mu_;
int64_t i_ TF_GUARDED_BY(mu_);
std::vector<std::unique_ptr<IteratorBase>> input_impls_ TF_GUARDED_BY(mu_);
std::vector<IteratorContext> input_contexts_ TF_GUARDED_BY(mu_);
size_t element_count_ TF_GUARDED_BY(mu_) = 0;
};
Status MostSpecificCompatibleShape(const PartialTensorShape& ts1,
const PartialTensorShape& ts2,
PartialTensorShape* output_tensorshape) {
if (ts1.dims() != ts2.dims() || ts1.unknown_rank() || ts2.unknown_rank())
return absl::OkStatus();
auto dims1 = ts1.dim_sizes();
auto dims2 = ts2.dim_sizes();
for (int d = 0; d < ts1.dims(); d++) {
if (dims1[d] == dims2[d])
TF_RETURN_IF_ERROR(output_tensorshape->AddDimWithStatus(dims1[d]));
else
TF_RETURN_IF_ERROR(output_tensorshape->AddDimWithStatus(-1));
}
return absl::OkStatus();
}
const DatasetBase* input_;
const DatasetBase* to_concatenate_;
const int64_t input_cardinality_;
const int64_t to_concatenate_cardinality_;
std::vector<PartialTensorShape> output_shapes_;
absl::Status random_indexing_compatible_ = absl::OkStatus();
};
ConcatenateDatasetOp::ConcatenateDatasetOp(OpKernelConstruction* ctx)
: BinaryDatasetOpKernel(ctx) {}
void ConcatenateDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase* to_concatenate,
DatasetBase** output) {
OP_REQUIRES(ctx, input->output_dtypes() == to_concatenate->output_dtypes(),
errors::InvalidArgument(
"input dataset and dataset to concatenate"
" have different output_types %s and %s",
(DataTypeVectorString(input->output_dtypes()),
DataTypeVectorString(to_concatenate->output_dtypes()))));
*output = new Dataset(ctx, input, to_concatenate);
}
namespace {
REGISTER_KERNEL_BUILDER(Name("ConcatenateDataset").Device(DEVICE_CPU),
ConcatenateDatasetOp);
}
}
} | #include "tensorflow/core/kernels/data/concatenate_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "concatenate_dataset";
ConcatenateDatasetParams SameShapeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{2, 2},
{{1, 2, 3, 4}, {5, 6, 7, 8}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<int64_t>(
TensorShape{2, 2}, {{11, 12, 13, 14}, {15, 16, 17, 18}}),
"tensor_slice_1");
return ConcatenateDatasetParams(
std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64, DT_INT64},
{PartialTensorShape({2}), PartialTensorShape({2})},
kNodeName);
}
ConcatenateDatasetParams DifferentShapeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 3}, {1, 2, 3, 4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2, 2}, {7, 8, 9, 10})},
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{2, 2}, {11, 12, 13, 14}),
CreateTensor<int64_t>(TensorShape{2, 1}, {15, 16})},
"tensor_slice_1");
return ConcatenateDatasetParams(
std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64, DT_INT64},
{PartialTensorShape({-1}), PartialTensorShape({-1})},
kNodeName);
}
ConcatenateDatasetParams DifferentDtypeConcatenateDatasetParams() {
auto tensor_slice_dataset_params_0 = TensorSliceDatasetParams(
CreateTensors<int64_t>(TensorShape{2, 2}, {{1, 2, 3, 4}}),
"tensor_slice_0");
auto tensor_slice_dataset_params_1 = TensorSliceDatasetParams(
CreateTensors<double>(TensorShape{2, 2}, {{1.0, 2.0, 3.0, 4.0}}),
"tensor_slice_1");
return ConcatenateDatasetParams(std::move(tensor_slice_dataset_params_0),
std::move(tensor_slice_dataset_params_1),
{DT_INT64},
{PartialTensorShape({2})},
kNodeName);
}
class ConcatenateDatasetOpTest : public DatasetOpsTestBase {};
std::vector<GetNextTestCase<ConcatenateDatasetParams>> GetNextTestCases() {
return {{SameShapeConcatenateDatasetParams(),
CreateTensors<int64_t>(TensorShape({2}), {{1, 2},
{5, 6},
{3, 4},
{7, 8},
{11, 12},
{15, 16},
{13, 14},
{17, 18}})},
{DifferentShapeConcatenateDatasetParams(),
{CreateTensor<int64_t>(TensorShape{3}, {1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2}, {7, 8}),
CreateTensor<int64_t>(TensorShape{3}, {4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2}, {9, 10}),
CreateTensor<int64_t>(TensorShape{2}, {11, 12}),
CreateTensor<int64_t>(TensorShape{1}, {15}),
CreateTensor<int64_t>(TensorShape{2}, {13, 14}),
CreateTensor<int64_t>(TensorShape{1}, {16})}}};
}
ITERATOR_GET_NEXT_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
GetNextTestCases())
TEST_F(ConcatenateDatasetOpTest, DifferentDtypes) {
auto dataset_params = DifferentDtypeConcatenateDatasetParams();
EXPECT_EQ(Initialize(dataset_params).code(),
absl::StatusCode::kInvalidArgument);
}
TEST_F(ConcatenateDatasetOpTest, DatasetNodeName) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(ConcatenateDatasetOpTest, DatasetTypeString) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(ConcatenateDatasetOp::kDatasetType)));
}
std::vector<DatasetOutputDtypesTestCase<ConcatenateDatasetParams>>
DatasetOutputDtypesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_dtypes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_dtypes()}};
}
DATASET_OUTPUT_DTYPES_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
DatasetOutputDtypesTestCases())
std::vector<DatasetOutputShapesTestCase<ConcatenateDatasetParams>>
DatasetOutputShapesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_shapes()},
{
DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_shapes()}};
}
DATASET_OUTPUT_SHAPES_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<ConcatenateDatasetParams>>
CardinalityTestCases() {
return {{SameShapeConcatenateDatasetParams(),
4},
{DifferentShapeConcatenateDatasetParams(),
4}};
}
DATASET_CARDINALITY_TEST_P(ConcatenateDatasetOpTest, ConcatenateDatasetParams,
CardinalityTestCases())
std::vector<IteratorOutputDtypesTestCase<ConcatenateDatasetParams>>
IteratorOutputDtypesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_dtypes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_dtypes()}};
}
ITERATOR_OUTPUT_DTYPES_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorOutputDtypesTestCases())
std::vector<IteratorOutputShapesTestCase<ConcatenateDatasetParams>>
IteratorOutputShapesTestCases() {
return {{SameShapeConcatenateDatasetParams(),
SameShapeConcatenateDatasetParams().output_shapes()},
{DifferentShapeConcatenateDatasetParams(),
DifferentShapeConcatenateDatasetParams().output_shapes()}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(ConcatenateDatasetOpTest, IteratorPrefix) {
auto dataset_params = SameShapeConcatenateDatasetParams();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
ConcatenateDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<ConcatenateDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{SameShapeConcatenateDatasetParams(),
{0, 2, 5},
CreateTensors<int64_t>(TensorShape({2}), {{1, 2},
{5, 6},
{3, 4},
{7, 8},
{11, 12},
{15, 16},
{13, 14},
{17, 18}})},
{DifferentShapeConcatenateDatasetParams(),
{0, 2, 5},
{CreateTensor<int64_t>(TensorShape{3}, {1, 2, 3}),
CreateTensor<int64_t>(TensorShape{2}, {7, 8}),
CreateTensor<int64_t>(TensorShape{3}, {4, 5, 6}),
CreateTensor<int64_t>(TensorShape{2}, {9, 10}),
CreateTensor<int64_t>(TensorShape{2}, {11, 12}),
CreateTensor<int64_t>(TensorShape{1}, {15}),
CreateTensor<int64_t>(TensorShape{2}, {13, 14}),
CreateTensor<int64_t>(TensorShape{1}, {16})}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(ConcatenateDatasetOpTest,
ConcatenateDatasetParams,
IteratorSaveAndRestoreTestCases())
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/concatenate_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/concatenate_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bcacdd99-1c10-474c-bf53-07e8abaf672b | cpp | google/quiche | aes_128_gcm_decrypter | quiche/quic/core/crypto/aes_128_gcm_decrypter.cc | quiche/quic/core/crypto/aes_128_gcm_decrypter_test.cc | #include "quiche/quic/core/crypto/aes_128_gcm_decrypter.h"
#include "openssl/aead.h"
#include "openssl/tls1.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
namespace {
const size_t kKeySize = 16;
const size_t kNonceSize = 12;
}
Aes128GcmDecrypter::Aes128GcmDecrypter()
: AesBaseDecrypter(EVP_aead_aes_128_gcm, kKeySize, kAuthTagSize, kNonceSize,
true) {
static_assert(kKeySize <= kMaxKeySize, "key size too big");
static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
Aes128GcmDecrypter::~Aes128GcmDecrypter() {}
uint32_t Aes128GcmDecrypter::cipher_id() const {
return TLS1_CK_AES_128_GCM_SHA256;
}
} | #include "quiche/quic/core/crypto/aes_128_gcm_decrypter.h"
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
struct TestGroupInfo {
size_t key_len;
size_t iv_len;
size_t pt_len;
size_t aad_len;
size_t tag_len;
};
struct TestVector {
const char* key;
const char* iv;
const char* ct;
const char* aad;
const char* tag;
const char* pt;
};
const TestGroupInfo test_group_info[] = {
{128, 96, 0, 0, 128}, {128, 96, 0, 128, 128}, {128, 96, 128, 0, 128},
{128, 96, 408, 160, 128}, {128, 96, 408, 720, 128}, {128, 96, 104, 0, 128},
};
const TestVector test_group_0[] = {
{"cf063a34d4a9a76c2c86787d3f96db71", "113b9785971864c83b01c787", "", "",
"72ac8493e3a5228b5d130a69d2510e42", ""},
{
"a49a5e26a2f8cb63d05546c2a62f5343", "907763b19b9b4ab6bd4f0281", "", "",
"a2be08210d8c470a8df6e8fbd79ec5cf",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_1[] = {
{
"d1f6af919cde85661208bdce0c27cb22", "898c6929b435017bf031c3c5", "",
"7c5faa40e636bbc91107e68010c92b9f", "ae45f11777540a2caeb128be8092468a",
nullptr
},
{"2370e320d4344208e0ff5683f243b213", "04dbb82f044d30831c441228", "",
"d43a8e5089eea0d026c03a85178b27da", "2a049c049d25aa95969b451d93c31c6e",
""},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_2[] = {
{"e98b72a9881a84ca6b76e0f43e68647a", "8b23299fde174053f3d652ba",
"5a3c1cf1985dbb8bed818036fdd5ab42", "", "23c7ab0f952b7091cd324835043b5eb5",
"28286a321293253c3e0aa2704a278032"},
{"33240636cd3236165f1a553b773e728e", "17c4d61493ecdc8f31700b12",
"47bb7e23f7bdfe05a8091ac90e4f8b2e", "", "b723c70e931d9785f40fd4ab1d612dc9",
"95695a5b12f2870b9cc5fdc8f218a97d"},
{
"5164df856f1e9cac04a79b808dc5be39", "e76925d5355e0584ce871b2b",
"0216c899c88d6e32c958c7e553daa5bc", "",
"a145319896329c96df291f64efbe0e3a",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_3[] = {
{"af57f42c60c0fc5a09adb81ab86ca1c3", "a2dc01871f37025dc0fc9a79",
"b9a535864f48ea7b6b1367914978f9bfa087d854bb0e269bed8d279d2eea1210e48947"
"338b22f9bad09093276a331e9c79c7f4",
"41dc38988945fcb44faf2ef72d0061289ef8efd8",
"4f71e72bde0018f555c5adcce062e005",
"3803a0727eeb0ade441e0ec107161ded2d425ec0d102f21f51bf2cf9947c7ec4aa7279"
"5b2f69b041596e8817d0a3c16f8fadeb"},
{"ebc753e5422b377d3cb64b58ffa41b61", "2e1821efaced9acf1f241c9b",
"069567190554e9ab2b50a4e1fbf9c147340a5025fdbd201929834eaf6532325899ccb9"
"f401823e04b05817243d2142a3589878",
"b9673412fd4f88ba0e920f46dd6438ff791d8eef",
"534d9234d2351cf30e565de47baece0b",
"39077edb35e9c5a4b1e4c2a6b9bb1fce77f00f5023af40333d6d699014c2bcf4209c18"
"353a18017f5b36bfc00b1f6dcb7ed485"},
{
"52bdbbf9cf477f187ec010589cb39d58", "d3be36d3393134951d324b31",
"700188da144fa692cf46e4a8499510a53d90903c967f7f13e8a1bd8151a74adc4fe63e"
"32b992760b3a5f99e9a47838867000a9",
"93c4fc6a4135f54d640b0c976bf755a06a292c33",
"8ca4e38aa3dfa6b1d0297021ccf3ea5f",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_4[] = {
{"da2bb7d581493d692380c77105590201", "44aa3e7856ca279d2eb020c6",
"9290d430c9e89c37f0446dbd620c9a6b34b1274aeb6f911f75867efcf95b6feda69f1a"
"f4ee16c761b3c9aeac3da03aa9889c88",
"4cd171b23bddb3a53cdf959d5c1710b481eb3785a90eb20a2345ee00d0bb7868c367ab"
"12e6f4dd1dee72af4eee1d197777d1d6499cc541f34edbf45cda6ef90b3c024f9272d7"
"2ec1909fb8fba7db88a4d6f7d3d925980f9f9f72",
"9e3ac938d3eb0cadd6f5c9e35d22ba38",
"9bbf4c1a2742f6ac80cb4e8a052e4a8f4f07c43602361355b717381edf9fabd4cb7e3a"
"d65dbd1378b196ac270588dd0621f642"},
{"d74e4958717a9d5c0e235b76a926cae8", "0b7471141e0c70b1995fd7b1",
"e701c57d2330bf066f9ff8cf3ca4343cafe4894651cd199bdaaa681ba486b4a65c5a22"
"b0f1420be29ea547d42c713bc6af66aa",
"4a42b7aae8c245c6f1598a395316e4b8484dbd6e64648d5e302021b1d3fa0a38f46e22"
"bd9c8080b863dc0016482538a8562a4bd0ba84edbe2697c76fd039527ac179ec5506cf"
"34a6039312774cedebf4961f3978b14a26509f96",
"e192c23cb036f0b31592989119eed55d",
"840d9fb95e32559fb3602e48590280a172ca36d9b49ab69510f5bd552bfab7a306f85f"
"f0a34bc305b88b804c60b90add594a17"},
{
"1986310c725ac94ecfe6422e75fc3ee7", "93ec4214fa8e6dc4e3afc775",
"b178ec72f85a311ac4168f42a4b2c23113fbea4b85f4b9dabb74e143eb1b8b0a361e02"
"43edfd365b90d5b325950df0ada058f9",
"e80b88e62c49c958b5e0b8b54f532d9ff6aa84c8a40132e93e55b59fc24e8decf28463"
"139f155d1e8ce4ee76aaeefcd245baa0fc519f83a5fb9ad9aa40c4b21126013f576c42"
"72c2cb136c8fd091cc4539877a5d1e72d607f960",
"8b347853f11d75e81e8a95010be81f17",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_5[] = {
{"387218b246c1a8257748b56980e50c94", "dd7e014198672be39f95b69d",
"cdba9e73eaf3d38eceb2b04a8d", "", "ecf90f4a47c9c626d6fb2c765d201556",
"48f5b426baca03064554cc2b30"},
{"294de463721e359863887c820524b3d4", "3338b35c9d57a5d28190e8c9",
"2f46634e74b8e4c89812ac83b9", "", "dabd506764e68b82a7e720aa18da0abe",
"46a2e55c8e264df211bd112685"},
{"28ead7fd2179e0d12aa6d5d88c58c2dc", "5055347f18b4d5add0ae5c41",
"142d8210c3fb84774cdbd0447a", "", "5fd321d9cdb01952dc85f034736c2a7d",
"3b95b981086ee73cc4d0cc1422"},
{
"7d7b6c988137b8d470c57bf674a09c87", "9edf2aa970d016ac962e1fd8",
"a85b66c3cb5eab91d5bdc8bc0e", "", "dc054efc01f3afd21d9c2484819f569a",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector* const test_group_array[] = {
test_group_0, test_group_1, test_group_2,
test_group_3, test_group_4, test_group_5,
};
}
namespace quic {
namespace test {
QuicData* DecryptWithNonce(Aes128GcmDecrypter* decrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view ciphertext) {
decrypter->SetIV(nonce);
std::unique_ptr<char[]> output(new char[ciphertext.length()]);
size_t output_length = 0;
const bool success =
decrypter->DecryptPacket(0, associated_data, ciphertext, output.get(),
&output_length, ciphertext.length());
if (!success) {
return nullptr;
}
return new QuicData(output.release(), output_length, true);
}
class Aes128GcmDecrypterTest : public QuicTest {};
TEST_F(Aes128GcmDecrypterTest, Decrypt) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(test_group_array); i++) {
SCOPED_TRACE(i);
const TestVector* test_vectors = test_group_array[i];
const TestGroupInfo& test_info = test_group_info[i];
for (size_t j = 0; test_vectors[j].key != nullptr; j++) {
bool has_pt = test_vectors[j].pt;
std::string key;
std::string iv;
std::string ct;
std::string aad;
std::string tag;
std::string pt;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].ct, &ct));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].tag, &tag));
if (has_pt) {
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].pt, &pt));
}
EXPECT_EQ(test_info.key_len, key.length() * 8);
EXPECT_EQ(test_info.iv_len, iv.length() * 8);
EXPECT_EQ(test_info.pt_len, ct.length() * 8);
EXPECT_EQ(test_info.aad_len, aad.length() * 8);
EXPECT_EQ(test_info.tag_len, tag.length() * 8);
if (has_pt) {
EXPECT_EQ(test_info.pt_len, pt.length() * 8);
}
std::string ciphertext = ct + tag;
Aes128GcmDecrypter decrypter;
ASSERT_TRUE(decrypter.SetKey(key));
std::unique_ptr<QuicData> decrypted(DecryptWithNonce(
&decrypter, iv,
aad.length() ? aad : absl::string_view(), ciphertext));
if (!decrypted) {
EXPECT_FALSE(has_pt);
continue;
}
EXPECT_TRUE(has_pt);
ASSERT_EQ(pt.length(), decrypted->length());
quiche::test::CompareCharArraysWithHexError(
"plaintext", decrypted->data(), pt.length(), pt.data(), pt.length());
}
}
}
TEST_F(Aes128GcmDecrypterTest, GenerateHeaderProtectionMask) {
Aes128GcmDecrypter decrypter;
std::string key;
std::string sample;
std::string expected_mask;
ASSERT_TRUE(absl::HexStringToBytes("d9132370cb18476ab833649cf080d970", &key));
ASSERT_TRUE(
absl::HexStringToBytes("d1d7998068517adb769b48b924a32c47", &sample));
ASSERT_TRUE(absl::HexStringToBytes("b132c37d6164da4ea4dc9b763aceec27",
&expected_mask));
QuicDataReader sample_reader(sample.data(), sample.size());
ASSERT_TRUE(decrypter.SetHeaderProtectionKey(key));
std::string mask = decrypter.GenerateHeaderProtectionMask(&sample_reader);
quiche::test::CompareCharArraysWithHexError(
"header protection mask", mask.data(), mask.size(), expected_mask.data(),
expected_mask.size());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_128_gcm_decrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_128_gcm_decrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
145ec2e0-d3a0-4ee7-9f72-6f42a78ef99f | cpp | tensorflow/tensorflow | onednn_softmax | third_party/xla/xla/service/cpu/onednn_softmax.cc | third_party/xla/xla/service/cpu/tests/onednn_softmax_test.cc | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include "xla/service/cpu/onednn_softmax.h"
#include <algorithm>
#include <cmath>
#include <initializer_list>
#include <vector>
#include "absl/base/dynamic_annotations.h"
#include "dnnl.hpp"
#include "xla/executable_run_options.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_memory_util.h"
#include "xla/service/cpu/runtime_lightweight_check.h"
#include "xla/tsl/util/onednn_threadpool.h"
#include "unsupported/Eigen/CXX11/Tensor"
namespace xla {
namespace cpu {
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY void __xla_cpu_runtime_OneDnnSoftmax(
const void* run_options_ptr, void* input, void* result,
void* softmax_config_ptr) {
const xla::ExecutableRunOptions* run_options =
static_cast<const xla::ExecutableRunOptions*>(run_options_ptr);
XLA_LIGHTWEIGHT_CHECK(run_options != nullptr);
XLA_LIGHTWEIGHT_CHECK(run_options->intra_op_thread_pool() != nullptr);
tsl::OneDnnThreadPool thread_pool(
run_options->intra_op_thread_pool()->getPool(), false);
dnnl::engine cpu_engine(dnnl::engine::kind::cpu, 0);
#ifndef ENABLE_ONEDNN_OPENMP
auto onednn_stream = dnnl::stream(
dnnl::threadpool_interop::make_stream(cpu_engine, &thread_pool));
#else
auto onednn_stream = dnnl::stream(cpu_engine);
#endif
std::string config_str(static_cast<const char*>(softmax_config_ptr));
OneDnnSoftmaxConfig softmax_config;
softmax_config.ParseFromString(config_str);
MemrefInfo input_minfo(input);
MemrefInfo result_minfo(result);
auto src_md = input_minfo.GetOneDnnMemDesc();
auto dst_md = result_minfo.GetOneDnnMemDesc();
auto src_mem = dnnl::memory(src_md, cpu_engine, input_minfo.Data());
auto dst_mem = dnnl::memory(dst_md, cpu_engine, result_minfo.Data());
int axis = softmax_config.softmax_axis();
auto softmax_pd = dnnl::softmax_forward::primitive_desc(
cpu_engine, dnnl::prop_kind::forward_inference,
dnnl::algorithm::softmax_accurate, src_md, dst_md, axis);
auto softmax_prim = dnnl::softmax_forward(softmax_pd);
std::unordered_map<int, dnnl::memory> softmax_args;
softmax_args.insert({DNNL_ARG_SRC, src_mem});
softmax_args.insert({DNNL_ARG_DST, dst_mem});
softmax_prim.execute(onednn_stream, softmax_args);
}
}
}
#endif | #if defined(INTEL_MKL) && defined(ENABLE_ONEDNN_V3)
#include <utility>
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "xla/literal.h"
#include "xla/service/cpu/backend_config.pb.h"
#include "xla/service/cpu/onednn_config.pb.h"
#include "xla/service/cpu/onednn_ops_rewriter.h"
#include "xla/service/cpu/onednn_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
namespace xla {
namespace cpu {
std::string TestParamsToString(
const ::testing::TestParamInfo<std::tuple<PrimitiveType, int>>& data) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = data.param;
return absl::StrCat(primitive_util::LowercasePrimitiveTypeName(data_type),
"_BatchSize", std::to_string(batch_size));
}
class OneDnnSoftmaxTest
: public HloTestBase,
public ::testing::WithParamInterface<std::tuple<PrimitiveType, int>> {
protected:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_cpu_use_thunk_runtime(false);
return debug_options;
}
const char* onednn_softmax_ =
R"(
; CHECK: custom_call_target="__onednn$softmax"
)";
const std::string GetGenericSoftmaxHLORawText(PrimitiveType data_type,
int batch_size) {
const std::string softmax_hlo_template_string = R"(
HloModule softmax_module
region_max {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = $0[] parameter(0)
Arg_1 = $0[] parameter(1)
ROOT add = $0[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = $0[$1,128,30522]{2,1,0} parameter(0)
neg_inf = $0[] constant(-inf)
reduce_max = $0[$1,128]{1,0} reduce(Arg_0, neg_inf), dimensions={2}, to_apply=region_max
reshape.0 = $0[$1,128,1]{2,1,0} reshape(reduce_max)
broadcast.0 = $0[$1,128,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = $0[$1,128]{1,0} reshape(broadcast.0)
broadcast.1 = $0[$1,128,30522]{2,1,0} broadcast(reshape.1), dimensions={0,1}
subtract.0 = $0[$1,128,30522]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = $0[$1,128,30522]{2,1,0} exponential(subtract.0)
const_zero = $0[] constant(0)
reduce_add = $0[$1,128]{1,0} reduce(exponential, const_zero), dimensions={2}, to_apply=region_add
reshape.2 = $0[$1,128,1]{2,1,0} reshape(reduce_add)
broadcast.2 = $0[$1,128,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = $0[$1,128]{1,0} reshape(broadcast.2)
broadcast.3 = $0[$1,128,30522]{2,1,0} broadcast(reshape.3), dimensions={0,1}
ROOT divide = $0[$1,128,30522]{2,1,0} divide(exponential, broadcast.3)
}
)";
const std::string softmax_hlo_string = absl::Substitute(
softmax_hlo_template_string,
primitive_util::LowercasePrimitiveTypeName(data_type), batch_size);
return softmax_hlo_string;
}
void TestSoftmaxPatternMatching(std::string input_hlo_string,
int expected_softmax_axis) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(input_hlo_string));
OneDnnOpsRewriter softmax_rewrite_pass;
HloInstruction* onednn_softmax;
OneDnnSoftmaxConfig softmax_config;
TF_ASSERT_OK_AND_ASSIGN(
bool changed, this->RunHloPass(&softmax_rewrite_pass, module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(::xla::match::CustomCall(&onednn_softmax,
{"__onednn$softmax"})));
auto backend_config = onednn_softmax->backend_config<BackendConfig>();
softmax_config.CopyFrom(backend_config->onednn_softmax_config());
int axis_after_rewrite = softmax_config.softmax_axis();
EXPECT_EQ(expected_softmax_axis, axis_after_rewrite);
}
};
TEST_P(OneDnnSoftmaxTest, SoftmaxGenericTest) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = GetParam();
if (!IsSupportedType(data_type)) {
GTEST_SKIP() << "CPU does not support "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
}
const std::string softmax_hlo_string =
GetGenericSoftmaxHLORawText(data_type, batch_size);
TestSoftmaxPatternMatching(softmax_hlo_string, 2);
}
TEST_P(OneDnnSoftmaxTest, SoftmaxGenericNumericalCorrectnessTest) {
PrimitiveType data_type;
int batch_size;
std::tie(data_type, batch_size) = GetParam();
if (!IsSupportedType(data_type)) {
GTEST_SKIP() << "CPU does not support "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
}
const std::string onednn_softmax_hlo_template_string = R"(
HloModule softmax_module
ENTRY main {
Arg_0 = $0[$1,128,30522]{2,1,0} parameter(0)
ROOT custom-call = $0[$1,128,30522]{2,1,0} custom-call(Arg_0), custom_call_target="$2", backend_config={"onednn_softmax_config":{"softmax_axis":2}}
}
)";
auto onednn_softmax_hlo_string =
absl::Substitute(onednn_softmax_hlo_template_string,
primitive_util::LowercasePrimitiveTypeName(data_type),
batch_size, "__onednn$softmax");
const std::string hlo_string_ref =
GetGenericSoftmaxHLORawText(data_type, batch_size);
float atol = (data_type == F32) ? 1e-4 : 1e-2;
float rtol = (data_type == F32) ? 1e-4 : 1e-2;
EXPECT_TRUE(RunAndCompareTwoModules(onednn_softmax_hlo_string, hlo_string_ref,
ErrorSpec{atol, rtol},
false));
}
INSTANTIATE_TEST_SUITE_P(OneDnnSoftmaxTestSuite, OneDnnSoftmaxTest,
::testing::Combine(::testing::ValuesIn({F32, BF16,
F16}),
::testing::Values(1, 16)),
TestParamsToString);
TEST_F(OneDnnSoftmaxTest, SoftmaxFP32OnAxisZero) {
const std::string softmax_hlo_string = R"(
HloModule softmax_module
region_max {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = f32[3,1,1]{2,1,0} parameter(0)
neg_inf = f32[] constant(-inf)
reduce_max = f32[1,1]{1,0} reduce(Arg_0, neg_inf), dimensions={0}, to_apply=region_max
neg_inf.1 = f32[1,1]{1,0} constant({ {-inf} })
maximum = f32[1,1]{1,0} maximum(reduce_max, neg_inf.1)
reshape.0 = f32[1,1,1]{2,1,0} reshape(maximum)
broadcast.0 = f32[1,1,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = f32[1,1]{1,0} reshape(broadcast.0)
broadcast.1 = f32[3,1,1]{2,1,0} broadcast(reshape.1), dimensions={1,2}
subtract = f32[3,1,1]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = f32[3,1,1]{2,1,0} exponential(subtract)
const_zero = f32[] constant(0)
reduce_add = f32[1,1]{1,0} reduce(exponential, const_zero), dimensions={0}, to_apply=region_add
reshape.2 = f32[1,1,1]{2,1,0} reshape(reduce_add)
broadcast.2 = f32[1,1,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = f32[1,1]{1,0} reshape(broadcast.2)
broadcast.3 = f32[3,1,1]{2,1,0} broadcast(reshape.3), dimensions={1,2}
ROOT divide = f32[3,1,1]{2,1,0} divide(exponential, broadcast.3)
}
)";
TestSoftmaxPatternMatching(softmax_hlo_string, 0);
}
TEST_F(OneDnnSoftmaxTest, SoftmaxWithBF16ConvertOutputFP32Pattern) {
if (!IsSupportedType(PrimitiveType::BF16)) {
GTEST_SKIP() << "CPU does not support BF16.";
}
const std::string softmax_hlo_string = R"(
HloModule softmax_module
region_max {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(Arg_0, Arg_1)
}
region_add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
ENTRY main {
Arg_0 = f32[16,128,30522]{2,1,0} parameter(0)
neg_inf = f32[] constant(-inf)
reduce_max = f32[16,128]{1,0} reduce(Arg_0, neg_inf), dimensions={2}, to_apply=region_max
reshape.0 = f32[16,128,1]{2,1,0} reshape(reduce_max)
broadcast.0 = f32[16,128,1]{2,1,0} broadcast(reshape.0), dimensions={0,1,2}
reshape.1 = f32[16,128]{1,0} reshape(broadcast.0)
broadcast.1 = f32[16,128,30522]{2,1,0} broadcast(reshape.1), dimensions={0,1}
subtract = f32[16,128,30522]{2,1,0} subtract(Arg_0, broadcast.1)
exponential = f32[16,128,30522]{2,1,0} exponential(subtract)
const_zero = f32[] constant(0)
reduce_add = f32[16,128]{1,0} reduce(exponential, const_zero), dimensions={2}, to_apply=region_add
reshape.2 = f32[16,128,1]{2,1,0} reshape(reduce_add)
broadcast.2 = f32[16,128,1]{2,1,0} broadcast(reshape.2), dimensions={0,1,2}
reshape.3 = f32[16,128]{1,0} reshape(broadcast.2)
broadcast.3 = f32[16,128,30522]{2,1,0} broadcast(reshape.3), dimensions={0,1}
divide = f32[16,128,30522]{2,1,0} divide(exponential, broadcast.3)
ROOT convert = bf16[16,128,30522]{2,1,0} convert(divide)
}
)";
TestSoftmaxPatternMatching(softmax_hlo_string, 2);
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/onednn_softmax.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/cpu/tests/onednn_softmax_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
94d65507-653b-432e-949d-172867d6e0b4 | cpp | google/quiche | aes_256_gcm_decrypter | quiche/quic/core/crypto/aes_256_gcm_decrypter.cc | quiche/quic/core/crypto/aes_256_gcm_decrypter_test.cc | #include "quiche/quic/core/crypto/aes_256_gcm_decrypter.h"
#include "openssl/aead.h"
#include "openssl/tls1.h"
#include "quiche/quic/platform/api/quic_flag_utils.h"
#include "quiche/quic/platform/api/quic_flags.h"
namespace quic {
namespace {
const size_t kKeySize = 32;
const size_t kNonceSize = 12;
}
Aes256GcmDecrypter::Aes256GcmDecrypter()
: AesBaseDecrypter(EVP_aead_aes_256_gcm, kKeySize, kAuthTagSize, kNonceSize,
true) {
static_assert(kKeySize <= kMaxKeySize, "key size too big");
static_assert(kNonceSize <= kMaxNonceSize, "nonce size too big");
}
Aes256GcmDecrypter::~Aes256GcmDecrypter() {}
uint32_t Aes256GcmDecrypter::cipher_id() const {
return TLS1_CK_AES_256_GCM_SHA384;
}
} | #include "quiche/quic/core/crypto/aes_256_gcm_decrypter.h"
#include <memory>
#include <string>
#include "absl/base/macros.h"
#include "absl/strings/escaping.h"
#include "absl/strings/string_view.h"
#include "quiche/quic/core/quic_utils.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/common/test_tools/quiche_test_utils.h"
namespace {
struct TestGroupInfo {
size_t key_len;
size_t iv_len;
size_t pt_len;
size_t aad_len;
size_t tag_len;
};
struct TestVector {
const char* key;
const char* iv;
const char* ct;
const char* aad;
const char* tag;
const char* pt;
};
const TestGroupInfo test_group_info[] = {
{256, 96, 0, 0, 128}, {256, 96, 0, 128, 128}, {256, 96, 128, 0, 128},
{256, 96, 408, 160, 128}, {256, 96, 408, 720, 128}, {256, 96, 104, 0, 128},
};
const TestVector test_group_0[] = {
{"f5a2b27c74355872eb3ef6c5feafaa740e6ae990d9d48c3bd9bb8235e589f010",
"58d2240f580a31c1d24948e9", "", "", "15e051a5e4a5f5da6cea92e2ebee5bac",
""},
{
"e5a8123f2e2e007d4e379ba114a2fb66e6613f57c72d4e4f024964053028a831",
"51e43385bf533e168427e1ad", "", "", "38fe845c66e66bdd884c2aecafd280e6",
nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_1[] = {
{"6dfdafd6703c285c01f14fd10a6012862b2af950d4733abb403b2e745b26945d",
"3749d0b3d5bacb71be06ade6", "", "c0d249871992e70302ae008193d1e89f",
"4aa4cc69f84ee6ac16d9bfb4e05de500", ""},
{
"2c392a5eb1a9c705371beda3a901c7c61dca4d93b4291de1dd0dd15ec11ffc45",
"0723fb84a08f4ea09841f32a", "", "140be561b6171eab942c486a94d33d43",
"aa0e1c9b57975bfc91aa137231977d2c", nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_2[] = {
{"4c8ebfe1444ec1b2d503c6986659af2c94fafe945f72c1e8486a5acfedb8a0f8",
"473360e0ad24889959858995", "d2c78110ac7e8f107c0df0570bd7c90c", "",
"c26a379b6d98ef2852ead8ce83a833a7", "7789b41cb3ee548814ca0b388c10b343"},
{"3934f363fd9f771352c4c7a060682ed03c2864223a1573b3af997e2ababd60ab",
"efe2656d878c586e41c539c4", "e0de64302ac2d04048d65a87d2ad09fe", "",
"33cbd8d2fb8a3a03e30c1eb1b53c1d99", "697aff2d6b77e5ed6232770e400c1ead"},
{
"c997768e2d14e3d38259667a6649079de77beb4543589771e5068e6cd7cd0b14",
"835090aed9552dbdd45277e2", "9f6607d68e22ccf21928db0986be126e", "",
"f32617f67c574fd9f44ef76ff880ab9f", nullptr
},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_3[] = {
{
"e9d381a9c413bee66175d5586a189836e5c20f5583535ab4d3f3e612dc21700e",
"23e81571da1c7821c681c7ca",
"a25f3f580306cd5065d22a6b7e9660110af7204bb77d370f7f34bee547feeff7b32a59"
"6fce29c9040e68b1589aad48da881990",
"6f39c9ae7b8e8a58a95f0dd8ea6a9087cbccdfd6",
"5b6dcd70eefb0892fab1539298b92a4b",
nullptr
},
{"6450d4501b1e6cfbe172c4c8570363e96b496591b842661c28c2f6c908379cad",
"7e4262035e0bf3d60e91668a",
"5a99b336fd3cfd82f10fb08f7045012415f0d9a06bb92dcf59c6f0dbe62d433671aacb8a1"
"c52ce7bbf6aea372bf51e2ba79406",
"f1c522f026e4c5d43851da516a1b78768ab18171",
"fe93b01636f7bb0458041f213e98de65",
"17449e236ef5858f6d891412495ead4607bfae2a2d735182a2a0242f9d52fc5345ef912db"
"e16f3bb4576fe3bcafe336dee6085"},
{"90f2e71ccb1148979cb742efc8f921de95457d898c84ce28edeed701650d3a26",
"aba58ad60047ba553f6e4c98",
"3fc77a5fe9203d091c7916587c9763cf2e4d0d53ca20b078b851716f1dab4873fe342b7b3"
"01402f015d00263bf3f77c58a99d6",
"2abe465df6e5be47f05b92c9a93d76ae3611fac5",
"9cb3d04637048bc0bddef803ffbb56cf",
"1d21639640e11638a2769e3fab78778f84be3f4a8ce28dfd99cb2e75171e05ea8e94e30aa"
"78b54bb402b39d613616a8ed951dc"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_4[] = {
{
"e36aca93414b13f5313e76a7244588ee116551d1f34c32859166f2eb0ac1a9b7",
"e9e701b1ccef6bddd03391d8",
"5b059ac6733b6de0e8cf5b88b7301c02c993426f71bb12abf692e9deeacfac1ff1644c"
"87d4df130028f515f0feda636309a24d",
"6a08fe6e55a08f283cec4c4b37676e770f402af6102f548ad473ec6236da764f7076ff"
"d41bbd9611b439362d899682b7b0f839fc5a68d9df54afd1e2b3c4e7d072454ee27111"
"d52193d28b9c4f925d2a8b451675af39191a2cba",
"43c7c9c93cc265fc8e192000e0417b5b",
nullptr
},
{"5f72046245d3f4a0877e50a86554bfd57d1c5e073d1ed3b5451f6d0fc2a8507a",
"ea6f5b391e44b751b26bce6f",
"0e6e0b2114c40769c15958d965a14dcf50b680e0185a4409d77d894ca15b1e698dd83b353"
"6b18c05d8cd0873d1edce8150ecb5",
"9b3a68c941d42744673fb60fea49075eae77322e7e70e34502c115b6495ebfc796d629080"
"7653c6b53cd84281bd0311656d0013f44619d2748177e99e8f8347c989a7b59f9d8dcf00f"
"31db0684a4a83e037e8777bae55f799b0d",
"fdaaff86ceb937502cd9012d03585800",
"b0a881b751cc1eb0c912a4cf9bd971983707dbd2411725664503455c55db25cdb19bc669c"
"2654a3a8011de6bf7eff3f9f07834"},
{"ab639bae205547607506522bd3cdca7861369e2b42ef175ff135f6ba435d5a8e",
"5fbb63eb44bd59fee458d8f6",
"9a34c62bed0972285503a32812877187a54dedbd55d2317fed89282bf1af4ba0b6bb9f9e1"
"6dd86da3b441deb7841262bc6bd63",
"1ef2b1768b805587935ffaf754a11bd2a305076d6374f1f5098b1284444b78f55408a786d"
"a37e1b7f1401c330d3585ef56f3e4d35eaaac92e1381d636477dc4f4beaf559735e902d6b"
"e58723257d4ac1ed9bd213de387f35f3c4",
"e0299e079bff46fd12e36d1c60e41434",
"e5a3ce804a8516cdd12122c091256b789076576040dbf3c55e8be3c016025896b8a72532b"
"fd51196cc82efca47aa0fd8e2e0dc"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector test_group_5[] = {
{
"8b37c4b8cf634704920059866ad96c49e9da502c63fca4a3a7a4dcec74cb0610",
"cb59344d2b06c4ae57cd0ea4", "66ab935c93555e786b775637a3", "",
"d8733acbb564d8afaa99d7ca2e2f92a9", nullptr
},
{"a71dac1377a3bf5d7fb1b5e36bee70d2e01de2a84a1c1009ba7448f7f26131dc",
"c5b60dda3f333b1146e9da7c", "43af49ec1ae3738a20755034d6", "",
"6f80b6ef2d8830a55eb63680a8dff9e0", "5b87141335f2becac1a559e05f"},
{"dc1f64681014be221b00793bbcf5a5bc675b968eb7a3a3d5aa5978ef4fa45ecc",
"056ae9a1a69e38af603924fe", "33013a48d9ea0df2911d583271", "",
"5b8f9cc22303e979cd1524187e9f70fe", "2a7e05612191c8bce2f529dca9"},
{nullptr, nullptr, nullptr, nullptr, nullptr, nullptr}};
const TestVector* const test_group_array[] = {
test_group_0, test_group_1, test_group_2,
test_group_3, test_group_4, test_group_5,
};
}
namespace quic {
namespace test {
QuicData* DecryptWithNonce(Aes256GcmDecrypter* decrypter,
absl::string_view nonce,
absl::string_view associated_data,
absl::string_view ciphertext) {
decrypter->SetIV(nonce);
std::unique_ptr<char[]> output(new char[ciphertext.length()]);
size_t output_length = 0;
const bool success =
decrypter->DecryptPacket(0, associated_data, ciphertext, output.get(),
&output_length, ciphertext.length());
if (!success) {
return nullptr;
}
return new QuicData(output.release(), output_length, true);
}
class Aes256GcmDecrypterTest : public QuicTest {};
TEST_F(Aes256GcmDecrypterTest, Decrypt) {
for (size_t i = 0; i < ABSL_ARRAYSIZE(test_group_array); i++) {
SCOPED_TRACE(i);
const TestVector* test_vectors = test_group_array[i];
const TestGroupInfo& test_info = test_group_info[i];
for (size_t j = 0; test_vectors[j].key != nullptr; j++) {
bool has_pt = test_vectors[j].pt;
std::string key;
std::string iv;
std::string ct;
std::string aad;
std::string tag;
std::string pt;
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].key, &key));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].iv, &iv));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].ct, &ct));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].aad, &aad));
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].tag, &tag));
if (has_pt) {
ASSERT_TRUE(absl::HexStringToBytes(test_vectors[j].pt, &pt));
}
EXPECT_EQ(test_info.key_len, key.length() * 8);
EXPECT_EQ(test_info.iv_len, iv.length() * 8);
EXPECT_EQ(test_info.pt_len, ct.length() * 8);
EXPECT_EQ(test_info.aad_len, aad.length() * 8);
EXPECT_EQ(test_info.tag_len, tag.length() * 8);
if (has_pt) {
EXPECT_EQ(test_info.pt_len, pt.length() * 8);
}
std::string ciphertext = ct + tag;
Aes256GcmDecrypter decrypter;
ASSERT_TRUE(decrypter.SetKey(key));
std::unique_ptr<QuicData> decrypted(DecryptWithNonce(
&decrypter, iv,
aad.length() ? aad : absl::string_view(), ciphertext));
if (!decrypted) {
EXPECT_FALSE(has_pt);
continue;
}
EXPECT_TRUE(has_pt);
ASSERT_EQ(pt.length(), decrypted->length());
quiche::test::CompareCharArraysWithHexError(
"plaintext", decrypted->data(), pt.length(), pt.data(), pt.length());
}
}
}
TEST_F(Aes256GcmDecrypterTest, GenerateHeaderProtectionMask) {
Aes256GcmDecrypter decrypter;
std::string key;
std::string sample;
std::string expected_mask;
ASSERT_TRUE(absl::HexStringToBytes(
"ed23ecbf54d426def5c52c3dcfc84434e62e57781d3125bb21ed91b7d3e07788",
&key));
ASSERT_TRUE(
absl::HexStringToBytes("4d190c474be2b8babafb49ec4e38e810", &sample));
ASSERT_TRUE(absl::HexStringToBytes("db9ed4e6ccd033af2eae01407199c56e",
&expected_mask));
QuicDataReader sample_reader(sample.data(), sample.size());
ASSERT_TRUE(decrypter.SetHeaderProtectionKey(key));
std::string mask = decrypter.GenerateHeaderProtectionMask(&sample_reader);
quiche::test::CompareCharArraysWithHexError(
"header protection mask", mask.data(), mask.size(), expected_mask.data(),
expected_mask.size());
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_256_gcm_decrypter.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/core/crypto/aes_256_gcm_decrypter_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |