ID | Language | Repository Name | File Name | File Path in Repository | File Path for Unit Test | Code | Unit Test - (Ground Truth) | Code Url | Test Code Url | Commit Hash
---|---|---|---|---|---|---|---|---|---|---|
a3c2a240-90d4-4864-80a2-208148d1b716 | cpp | tensorflow/tensorflow | horizontal_loop_fusion | third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion.cc | third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion_test.cc | #include "xla/service/gpu/transforms/horizontal_loop_fusion.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/sub_byte_normalization.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
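// Returns the element type shared by all outputs of `fusible`.
// CHECK-fails if the outputs do not all have the same element type.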
PrimitiveType GetUniqueOutputTypeOfFusible(const HloInstruction& fusible) {
auto outputs = GetOutputsOfFusible(fusible);
CHECK(!outputs.empty());
PrimitiveType first_output_type = outputs[0]->shape().element_type();
for (size_t i = 1; i < outputs.size(); ++i) {
PrimitiveType cur_output_type = outputs[i]->shape().element_type();
CHECK(first_output_type == cur_output_type)
<< "Output types are expected to be unique, but see "
<< PrimitiveType_Name(first_output_type) << " and "
<< PrimitiveType_Name(cur_output_type);
}
return first_output_type;
}
class HorizontalLoopFusionImpl {
public:
explicit HorizontalLoopFusionImpl(HloComputation* computation,
absl::string_view prefix)
: computation_(computation), prefix_(prefix) {}
~HorizontalLoopFusionImpl() = default;
absl::StatusOr<bool> Run();
private:
absl::Status Fuse(absl::Span<HloInstruction*> fused_fusion_instrs,
bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates);
absl::Status CreateFusedComputation(
absl::Span<HloInstruction*> fused_fusion_instrs,
std::unique_ptr<HloComputation>* uniq_computation,
std::vector<HloInstruction*>* bound_operands, bool sliced_input_fusion);
absl::StatusOr<bool> FuseConsumerOperands(
HloInstruction* consumer, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates);
class FusionCandidates {
public:
explicit FusionCandidates(HloInstruction* consumer,
bool sliced_input_fusion)
: fusible_instrs_(),
pos_(0),
sliced_input_fusion_(sliced_input_fusion) {
Initialize(consumer);
}
absl::Span<HloInstruction*> GetNextSpanOfFusions();
private:
void Initialize(HloInstruction*);
std::vector<HloInstruction*> fusible_instrs_;
size_t pos_;
bool sliced_input_fusion_;
};
HloComputation* computation_;
std::string prefix_;
};
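// Returns true if `instr` may take part in horizontal fusion: it must have no
// control dependencies, must not be a nestable variadic reduction, and must be
// either a non-nullary elementwise op or a loop fusion whose outputs all share
// one element type.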
bool IsFusibleCandidate(const HloInstruction& instr) {
if (!instr.control_successors().empty() ||
!instr.control_predecessors().empty()) {
return false;
}
if (IsNestableVariadicReduction(instr)) {
return false;
}
if (instr.IsElementwise() && instr.operand_count() > 0) {
return true;
}
if (!instr.IsLoopFusion()) {
return false;
}
auto outputs = GetOutputsOfFusible(instr);
CHECK(!outputs.empty());
const HloInstruction* first_output = outputs[0];
for (size_t i = 1; i < outputs.size(); ++i) {
if (first_output->shape().element_type() !=
outputs[i]->shape().element_type()) {
return false;
}
}
return true;
}
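// Heuristic profitability check. Rejects candidates whose output element count
// exceeds a threshold, or fusions with too many instructions; the thresholds
// are tighter for sliced-input fusion than for loop fusion.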
bool IsProfitableFusionCandidate(const HloInstruction& instr,
bool sliced_input_fusion) {
const int64_t kShapeThreshold =
sliced_input_fusion ? 128 * 2048 : 8192 * 8192;
const int64_t kInstrCountThreshold = sliced_input_fusion ? 30 : 128;
const HloInstruction* root = (instr.opcode() == HloOpcode::kFusion)
? instr.fused_expression_root()
: &instr;
if (root->opcode() == HloOpcode::kTuple) {
Shape shape = root->operand(0)->shape();
if (ShapeUtil::ElementsIn(shape) > kShapeThreshold) {
VLOG(2) << "Profitable check failed due to element count with "
"sliced_input_fusion="
<< sliced_input_fusion;
return false;
}
} else {
Shape shape = root->shape();
if (ShapeUtil::ElementsIn(shape) > kShapeThreshold) {
VLOG(2) << "Profiltable check failed due to element size with "
"sliced_input_fusion="
<< sliced_input_fusion;
return false;
}
}
if (instr.opcode() == HloOpcode::kFusion &&
instr.fused_instruction_count() > kInstrCountThreshold) {
return false;
}
return true;
}
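// Returns true if every dense-array shape in `instr` (or, for a fusion, in its
// fused computation) has a monotonic dim0-major (row-major) layout.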
bool HasOnlyRowMajorLayout(const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kFusion) {
return LayoutUtil::IsMonotonicWithDim0Major(instr.shape().layout());
}
auto fused_instrs = instr.fused_instructions_computation()->instructions();
for (HloInstruction* i : fused_instrs) {
if (!LayoutUtil::IsDenseArray(i->shape())) {
continue;
}
if (!LayoutUtil::IsMonotonicWithDim0Major(i->shape().layout())) {
return false;
}
}
return true;
}
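// Returns true if any operand of `instr` is a parameter that is also consumed
// by another instruction in `fusion_instrs`.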
bool AnyOpndIsParamSharedAmongFusions(
const HloInstruction* instr,
const absl::flat_hash_set<HloInstruction*>& fusion_instrs) {
return absl::c_any_of(instr->operands(), [&](const HloInstruction* opnd) {
return opnd->opcode() == HloOpcode::kParameter &&
absl::c_any_of(opnd->users(), [&](const HloInstruction* user) {
return user != instr && fusion_instrs.contains(user);
});
});
}
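// Collects the fusible producers of `consumer`'s operands, drops candidates
// that could create cycles, are unlikely to be profitable, are not row-major,
// or share a parameter with another candidate, then stable-sorts the rest by
// output type, output count, instruction count, and element count so that
// compatible instructions become adjacent.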
void HorizontalLoopFusionImpl::FusionCandidates::Initialize(
HloInstruction* consumer) {
absl::flat_hash_set<HloInstruction*> fusible_candidates;
std::vector<HloInstruction*> ordered_fusible_candidates;
for (HloInstruction* opnd : consumer->operands()) {
HloInstruction* predecessor = opnd->LatestNonGteAncestor();
if (IsFusibleCandidate(*predecessor)) {
if (fusible_candidates.insert(predecessor).second) {
ordered_fusible_candidates.push_back(predecessor);
}
}
}
for (HloInstruction* instr : ordered_fusible_candidates) {
if (!IsConsumerTheOnlyNonRootUser(*instr, *consumer)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects maybe illegal instr " << instr->ToString()
<< "; including it may create cycles in HLO.";
continue;
} else if (!IsProfitableFusionCandidate(*instr, sliced_input_fusion_)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects may-not-be profitable fusion instr"
<< instr->ToString();
continue;
} else if (!HasOnlyRowMajorLayout(*instr)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects non-row-major fusion instr " << instr->ToString();
continue;
} else if (AnyOpndIsParamSharedAmongFusions(instr, fusible_candidates)) {
VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_
<< " rejects the fusion instr because it shares parameter with"
<< " other fusion candidates, instr: " << instr->ToString();
continue;
} else {
VLOG(2) << "Find a fusion candidate " << instr->ToString();
fusible_instrs_.push_back(instr);
}
}
std::stable_sort(
fusible_instrs_.begin(), fusible_instrs_.end(),
[&](const HloInstruction* a, const HloInstruction* b) {
if (GetUniqueOutputTypeOfFusible(*a) !=
GetUniqueOutputTypeOfFusible(*b)) {
return GetUniqueOutputTypeOfFusible(*a) <
GetUniqueOutputTypeOfFusible(*b);
} else if (GetOutputSizeOfFusible(*a) != GetOutputSizeOfFusible(*b)) {
return GetOutputSizeOfFusible(*a) < GetOutputSizeOfFusible(*b);
} else if (GetInstrCountOfFusible(*a) != GetInstrCountOfFusible(*b)) {
return GetInstrCountOfFusible(*a) < GetInstrCountOfFusible(*b);
} else {
return ShapeUtil::ElementsIn(GetOutputsOfFusible(*a)[0]->shape()) <
ShapeUtil::ElementsIn(GetOutputsOfFusible(*b)[0]->shape());
}
});
}
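// Returns the next contiguous span of sorted candidates that agree on output
// type, output count, and instruction count (and, for loop fusion, output
// shape), bounded by a maximum batch size and an estimated limit on the CUDA
// kernel parameter size.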
absl::Span<HloInstruction*>
HorizontalLoopFusionImpl::FusionCandidates::GetNextSpanOfFusions() {
if (pos_ >= fusible_instrs_.size()) {
return absl::Span<HloInstruction*>();
}
const auto kMaxFusionBatchSize = [&]() -> int64_t {
if (sliced_input_fusion_) {
return 32;
} else {
if (fusible_instrs_[pos_]->opcode() == HloOpcode::kFusion) {
return 32;
} else {
return 64;
}
}
}();
size_t left = pos_;
size_t right = pos_ + 1;
size_t first_output_size = GetOutputSizeOfFusible(*fusible_instrs_[left]);
PrimitiveType first_output_type =
GetUniqueOutputTypeOfFusible(*fusible_instrs_[left]);
constexpr int64_t kMaxCudaParamSize = 4000;
size_t accum_io_size = 0;
size_t accum_num_outputs = 0;
for (; right < fusible_instrs_.size(); ++right) {
PrimitiveType cur_output_type =
GetUniqueOutputTypeOfFusible(*fusible_instrs_[right]);
if (first_output_type != cur_output_type) {
break;
}
if (first_output_size != GetOutputSizeOfFusible(*fusible_instrs_[right])) {
break;
}
if (GetInstrCountOfFusible(*fusible_instrs_[left]) !=
GetInstrCountOfFusible(*fusible_instrs_[right])) {
break;
}
if (!sliced_input_fusion_ &&
!ShapeUtil::EqualIgnoringElementType(
GetOutputsOfFusible(*fusible_instrs_[left])[0]->shape(),
GetOutputsOfFusible(*fusible_instrs_[right])[0]->shape())) {
break;
}
size_t num_outputs = GetOutputSizeOfFusible(*fusible_instrs_[right]);
accum_num_outputs += num_outputs;
if (accum_num_outputs >= kMaxFusionBatchSize) {
break;
}
accum_io_size += fusible_instrs_.at(right)->operand_count() + num_outputs;
if (accum_io_size * 8 >= kMaxCudaParamSize) {
break;
}
}
VLOG(2) << "horizontal fuse get instruction span with " << (right - left)
<< " instructions for sliced_input_fusion=" << sliced_input_fusion_
<< " fusion";
pos_ = right;
return absl::MakeSpan(fusible_instrs_).subspan(left, right - left);
}
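// Horizontally fuses spans of `consumer`'s fusible operands. Instructions that
// are not yet fusions are first wrapped into loop fusions so that Fuse() only
// has to handle fusion instructions. Returns true if anything changed.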
absl::StatusOr<bool> HorizontalLoopFusionImpl::FuseConsumerOperands(
HloInstruction* consumer, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates) {
bool changed = false;
FusionCandidates loop_fusion_candidates(consumer, sliced_input_fusion);
while (true) {
auto fusibles = loop_fusion_candidates.GetNextSpanOfFusions();
if (fusibles.empty()) {
break;
} else if (fusibles.size() == 1) {
continue;
}
changed = true;
std::vector<HloInstruction*> fusion_instrs;
for (HloInstruction* instr : fusibles) {
if (instr->opcode() == HloOpcode::kFusion) {
fusion_instrs.push_back(instr);
} else {
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion_instr,
MakeFusionInstruction(instr, HloInstruction::FusionKind::kLoop));
fusion_instrs.push_back(fusion_instr);
}
}
TF_RETURN_IF_ERROR(Fuse(absl::MakeSpan(fusion_instrs), sliced_input_fusion,
to_fuse_candidates));
}
return changed;
}
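// Builds the horizontally fused computation: creates one parameter per bound
// operand, clones every fused instruction into the new computation, and then
// either concatenates the outputs and slices them back apart (sliced-input
// fusion) or collects them into a tuple (loop fusion).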
absl::Status HorizontalLoopFusionImpl::CreateFusedComputation(
absl::Span<HloInstruction*> fused_fusion_instrs,
std::unique_ptr<HloComputation>* uniq_computation,
std::vector<HloInstruction*>* bound_operands, bool sliced_input_fusion) {
HloComputation::Builder b(prefix_ + "horizontally_fused_computation");
size_t fused_comp_param_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto old_params = fused_fusion_instrs[i]->fused_parameters();
for (size_t j = 0; j < old_params.size(); ++j) {
HloInstruction* bound_opnd = fused_fusion_instrs[i]->mutable_operand(j);
b.AddInstruction(HloInstruction::CreateParameter(
fused_comp_param_id++, bound_opnd->shape(),
absl::StrCat("param_", i, "_", j)));
bound_operands->push_back(bound_opnd);
}
}
HloInstruction* dummy_root = b.AddInstruction(
HloInstruction::CreateTuple(std::vector<HloInstruction*>{}));
*uniq_computation = b.Build(dummy_root);
HloComputation* comp = uniq_computation->get();
absl::flat_hash_map<const HloInstruction*, HloInstruction*> clone_map;
size_t new_param_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto old_params = fused_fusion_instrs[i]->fused_parameters();
for (size_t j = 0; j < old_params.size(); ++j) {
HloInstruction* old_param = old_params[j];
HloInstruction* new_param = comp->parameter_instruction(new_param_id++);
clone_map.insert({old_param, new_param});
}
}
const OpMetadata* metadata = nullptr;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
auto def_to_use_order = fused_fusion_instrs[i]
->fused_instructions_computation()
->MakeInstructionPostOrder();
for (HloInstruction* old_instr : def_to_use_order) {
if (old_instr->opcode() == HloOpcode::kParameter ||
(sliced_input_fusion && old_instr->opcode() == HloOpcode::kTuple &&
old_instr == fused_fusion_instrs[i]->fused_expression_root())) {
continue;
}
std::vector<HloInstruction*> new_opnds;
const auto& old_opnds = old_instr->operands();
new_opnds.reserve(old_opnds.size());
for (HloInstruction* old_opnd : old_opnds) {
CHECK(clone_map.find(old_opnd) != clone_map.end());
new_opnds.push_back(clone_map[old_opnd]);
}
HloInstruction* new_instr = comp->AddInstruction(
old_instr->CloneWithNewOperands(old_instr->shape(), new_opnds));
clone_map.insert({old_instr, new_instr});
metadata = &old_instr->metadata();
}
}
size_t fused_instr_output_size =
GetOutputSizeOfFusible(*fused_fusion_instrs[0]);
if (sliced_input_fusion) {
std::vector<HloInstruction*> concated_outputs;
for (size_t i = 0; i < fused_instr_output_size; ++i) {
std::vector<HloInstruction*> instr_outputs(fused_fusion_instrs.size());
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
HloInstruction* new_output = clone_map[old_output];
if (new_output->shape().dimensions_size() == 1) {
instr_outputs[j] = new_output;
} else {
Shape new_shape = ShapeUtil::MakeShapeWithDenseLayout(
new_output->shape().element_type(),
{ShapeUtil::ElementsIn(new_output->shape())},
std::vector<int64_t>(1, 0));
TF_ASSIGN_OR_RETURN(instr_outputs[j],
MakeReshapeHlo(new_shape, new_output));
}
}
TF_ASSIGN_OR_RETURN(HloInstruction * concated_output,
MakeConcatHlo(instr_outputs, 0));
concated_outputs.push_back(concated_output);
}
std::vector<HloInstruction*> output_slices(concated_outputs.size() *
fused_fusion_instrs.size());
for (size_t i = 0; i < concated_outputs.size(); ++i) {
HloInstruction* concated_output = concated_outputs[i];
int64_t slice_start = 0;
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
Shape shape = old_output->shape();
int64_t slice_limit = slice_start + ShapeUtil::ElementsIn(shape);
TF_ASSIGN_OR_RETURN(
output_slices[concated_outputs.size() * j + i],
MakeSliceHlo(concated_output, {slice_start}, {slice_limit},
{1}));
slice_start = slice_limit;
}
}
HloInstruction* tuple = comp->AddInstruction(
HloInstruction::CreateTuple(output_slices), metadata);
comp->set_root_instruction(tuple, true);
TF_RETURN_IF_ERROR(comp->RemoveInstruction(dummy_root));
} else {
std::vector<HloInstruction*> tuple_operands(fused_instr_output_size *
fused_fusion_instrs.size());
for (size_t i = 0; i < fused_instr_output_size; ++i) {
for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) {
const HloInstruction* old_output =
GetOutputsOfFusible(*fused_fusion_instrs[j])[i];
HloInstruction* new_output = clone_map[old_output];
tuple_operands[fused_instr_output_size * j + i] = new_output;
}
}
HloInstruction* tuple =
comp->AddInstruction(HloInstruction::CreateTuple(tuple_operands));
comp->set_root_instruction(tuple, true);
TF_RETURN_IF_ERROR(comp->RemoveInstruction(dummy_root));
}
return absl::OkStatus();
}
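// Replaces `fused_fusion_instrs` with a single horizontal fusion instruction.
// Uses of each old fusion are rewired through get-tuple-elements (bitcast back
// to the original shape where needed), and the dead embedded computations are
// removed.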
absl::Status HorizontalLoopFusionImpl::Fuse(
absl::Span<HloInstruction*> fused_fusion_instrs, bool sliced_input_fusion,
std::vector<HloInstruction*>& to_fuse_candidates) {
std::unique_ptr<HloComputation> uniq_computation;
std::vector<HloInstruction*> bound_operands;
TF_RETURN_IF_ERROR(CreateFusedComputation(fused_fusion_instrs,
&uniq_computation, &bound_operands,
sliced_input_fusion));
HloComputation* fused_comp = computation_->parent()->AddEmbeddedComputation(
std::move(uniq_computation));
HloInstruction* hori_fusion_instr = computation_->AddInstruction(
HloInstruction::CreateFusion(fused_comp->root_instruction()->shape(),
sliced_input_fusion
? HloInstruction::FusionKind::kInput
: HloInstruction::FusionKind::kLoop,
bound_operands, fused_comp, prefix_),
&fused_comp->root_instruction()->metadata());
fused_comp->SetFusionInstruction(hori_fusion_instr);
to_fuse_candidates.push_back(hori_fusion_instr);
size_t total_output_id = 0;
for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) {
std::vector<HloInstruction*> bitcasts_or_gte;
HloInstruction* fused_instr = fused_fusion_instrs[i];
size_t num_outputs = GetOutputSizeOfFusible(*fused_instr);
for (size_t j = 0; j < num_outputs; ++j) {
const HloInstruction* output = GetOutputsOfFusible(*fused_instr)[j];
TF_ASSIGN_OR_RETURN(
HloInstruction * gep,
MakeGetTupleElementHlo(hori_fusion_instr, total_output_id++));
if (output->shape().dimensions_size() == 1) {
bitcasts_or_gte.push_back(gep);
} else {
bitcasts_or_gte.push_back(computation_->AddInstruction(
HloInstruction::CreateBitcast(output->shape(), gep)));
}
}
HloInstruction* bitcast_or_tuple =
(bitcasts_or_gte.size() == 1)
? bitcasts_or_gte.at(0)
: computation_->AddInstruction(
HloInstruction::CreateTuple(bitcasts_or_gte));
HloComputation* old_computation =
fused_instr->fused_instructions_computation();
HloModule* module = old_computation->parent();
TF_RETURN_IF_ERROR(
computation_->ReplaceInstruction(fused_instr, bitcast_or_tuple));
TF_RETURN_IF_ERROR(module->RemoveEmbeddedComputation(old_computation));
}
TF_RETURN_IF_ERROR(Cast<HloFusionInstruction>(hori_fusion_instr)
->DeduplicateFusionOperands());
VLOG(1) << "Fused " << fused_fusion_instrs.size()
<< " instructions into: " << hori_fusion_instr->ToString();
return absl::OkStatus();
}
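// Processes instructions starting from the root, attempting loop fusion and
// then sliced-input fusion on each consumer's operands. Newly created
// horizontal fusions are pushed onto the worklist so they can seed further
// fusion.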
absl::StatusOr<bool> HorizontalLoopFusionImpl::Run() {
bool changed = false;
XLA_VLOG_LINES(3, computation_->ToString());
std::vector<HloInstruction*> to_fuse_candidates =
computation_->MakeInstructionPostOrder();
while (!to_fuse_candidates.empty()) {
HloInstruction* consumer = to_fuse_candidates.back();
to_fuse_candidates.pop_back();
if (consumer->IsDead()) {
continue;
}
TF_ASSIGN_OR_RETURN(
bool loop_fusion_changed,
FuseConsumerOperands(consumer, false, to_fuse_candidates));
TF_ASSIGN_OR_RETURN(
bool sliced_input_fusion_changed,
FuseConsumerOperands(consumer, true, to_fuse_candidates));
changed = changed || loop_fusion_changed || sliced_input_fusion_changed;
}
return changed;
}
}
absl::StatusOr<bool> HorizontalLoopFusion::RunOnComputation(
HloComputation* computation) {
HorizontalLoopFusionImpl horizontal_fusion_impl(computation, prefix_);
return horizontal_fusion_impl.Run();
}
absl::StatusOr<bool> HorizontalLoopFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Run horizontal fusion.";
TF_ASSIGN_OR_RETURN(bool changed,
RunOnComputation(module->entry_computation()));
if (changed) {
TF_ASSIGN_OR_RETURN(
[[maybe_unused]] bool unused,
SubByteNormalization{SubByteNormalization::SET_ELEMENT_SIZE}.Run(
module));
}
return changed;
}
}
} | #include "xla/service/gpu/transforms/horizontal_loop_fusion.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/transforms/instruction_fusion.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class HorizontalLoopFusionTest : public HloTestBase {
public:
static bool IsFusion(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kFusion;
}
};
TEST_F(HorizontalLoopFusionTest, BasicTest) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
ROOT mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
fusion.1 = f16[1024]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[123]{0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[1024]{0}, f16[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(m::Fusion()))));
ASSERT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Slice(m::Concatenate()),
m::Slice(m::Concatenate()))));
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForCycle) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForCycle
fused_computation.1 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT mul.1 = f16[123]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
fusion.1 = f16[123]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
add.2 = f16[123]{0} add(fusion.1, arg.4)
fusion.2 = f16[123]{0}
fusion(add.2, arg.3), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[123]{0}, f16[123]{0}, f16[123]{0})
tuple(fusion.1, fusion.2, add.2)
}
)")
.value();
EXPECT_FALSE(HorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForIncompatibleTypes) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForIncompatibleTypes
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
ROOT mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = s32[123]{0} parameter(0)
arg.2 = s32[123]{0} parameter(1)
ROOT add.1 = s32[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = s32[123]{0} parameter(2)
arg.4 = s32[123]{0} parameter(3)
fusion.1 = f16[1024]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = s32[123]{0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[1024]{0}, s32[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(HorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, FusingIntoKLoopAndKInputTogether) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule FusingIntoKLoopAndKInputTogether
fused_computation.1 {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[129,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[129,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.3 {
arg.1 = f16[130, 2048]{1, 0} parameter(0)
arg.2 = f16[130, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[130,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.4 {
arg.1 = f16[130, 2048]{1, 0} parameter(0)
arg.2 = f16[130, 2048]{1, 0} parameter(1)
ROOT mul.1 = f16[130,2048]{1, 0} multiply(arg.1, arg.2)
}
fused_computation.5 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
fused_computation.6 {
arg.1 = f16[128]{0} parameter(0)
arg.2 = f16[128]{0} parameter(1)
ROOT add.1 = f16[128]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[129, 2048]{1, 0} parameter(0)
arg.2 = f16[129, 2048]{1, 0} parameter(1)
arg.3 = f16[129, 2048]{1, 0} parameter(2)
arg.4 = f16[129, 2048]{1, 0} parameter(3)
arg.5 = f16[130, 2048]{1, 0} parameter(4)
arg.6 = f16[130, 2048]{1, 0} parameter(5)
arg.7 = f16[130, 2048]{1, 0} parameter(6)
arg.8 = f16[130, 2048]{1, 0} parameter(7)
arg.9 = f16[123]{0} parameter(8)
arg.10 = f16[123]{0} parameter(9)
arg.11 = f16[128]{0} parameter(10)
arg.12 = f16[128]{0} parameter(11)
fusion.1 = f16[129,2048]{1, 0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[129,2048]{1, 0}
fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2
fusion.3 = f16[130,2048]{1, 0}
fusion(arg.5, arg.6), kind=kLoop, calls=fused_computation.3
fusion.4 = f16[130,2048]{1, 0}
fusion(arg.7, arg.8), kind=kLoop, calls=fused_computation.4
fusion.5 = f16[123]{0}
fusion(arg.9, arg.10), kind=kLoop, calls=fused_computation.5
fusion.6 = f16[128]{0}
fusion(arg.11, arg.12), kind=kLoop, calls=fused_computation.6
ROOT tuple.1 = (f16[129,2048]{1, 0}, f16[129,2048]{1, 0},
f16[130,2048]{1, 0}, f16[130,2048]{1, 0},
f16[123]{0}, f16[128]{0})
tuple(fusion.1, fusion.2, fusion.3, fusion.4, fusion.5, fusion.6)
}
)")
.value();
EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value());
int input_fusion_count = 0;
int loop_fusion_count = 0;
for (auto inst : module->entry_computation()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kFusion) {
input_fusion_count +=
(inst->fusion_kind() == HloInstruction::FusionKind::kInput) ? 1 : 0;
loop_fusion_count +=
(inst->fusion_kind() == HloInstruction::FusionKind::kLoop) ? 1 : 0;
}
}
EXPECT_EQ(input_fusion_count, 1);
EXPECT_EQ(loop_fusion_count, 2);
}
TEST_F(HorizontalLoopFusionTest, HorizontalLoopFusionAfterVerticalFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule MergeSharedFusionInstruction
ENTRY MergeSharedFusionInstruction.Computation0 {
param.1.1 = f32[4,1024]{1,0} parameter(0)
param.1.2 = f32[4,1024]{1,0} parameter(1)
param.1.3 = f32[4,1024]{1,0} parameter(2)
param.2.1 = f32[321,5]{1,0} parameter(3)
param.2.2 = f32[321,5]{1,0} parameter(4)
param.2.3 = f32[321,5]{1,0} parameter(5)
const.1 = f32[] constant(3)
const.2 = f32[] constant(3)
broadcast.1 = f32[4,1024]{1,0} broadcast(const.1), dimensions={}
broadcast.2 = f32[321,5]{1,0} broadcast(const.2), dimensions={}
mul.1.1 = f32[4,1024]{1,0} multiply(param.1.1, param.1.2)
mul.1.2 = f32[4,1024]{1,0} multiply(param.1.3, broadcast.1)
add.1 = f32[4,1024]{1,0} add(mul.1.1, mul.1.2)
mul.2.1 = f32[321,5]{1,0} multiply(param.2.1, param.2.2)
mul.2.2 = f32[321,5]{1,0} multiply(param.2.3, broadcast.2)
add.2 = f32[321,5]{1,0} add(mul.2.1, mul.2.2)
ROOT tuple = (f32[4,1024]{1,0}, f32[321,5]{1,0}) tuple(add.1, add.2)
})")
.value();
HloPassPipeline fusion("fusion");
const se::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
fusion.AddPass<xla::gpu::GpuInstructionFusion>(false,
device_info);
fusion.AddPass<xla::gpu::GpuInstructionFusion>(true,
device_info);
EXPECT_TRUE(fusion.Run(module.get()).value());
EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_instr = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(
m::Bitcast(m::GetTupleElement(m::Fusion(&fusion_instr))),
m::Bitcast(m::GetTupleElement(m::Fusion())))));
ASSERT_TRUE(fusion_instr->IsMultiOutputFusion());
EXPECT_THAT(fusion_instr->fused_expression_root(),
GmockMatch(m::Tuple(
m::Slice(m::Concatenate(m::Reshape(), m::Reshape())),
m::Slice(m::Concatenate(m::Reshape(), m::Reshape())))));
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, GradientDescentOptimizerLike) {
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> var_outs;
for (int64_t i = 0; i < 128; ++i) {
Shape shape = ShapeUtil::MakeShape(F32, {i + 1, 1024});
HloInstruction* param_var_in = builder.AddInstruction(
HloInstruction::CreateParameter(i * 3 + 0, shape, "var.in"));
HloInstruction* param_alpha =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 3 + 1, ShapeUtil::MakeShape(F32, {}), "alpha"));
HloInstruction* param_delta = builder.AddInstruction(
HloInstruction::CreateParameter(i * 3 + 2, shape, "delta"));
HloInstruction* alpha_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, param_alpha, {}));
HloInstruction* alpha_delta =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, alpha_broadcasted, param_delta));
HloInstruction* var_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, param_var_in, alpha_delta));
var_outs.push_back(var_out);
}
builder.AddInstruction(HloInstruction::CreateTuple(var_outs));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, FusingDifferentOutputs) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule HeterogeneousMultiOutputFusions
fused_computation.1 {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[1024]{0} parameter(2)
arg.4 = f16[1024]{0} parameter(3)
mul.1 = f16[1024]{0} multiply(arg.1, arg.2)
mul.2 = f16[1024]{0} multiply(arg.3, arg.4)
add.1 = f16[1024]{0} add(mul.1, mul.2)
ROOT tuple.1 = (f16[1024]{0}, f16[1024]{0}) tuple(add.1, mul.1)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
arg.4 = f16[123]{0} parameter(3)
add.1 = f16[123]{0} add(arg.1, arg.2)
add.2 = f16[123]{0} add(arg.3, arg.4)
mul.1 = f16[123]{0} multiply(add.1, add.2)
ROOT tuple.1 = (f16[123]{0}, f16[123]{0}) tuple(mul.1, add.1)
}
ENTRY entry_computation {
arg.1 = f16[1024]{0} parameter(0)
arg.2 = f16[1024]{0} parameter(1)
arg.3 = f16[1024]{0} parameter(2)
arg.4 = f16[1024]{0} parameter(3)
arg.5 = f16[123]{0} parameter(4)
arg.6 = f16[123]{0} parameter(5)
arg.7 = f16[123]{0} parameter(6)
arg.8 = f16[123]{0} parameter(7)
fusion.1 = (f16[1024]{0}, f16[1024]{0})
fusion(arg.1, arg.2, arg.3, arg.4),
kind=kLoop, calls=fused_computation.1
fusion.2 = (f16[123]{0}, f16[123]{0})
fusion(arg.5, arg.6, arg.7, arg.8),
kind=kLoop, calls=fused_computation.2
gte.1 = f16[1024]{0} get-tuple-element(fusion.1), index=0
gte.2 = f16[1024]{0} get-tuple-element(fusion.1), index=1
gte.3 = f16[123]{0} get-tuple-element(fusion.2), index=0
gte.4 = f16[123]{0} get-tuple-element(fusion.2), index=1
ROOT tuple.1 = (f16[1024]{0}, f16[1024]{0}, f16[123]{0}, f16[123]{0})
tuple(gte.1, gte.2, gte.3, gte.4)
}
)")
.value();
EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, RMSPropLike) {
HloComputation::Builder builder(TestName());
std::vector<HloInstruction*> all_outputs;
for (int64_t i = 0; i < 48; ++i) {
Shape shape = ShapeUtil::MakeShape(F32, {2, 1024 + i});
HloInstruction* grad = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 0, shape, "grad"));
HloInstruction* ms = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 1, shape, "ms"));
HloInstruction* rho =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 2, ShapeUtil::MakeShape(F32, {}), "rho"));
HloInstruction* one_minus_rho =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 3, ShapeUtil::MakeShape(F32, {}), "one_minus_rho"));
HloInstruction* rho_broadcasted =
builder.AddInstruction(HloInstruction::CreateBroadcast(shape, rho, {}));
HloInstruction* one_minus_rho_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, one_minus_rho, {}));
HloInstruction* grad_squared = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, grad, grad));
HloInstruction* ms_1st_term = builder.AddInstruction(
HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, grad_squared,
one_minus_rho_broadcasted));
HloInstruction* ms_2nd_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, ms, rho_broadcasted));
HloInstruction* ms_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, ms_1st_term, ms_2nd_term));
HloInstruction* momentum = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 4, shape, "momentum"));
HloInstruction* mom = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 5, shape, "mom"));
HloInstruction* lr = builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 6, ShapeUtil::MakeShape(F32, {}), "lr"));
HloInstruction* epsilon =
builder.AddInstruction(HloInstruction::CreateParameter(
i * 9 + 7, ShapeUtil::MakeShape(F32, {}), "epsilon"));
HloInstruction* lr_broadcasted =
builder.AddInstruction(HloInstruction::CreateBroadcast(shape, lr, {}));
HloInstruction* epsilon_broadcasted = builder.AddInstruction(
HloInstruction::CreateBroadcast(shape, epsilon, {}));
HloInstruction* mom_1st_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, momentum, mom));
HloInstruction* ms_eps =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, ms_out, epsilon_broadcasted));
HloInstruction* ms_eps_rsq = builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kRsqrt, ms_eps));
HloInstruction* grad_ms_eps_rsq =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, grad, ms_eps_rsq));
HloInstruction* mom_2nd_term =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, lr_broadcasted, grad_ms_eps_rsq));
HloInstruction* mom_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, mom_1st_term, mom_2nd_term));
HloInstruction* var = builder.AddInstruction(
HloInstruction::CreateParameter(i * 9 + 8, shape, "var"));
HloInstruction* var_out =
builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, var, mom_out));
all_outputs.push_back(ms_out);
all_outputs.push_back(mom_out);
all_outputs.push_back(var_out);
}
builder.AddInstruction(HloInstruction::CreateTuple(all_outputs));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1.0e-5, 1.0e-5}));
}
TEST_F(HorizontalLoopFusionTest, DynamicUpdateSlice) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NegativeTestForDynamicUpdateSlice
fusion.1 {
p.0 = f16[5,9,10]{2,1,0} parameter(0)
p.1 = s32[] parameter(1)
p.2 = f16[1,9,10]{2,1,0} parameter(2)
c.0 = s32[] constant(0)
ROOT %dynamic-update-slice =
f16[5,9,10]{2,1,0} dynamic-update-slice(p.0, p.2, p.1, c.0, c.0)
}
fusion.2 {
p.0 = f16[5,9,10]{2,1,0} parameter(0)
p.1 = s32[] parameter(1)
p.2 = f16[1,9,10]{2,1,0} parameter(2)
c.0 = s32[] constant(0)
ROOT %dynamic-update-slice =
f16[5,9,10]{2,1,0} dynamic-update-slice(p.0, p.2, p.1, c.0, c.0)
}
ENTRY entry {
p.00 = f16[5,9,10]{2,1,0} parameter(0)
p.01 = f16[5,9,10]{2,1,0} parameter(1)
p.10 = s32[] parameter(2)
p.11 = s32[] parameter(3)
p.20 = f16[1,9,10]{2,1,0} parameter(4)
p.21 = f16[1,9,10]{2,1,0} parameter(5)
f1 = f16[5,9,10] fusion(p.00, p.10, p.20), kind=kLoop, calls=fusion.1
f2 = f16[5,9,10] fusion(p.01, p.11, p.21), kind=kLoop, calls=fusion.2
ROOT tuple = (f16[5,9,10],f16[5,9,10]) tuple(f1, f2)
})")
.value();
EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value());
TF_ASSERT_OK(verifier().Run(module.get()).status());
EXPECT_FALSE(HloDCE().Run(module.get()).value());
VLOG(2) << "Dump after horizontal fusion:";
VLOG(2) << module->ToString();
EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0}));
}
TEST_F(HorizontalLoopFusionTest, NegativeTestForSharedParam) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule BasicTest
fused_computation.1 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT mul.1 = f16[123]{0} multiply(arg.1, arg.2)
}
fused_computation.2 {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
ROOT add.1 = f16[123]{0} add(arg.1, arg.2)
}
ENTRY entry_computation {
arg.1 = f16[123]{0} parameter(0)
arg.2 = f16[123]{0} parameter(1)
arg.3 = f16[123]{0} parameter(2)
fusion.1 = f16[123]{0}
fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1
fusion.2 = f16[123]{0}
fusion(arg.3, arg.2), kind=kLoop, calls=fused_computation.2
ROOT tuple.1 = (f16[123]{0}, f16[123]{0})
tuple(fusion.1, fusion.2)
}
)")
.value();
EXPECT_FALSE(HorizontalLoopFusion().Run(module.get()).value());
}
TEST_F(HorizontalLoopFusionTest, IterativeHorizontalFusion) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule NonfusionInstrs
fused_computation.0 {
arg.0 = f16[] parameter(0)
arg.1 = f16[123]{0} parameter(1)
broadcast.0 = f16[123]{0} broadcast(arg.0), dimensions={}
ROOT mul.1 = f16[123]{0} multiply(broadcast.0, arg.1)
}
fused_computation.1 {
arg.0 = f16[] parameter(0)
arg.1 = f16[456]{0} parameter(1)
broadcast.0 = f16[456]{0} broadcast(arg.0), dimensions={}
ROOT add.1 = f16[456]{0} add(broadcast.0, arg.1)
}
ENTRY entry_computation {
arg.0 = f16[] parameter(0)
arg.1 = f16[] parameter(1)
arg.2 = f16[123]{0} parameter(2)
arg.3 = f16[456]{0} parameter(3)
sqrt.0 = f16[] sqrt(arg.0)
sqrt.1 = f16[] sqrt(arg.1)
fusion.0 = f16[123]{0}
fusion(sqrt.0, arg.2), kind=kLoop, calls=fused_computation.0
fusion.1 = f16[456]{0}
fusion(sqrt.1, arg.3), kind=kLoop, calls=fused_computation.1
ROOT tuple.1 = (f16[123]{0}, f16[456]{0}) tuple(fusion.0, fusion.1)
}
)")
.value();
HloPassFix<HloPassPipeline> iterative_h_fusion("iterative_h_fusion");
iterative_h_fusion.AddPass<HorizontalLoopFusion>();
iterative_h_fusion.AddPass<HloDCE>();
EXPECT_TRUE(iterative_h_fusion.Run(module.get()).value());
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(entry_root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(m::Fusion()))));
EXPECT_TRUE(fusion->IsMultiOutputFusion());
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
2);
}
TEST_F(HorizontalLoopFusionTest, TraversalOrder) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule cluster
%fused_computation (param_0: f32[256,256], param_1: f32[], param_2: f32[])
-> f32[256,256] {
%param_0 = f32[256,256]{1,0} parameter(0)
%param_1 = f32[] parameter(1)
%param_2 = f32[] parameter(2)
%multiply.0 = f32[] multiply(f32[] %param_1, f32[] %param_2)
%broadcast.0 = f32[256,256]{1,0} broadcast(f32[] %multiply.0), dimensions={}
ROOT %multiply.1 = f32[256,256]{1,0}
multiply(f32[256,256]{1,0} %param_0, f32[256,256]{1,0} %broadcast.0)
}
%fused_computation.1 (param_0: f32[256,256], param_1: f32[], param_2: f32[])
-> f32[256,256] {
%param_0 = f32[256,256]{1,0} parameter(0)
%param_1 = f32[] parameter(1)
%param_2 = f32[] parameter(2)
%multiply.0 = f32[] multiply(f32[] %param_1, f32[] %param_2)
%broadcast.0 = f32[256,256]{1,0} broadcast(f32[] %multiply.0), dimensions={}
ROOT %multiply.1 = f32[256,256]{1,0}
multiply(f32[256,256]{1,0} %param_0, f32[256,256]{1,0} %broadcast.0)
}
ENTRY %entry_computation (arg0: f32[256,256], arg1: f32[256,256], arg2: f32[],
arg3: f32[], arg4: f32[], arg5: f32[])
-> (f32[256,256], f32[256,256]) {
%arg0 = f32[256,256]{1,0} parameter(0), parameter_replication={false}
%arg1 = f32[256,256]{1,0} parameter(1), parameter_replication={false}
%arg2 = f32[] parameter(2), parameter_replication={false}
%arg3 = f32[] parameter(3), parameter_replication={false}
%arg4 = f32[] parameter(4), parameter_replication={false}
%arg5 = f32[] parameter(5), parameter_replication={false}
%sqrt = f32[] sqrt(f32[] %arg2)
%sqrt.1 = f32[] sqrt(f32[] %arg3)
%fusion = f32[256,256]{1,0}
fusion(f32[256,256]{1,0} %arg0, f32[] %sqrt, f32[] %sqrt.1),
kind=kLoop, calls=%fused_computation
%sqrt.2 = f32[] sqrt(f32[] %arg4)
%sqrt.3 = f32[] sqrt(f32[] %arg5)
%fusion.1 = f32[256,256]{1,0}
fusion(f32[256,256]{1,0} %arg1, f32[] %sqrt.2, f32[] %sqrt.3),
kind=kLoop, calls=%fused_computation.1
ROOT %tuple.163 = (f32[256,256]{1,0}, f32[256,256]{1,0})
tuple(f32[256,256]{1,0} %fusion.1, f32[256,256]{1,0} %fusion)
}
)")
.value();
HloPassFix<HloPassPipeline> iterative_h_fusion("iterative_h_fusion");
iterative_h_fusion.AddPass<HorizontalLoopFusion>();
EXPECT_TRUE(iterative_h_fusion.Run(module.get()).value());
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
2);
}
TEST_F(HorizontalLoopFusionTest, NoBufferAliasingOfDuplicateParameter) {
const char* hlo_text = R"(
HloModule m
branch_a {
p0 = s32[] parameter(0)
c0 = s32[] constant(1)
c1 = s32[] constant(2)
b0 = s32[4096] broadcast(c0), dimensions={}
b1 = s32[4096] broadcast(c1), dimensions={}
ROOT r = (s32[4096], s32[4096]) tuple(b0, b1)
}
branch_b {
p0 = s32[] parameter(0)
c0 = s32[] constant(1)
c1 = s32[] constant(2)
b0 = s32[4096] broadcast(c0), dimensions={}
b1 = s32[4096] broadcast(c1), dimensions={}
ROOT r = (s32[4096], s32[4096]) tuple(b0, b1)
}
ENTRY e {
p0 = s32[] parameter(0)
c0 = s32[] constant(0)
cond = (s32[4096], s32[4096]) conditional(p0, c0, c0), branch_computations={branch_a, branch_b}
p1 = s32[4096] parameter(1)
gte0 = s32[4096] get-tuple-element(cond), index=0
gte1 = s32[4096] get-tuple-element(cond), index=1
a0 = s32[4096] add(gte1, gte0)
m0 = s32[4096] multiply(gte1, gte0)
ROOT r = (s32[4096], s32[4096]) tuple(m0, a0)
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, std::nullopt));
}
TEST_F(HorizontalLoopFusionTest, CopyInsertionFusionControlFlow) {
const char* hlo_text = R"(
HloModule cluster
ENTRY main {
cst = f32[1]{0} constant({0})
cp1 = f32[1]{0} copy(cst)
cp2 = f32[1]{0} copy(cst)
cp3 = f32[1]{0} copy(cst)
cp4 = f32[1]{0} copy(cst), control-predecessors={cp1}
ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cp1, cp2, cp3, cp4)
}
)";
auto module = ParseAndReturnUnverifiedModule(hlo_text).value();
EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value());
VLOG(2) << module->ToString();
EXPECT_EQ(
absl::c_count_if(module->entry_computation()->instructions(), IsFusion),
1);
const HloInstruction* entry_root =
module->entry_computation()->root_instruction();
EXPECT_THAT(entry_root,
GmockMatch(m::Tuple(m::Copy(), m::GetTupleElement(m::Fusion()),
m::GetTupleElement(m::Fusion()), m::Copy())));
}
TEST_F(HorizontalLoopFusionTest, DoNotMergeVariadicReductions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.94 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(1)
tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE
tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1)
tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ
tmp_5 = s32[] parameter(2)
tmp_6 = s32[] parameter(3)
tmp_7 = s32[] minimum(tmp_5, tmp_6)
tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6)
tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8)
ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9)
}
minmax_func.1536 {
tmp_0 = f32[] parameter(0)
tmp_1 = f32[] parameter(2)
tmp_2 = s32[] parameter(1)
tmp_3 = s32[] parameter(3)
ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=fused_computation.94
}
fused_computation {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(-inf)
tmp_3 = s32[] constant(0)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
fused_computation2 {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1
tmp_2 = f32[] constant(inf)
tmp_3 = s32[] constant(1)
ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536
}
ENTRY e {
tmp_0 = f32[554112,10]{1,0} parameter(0)
tmp_1 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation
tmp_2 = s32[554112]{0} get-tuple-element(tmp_1), index=1
tmp_3 = f32[554112,10]{1,0} parameter(1)
tmp_4 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_3), kind=kLoop, calls=fused_computation2
tmp_5 = s32[554112]{0} get-tuple-element(tmp_4), index=1
ROOT tmp_6 = s32[554112]{0} add(tmp_2, tmp_5)
})")
.value();
EXPECT_FALSE(HorizontalLoopFusion().Run(module.get()).value());
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
e9360b62-e10d-4dc4-b8bd-f135d545de17 | cpp | tensorflow/tensorflow | tfprof_advisor | tensorflow/core/profiler/internal/advisor/tfprof_advisor.h | tensorflow/core/profiler/internal/advisor/tfprof_advisor_test.cc | #ifndef TENSORFLOW_CORE_PROFILER_INTERNAL_ADVISOR_TFPROF_ADVISOR_H_
#define TENSORFLOW_CORE_PROFILER_INTERNAL_ADVISOR_TFPROF_ADVISOR_H_
#include <vector>
#include "absl/strings/str_format.h"
#include "tensorflow/core/profiler/internal/advisor/accelerator_utilization_checker.h"
#include "tensorflow/core/profiler/internal/advisor/checker.h"
#include "tensorflow/core/profiler/internal/advisor/expensive_operation_checker.h"
#include "tensorflow/core/profiler/internal/advisor/internal_checker_runner.h"
#include "tensorflow/core/profiler/internal/advisor/operation_checker.h"
#include "tensorflow/core/profiler/tfprof_options.pb.h"
namespace tensorflow {
namespace tfprof {
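// Runs the configured advice checkers over profiling stats, prints their
// reports to stdout, and returns them as an AdviceProto.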
class Advisor {
public:
Advisor(const TFStats* stats) : stats_(stats) {}
static AdvisorOptionsProto DefaultOptions() {
AdvisorOptionsProto options;
std::vector<string> checkers(
kCheckers, kCheckers + sizeof(kCheckers) / sizeof(*kCheckers));
for (const string& checker : checkers) {
(*options.mutable_checkers())[checker];
}
return options;
}
AdviceProto Advise(const AdvisorOptionsProto& options) {
AdviceProto ret = RunInternalCheckers(options, stats_);
if (options.checkers().find(kCheckers[0]) != options.checkers().end()) {
AcceleratorUtilizationChecker au_checker;
(*ret.mutable_checkers())[kCheckers[0]].MergeFrom(
au_checker.Run(options.checkers().at(kCheckers[0]), stats_));
}
if (options.checkers().find(kCheckers[1]) != options.checkers().end()) {
OperationChecker op_checker;
(*ret.mutable_checkers())[kCheckers[1]].MergeFrom(
op_checker.Run(options.checkers().at(kCheckers[1]), stats_));
}
if (options.checkers().find(kCheckers[2]) != options.checkers().end()) {
ExpensiveOperationChecker expensive_op_checker;
(*ret.mutable_checkers())[kCheckers[2]].MergeFrom(
expensive_op_checker.Run(options.checkers().at(kCheckers[2]),
stats_));
}
for (const auto& checker : ret.checkers()) {
absl::FPrintF(stdout, "\n%s:\n", checker.first);
for (const string& r : checker.second.reports()) {
absl::FPrintF(stdout, "%s\n", r);
}
}
fflush(stdout);
return ret;
}
private:
const TFStats* stats_;
};
}
}
#endif | #include "tensorflow/core/profiler/internal/advisor/tfprof_advisor.h"
#include <map>
#include <memory>
#include <vector>
#include "absl/strings/match.h"
#include "tensorflow/core/framework/graph.pb.h"
#include "tensorflow/core/framework/node_def.pb.h"
#include "tensorflow/core/framework/step_stats.pb.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/profiler/internal/advisor/checker.h"
#include "tensorflow/core/profiler/internal/tfprof_node.h"
#include "tensorflow/core/profiler/internal/tfprof_stats.h"
#include "tensorflow/core/profiler/tfprof_options.pb.h"
#include "tensorflow/core/profiler/tfprof_output.pb.h"
namespace tensorflow {
namespace tfprof {
class TFProfAdvisorTest : public ::testing::Test {
protected:
TFProfAdvisorTest() {
stats_ = std::make_unique<TFStats>(std::make_unique<GraphDef>(), nullptr,
nullptr, nullptr);
stats_->AddNodeForTest(
0, CreateNode("n1", "Conv2D", {{"data_format", "NHWC"}}, 0, 10, 2));
stats_->AddNodeForTest(0, CreateNode("n2", "Conv2D", {}, 0, 20, 2));
stats_->BuildAllViews();
advisor_ = std::make_unique<Advisor>(stats_.get());
}
std::unique_ptr<TFGraphNode> CreateNode(const string& name,
const string& type,
std::map<string, string> attrs,
int64_t step, int64_t start_micros,
int64_t end_rel_micros) {
node_defs_.push_back(std::make_unique<NodeDef>());
NodeDef* def = node_defs_.back().get();
def->set_name(name);
def->set_op(type);
for (const auto& attr : attrs) {
(*def->mutable_attr())[attr.first].set_s(attr.second);
}
std::unique_ptr<TFGraphNode> node(new TFGraphNode(def, -1, nullptr));
NodeExecStats node_stat;
node_stat.set_all_start_micros(start_micros);
node_stat.set_op_end_rel_micros(end_rel_micros);
node->AddStepStat(step, "/job:localhost/replica:0/task:0/device:GPU:0",
node_stat);
node->AddStepStat(step,
"/job:localhost/replica:0/task:0/device:GPU:0:stream:all",
node_stat);
node->AddStepStat(step,
"/job:localhost/replica:0/task:0/device:GPU:0:stream:0",
node_stat);
return node;
}
std::unique_ptr<TFStats> stats_;
std::unique_ptr<Advisor> advisor_;
std::vector<std::unique_ptr<NodeDef>> node_defs_;
};
TEST_F(TFProfAdvisorTest, Basics) {
AdvisorOptionsProto options = Advisor::DefaultOptions();
AdviceProto advice = advisor_->Advise(options);
EXPECT_TRUE(advice.checkers().find(kCheckers[0]) != advice.checkers().end());
EXPECT_TRUE(advice.checkers().find(kCheckers[1]) != advice.checkers().end());
EXPECT_TRUE(advice.checkers().find(kCheckers[2]) != advice.checkers().end());
}
TEST_F(TFProfAdvisorTest, OperationChecker) {
AdvisorOptionsProto options;
(*options.mutable_checkers())[kCheckers[1]];
AdviceProto advice = advisor_->Advise(options);
EXPECT_EQ(advice.checkers().at(kCheckers[1]).reports_size(), 1);
EXPECT_TRUE(
absl::StrContains(advice.checkers().at(kCheckers[1]).reports(0), "NCHW"));
}
TEST_F(TFProfAdvisorTest, UtilizationChecker) {
AdvisorOptionsProto options;
(*options.mutable_checkers())[kCheckers[0]];
AdviceProto advice = advisor_->Advise(options);
EXPECT_EQ(advice.checkers().at(kCheckers[0]).reports_size(), 1);
EXPECT_TRUE(absl::StrContains(advice.checkers().at(kCheckers[0]).reports(0),
"low utilization"));
}
TEST_F(TFProfAdvisorTest, ExpensiveOperationChecker) {
AdvisorOptionsProto options;
(*options.mutable_checkers())[kCheckers[2]];
AdviceProto advice = advisor_->Advise(options);
EXPECT_TRUE(absl::StrContains(advice.checkers().at(kCheckers[2]).reports(0),
"top 1 operation type: Conv2D"));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/advisor/tfprof_advisor.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/profiler/internal/advisor/tfprof_advisor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
98b84864-4604-4f7e-9c73-6891124d7ebb | cpp | google/cel-cpp | time | internal/time.cc | internal/time_test.cc | #include "internal/time.h"
#include <cstdint>
#include <string>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
#include "internal/status_macros.h"
namespace cel::internal {
namespace {
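// Formats `timestamp` as UTC RFC 3339 with as many fractional-second digits
// as needed.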
std::string RawFormatTimestamp(absl::Time timestamp) {
return absl::FormatTime("%Y-%m-%d%ET%H:%M:%E*SZ", timestamp,
absl::UTCTimeZone());
}
}
absl::Status ValidateDuration(absl::Duration duration) {
if (duration < MinDuration()) {
return absl::InvalidArgumentError(
absl::StrCat("Duration \"", absl::FormatDuration(duration),
"\" below minimum allowed duration \"",
absl::FormatDuration(MinDuration()), "\""));
}
if (duration > MaxDuration()) {
return absl::InvalidArgumentError(
absl::StrCat("Duration \"", absl::FormatDuration(duration),
"\" above maximum allowed duration \"",
absl::FormatDuration(MaxDuration()), "\""));
}
return absl::OkStatus();
}
absl::StatusOr<absl::Duration> ParseDuration(absl::string_view input) {
absl::Duration duration;
if (!absl::ParseDuration(input, &duration)) {
return absl::InvalidArgumentError("Failed to parse duration from string");
}
return duration;
}
absl::StatusOr<std::string> FormatDuration(absl::Duration duration) {
CEL_RETURN_IF_ERROR(ValidateDuration(duration));
return absl::FormatDuration(duration);
}
std::string DebugStringDuration(absl::Duration duration) {
return absl::FormatDuration(duration);
}
absl::Status ValidateTimestamp(absl::Time timestamp) {
if (timestamp < MinTimestamp()) {
return absl::InvalidArgumentError(
absl::StrCat("Timestamp \"", RawFormatTimestamp(timestamp),
"\" below minimum allowed timestamp \"",
RawFormatTimestamp(MinTimestamp()), "\""));
}
if (timestamp > MaxTimestamp()) {
return absl::InvalidArgumentError(
absl::StrCat("Timestamp \"", RawFormatTimestamp(timestamp),
"\" above maximum allowed timestamp \"",
RawFormatTimestamp(MaxTimestamp()), "\""));
}
return absl::OkStatus();
}
absl::StatusOr<absl::Time> ParseTimestamp(absl::string_view input) {
absl::Time timestamp;
std::string err;
if (!absl::ParseTime(absl::RFC3339_full, input, absl::UTCTimeZone(),
&timestamp, &err)) {
return err.empty() ? absl::InvalidArgumentError(
"Failed to parse timestamp from string")
: absl::InvalidArgumentError(absl::StrCat(
"Failed to parse timestamp from string: ", err));
}
CEL_RETURN_IF_ERROR(ValidateTimestamp(timestamp));
return timestamp;
}
absl::StatusOr<std::string> FormatTimestamp(absl::Time timestamp) {
CEL_RETURN_IF_ERROR(ValidateTimestamp(timestamp));
return RawFormatTimestamp(timestamp);
}
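// Formats `nanos` at millisecond, microsecond, or nanosecond precision,
// whichever is shortest, zero-padded as proto3 JSON requires.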
std::string FormatNanos(int32_t nanos) {
constexpr int32_t kNanosPerMillisecond = 1000000;
constexpr int32_t kNanosPerMicrosecond = 1000;
if (nanos % kNanosPerMillisecond == 0) {
return absl::StrFormat("%03d", nanos / kNanosPerMillisecond);
} else if (nanos % kNanosPerMicrosecond == 0) {
return absl::StrFormat("%06d", nanos / kNanosPerMicrosecond);
}
return absl::StrFormat("%09d", nanos);
}
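// Encodes a duration in the proto3 JSON format, e.g. "1s" or "-1.010s".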
absl::StatusOr<std::string> EncodeDurationToJson(absl::Duration duration) {
CEL_RETURN_IF_ERROR(ValidateDuration(duration));
std::string result;
int64_t seconds = absl::IDivDuration(duration, absl::Seconds(1), &duration);
int64_t nanos = absl::IDivDuration(duration, absl::Nanoseconds(1), &duration);
if (seconds < 0 || nanos < 0) {
result = "-";
seconds = -seconds;
nanos = -nanos;
}
absl::StrAppend(&result, seconds);
if (nanos != 0) {
absl::StrAppend(&result, ".", FormatNanos(nanos));
}
absl::StrAppend(&result, "s");
return result;
}
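// Encodes a timestamp in the proto3 JSON format, e.g.
// "1970-01-01T00:00:00.010Z".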
absl::StatusOr<std::string> EncodeTimestampToJson(absl::Time timestamp) {
static constexpr absl::string_view kTimestampFormat = "%E4Y-%m-%dT%H:%M:%S";
CEL_RETURN_IF_ERROR(ValidateTimestamp(timestamp));
absl::Time unix_seconds =
absl::FromUnixSeconds(absl::ToUnixSeconds(timestamp));
int64_t n = (timestamp - unix_seconds) / absl::Nanoseconds(1);
std::string result =
absl::FormatTime(kTimestampFormat, unix_seconds, absl::UTCTimeZone());
if (n > 0) {
absl::StrAppend(&result, ".", FormatNanos(n));
}
absl::StrAppend(&result, "Z");
return result;
}
std::string DebugStringTimestamp(absl::Time timestamp) {
return RawFormatTimestamp(timestamp);
}
} | #include "internal/time.h"
#include <string>
#include "google/protobuf/util/time_util.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "internal/testing.h"
namespace cel::internal {
namespace {
using ::absl_testing::StatusIs;
TEST(MaxDuration, ProtoEquiv) {
EXPECT_EQ(MaxDuration(),
absl::Seconds(google::protobuf::util::TimeUtil::kDurationMaxSeconds) +
absl::Nanoseconds(999999999));
}
TEST(MinDuration, ProtoEquiv) {
EXPECT_EQ(MinDuration(),
absl::Seconds(google::protobuf::util::TimeUtil::kDurationMinSeconds) +
absl::Nanoseconds(-999999999));
}
TEST(MaxTimestamp, ProtoEquiv) {
EXPECT_EQ(MaxTimestamp(),
absl::UnixEpoch() +
absl::Seconds(google::protobuf::util::TimeUtil::kTimestampMaxSeconds) +
absl::Nanoseconds(999999999));
}
TEST(MinTimestamp, ProtoEquiv) {
EXPECT_EQ(MinTimestamp(),
absl::UnixEpoch() +
absl::Seconds(google::protobuf::util::TimeUtil::kTimestampMinSeconds));
}
TEST(ParseDuration, Conformance) {
absl::Duration parsed;
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseDuration("1s"));
EXPECT_EQ(parsed, absl::Seconds(1));
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseDuration("0.010s"));
EXPECT_EQ(parsed, absl::Milliseconds(10));
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseDuration("0.000010s"));
EXPECT_EQ(parsed, absl::Microseconds(10));
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseDuration("0.000000010s"));
EXPECT_EQ(parsed, absl::Nanoseconds(10));
EXPECT_THAT(internal::ParseDuration("abc"),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(internal::ParseDuration("1c"),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(FormatDuration, Conformance) {
std::string formatted;
ASSERT_OK_AND_ASSIGN(formatted, internal::FormatDuration(absl::Seconds(1)));
EXPECT_EQ(formatted, "1s");
ASSERT_OK_AND_ASSIGN(formatted,
internal::FormatDuration(absl::Milliseconds(10)));
EXPECT_EQ(formatted, "10ms");
ASSERT_OK_AND_ASSIGN(formatted,
internal::FormatDuration(absl::Microseconds(10)));
EXPECT_EQ(formatted, "10us");
ASSERT_OK_AND_ASSIGN(formatted,
internal::FormatDuration(absl::Nanoseconds(10)));
EXPECT_EQ(formatted, "10ns");
EXPECT_THAT(internal::FormatDuration(absl::InfiniteDuration()),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(internal::FormatDuration(-absl::InfiniteDuration()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(ParseTimestamp, Conformance) {
absl::Time parsed;
ASSERT_OK_AND_ASSIGN(parsed, internal::ParseTimestamp("1-01-01T00:00:00Z"));
EXPECT_EQ(parsed, MinTimestamp());
ASSERT_OK_AND_ASSIGN(
parsed, internal::ParseTimestamp("9999-12-31T23:59:59.999999999Z"));
EXPECT_EQ(parsed, MaxTimestamp());
ASSERT_OK_AND_ASSIGN(parsed,
internal::ParseTimestamp("1970-01-01T00:00:00Z"));
EXPECT_EQ(parsed, absl::UnixEpoch());
ASSERT_OK_AND_ASSIGN(parsed,
internal::ParseTimestamp("1970-01-01T00:00:00.010Z"));
EXPECT_EQ(parsed, absl::UnixEpoch() + absl::Milliseconds(10));
ASSERT_OK_AND_ASSIGN(parsed,
internal::ParseTimestamp("1970-01-01T00:00:00.000010Z"));
EXPECT_EQ(parsed, absl::UnixEpoch() + absl::Microseconds(10));
ASSERT_OK_AND_ASSIGN(
parsed, internal::ParseTimestamp("1970-01-01T00:00:00.000000010Z"));
EXPECT_EQ(parsed, absl::UnixEpoch() + absl::Nanoseconds(10));
EXPECT_THAT(internal::ParseTimestamp("abc"),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(internal::ParseTimestamp("10000-01-01T00:00:00Z"),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(FormatTimestamp, Conformance) {
std::string formatted;
ASSERT_OK_AND_ASSIGN(formatted, internal::FormatTimestamp(MinTimestamp()));
EXPECT_EQ(formatted, "1-01-01T00:00:00Z");
ASSERT_OK_AND_ASSIGN(formatted, internal::FormatTimestamp(MaxTimestamp()));
EXPECT_EQ(formatted, "9999-12-31T23:59:59.999999999Z");
ASSERT_OK_AND_ASSIGN(formatted, internal::FormatTimestamp(absl::UnixEpoch()));
EXPECT_EQ(formatted, "1970-01-01T00:00:00Z");
ASSERT_OK_AND_ASSIGN(
formatted,
internal::FormatTimestamp(absl::UnixEpoch() + absl::Milliseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.01Z");
ASSERT_OK_AND_ASSIGN(
formatted,
internal::FormatTimestamp(absl::UnixEpoch() + absl::Microseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.00001Z");
ASSERT_OK_AND_ASSIGN(
formatted,
internal::FormatTimestamp(absl::UnixEpoch() + absl::Nanoseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.00000001Z");
EXPECT_THAT(internal::FormatTimestamp(absl::InfiniteFuture()),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(internal::FormatTimestamp(absl::InfinitePast()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(EncodeDurationToJson, Conformance) {
std::string formatted;
ASSERT_OK_AND_ASSIGN(formatted, EncodeDurationToJson(absl::Seconds(1)));
EXPECT_EQ(formatted, "1s");
ASSERT_OK_AND_ASSIGN(formatted, EncodeDurationToJson(absl::Milliseconds(10)));
EXPECT_EQ(formatted, "0.010s");
ASSERT_OK_AND_ASSIGN(formatted, EncodeDurationToJson(absl::Microseconds(10)));
EXPECT_EQ(formatted, "0.000010s");
ASSERT_OK_AND_ASSIGN(formatted, EncodeDurationToJson(absl::Nanoseconds(10)));
EXPECT_EQ(formatted, "0.000000010s");
EXPECT_THAT(EncodeDurationToJson(absl::InfiniteDuration()),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(EncodeDurationToJson(-absl::InfiniteDuration()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
TEST(EncodeTimestampToJson, Conformance) {
std::string formatted;
ASSERT_OK_AND_ASSIGN(formatted, EncodeTimestampToJson(MinTimestamp()));
EXPECT_EQ(formatted, "0001-01-01T00:00:00Z");
ASSERT_OK_AND_ASSIGN(formatted, EncodeTimestampToJson(MaxTimestamp()));
EXPECT_EQ(formatted, "9999-12-31T23:59:59.999999999Z");
ASSERT_OK_AND_ASSIGN(formatted, EncodeTimestampToJson(absl::UnixEpoch()));
EXPECT_EQ(formatted, "1970-01-01T00:00:00Z");
ASSERT_OK_AND_ASSIGN(
formatted,
EncodeTimestampToJson(absl::UnixEpoch() + absl::Milliseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.010Z");
ASSERT_OK_AND_ASSIGN(
formatted,
EncodeTimestampToJson(absl::UnixEpoch() + absl::Microseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.000010Z");
ASSERT_OK_AND_ASSIGN(formatted, EncodeTimestampToJson(absl::UnixEpoch() +
absl::Nanoseconds(10)));
EXPECT_EQ(formatted, "1970-01-01T00:00:00.000000010Z");
EXPECT_THAT(EncodeTimestampToJson(absl::InfiniteFuture()),
StatusIs(absl::StatusCode::kInvalidArgument));
EXPECT_THAT(EncodeTimestampToJson(absl::InfinitePast()),
StatusIs(absl::StatusCode::kInvalidArgument));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/time.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/internal/time_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
255b3361-bf35-4cb8-8d6d-b19ad06cd67a | cpp | google/quiche | tun_device_controller | quiche/quic/qbone/bonnet/tun_device_controller.cc | quiche/quic/qbone/bonnet/tun_device_controller_test.cc | #include "quiche/quic/qbone/bonnet/tun_device_controller.h"
#include <linux/rtnetlink.h>
#include <utility>
#include <vector>
#include "absl/flags/flag.h"
#include "absl/time/clock.h"
#include "quiche/quic/platform/api/quic_logging.h"
#include "quiche/quic/qbone/qbone_constants.h"
#include "quiche/common/quiche_callbacks.h"
ABSL_FLAG(bool, qbone_tun_device_replace_default_routing_rules, true,
"If true, will define a rule that points packets sourced from the "
"qbone interface to the qbone table. This is unnecessary in "
"environments with no other ipv6 route.");
ABSL_RETIRED_FLAG(int, qbone_route_init_cwnd, 0,
"Deprecated. Code no longer modifies initcwnd.");
namespace quic {
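// Replaces every address currently assigned to the interface with the first
// address of the desired range, then notifies registered update callbacks.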
bool TunDeviceController::UpdateAddress(const IpRange& desired_range) {
if (!setup_tun_) {
return true;
}
NetlinkInterface::LinkInfo link_info{};
if (!netlink_->GetLinkInfo(ifname_, &link_info)) {
return false;
}
std::vector<NetlinkInterface::AddressInfo> addresses;
if (!netlink_->GetAddresses(link_info.index, 0, &addresses, nullptr)) {
return false;
}
QuicIpAddress desired_address = desired_range.FirstAddressInRange();
for (const auto& address : addresses) {
if (!netlink_->ChangeLocalAddress(
link_info.index, NetlinkInterface::Verb::kRemove,
address.interface_address, address.prefix_length, 0, 0, {})) {
return false;
}
}
bool address_updated = netlink_->ChangeLocalAddress(
link_info.index, NetlinkInterface::Verb::kAdd, desired_address,
desired_range.prefix_length(), IFA_F_PERMANENT | IFA_F_NODAD,
RT_SCOPE_LINK, {});
if (address_updated) {
current_address_ = desired_address;
for (const auto& cb : address_update_cbs_) {
cb(current_address_);
}
}
return address_updated;
}
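// Rebuilds the qbone routing table: removes routes previously installed on
// this interface, refreshes the source-based routing rule, and installs the
// desired routes plus the link-local terminator route.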
bool TunDeviceController::UpdateRoutes(
const IpRange& desired_range, const std::vector<IpRange>& desired_routes) {
if (!setup_tun_) {
return true;
}
NetlinkInterface::LinkInfo link_info{};
if (!netlink_->GetLinkInfo(ifname_, &link_info)) {
QUIC_LOG(ERROR) << "Could not get link info for interface <" << ifname_
<< ">";
return false;
}
std::vector<NetlinkInterface::RoutingRule> routing_rules;
if (!netlink_->GetRouteInfo(&routing_rules)) {
QUIC_LOG(ERROR) << "Unable to get route info";
return false;
}
for (const auto& rule : routing_rules) {
if (rule.out_interface == link_info.index &&
rule.table == QboneConstants::kQboneRouteTableId) {
if (!netlink_->ChangeRoute(NetlinkInterface::Verb::kRemove, rule.table,
rule.destination_subnet, rule.scope,
rule.preferred_source, rule.out_interface)) {
QUIC_LOG(ERROR) << "Unable to remove old route to <"
<< rule.destination_subnet.ToString() << ">";
return false;
}
}
}
if (!UpdateRules(desired_range)) {
return false;
}
QuicIpAddress desired_address = desired_range.FirstAddressInRange();
std::vector<IpRange> routes(desired_routes.begin(), desired_routes.end());
routes.emplace_back(*QboneConstants::TerminatorLocalAddressRange());
for (const auto& route : routes) {
if (!netlink_->ChangeRoute(NetlinkInterface::Verb::kReplace,
QboneConstants::kQboneRouteTableId, route,
RT_SCOPE_LINK, desired_address,
link_info.index)) {
QUIC_LOG(ERROR) << "Unable to add route <" << route.ToString() << ">";
return false;
}
}
return true;
}
bool TunDeviceController::UpdateRoutesWithRetries(
const IpRange& desired_range, const std::vector<IpRange>& desired_routes,
int retries) {
while (retries-- > 0) {
if (UpdateRoutes(desired_range, desired_routes)) {
return true;
}
absl::SleepFor(absl::Milliseconds(100));
}
return false;
}
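// Points packets sourced from the desired range at the qbone routing table,
// removing any stale rules that target that table first.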
bool TunDeviceController::UpdateRules(IpRange desired_range) {
if (!absl::GetFlag(FLAGS_qbone_tun_device_replace_default_routing_rules)) {
return true;
}
std::vector<NetlinkInterface::IpRule> ip_rules;
if (!netlink_->GetRuleInfo(&ip_rules)) {
QUIC_LOG(ERROR) << "Unable to get rule info";
return false;
}
for (const auto& rule : ip_rules) {
if (rule.table == QboneConstants::kQboneRouteTableId) {
if (!netlink_->ChangeRule(NetlinkInterface::Verb::kRemove, rule.table,
rule.source_range)) {
QUIC_LOG(ERROR) << "Unable to remove old rule for table <" << rule.table
<< "> from source <" << rule.source_range.ToString()
<< ">";
return false;
}
}
}
if (!netlink_->ChangeRule(NetlinkInterface::Verb::kAdd,
QboneConstants::kQboneRouteTableId,
desired_range)) {
QUIC_LOG(ERROR) << "Unable to add rule for <" << desired_range.ToString()
<< ">";
return false;
}
return true;
}
QuicIpAddress TunDeviceController::current_address() {
return current_address_;
}
void TunDeviceController::RegisterAddressUpdateCallback(
quiche::MultiUseCallback<void(QuicIpAddress)> cb) {
address_update_cbs_.push_back(std::move(cb));
}
} | #include "quiche/quic/qbone/bonnet/tun_device_controller.h"
#include <linux/if_addr.h>
#include <linux/rtnetlink.h>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/qbone/platform/mock_netlink.h"
#include "quiche/quic/qbone/qbone_constants.h"
ABSL_DECLARE_FLAG(bool, qbone_tun_device_replace_default_routing_rules);
ABSL_DECLARE_FLAG(int, qbone_route_init_cwnd);
namespace quic::test {
namespace {
using ::testing::Eq;
constexpr int kIfindex = 42;
constexpr char kIfname[] = "qbone0";
const IpRange kIpRange = []() {
IpRange range;
QCHECK(range.FromString("2604:31c0:2::/64"));
return range;
}();
constexpr char kOldAddress[] = "1.2.3.4";
constexpr int kOldPrefixLen = 24;
using ::testing::_;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrictMock;
MATCHER_P(IpRangeEq, range,
absl::StrCat("expected IpRange to equal ", range.ToString())) {
return arg == range;
}
class TunDeviceControllerTest : public QuicTest {
public:
TunDeviceControllerTest()
: controller_(kIfname, true, &netlink_),
link_local_range_(*QboneConstants::TerminatorLocalAddressRange()) {
controller_.RegisterAddressUpdateCallback(
[this](QuicIpAddress address) { notified_address_ = address; });
}
protected:
void ExpectLinkInfo(const std::string& interface_name, int ifindex) {
EXPECT_CALL(netlink_, GetLinkInfo(interface_name, _))
.WillOnce(Invoke([ifindex](absl::string_view ifname,
NetlinkInterface::LinkInfo* link_info) {
link_info->index = ifindex;
return true;
}));
}
MockNetlink netlink_;
TunDeviceController controller_;
QuicIpAddress notified_address_;
IpRange link_local_range_;
};
TEST_F(TunDeviceControllerTest, AddressAppliedWhenNoneExisted) {
ExpectLinkInfo(kIfname, kIfindex);
EXPECT_CALL(netlink_, GetAddresses(kIfindex, _, _, _)).WillOnce(Return(true));
EXPECT_CALL(netlink_,
ChangeLocalAddress(
kIfindex, NetlinkInterface::Verb::kAdd,
kIpRange.FirstAddressInRange(), kIpRange.prefix_length(),
IFA_F_PERMANENT | IFA_F_NODAD, RT_SCOPE_LINK, _))
.WillOnce(Return(true));
EXPECT_TRUE(controller_.UpdateAddress(kIpRange));
EXPECT_THAT(notified_address_, Eq(kIpRange.FirstAddressInRange()));
}
TEST_F(TunDeviceControllerTest, OldAddressesAreRemoved) {
ExpectLinkInfo(kIfname, kIfindex);
EXPECT_CALL(netlink_, GetAddresses(kIfindex, _, _, _))
.WillOnce(Invoke([](int interface_index, uint8_t unwanted_flags,
std::vector<NetlinkInterface::AddressInfo>* addresses,
int* num_ipv6_nodad_dadfailed_addresses) {
NetlinkInterface::AddressInfo info{};
info.interface_address.FromString(kOldAddress);
info.prefix_length = kOldPrefixLen;
addresses->emplace_back(info);
return true;
}));
QuicIpAddress old_address;
old_address.FromString(kOldAddress);
EXPECT_CALL(netlink_,
ChangeLocalAddress(kIfindex, NetlinkInterface::Verb::kRemove,
old_address, kOldPrefixLen, _, _, _))
.WillOnce(Return(true));
EXPECT_CALL(netlink_,
ChangeLocalAddress(
kIfindex, NetlinkInterface::Verb::kAdd,
kIpRange.FirstAddressInRange(), kIpRange.prefix_length(),
IFA_F_PERMANENT | IFA_F_NODAD, RT_SCOPE_LINK, _))
.WillOnce(Return(true));
EXPECT_TRUE(controller_.UpdateAddress(kIpRange));
EXPECT_THAT(notified_address_, Eq(kIpRange.FirstAddressInRange()));
}
TEST_F(TunDeviceControllerTest, UpdateRoutesRemovedOldRoutes) {
ExpectLinkInfo(kIfname, kIfindex);
const int num_matching_routes = 3;
EXPECT_CALL(netlink_, GetRouteInfo(_))
.WillOnce(
Invoke([](std::vector<NetlinkInterface::RoutingRule>* routing_rules) {
NetlinkInterface::RoutingRule non_matching_route{};
non_matching_route.table = QboneConstants::kQboneRouteTableId;
non_matching_route.out_interface = kIfindex + 1;
routing_rules->push_back(non_matching_route);
NetlinkInterface::RoutingRule matching_route{};
matching_route.table = QboneConstants::kQboneRouteTableId;
matching_route.out_interface = kIfindex;
for (int i = 0; i < num_matching_routes; i++) {
routing_rules->push_back(matching_route);
}
NetlinkInterface::RoutingRule non_matching_table{};
non_matching_table.table = QboneConstants::kQboneRouteTableId + 1;
non_matching_table.out_interface = kIfindex;
routing_rules->push_back(non_matching_table);
return true;
}));
EXPECT_CALL(netlink_, ChangeRoute(NetlinkInterface::Verb::kRemove,
QboneConstants::kQboneRouteTableId, _, _, _,
kIfindex))
.Times(num_matching_routes)
.WillRepeatedly(Return(true));
EXPECT_CALL(netlink_, GetRuleInfo(_)).WillOnce(Return(true));
EXPECT_CALL(netlink_, ChangeRule(NetlinkInterface::Verb::kAdd,
QboneConstants::kQboneRouteTableId,
IpRangeEq(kIpRange)))
.WillOnce(Return(true));
EXPECT_CALL(netlink_,
ChangeRoute(NetlinkInterface::Verb::kReplace,
QboneConstants::kQboneRouteTableId,
IpRangeEq(link_local_range_), _, _, kIfindex))
.WillOnce(Return(true));
EXPECT_TRUE(controller_.UpdateRoutes(kIpRange, {}));
}
TEST_F(TunDeviceControllerTest, UpdateRoutesAddsNewRoutes) {
ExpectLinkInfo(kIfname, kIfindex);
EXPECT_CALL(netlink_, GetRouteInfo(_)).WillOnce(Return(true));
EXPECT_CALL(netlink_, GetRuleInfo(_)).WillOnce(Return(true));
EXPECT_CALL(netlink_, ChangeRoute(NetlinkInterface::Verb::kReplace,
QboneConstants::kQboneRouteTableId,
IpRangeEq(kIpRange), _, _, kIfindex))
.Times(2)
.WillRepeatedly(Return(true))
.RetiresOnSaturation();
EXPECT_CALL(netlink_, ChangeRule(NetlinkInterface::Verb::kAdd,
QboneConstants::kQboneRouteTableId,
IpRangeEq(kIpRange)))
.WillOnce(Return(true));
EXPECT_CALL(netlink_,
ChangeRoute(NetlinkInterface::Verb::kReplace,
QboneConstants::kQboneRouteTableId,
IpRangeEq(link_local_range_), _, _, kIfindex))
.WillOnce(Return(true));
EXPECT_TRUE(controller_.UpdateRoutes(kIpRange, {kIpRange, kIpRange}));
}
TEST_F(TunDeviceControllerTest, EmptyUpdateRouteKeepsLinkLocalRoute) {
ExpectLinkInfo(kIfname, kIfindex);
EXPECT_CALL(netlink_, GetRouteInfo(_)).WillOnce(Return(true));
EXPECT_CALL(netlink_, GetRuleInfo(_)).WillOnce(Return(true));
EXPECT_CALL(netlink_, ChangeRule(NetlinkInterface::Verb::kAdd,
QboneConstants::kQboneRouteTableId,
IpRangeEq(kIpRange)))
.WillOnce(Return(true));
EXPECT_CALL(netlink_,
ChangeRoute(NetlinkInterface::Verb::kReplace,
QboneConstants::kQboneRouteTableId,
IpRangeEq(link_local_range_), _, _, kIfindex))
.WillOnce(Return(true));
EXPECT_TRUE(controller_.UpdateRoutes(kIpRange, {}));
}
TEST_F(TunDeviceControllerTest, DisablingRoutingRulesSkipsRuleCreation) {
absl::SetFlag(&FLAGS_qbone_tun_device_replace_default_routing_rules, false);
ExpectLinkInfo(kIfname, kIfindex);
EXPECT_CALL(netlink_, GetRouteInfo(_)).WillOnce(Return(true));
EXPECT_CALL(netlink_, ChangeRoute(NetlinkInterface::Verb::kReplace,
QboneConstants::kQboneRouteTableId,
IpRangeEq(kIpRange), _, _, kIfindex))
.Times(2)
.WillRepeatedly(Return(true))
.RetiresOnSaturation();
EXPECT_CALL(netlink_,
ChangeRoute(NetlinkInterface::Verb::kReplace,
QboneConstants::kQboneRouteTableId,
IpRangeEq(link_local_range_), _, _, kIfindex))
.WillOnce(Return(true));
EXPECT_TRUE(controller_.UpdateRoutes(kIpRange, {kIpRange, kIpRange}));
}
class DisabledTunDeviceControllerTest : public QuicTest {
public:
DisabledTunDeviceControllerTest()
: controller_(kIfname, false, &netlink_),
link_local_range_(*QboneConstants::TerminatorLocalAddressRange()) {}
StrictMock<MockNetlink> netlink_;
TunDeviceController controller_;
IpRange link_local_range_;
};
TEST_F(DisabledTunDeviceControllerTest, UpdateRoutesIsNop) {
EXPECT_THAT(controller_.UpdateRoutes(kIpRange, {}), Eq(true));
}
TEST_F(DisabledTunDeviceControllerTest, UpdateAddressIsNop) {
EXPECT_THAT(controller_.UpdateAddress(kIpRange), Eq(true));
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/bonnet/tun_device_controller.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/qbone/bonnet/tun_device_controller_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
dfb8b94c-5fdf-4d12-a88c-7babeb7d3253 | cpp | abseil/abseil-cpp | charset | absl/strings/charset.h | absl/strings/charset_test.cc | #ifndef ABSL_STRINGS_CHARSET_H_
#define ABSL_STRINGS_CHARSET_H_
#include <cstddef>
#include <cstdint>
#include <cstring>
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/strings/string_view.h"
namespace absl {
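// A constexpr set of char values, stored as a 256-bit bitmap in four 64-bit
// words, supporting union, intersection, and complement at compile time.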
class CharSet {
public:
constexpr CharSet() : m_() {}
constexpr explicit CharSet(absl::string_view str) : m_() {
for (char c : str) {
SetChar(static_cast<unsigned char>(c));
}
}
constexpr bool contains(char c) const {
return ((m_[static_cast<unsigned char>(c) / 64] >>
(static_cast<unsigned char>(c) % 64)) &
0x1) == 0x1;
}
constexpr bool empty() const {
for (uint64_t c : m_) {
if (c != 0) return false;
}
return true;
}
static constexpr CharSet Char(char x) {
return CharSet(CharMaskForWord(x, 0), CharMaskForWord(x, 1),
CharMaskForWord(x, 2), CharMaskForWord(x, 3));
}
static constexpr CharSet Range(char lo, char hi) {
return CharSet(RangeForWord(lo, hi, 0), RangeForWord(lo, hi, 1),
RangeForWord(lo, hi, 2), RangeForWord(lo, hi, 3));
}
friend constexpr CharSet operator&(const CharSet& a, const CharSet& b) {
return CharSet(a.m_[0] & b.m_[0], a.m_[1] & b.m_[1], a.m_[2] & b.m_[2],
a.m_[3] & b.m_[3]);
}
friend constexpr CharSet operator|(const CharSet& a, const CharSet& b) {
return CharSet(a.m_[0] | b.m_[0], a.m_[1] | b.m_[1], a.m_[2] | b.m_[2],
a.m_[3] | b.m_[3]);
}
friend constexpr CharSet operator~(const CharSet& a) {
return CharSet(~a.m_[0], ~a.m_[1], ~a.m_[2], ~a.m_[3]);
}
static constexpr CharSet AsciiUppercase() { return CharSet::Range('A', 'Z'); }
static constexpr CharSet AsciiLowercase() { return CharSet::Range('a', 'z'); }
static constexpr CharSet AsciiDigits() { return CharSet::Range('0', '9'); }
static constexpr CharSet AsciiAlphabet() {
return AsciiLowercase() | AsciiUppercase();
}
static constexpr CharSet AsciiAlphanumerics() {
return AsciiDigits() | AsciiAlphabet();
}
static constexpr CharSet AsciiHexDigits() {
return AsciiDigits() | CharSet::Range('A', 'F') | CharSet::Range('a', 'f');
}
static constexpr CharSet AsciiPrintable() {
return CharSet::Range(0x20, 0x7e);
}
static constexpr CharSet AsciiWhitespace() { return CharSet("\t\n\v\f\r "); }
static constexpr CharSet AsciiPunctuation() {
return AsciiPrintable() & ~AsciiWhitespace() & ~AsciiAlphanumerics();
}
private:
constexpr CharSet(uint64_t b0, uint64_t b1, uint64_t b2, uint64_t b3)
: m_{b0, b1, b2, b3} {}
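  // Returns the bits of 64-bit word `word` covered by the closed range
  // [lo, hi], computed as the difference of two ranges starting at zero.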
static constexpr uint64_t RangeForWord(char lo, char hi, uint64_t word) {
return OpenRangeFromZeroForWord(static_cast<unsigned char>(hi) + 1, word) &
~OpenRangeFromZeroForWord(static_cast<unsigned char>(lo), word);
}
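  // Returns the bits of 64-bit word `word` covered by the half-open range
  // [0, upper).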
static constexpr uint64_t OpenRangeFromZeroForWord(uint64_t upper,
uint64_t word) {
return (upper <= 64 * word) ? 0
: (upper >= 64 * (word + 1))
? ~static_cast<uint64_t>(0)
: (~static_cast<uint64_t>(0) >> (64 - upper % 64));
}
static constexpr uint64_t CharMaskForWord(char x, uint64_t word) {
return (static_cast<unsigned char>(x) / 64 == word)
? (static_cast<uint64_t>(1)
<< (static_cast<unsigned char>(x) % 64))
: 0;
}
constexpr void SetChar(unsigned char c) {
m_[c / 64] |= static_cast<uint64_t>(1) << (c % 64);
}
uint64_t m_[4];
};
}
#endif | #include "absl/strings/charset.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <vector>
#include "gtest/gtest.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
namespace {
constexpr absl::CharSet everything_map = ~absl::CharSet();
constexpr absl::CharSet nothing_map = absl::CharSet();
TEST(Charmap, AllTests) {
const absl::CharSet also_nothing_map("");
EXPECT_TRUE(everything_map.contains('\0'));
EXPECT_FALSE(nothing_map.contains('\0'));
EXPECT_FALSE(also_nothing_map.contains('\0'));
for (unsigned char ch = 1; ch != 0; ++ch) {
SCOPED_TRACE(ch);
EXPECT_TRUE(everything_map.contains(ch));
EXPECT_FALSE(nothing_map.contains(ch));
EXPECT_FALSE(also_nothing_map.contains(ch));
}
const absl::CharSet symbols(absl::string_view("&@#@^!@?", 5));
EXPECT_TRUE(symbols.contains('&'));
EXPECT_TRUE(symbols.contains('@'));
EXPECT_TRUE(symbols.contains('#'));
EXPECT_TRUE(symbols.contains('^'));
EXPECT_FALSE(symbols.contains('!'));
EXPECT_FALSE(symbols.contains('?'));
int cnt = 0;
for (unsigned char ch = 1; ch != 0; ++ch) cnt += symbols.contains(ch);
EXPECT_EQ(cnt, 4);
const absl::CharSet lets(absl::string_view("^abcde", 3));
const absl::CharSet lets2(absl::string_view("fghij\0klmnop", 10));
const absl::CharSet lets3("fghij\0klmnop");
EXPECT_TRUE(lets2.contains('k'));
EXPECT_FALSE(lets3.contains('k'));
EXPECT_FALSE((symbols & lets).empty());
EXPECT_TRUE((lets2 & lets).empty());
EXPECT_FALSE((lets & symbols).empty());
EXPECT_TRUE((lets & lets2).empty());
EXPECT_TRUE(nothing_map.empty());
EXPECT_FALSE(lets.empty());
}
std::string Members(const absl::CharSet& m) {
std::string r;
for (size_t i = 0; i < 256; ++i)
if (m.contains(i)) r.push_back(i);
return r;
}
std::string ClosedRangeString(unsigned char lo, unsigned char hi) {
std::string s;
while (true) {
s.push_back(lo);
if (lo == hi) break;
++lo;
}
return s;
}
TEST(Charmap, Constexpr) {
constexpr absl::CharSet kEmpty = absl::CharSet();
EXPECT_EQ(Members(kEmpty), "");
constexpr absl::CharSet kA = absl::CharSet::Char('A');
EXPECT_EQ(Members(kA), "A");
constexpr absl::CharSet kAZ = absl::CharSet::Range('A', 'Z');
EXPECT_EQ(Members(kAZ), "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
constexpr absl::CharSet kIdentifier =
absl::CharSet::Range('0', '9') | absl::CharSet::Range('A', 'Z') |
absl::CharSet::Range('a', 'z') | absl::CharSet::Char('_');
EXPECT_EQ(Members(kIdentifier),
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"_"
"abcdefghijklmnopqrstuvwxyz");
constexpr absl::CharSet kAll = ~absl::CharSet();
for (size_t i = 0; i < 256; ++i) {
SCOPED_TRACE(i);
EXPECT_TRUE(kAll.contains(i));
}
constexpr absl::CharSet kHello = absl::CharSet("Hello, world!");
EXPECT_EQ(Members(kHello), " !,Hdelorw");
constexpr absl::CharSet kABC =
absl::CharSet::Range('A', 'Z') & ~absl::CharSet::Range('D', 'Z');
EXPECT_EQ(Members(kABC), "ABC");
constexpr bool kContainsA = absl::CharSet("abc").contains('a');
EXPECT_TRUE(kContainsA);
constexpr bool kContainsD = absl::CharSet("abc").contains('d');
EXPECT_FALSE(kContainsD);
constexpr bool kEmptyIsEmpty = absl::CharSet().empty();
EXPECT_TRUE(kEmptyIsEmpty);
constexpr bool kNotEmptyIsEmpty = absl::CharSet("abc").empty();
EXPECT_FALSE(kNotEmptyIsEmpty);
}
TEST(Charmap, Range) {
std::vector<size_t> poi = {0, 1, 2, 3, 4, 7, 8, 9, 15,
16, 17, 30, 31, 32, 33, 63, 64, 65,
127, 128, 129, 223, 224, 225, 254, 255};
for (auto lo = poi.begin(); lo != poi.end(); ++lo) {
SCOPED_TRACE(*lo);
for (auto hi = lo; hi != poi.end(); ++hi) {
SCOPED_TRACE(*hi);
EXPECT_EQ(Members(absl::CharSet::Range(*lo, *hi)),
ClosedRangeString(*lo, *hi));
}
}
}
TEST(Charmap, NullByteWithStringView) {
char characters[5] = {'a', 'b', '\0', 'd', 'x'};
absl::string_view view(characters, 5);
absl::CharSet tester(view);
EXPECT_TRUE(tester.contains('a'));
EXPECT_TRUE(tester.contains('b'));
EXPECT_TRUE(tester.contains('\0'));
EXPECT_TRUE(tester.contains('d'));
EXPECT_TRUE(tester.contains('x'));
EXPECT_FALSE(tester.contains('c'));
}
TEST(CharmapCtype, Match) {
for (int c = 0; c < 256; ++c) {
SCOPED_TRACE(c);
SCOPED_TRACE(static_cast<char>(c));
EXPECT_EQ(absl::ascii_isupper(c),
absl::CharSet::AsciiUppercase().contains(c));
EXPECT_EQ(absl::ascii_islower(c),
absl::CharSet::AsciiLowercase().contains(c));
EXPECT_EQ(absl::ascii_isdigit(c), absl::CharSet::AsciiDigits().contains(c));
EXPECT_EQ(absl::ascii_isalpha(c),
absl::CharSet::AsciiAlphabet().contains(c));
EXPECT_EQ(absl::ascii_isalnum(c),
absl::CharSet::AsciiAlphanumerics().contains(c));
EXPECT_EQ(absl::ascii_isxdigit(c),
absl::CharSet::AsciiHexDigits().contains(c));
EXPECT_EQ(absl::ascii_isprint(c),
absl::CharSet::AsciiPrintable().contains(c));
EXPECT_EQ(absl::ascii_isspace(c),
absl::CharSet::AsciiWhitespace().contains(c));
EXPECT_EQ(absl::ascii_ispunct(c),
absl::CharSet::AsciiPunctuation().contains(c));
}
}
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/charset.h | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/charset_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
afbf6f4d-13ff-4467-a905-6b6f60822eb7 | cpp | abseil/abseil-cpp | cord_rep_btree_navigator | absl/strings/internal/cord_rep_btree_navigator.cc | absl/strings/internal/cord_rep_btree_navigator_test.cc | #include "absl/strings/internal/cord_rep_btree_navigator.h"
#include <cassert>
#include "absl/strings/internal/cord_data_edge.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
using ReadResult = CordRepBtreeNavigator::ReadResult;
namespace {
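// Returns a new data edge referencing the range [offset, offset + n) of
// `rep`, collapsing nested substring nodes and avoiding an allocation when
// the range is empty or spans the entire edge.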
inline CordRep* Substring(CordRep* rep, size_t offset, size_t n) {
assert(n <= rep->length);
assert(offset < rep->length);
assert(offset <= rep->length - n);
assert(IsDataEdge(rep));
if (n == 0) return nullptr;
if (n == rep->length) return CordRep::Ref(rep);
if (rep->tag == SUBSTRING) {
offset += rep->substring()->start;
rep = rep->substring()->child;
}
assert(rep->IsExternal() || rep->IsFlat());
CordRepSubstring* substring = new CordRepSubstring();
substring->length = n;
substring->tag = SUBSTRING;
substring->start = offset;
substring->child = CordRep::Ref(rep);
return substring;
}
inline CordRep* Substring(CordRep* rep, size_t offset) {
return Substring(rep, offset, rep->length - offset);
}
}
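// Skips `n` bytes forward from the current position: walks up and across
// sibling edges until the target byte falls inside an edge, then descends
// back to the leaf level, returning that edge and the offset into it.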
CordRepBtreeNavigator::Position CordRepBtreeNavigator::Skip(size_t n) {
int height = 0;
size_t index = index_[0];
CordRepBtree* node = node_[0];
CordRep* edge = node->Edge(index);
while (n >= edge->length) {
n -= edge->length;
while (++index == node->end()) {
if (++height > height_) return {nullptr, n};
node = node_[height];
index = index_[height];
}
edge = node->Edge(index);
}
while (height > 0) {
node = edge->btree();
index_[height] = static_cast<uint8_t>(index);
node_[--height] = node;
index = node->begin();
edge = node->Edge(index);
while (n >= edge->length) {
n -= edge->length;
++index;
assert(index != node->end());
edge = node->Edge(index);
}
}
index_[0] = static_cast<uint8_t>(index);
return {edge, n};
}
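// Reads `n` bytes starting at `edge_offset` inside the current edge into a
// newly allocated tree, returning it together with the offset inside the
// edge at which the read ended, or a nullptr tree if the read would extend
// past the end of the tree.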
ReadResult CordRepBtreeNavigator::Read(size_t edge_offset, size_t n) {
int height = 0;
size_t length = edge_offset + n;
size_t index = index_[0];
CordRepBtree* node = node_[0];
CordRep* edge = node->Edge(index);
assert(edge_offset < edge->length);
if (length < edge->length) {
return {Substring(edge, edge_offset, n), length};
}
CordRepBtree* subtree = CordRepBtree::New(Substring(edge, edge_offset));
size_t subtree_end = 1;
do {
length -= edge->length;
while (++index == node->end()) {
index_[height] = static_cast<uint8_t>(index);
if (++height > height_) {
subtree->set_end(subtree_end);
if (length == 0) return {subtree, 0};
CordRep::Unref(subtree);
return {nullptr, length};
}
if (length != 0) {
subtree->set_end(subtree_end);
subtree = CordRepBtree::New(subtree);
subtree_end = 1;
}
node = node_[height];
index = index_[height];
}
edge = node->Edge(index);
if (length >= edge->length) {
subtree->length += edge->length;
subtree->edges_[subtree_end++] = CordRep::Ref(edge);
}
} while (length >= edge->length);
CordRepBtree* tree = subtree;
subtree->length += length;
while (height > 0) {
node = edge->btree();
index_[height] = static_cast<uint8_t>(index);
node_[--height] = node;
index = node->begin();
edge = node->Edge(index);
if (length != 0) {
CordRepBtree* right = CordRepBtree::New(height);
right->length = length;
subtree->edges_[subtree_end++] = right;
subtree->set_end(subtree_end);
subtree = right;
subtree_end = 0;
while (length >= edge->length) {
subtree->edges_[subtree_end++] = CordRep::Ref(edge);
length -= edge->length;
edge = node->Edge(++index);
}
}
}
if (length != 0) {
subtree->edges_[subtree_end++] = Substring(edge, 0, length);
}
subtree->set_end(subtree_end);
index_[0] = static_cast<uint8_t>(index);
return {tree, length};
}
}
ABSL_NAMESPACE_END
} | #include "absl/strings/internal/cord_rep_btree_navigator.h"
#include <string>
#include <vector>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/strings/internal/cord_internal.h"
#include "absl/strings/internal/cord_rep_btree.h"
#include "absl/strings/internal/cord_rep_test_util.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace cord_internal {
namespace {
using ::testing::Eq;
using ::testing::Ne;
using ::absl::cordrep_testing::CordRepBtreeFromFlats;
using ::absl::cordrep_testing::CordToString;
using ::absl::cordrep_testing::CreateFlatsFromString;
using ::absl::cordrep_testing::CreateRandomString;
using ::absl::cordrep_testing::MakeFlat;
using ::absl::cordrep_testing::MakeSubstring;
using ReadResult = CordRepBtreeNavigator::ReadResult;
using Position = CordRepBtreeNavigator::Position;
class CordRepBtreeNavigatorTest : public testing::TestWithParam<size_t> {
public:
using Flats = std::vector<CordRep*>;
static constexpr size_t kCharsPerFlat = 3;
CordRepBtreeNavigatorTest() {
data_ = CreateRandomString(count() * kCharsPerFlat);
flats_ = CreateFlatsFromString(data_, kCharsPerFlat);
if (count() > 1) {
CordRep::Unref(flats_[1]);
flats_[1] = MakeSubstring(kCharsPerFlat, kCharsPerFlat, MakeFlat(data_));
} else {
CordRep::Unref(flats_[0]);
flats_[0] = MakeSubstring(0, kCharsPerFlat, MakeFlat(data_));
}
tree_ = CordRepBtreeFromFlats(flats_);
}
~CordRepBtreeNavigatorTest() override { CordRep::Unref(tree_); }
size_t count() const { return GetParam(); }
CordRepBtree* tree() { return tree_; }
const std::string& data() const { return data_; }
const std::vector<CordRep*>& flats() const { return flats_; }
static std::string ToString(testing::TestParamInfo<size_t> param) {
return absl::StrCat(param.param, "_Flats");
}
private:
std::string data_;
Flats flats_;
CordRepBtree* tree_;
};
INSTANTIATE_TEST_SUITE_P(
WithParam, CordRepBtreeNavigatorTest,
testing::Values(1, CordRepBtree::kMaxCapacity - 1,
CordRepBtree::kMaxCapacity,
CordRepBtree::kMaxCapacity* CordRepBtree::kMaxCapacity - 1,
CordRepBtree::kMaxCapacity* CordRepBtree::kMaxCapacity,
CordRepBtree::kMaxCapacity* CordRepBtree::kMaxCapacity + 1,
CordRepBtree::kMaxCapacity* CordRepBtree::kMaxCapacity * 2 +
17),
CordRepBtreeNavigatorTest::ToString);
TEST(CordRepBtreeNavigatorTest, Uninitialized) {
CordRepBtreeNavigator nav;
EXPECT_FALSE(nav);
EXPECT_THAT(nav.btree(), Eq(nullptr));
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
EXPECT_DEATH(nav.Current(), ".*");
#endif
}
TEST_P(CordRepBtreeNavigatorTest, InitFirst) {
CordRepBtreeNavigator nav;
CordRep* edge = nav.InitFirst(tree());
EXPECT_TRUE(nav);
EXPECT_THAT(nav.btree(), Eq(tree()));
EXPECT_THAT(nav.Current(), Eq(flats().front()));
EXPECT_THAT(edge, Eq(flats().front()));
}
TEST_P(CordRepBtreeNavigatorTest, InitLast) {
CordRepBtreeNavigator nav;
CordRep* edge = nav.InitLast(tree());
EXPECT_TRUE(nav);
EXPECT_THAT(nav.btree(), Eq(tree()));
EXPECT_THAT(nav.Current(), Eq(flats().back()));
EXPECT_THAT(edge, Eq(flats().back()));
}
TEST_P(CordRepBtreeNavigatorTest, NextPrev) {
CordRepBtreeNavigator nav;
nav.InitFirst(tree());
const Flats& flats = this->flats();
EXPECT_THAT(nav.Previous(), Eq(nullptr));
EXPECT_THAT(nav.Current(), Eq(flats.front()));
for (size_t i = 1; i < flats.size(); ++i) {
ASSERT_THAT(nav.Next(), Eq(flats[i]));
EXPECT_THAT(nav.Current(), Eq(flats[i]));
}
EXPECT_THAT(nav.Next(), Eq(nullptr));
EXPECT_THAT(nav.Current(), Eq(flats.back()));
for (size_t i = flats.size() - 1; i > 0; --i) {
ASSERT_THAT(nav.Previous(), Eq(flats[i - 1]));
EXPECT_THAT(nav.Current(), Eq(flats[i - 1]));
}
EXPECT_THAT(nav.Previous(), Eq(nullptr));
EXPECT_THAT(nav.Current(), Eq(flats.front()));
}
TEST_P(CordRepBtreeNavigatorTest, PrevNext) {
CordRepBtreeNavigator nav;
nav.InitLast(tree());
const Flats& flats = this->flats();
EXPECT_THAT(nav.Next(), Eq(nullptr));
EXPECT_THAT(nav.Current(), Eq(flats.back()));
for (size_t i = flats.size() - 1; i > 0; --i) {
ASSERT_THAT(nav.Previous(), Eq(flats[i - 1]));
EXPECT_THAT(nav.Current(), Eq(flats[i - 1]));
}
EXPECT_THAT(nav.Previous(), Eq(nullptr));
EXPECT_THAT(nav.Current(), Eq(flats.front()));
for (size_t i = 1; i < flats.size(); ++i) {
ASSERT_THAT(nav.Next(), Eq(flats[i]));
EXPECT_THAT(nav.Current(), Eq(flats[i]));
}
EXPECT_THAT(nav.Next(), Eq(nullptr));
EXPECT_THAT(nav.Current(), Eq(flats.back()));
}
TEST(CordRepBtreeNavigatorTest, Reset) {
CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
CordRepBtreeNavigator nav;
nav.InitFirst(tree);
nav.Reset();
EXPECT_FALSE(nav);
EXPECT_THAT(nav.btree(), Eq(nullptr));
#if defined(GTEST_HAS_DEATH_TEST) && !defined(NDEBUG)
EXPECT_DEATH(nav.Current(), ".*");
#endif
CordRep::Unref(tree);
}
TEST_P(CordRepBtreeNavigatorTest, Skip) {
size_t count = this->count();
const Flats& flats = this->flats();
CordRepBtreeNavigator nav;
nav.InitFirst(tree());
for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) {
Position pos = nav.Skip(char_offset);
EXPECT_THAT(pos.edge, Eq(nav.Current()));
EXPECT_THAT(pos.edge, Eq(flats[0]));
EXPECT_THAT(pos.offset, Eq(char_offset));
}
for (size_t index1 = 0; index1 < count; ++index1) {
for (size_t index2 = index1; index2 < count; ++index2) {
for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) {
CordRepBtreeNavigator nav;
nav.InitFirst(tree());
size_t length1 = index1 * kCharsPerFlat;
Position pos1 = nav.Skip(length1 + char_offset);
ASSERT_THAT(pos1.edge, Eq(flats[index1]));
ASSERT_THAT(pos1.edge, Eq(nav.Current()));
ASSERT_THAT(pos1.offset, Eq(char_offset));
size_t length2 = index2 * kCharsPerFlat;
Position pos2 = nav.Skip(length2 - length1 + char_offset);
ASSERT_THAT(pos2.edge, Eq(flats[index2]));
ASSERT_THAT(pos2.edge, Eq(nav.Current()));
ASSERT_THAT(pos2.offset, Eq(char_offset));
}
}
}
}
TEST_P(CordRepBtreeNavigatorTest, Seek) {
size_t count = this->count();
const Flats& flats = this->flats();
CordRepBtreeNavigator nav;
nav.InitFirst(tree());
for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) {
Position pos = nav.Seek(char_offset);
EXPECT_THAT(pos.edge, Eq(nav.Current()));
EXPECT_THAT(pos.edge, Eq(flats[0]));
EXPECT_THAT(pos.offset, Eq(char_offset));
}
for (size_t index = 0; index < count; ++index) {
for (size_t char_offset = 0; char_offset < kCharsPerFlat; ++char_offset) {
size_t offset = index * kCharsPerFlat + char_offset;
Position pos1 = nav.Seek(offset);
ASSERT_THAT(pos1.edge, Eq(flats[index]));
ASSERT_THAT(pos1.edge, Eq(nav.Current()));
ASSERT_THAT(pos1.offset, Eq(char_offset));
}
}
}
TEST(CordRepBtreeNavigatorTest, InitOffset) {
CordRepBtree* tree = CordRepBtree::Create(MakeFlat("abc"));
tree = CordRepBtree::Append(tree, MakeFlat("def"));
CordRepBtreeNavigator nav;
Position pos = nav.InitOffset(tree, 5);
EXPECT_TRUE(nav);
EXPECT_THAT(nav.btree(), Eq(tree));
EXPECT_THAT(pos.edge, Eq(tree->Edges()[1]));
EXPECT_THAT(pos.edge, Eq(nav.Current()));
EXPECT_THAT(pos.offset, Eq(2u));
CordRep::Unref(tree);
}
TEST(CordRepBtreeNavigatorTest, InitOffsetAndSeekBeyondLength) {
CordRepBtree* tree1 = CordRepBtree::Create(MakeFlat("abc"));
CordRepBtree* tree2 = CordRepBtree::Create(MakeFlat("def"));
CordRepBtreeNavigator nav;
nav.InitFirst(tree1);
EXPECT_THAT(nav.Seek(3).edge, Eq(nullptr));
EXPECT_THAT(nav.Seek(100).edge, Eq(nullptr));
EXPECT_THAT(nav.btree(), Eq(tree1));
EXPECT_THAT(nav.Current(), Eq(tree1->Edges().front()));
EXPECT_THAT(nav.InitOffset(tree2, 3).edge, Eq(nullptr));
EXPECT_THAT(nav.InitOffset(tree2, 100).edge, Eq(nullptr));
EXPECT_THAT(nav.btree(), Eq(tree1));
EXPECT_THAT(nav.Current(), Eq(tree1->Edges().front()));
CordRep::Unref(tree1);
CordRep::Unref(tree2);
}
TEST_P(CordRepBtreeNavigatorTest, Read) {
const Flats& flats = this->flats();
const std::string& data = this->data();
for (size_t offset = 0; offset < data.size(); ++offset) {
for (size_t length = 1; length <= data.size() - offset; ++length) {
CordRepBtreeNavigator nav;
nav.InitFirst(tree());
size_t edge_offset = nav.Skip(offset).offset;
ReadResult result = nav.Read(edge_offset, length);
ASSERT_THAT(result.tree, Ne(nullptr));
EXPECT_THAT(result.tree->length, Eq(length));
if (result.tree->tag == BTREE) {
ASSERT_TRUE(CordRepBtree::IsValid(result.tree->btree()));
}
std::string value = CordToString(result.tree);
EXPECT_THAT(value, Eq(data.substr(offset, length)));
size_t partial = (offset + length) % kCharsPerFlat;
ASSERT_THAT(result.n, Eq(partial));
if (offset + length < data.size()) {
size_t index = (offset + length) / kCharsPerFlat;
EXPECT_THAT(nav.Current(), Eq(flats[index]));
}
CordRep::Unref(result.tree);
}
}
}
TEST_P(CordRepBtreeNavigatorTest, ReadBeyondLengthOfTree) {
CordRepBtreeNavigator nav;
nav.InitFirst(tree());
ReadResult result = nav.Read(2, tree()->length);
ASSERT_THAT(result.tree, Eq(nullptr));
}
TEST(CordRepBtreeNavigatorTest, NavigateMaximumTreeDepth) {
CordRepFlat* flat1 = MakeFlat("Hello world");
CordRepFlat* flat2 = MakeFlat("World Hello");
CordRepBtree* node = CordRepBtree::Create(flat1);
node = CordRepBtree::Append(node, flat2);
while (node->height() < CordRepBtree::kMaxHeight) {
node = CordRepBtree::New(node);
}
CordRepBtreeNavigator nav;
CordRep* edge = nav.InitFirst(node);
EXPECT_THAT(edge, Eq(flat1));
EXPECT_THAT(nav.Next(), Eq(flat2));
EXPECT_THAT(nav.Next(), Eq(nullptr));
EXPECT_THAT(nav.Previous(), Eq(flat1));
EXPECT_THAT(nav.Previous(), Eq(nullptr));
CordRep::Unref(node);
}
}
}
ABSL_NAMESPACE_END
} | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_rep_btree_navigator.cc | https://github.com/abseil/abseil-cpp/blob/03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4/absl/strings/internal/cord_rep_btree_navigator_test.cc | 03b8d6ea3dc6a0b8c6bcf42503c2053754dab2e4 |
8ba9fb26-5b1e-47d3-8a9e-005dcae536cb | cpp | tensorflow/tensorflow | executable | third_party/xla/xla/service/executable.cc | tensorflow/core/tfrt/mlrt/bytecode/executable_test.cc | #include "xla/service/executable.h"
#include <memory>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "xla/debug_options_flags.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_graph_dumper.h"
#include "xla/service/maybe_owning_device_memory.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
namespace xla {
ExecutionInput::~ExecutionInput() {
for (auto& index : unowned_indices_) {
auto buffer = buffers_.mutable_element(index)->Release();
if (buffer) {
buffer->Release();
}
}
}
absl::Status ExecutionInput::SetDynamicShape(Shape dynamic_shape) {
const Shape& input_shape = shape();
if (!ShapeUtil::DynamicShapeIsCompatible(input_shape, dynamic_shape)) {
return tsl::errors::InvalidArgument(
"Cannot set dynamic shape: ", input_shape.DebugString(), " vs. ",
dynamic_shape.DebugString());
}
dynamic_shape_ = std::make_unique<Shape>(std::move(dynamic_shape));
return absl::OkStatus();
}
void ExecutionInput::SetUnownedBuffer(const ShapeIndex& index,
MaybeOwningDeviceMemory buffer) {
*buffers_.mutable_element(index) = std::move(buffer);
unowned_indices_.insert(index);
}
absl::StatusOr<ShapedBuffer> ExecutionInput::ToShapedBuffer(
se::DeviceMemoryAllocator* allocator, int device_ordinal) const {
const Shape& input_shape = shape();
ShapedBuffer shaped_buffer(input_shape, device_ordinal);
for (const auto& index_buffer : Buffers()) {
const tensorflow::se::OwningDeviceMemory* mem =
index_buffer.second.AsOwningDeviceMemory();
if (mem != nullptr && (mem->allocator() != allocator ||
mem->device_ordinal() != device_ordinal)) {
return tsl::errors::InvalidArgument("Device buffer at index ",
index_buffer.first.ToString(),
" has mismatching allocator/device");
}
shaped_buffer.set_buffer(index_buffer.second.AsDeviceMemoryBase(),
index_buffer.first);
}
return std::move(shaped_buffer);
}
absl::StatusOr<ScopedShapedBuffer> Executable::ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) {
absl::StatusOr<ScopedShapedBuffer> result =
ExecuteAsyncOnStream(run_options, arguments, hlo_execution_profile);
absl::Status blocking_status = run_options->stream()->BlockHostUntilDone();
TF_RETURN_IF_ERROR(result.status());
TF_RETURN_IF_ERROR(blocking_status);
return result;
}
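// Wraps the device memory of a ShapedBuffer in an ExecutionInput without
// taking ownership of the underlying allocations.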
static ExecutionInput MakeMaybeOwningDeviceMemoryTree(
const ShapedBuffer& shaped_buffer) {
ExecutionInput result(shaped_buffer.on_device_shape());
shaped_buffer.buffers().ForEachElement(
[&](const ShapeIndex& index, const se::DeviceMemoryBase& mem) {
result.SetBuffer(index, MaybeOwningDeviceMemory(mem));
});
return result;
}
absl::StatusOr<ScopedShapedBuffer> Executable::ExecuteAsyncOnStream(
const ServiceExecutableRunOptions* run_options,
absl::Span<const ShapedBuffer* const> arguments,
HloExecutionProfile* hlo_execution_profile) {
std::vector<ExecutionInput> args;
args.reserve(arguments.size());
for (const ShapedBuffer* arg : arguments) {
args.emplace_back(MakeMaybeOwningDeviceMemoryTree(*arg));
}
TF_ASSIGN_OR_RETURN(ExecutionOutput out,
ExecuteAsyncOnStream(run_options, std::move(args),
hlo_execution_profile));
return out.ConsumeResult();
}
absl::StatusOr<ExecutionOutput> Executable::ExecuteOnStream(
const ServiceExecutableRunOptions* run_options,
std::vector<ExecutionInput> arguments,
HloExecutionProfile* hlo_execution_profile) {
absl::StatusOr<ExecutionOutput> result = ExecuteAsyncOnStream(
run_options, std::move(arguments), hlo_execution_profile);
absl::Status blocking_status = run_options->stream()->BlockHostUntilDone();
TF_RETURN_IF_ERROR(result.status());
TF_RETURN_IF_ERROR(blocking_status);
return result;
}
absl::StatusOr<std::vector<ScopedShapedBuffer>> Executable::ExecuteOnStreams(
absl::Span<const ServiceExecutableRunOptions> run_options,
absl::Span<const absl::Span<const ShapedBuffer* const>> arguments) {
TF_RET_CHECK(run_options.size() == arguments.size());
std::vector<ScopedShapedBuffer> return_values;
return_values.reserve(run_options.size());
if (run_options.size() == 1) {
TF_ASSIGN_OR_RETURN(auto rv,
ExecuteOnStream(&run_options[0], arguments[0],
                                        /*hlo_execution_profile=*/nullptr));
return_values.push_back(std::move(rv));
return std::move(return_values);
}
for (size_t i = 0; i < run_options.size(); ++i) {
TF_ASSIGN_OR_RETURN(
auto rv, ExecuteAsyncOnStream(&run_options[i], arguments[i],
                                     /*hlo_execution_profile=*/nullptr));
return_values.push_back(std::move(rv));
}
for (const auto& options : run_options) {
TF_RET_CHECK(options.stream() != nullptr);
TF_RETURN_IF_ERROR(options.stream()->BlockHostUntilDone());
}
return std::move(return_values);
}
absl::StatusOr<ScopedShapedBuffer> Executable::ExecuteOnStreamWrapper(
const ServiceExecutableRunOptions* run_options,
absl::Span<const ShapedBuffer* const> arguments) {
absl::StatusOr<ScopedShapedBuffer> result =
ExecuteAsyncOnStreamWrapper(run_options, arguments);
absl::Status block_status = run_options->stream()->BlockHostUntilDone();
TF_RETURN_IF_ERROR(result.status());
TF_RETURN_IF_ERROR(block_status);
return result;
}
absl::StatusOr<ExecutionOutput> Executable::ExecuteOnStreamWrapper(
const ServiceExecutableRunOptions* run_options,
std::vector<ExecutionInput> arguments) {
absl::StatusOr<ExecutionOutput> result =
ExecuteAsyncOnStreamWrapper(run_options, std::move(arguments));
absl::Status block_status = run_options->stream()->BlockHostUntilDone();
TF_RETURN_IF_ERROR(result.status());
TF_RETURN_IF_ERROR(block_status);
return result;
}
struct ExecuteAsyncOnStreamWrapperState {
ExecutionProfile* profile;
};
static ExecuteAsyncOnStreamWrapperState ExecuteWrapperBeforeExecution(
const Executable& executable,
const ServiceExecutableRunOptions* run_options) {
ExecuteAsyncOnStreamWrapperState state;
state.profile = run_options->run_options().execution_profile();
VLOG(1) << "enqueueing executable on stream...";
return state;
}
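// If an execution profile is attached, blocks until the stream completes and
// backfills the compute time and generated-code size in the profile.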
absl::Status ExecuteWrapperAfterExecution(
Executable* executable, const ExecuteAsyncOnStreamWrapperState& state,
absl::Status return_status, se::Stream* stream) {
if (!return_status.ok()) {
if (state.profile != nullptr) {
absl::Status status = stream->BlockHostUntilDone();
if (!status.ok()) {
LOG(ERROR) << "Failed to BlockHostUntilDone: " << status;
}
}
return return_status;
}
if (state.profile != nullptr) {
TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
const int64_t executable_size_in_bytes =
executable->SizeOfGeneratedCodeInBytes();
if (state.profile->compute_time_ns() == 0) {
state.profile->set_compute_time_ns(
state.profile->compute_and_transfer_time_ns());
}
if (executable_size_in_bytes != 0) {
state.profile->set_executable_size_in_bytes(executable_size_in_bytes);
}
}
return return_status;
}
absl::StatusOr<ScopedShapedBuffer> Executable::ExecuteAsyncOnStreamWrapper(
const ServiceExecutableRunOptions* run_options,
absl::Span<const ShapedBuffer* const> arguments) {
auto state = ExecuteWrapperBeforeExecution(*this, run_options);
absl::StatusOr<ScopedShapedBuffer> return_value =
      ExecuteAsyncOnStream(run_options, arguments,
                           /*hlo_execution_profile=*/nullptr);
TF_RETURN_IF_ERROR(ExecuteWrapperAfterExecution(
this, state, return_value.status(), run_options->stream()));
return return_value;
}
absl::StatusOr<ExecutionOutput> Executable::ExecuteAsyncOnStreamWrapper(
const ServiceExecutableRunOptions* run_options,
std::vector<ExecutionInput> arguments) {
auto state = ExecuteWrapperBeforeExecution(*this, run_options);
absl::StatusOr<ExecutionOutput> return_value =
      ExecuteAsyncOnStream(run_options, std::move(arguments),
                           /*hlo_execution_profile=*/nullptr);
TF_RETURN_IF_ERROR(ExecuteWrapperAfterExecution(
this, state, return_value.status(), run_options->stream()));
return return_value;
}
int64_t Executable::SizeOfGeneratedCodeInBytes() const { return -1; }
void Executable::MarkToBeReleasedArguments(absl::Span<ExecutionInput> arguments,
ExecutionOutput& result) {
for (ExecutionInput& argument : arguments) {
for (auto& index_buffer : *argument.MutableBuffers()) {
if (std::optional<se::OwningDeviceMemory> maybe_owning_buffer =
index_buffer.second.Release()) {
result.AddToBeReleased(std::move(*maybe_owning_buffer));
}
}
}
}
} | #include "tensorflow/core/tfrt/mlrt/bytecode/executable.h"
#include <cstring>
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
namespace mlrt {
namespace bc {
namespace {
TEST(ExecutableTest, Executable) {
bc::Buffer buffer;
bc::Allocator allocator(&buffer);
Executable::Constructor executable_ctor = bc::New<bc::Executable>(&allocator);
Vector<String>::Constructor kernel_names_ctor =
executable_ctor.construct_kernel_names(2);
kernel_names_ctor.ConstructAt(0, "add");
kernel_names_ctor.ConstructAt(1, "return");
auto attributes_ctor = executable_ctor.construct_attributes(1);
int32_t constant = 1;
std::string constant_str(sizeof(int32_t), '\0');
std::memcpy(constant_str.data(), &constant, sizeof(int32_t));
attributes_ctor.ConstructAt(0, constant_str);
executable_ctor.construct_functions(1);
Executable executable(buffer.Get(executable_ctor.address()));
EXPECT_THAT(executable.kernel_names(),
::testing::ElementsAreArray({"add", "return"}));
EXPECT_EQ(executable.attributes().size(), 1);
int32_t value;
ASSERT_EQ(executable.attributes()[0].size(), sizeof(value));
std::memcpy(&value, executable.attributes()[0].data(), sizeof(int32_t));
EXPECT_EQ(value, constant);
EXPECT_EQ(executable.functions().size(), 1);
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/executable.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/executable_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
550b3692-2d88-4054-bf6c-3b1158ebeda3 | cpp | tensorflow/tensorflow | filter_dataset_op | tensorflow/core/kernels/data/filter_dataset_op.cc | tensorflow/core/kernels/data/filter_dataset_op_test.cc | #include "tensorflow/core/kernels/data/filter_dataset_op.h"
#include <memory>
#include <utility>
#include <vector>
#include "tensorflow/core/common_runtime/function.h"
#include "tensorflow/core/common_runtime/input_colocation_exemption_registry.h"
#include "tensorflow/core/data/dataset_utils.h"
#include "tensorflow/core/data/name_utils.h"
#include "tensorflow/core/data/stats_utils.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/partial_tensor_shape.h"
#include "tensorflow/core/framework/stats_aggregator.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/gtl/cleanup.h"
#include "tensorflow/core/lib/random/random.h"
#include "tensorflow/core/lib/strings/str_util.h"
namespace tensorflow {
namespace data {
constexpr const char* const FilterDatasetOp::kDatasetType;
constexpr const char* const FilterDatasetOp::kInputDataset;
constexpr const char* const FilterDatasetOp::kOtherArguments;
constexpr const char* const FilterDatasetOp::kPredicate;
constexpr const char* const FilterDatasetOp::kTarguments;
constexpr const char* const FilterDatasetOp::kOutputTypes;
constexpr const char* const FilterDatasetOp::kOutputShapes;
constexpr char kInputImplEmpty[] = "input_impl_empty";
constexpr char kFilteredElements[] = "filtered_elements";
constexpr char kDroppedElements[] = "dropped_elements";
class FilterDatasetOp::Dataset : public DatasetBase {
public:
Dataset(OpKernelContext* ctx, const DatasetBase* input,
std::unique_ptr<CapturedFunction> captured_func)
: DatasetBase(DatasetContext(ctx)),
input_(input),
captured_func_(std::move(captured_func)) {
input_->Ref();
}
~Dataset() override { input_->Unref(); }
std::unique_ptr<IteratorBase> MakeIteratorInternal(
const string& prefix) const override {
return std::make_unique<Iterator>(Iterator::Params{
this, name_utils::IteratorPrefix(kDatasetType, prefix)});
}
const DataTypeVector& output_dtypes() const override {
return input_->output_dtypes();
}
const std::vector<PartialTensorShape>& output_shapes() const override {
return input_->output_shapes();
}
string DebugString() const override {
return name_utils::DatasetDebugString(kDatasetType);
}
Status InputDatasets(std::vector<const DatasetBase*>* inputs) const override {
inputs->push_back(input_);
return absl::OkStatus();
}
Status CheckExternalState() const override {
TF_RETURN_IF_ERROR(captured_func_->CheckExternalState());
return input_->CheckExternalState();
}
protected:
Status AsGraphDefInternal(SerializationContext* ctx,
DatasetGraphDefBuilder* b,
Node** output) const override {
Node* input_graph_node;
TF_RETURN_IF_ERROR(b->AddInputDataset(ctx, input_, &input_graph_node));
std::vector<Node*> other_arguments;
DataTypeVector other_arguments_types;
TF_RETURN_IF_ERROR(captured_func_->AddToGraph(ctx, b, &other_arguments,
&other_arguments_types));
AttrValue f;
b->BuildAttrValue(captured_func_->func(), &f);
AttrValue other_arguments_types_attr;
b->BuildAttrValue(other_arguments_types, &other_arguments_types_attr);
TF_RETURN_IF_ERROR(b->AddDataset(
this, {{0, input_graph_node}}, {{1, other_arguments}},
{{kPredicate, f}, {kTarguments, other_arguments_types_attr}}, output));
return absl::OkStatus();
}
private:
class Iterator : public DatasetIterator<Dataset> {
public:
explicit Iterator(const Params& params)
: DatasetIterator<Dataset>(params),
filtered_elements_(0),
dropped_elements_(0) {}
bool SymbolicCheckpointCompatible() const override { return true; }
Status Initialize(IteratorContext* ctx) override {
TF_RETURN_IF_ERROR(
dataset()->input_->MakeIterator(ctx, this, prefix(), &input_impl_));
return dataset()->captured_func_->Instantiate(
ctx, &instantiated_captured_func_);
}
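    // Pulls elements from the input iterator and evaluates the predicate on
    // each one, dropping (and counting) elements for which it returns false.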
Status GetNextInternal(IteratorContext* ctx,
std::vector<Tensor>* out_tensors,
bool* end_of_sequence) override {
auto stats_aggregator = ctx->stats_aggregator();
bool matched;
do {
{
tf_shared_lock l(mu_);
if (!input_impl_) {
*end_of_sequence = true;
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(
input_impl_->GetNext(ctx, out_tensors, end_of_sequence));
}
if (*end_of_sequence) {
mutex_lock l(mu_);
input_impl_.reset();
return absl::OkStatus();
}
std::vector<Tensor> result;
auto status = instantiated_captured_func_->RunWithBorrowedArgs(
ctx, *out_tensors, &result, model_node());
if (!status.ok()) {
return AddErrorContext(status);
}
if (result.size() != 1 || result[0].dtype() != DT_BOOL ||
result[0].NumElements() != 1) {
out_tensors->clear();
return errors::InvalidArgument(
"Filter predicate `f` must return a scalar bool.");
}
matched = result[0].scalar<bool>()();
if (!matched) {
out_tensors->clear();
{
mutex_lock l(mu_);
dropped_elements_++;
}
if (stats_aggregator) {
mutex_lock l(mu_);
stats_aggregator->AddScalar(
stats_utils::DroppedElementsScalarName(dataset()->node_name()),
static_cast<float>(dropped_elements_), num_elements());
stats_aggregator->IncrementCounter(dataset()->node_name(),
stats_utils::kDroppedElements,
static_cast<float>(1));
}
}
} while (!matched);
{
mutex_lock l(mu_);
filtered_elements_++;
}
if (stats_aggregator) {
mutex_lock l(mu_);
stats_aggregator->AddScalar(
stats_utils::FilterdElementsScalarName(dataset()->node_name()),
static_cast<float>(filtered_elements_), num_elements());
stats_aggregator->IncrementCounter(dataset()->node_name(),
stats_utils::kFilteredElements,
static_cast<float>(1));
}
*end_of_sequence = false;
return absl::OkStatus();
}
protected:
std::shared_ptr<model::Node> CreateNode(
IteratorContext* ctx, model::Node::Args args) const override {
return model::MakeUnknownRatioNode(std::move(args));
}
Status SaveInternal(SerializationContext* ctx,
IteratorStateWriter* writer) override {
TF_RETURN_IF_ERROR(ctx->HandleCheckExternalStateStatus(
dataset()->captured_func_->CheckExternalState()));
mutex_lock l(mu_);
TF_RETURN_IF_ERROR(writer->WriteScalar(
prefix(), kInputImplEmpty, static_cast<int64_t>(!input_impl_)));
if (input_impl_) {
TF_RETURN_IF_ERROR(SaveInput(ctx, writer, input_impl_));
}
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kFilteredElements, filtered_elements_));
TF_RETURN_IF_ERROR(
writer->WriteScalar(prefix(), kDroppedElements, dropped_elements_));
return absl::OkStatus();
}
Status RestoreInternal(IteratorContext* ctx,
IteratorStateReader* reader) override {
mutex_lock l(mu_);
int64_t input_empty;
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kInputImplEmpty, &input_empty));
if (static_cast<bool>(input_empty)) {
input_impl_.reset();
} else {
TF_RETURN_IF_ERROR(RestoreInput(ctx, reader, input_impl_));
}
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kFilteredElements, &filtered_elements_));
TF_RETURN_IF_ERROR(
reader->ReadScalar(prefix(), kDroppedElements, &dropped_elements_));
return absl::OkStatus();
}
data::TraceMeMetadata GetTraceMeMetadata() const override {
tf_shared_lock l(mu_);
data::TraceMeMetadata result;
result.push_back(std::make_pair(
"passed",
strings::Printf("%lld", static_cast<long long>(filtered_elements_))));
result.push_back(std::make_pair(
"filtered",
strings::Printf("%lld", static_cast<long long>(dropped_elements_))));
return result;
}
private:
mutable mutex mu_;
std::unique_ptr<IteratorBase> input_impl_ TF_GUARDED_BY(mu_);
int64_t filtered_elements_ TF_GUARDED_BY(mu_);
int64_t dropped_elements_ TF_GUARDED_BY(mu_);
std::unique_ptr<InstantiatedCapturedFunction> instantiated_captured_func_;
};
const DatasetBase* const input_;
const std::unique_ptr<CapturedFunction> captured_func_;
};
FilterDatasetOp::FilterDatasetOp(OpKernelConstruction* ctx)
: UnaryDatasetOpKernel(ctx) {
OP_REQUIRES_OK(ctx, FunctionMetadata::Create(ctx, kPredicate, {},
&func_metadata_));
OP_REQUIRES(ctx, func_metadata_->short_circuit_info().indices.size() <= 1,
errors::InvalidArgument(
"predicate function has more than one return value."));
}
void FilterDatasetOp::MakeDataset(OpKernelContext* ctx, DatasetBase* input,
DatasetBase** output) {
std::unique_ptr<CapturedFunction> captured_func;
OP_REQUIRES_OK(ctx,
CapturedFunction::Create(ctx, func_metadata_, kOtherArguments,
&captured_func));
*output = new Dataset(ctx, input, std::move(captured_func));
}
namespace {
REGISTER_KERNEL_BUILDER(Name("FilterDataset").Device(DEVICE_CPU),
FilterDatasetOp);
REGISTER_INPUT_COLOCATION_EXEMPTION("FilterDataset");
}
}
} | #include "tensorflow/core/kernels/data/filter_dataset_op.h"
#include "tensorflow/core/data/dataset_test_base.h"
namespace tensorflow {
namespace data {
namespace {
constexpr char kNodeName[] = "filter_dataset";
class FilterDatasetParams : public DatasetParams {
public:
template <typename T>
FilterDatasetParams(T input_dataset_params,
std::vector<Tensor> other_arguments,
FunctionDefHelper::AttrValueWrapper pred_func,
std::vector<FunctionDef> func_lib,
DataTypeVector type_arguments,
DataTypeVector output_dtypes,
std::vector<PartialTensorShape> output_shapes,
string node_name)
: DatasetParams(std::move(output_dtypes), std::move(output_shapes),
std::move(node_name)),
other_arguments_(std::move(other_arguments)),
pred_func_(std::move(pred_func)),
func_lib_(std::move(func_lib)),
type_arguments_(std::move(type_arguments)) {
input_dataset_params_.push_back(std::make_unique<T>(input_dataset_params));
iterator_prefix_ =
name_utils::IteratorPrefix(input_dataset_params.dataset_type(),
input_dataset_params.iterator_prefix());
}
std::vector<Tensor> GetInputTensors() const override {
return other_arguments_;
}
Status GetInputNames(std::vector<string>* input_names) const override {
input_names->clear();
input_names->reserve(input_dataset_params_.size() +
other_arguments_.size());
input_names->emplace_back(FilterDatasetOp::kInputDataset);
for (int i = 0; i < other_arguments_.size(); ++i) {
input_names->emplace_back(
absl::StrCat(FilterDatasetOp::kOtherArguments, "_", i));
}
return absl::OkStatus();
}
Status GetAttributes(AttributeVector* attr_vector) const override {
*attr_vector = {{"predicate", pred_func_},
{"Targuments", type_arguments_},
{"output_shapes", output_shapes_},
{"output_types", output_dtypes_},
{"metadata", ""}};
return absl::OkStatus();
}
std::vector<FunctionDef> func_lib() const override { return func_lib_; }
string dataset_type() const override { return FilterDatasetOp::kDatasetType; }
private:
std::vector<Tensor> other_arguments_;
FunctionDefHelper::AttrValueWrapper pred_func_;
std::vector<FunctionDef> func_lib_;
DataTypeVector type_arguments_;
};
class FilterDatasetOpTest : public DatasetOpsTestBase {};
FilterDatasetParams FilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9, 1}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({1})},
kNodeName);
}
FilterDatasetParams FilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{0}, {})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
FilterDatasetParams InvalidPredFuncFilterDatasetParams1() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("GetUnique",
{{"T", DT_INT64}, {"out_idx", DT_INT32}}),
{test::function::Unique()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
FilterDatasetParams InvalidPredFuncFilterDatasetParams2() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{3, 3, 1},
{0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("IsZero", {{"T", DT_INT64}}),
{test::function::IsZero()},
{},
{DT_INT64},
{PartialTensorShape({3, 1})},
kNodeName);
}
FilterDatasetParams InvalidPredFuncFilterDatasetParams3() {
auto tensor_slice_dataset_params = TensorSliceDatasetParams(
{CreateTensor<int64_t>(TensorShape{9}, {0, 0, 0, 3, 4, 5, 6, 7, 8})},
"tensor_slice_dataset");
return FilterDatasetParams(
std::move(tensor_slice_dataset_params),
{},
FunctionDefHelper::FunctionRef("NonZero", {{"T", DT_INT64}}),
{test::function::NonZero()},
{},
{DT_INT64},
{PartialTensorShape({})},
kNodeName);
}
std::vector<GetNextTestCase<FilterDatasetParams>> GetNextTestCases() {
return {{FilterDatasetParams1(),
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{FilterDatasetParams2(),
{}}};
}
ITERATOR_GET_NEXT_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
GetNextTestCases())
TEST_F(FilterDatasetOpTest, DatasetNodeName) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetNodeName(dataset_params.node_name()));
}
TEST_F(FilterDatasetOpTest, DatasetTypeString) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetTypeString(
name_utils::OpName(FilterDatasetOp::kDatasetType)));
}
TEST_F(FilterDatasetOpTest, DatasetOutputDtypes) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckDatasetOutputDtypes({DT_INT64}));
}
std::vector<DatasetOutputShapesTestCase<FilterDatasetParams>>
DatasetOutputShapesTestCases() {
return {{FilterDatasetParams1(),
{PartialTensorShape({1})}},
{FilterDatasetParams2(),
{PartialTensorShape({})}}};
}
DATASET_OUTPUT_SHAPES_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
DatasetOutputShapesTestCases())
std::vector<CardinalityTestCase<FilterDatasetParams>> CardinalityTestCases() {
return {{FilterDatasetParams1(),
kUnknownCardinality},
{FilterDatasetParams2(),
kUnknownCardinality}};
}
DATASET_CARDINALITY_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
CardinalityTestCases())
TEST_F(FilterDatasetOpTest, IteratorOutputDtypes) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorOutputDtypes({DT_INT64}));
}
std::vector<IteratorOutputShapesTestCase<FilterDatasetParams>>
IteratorOutputShapesTestCases() {
return {{FilterDatasetParams1(),
{PartialTensorShape({1})}},
{FilterDatasetParams2(),
{PartialTensorShape({})}}};
}
ITERATOR_OUTPUT_SHAPES_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
IteratorOutputShapesTestCases())
TEST_F(FilterDatasetOpTest, IteratorPrefix) {
auto dataset_params = FilterDatasetParams1();
TF_ASSERT_OK(Initialize(dataset_params));
TF_ASSERT_OK(CheckIteratorPrefix(name_utils::IteratorPrefix(
FilterDatasetOp::kDatasetType, dataset_params.iterator_prefix())));
}
std::vector<IteratorSaveAndRestoreTestCase<FilterDatasetParams>>
IteratorSaveAndRestoreTestCases() {
return {{FilterDatasetParams1(),
{0, 2, 6},
CreateTensors<int64_t>(TensorShape({1}), {{0}, {0}, {0}})},
{FilterDatasetParams2(),
{0, 2, 6},
{}}};
}
ITERATOR_SAVE_AND_RESTORE_TEST_P(FilterDatasetOpTest, FilterDatasetParams,
IteratorSaveAndRestoreTestCases())
class ParameterizedInvalidPredicateFuncTest
: public FilterDatasetOpTest,
public ::testing::WithParamInterface<FilterDatasetParams> {};
TEST_P(ParameterizedInvalidPredicateFuncTest, InvalidPredicateFunc) {
auto dataset_params = GetParam();
TF_ASSERT_OK(Initialize(dataset_params));
bool end_of_sequence = false;
std::vector<Tensor> out_tensors;
EXPECT_EQ(
iterator_->GetNext(iterator_ctx_.get(), &out_tensors, &end_of_sequence)
.code(),
absl::StatusCode::kInvalidArgument);
EXPECT_TRUE(out_tensors.empty());
}
INSTANTIATE_TEST_SUITE_P(
FilterDatasetOpTest, ParameterizedInvalidPredicateFuncTest,
::testing::ValuesIn({InvalidPredFuncFilterDatasetParams1(),
InvalidPredFuncFilterDatasetParams2(),
InvalidPredFuncFilterDatasetParams3()}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/filter_dataset_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/data/filter_dataset_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
bd4ce84d-8c2b-4f27-a3b4-90b861b3d323 | cpp | google/tensorstore | constant_bit_vector | tensorstore/util/constant_bit_vector.h | tensorstore/util/constant_bit_vector_test.cc | #ifndef TENSORSTORE_UTIL_CONSTANT_BIT_VECTOR_H_
#define TENSORSTORE_UTIL_CONSTANT_BIT_VECTOR_H_
#include <cstddef>
#include <type_traits>
#include "tensorstore/util/bit_span.h"
#include "tensorstore/util/constant_vector.h"
namespace tensorstore {
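/// Returns a bit vector of length `Length` (or runtime `length`) whose bits
/// all equal `value`, backed by a shared array of constant blocks.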
template <typename Block, bool value, std::ptrdiff_t Length>
constexpr BitSpan<const Block, Length> GetConstantBitVector(
std::integral_constant<std::ptrdiff_t, Length> = {}) {
return {GetConstantVector<
Block, (value ? ~static_cast<Block>(0) : static_cast<Block>(0)),
BitVectorSizeInBlocks<Block>(Length)>()
.data(),
0, Length};
}
template <typename Block, bool value>
BitSpan<const Block> GetConstantBitVector(std::ptrdiff_t length) {
return {GetConstantVector<Block, (value ? ~static_cast<Block>(0)
: static_cast<Block>(0))>(
BitVectorSizeInBlocks<Block>(length))
.data(),
0, length};
}
}
#endif | #include "tensorstore/util/constant_bit_vector.h"
#include <cstdint>
#include <type_traits>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorstore/util/bit_span.h"
namespace {
using ::tensorstore::BitSpan;
using ::tensorstore::GetConstantBitVector;
TEST(GetConstantBitVectorTest, StaticExtentFalse) {
constexpr auto v = GetConstantBitVector<uint64_t, false, 113>();
static_assert(
std::is_same_v<decltype(v), const BitSpan<const uint64_t, 113>>);
EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(113, false)));
}
TEST(GetConstantBitVectorTest, StaticExtentTrue) {
constexpr auto v = GetConstantBitVector<uint64_t, true, 113>();
static_assert(
std::is_same_v<decltype(v), const BitSpan<const uint64_t, 113>>);
EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(113, true)));
}
TEST(GetConstantBitVectorTest, DynamicExtentFalse) {
auto v = GetConstantBitVector<uint64_t, false>(113);
static_assert(std::is_same_v<decltype(v), BitSpan<const uint64_t>>);
EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(113, false)));
}
TEST(GetConstantBitVectorTest, DynamicExtentTrue) {
auto v = GetConstantBitVector<uint64_t, true>(113);
static_assert(std::is_same_v<decltype(v), BitSpan<const uint64_t>>);
EXPECT_THAT(v, ::testing::ElementsAreArray(std::vector<bool>(113, true)));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/constant_bit_vector.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/util/constant_bit_vector_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
c7a66d7d-dadc-40c4-8d3c-c32222c567aa | cpp | tensorflow/tensorflow | xla_debug_info_manager | third_party/xla/xla/service/xla_debug_info_manager.cc | third_party/xla/xla/service/xla_debug_info_manager_test.cc | #include "xla/service/xla_debug_info_manager.h"
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_proto_util.h"
namespace xla {
void XlaDebugInfoManager::RegisterModule(
std::shared_ptr<const HloModule> hlo_module,
BufferAssignmentProto buffer_assignment) {
CHECK(hlo_module != nullptr);
absl::MutexLock lock(&mutex_);
auto result = modules_.try_emplace(hlo_module->unique_id());
CHECK(result.second);
XlaModuleEntry& m = result.first->second;
m.hlo_module = std::move(hlo_module);
m.buffer_assignment = std::move(buffer_assignment);
m.active = true;
}
void XlaDebugInfoManager::UnregisterModule(ModuleIdentifier module_id) {
absl::MutexLock lock(&mutex_);
auto it = modules_.find(module_id);
CHECK(it != modules_.end());
if (!tracing_active_) {
modules_.erase(it);
} else {
XlaModuleEntry& m = it->second;
m.active = false;
}
}
void XlaDebugInfoManager::StartTracing() {
absl::MutexLock lock(&mutex_);
tracing_active_ = true;
}
void XlaDebugInfoManager::StopTracing(
std::vector<std::unique_ptr<HloProto>>* module_debug_info) {
std::vector<XlaModuleEntry> modules_to_serialize;
{
absl::MutexLock lock(&mutex_);
if (!tracing_active_) return;
tracing_active_ = false;
modules_to_serialize.reserve(modules_.size());
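    // Modules that were unregistered while tracing was active (inactive)
    // are moved out and erased; still-active modules are copied so they
    // remain registered for subsequent traces.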
for (auto it = modules_.begin(); it != modules_.end();) {
auto& m = it->second;
auto cur_it = it++;
if (!m.active) {
modules_to_serialize.emplace_back(std::move(m));
modules_.erase(cur_it);
} else {
modules_to_serialize.emplace_back(m);
}
}
}
if (module_debug_info) {
module_debug_info->clear();
for (const auto& m : modules_to_serialize) {
auto hlo_proto = std::make_unique<HloProto>(MakeHloProto(*m.hlo_module));
*hlo_proto->mutable_buffer_assignment() = m.buffer_assignment;
module_debug_info->emplace_back(std::move(hlo_proto));
}
}
}
bool XlaDebugInfoManager::TracksModule(ModuleIdentifier module_id) const {
absl::MutexLock lock(&mutex_);
return modules_.find(module_id) != modules_.end();
}
} | #include "xla/service/xla_debug_info_manager.h"
#include <memory>
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
class XlaDebugInfoManagerTestPeer {
public:
void RegisterModule(std::shared_ptr<const HloModule> hlo_module,
BufferAssignmentProto buffer_assignment) {
return xla_debug_info_manager_.RegisterModule(hlo_module,
std::move(buffer_assignment));
}
void UnregisterModule(ModuleIdentifier module_id) {
return xla_debug_info_manager_.UnregisterModule(module_id);
}
void StartTracing() { return xla_debug_info_manager_.StartTracing(); }
absl::flat_hash_set<ModuleIdentifier> StopTracing() {
std::vector<std::unique_ptr<HloProto>> module_debug_info;
xla_debug_info_manager_.StopTracing(&module_debug_info);
absl::flat_hash_set<ModuleIdentifier> module_ids;
for (const auto& hlo_proto : module_debug_info) {
module_ids.insert(hlo_proto->hlo_module().id());
}
return module_ids;
}
absl::flat_hash_set<ModuleIdentifier> GetModuleIds() {
absl::flat_hash_set<ModuleIdentifier> module_ids;
absl::MutexLock lock(&xla_debug_info_manager_.mutex_);
for (const auto& it : xla_debug_info_manager_.modules_) {
module_ids.insert(it.first);
}
return module_ids;
}
private:
XlaDebugInfoManager xla_debug_info_manager_;
};
namespace {
using ::testing::IsEmpty;
using ::testing::UnorderedElementsAre;
class XlaDebugInfoManagerTest : public HloTestBase {
protected:
struct DebugMetadata {
ModuleIdentifier unique_id;
std::shared_ptr<HloModule> module;
};
ModuleIdentifier RegisterProgram(const std::string& module_name) {
DebugMetadata debug_info;
HloModuleConfig config;
debug_info.module = std::make_shared<HloModule>(module_name, config);
ModuleIdentifier unique_id = debug_info.module->unique_id();
debug_info.unique_id = unique_id;
xla_debug_info_manager_.RegisterModule(debug_info.module,
BufferAssignmentProto());
external_references_.push_back(std::move(debug_info));
return unique_id;
}
void UnregisterProgram(ModuleIdentifier unique_id) {
for (int i = 0; i < external_references_.size(); i++) {
if (external_references_[i].unique_id == unique_id) {
xla_debug_info_manager_.UnregisterModule(unique_id);
external_references_.erase(external_references_.begin() + i);
break;
}
}
}
absl::flat_hash_set<ModuleIdentifier> GetModuleIds() {
return xla_debug_info_manager_.GetModuleIds();
}
void StartTrace() { xla_debug_info_manager_.StartTracing(); }
absl::flat_hash_set<ModuleIdentifier> StopTrace() {
return xla_debug_info_manager_.StopTracing();
}
std::vector<DebugMetadata> external_references_;
XlaDebugInfoManagerTestPeer xla_debug_info_manager_;
};
TEST_F(XlaDebugInfoManagerTest, NoTraceBasic) {
auto program0 = RegisterProgram("program0");
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0));
auto program1 = RegisterProgram("program1");
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0, program1));
UnregisterProgram(program0);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program1));
UnregisterProgram(program1);
EXPECT_TRUE(GetModuleIds().empty());
}
TEST_F(XlaDebugInfoManagerTest, NoTraceDuplicateIds) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
EXPECT_THAT(GetModuleIds(),
UnorderedElementsAre(program0A, program0B, program1));
UnregisterProgram(program1);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0A, program0B));
UnregisterProgram(program0A);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0B));
UnregisterProgram(program0B);
EXPECT_THAT(GetModuleIds(), IsEmpty());
}
TEST_F(XlaDebugInfoManagerTest, ActiveTrace) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
StartTrace();
auto program2 = RegisterProgram("program2");
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1, program2));
StartTrace();
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1, program2));
UnregisterProgram(program2);
EXPECT_THAT(GetModuleIds(),
UnorderedElementsAre(program0A, program0B, program1));
UnregisterProgram(program0A);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0B, program1));
UnregisterProgram(program0B);
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program1));
UnregisterProgram(program1);
EXPECT_THAT(GetModuleIds(), IsEmpty());
}
TEST_F(XlaDebugInfoManagerTest, UnregisterDuringTrace) {
auto program0A = RegisterProgram("program0");
auto program0B = RegisterProgram("program0");
auto program1 = RegisterProgram("program1");
StartTrace();
UnregisterProgram(program1);
UnregisterProgram(program0B);
EXPECT_THAT(StopTrace(),
UnorderedElementsAre(program0A, program0B, program1));
EXPECT_THAT(GetModuleIds(), UnorderedElementsAre(program0A));
UnregisterProgram(program0A);
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/xla_debug_info_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/xla_debug_info_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
fd3e1b7e-aead-499d-bda0-cf8d606beb95 | cpp | tensorflow/tensorflow | guarantee_const_op | tensorflow/core/kernels/guarantee_const_op.cc | tensorflow/core/kernels/guarantee_const_op_test.cc | #include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/lib/core/errors.h"
namespace tensorflow {
namespace {
class GuaranteeConstOp : public OpKernel {
public:
explicit GuaranteeConstOp(OpKernelConstruction* ctx) : OpKernel(ctx) {}
void Compute(OpKernelContext* ctx) override {
const DataType input_dtype = ctx->input_dtype(0);
OP_REQUIRES(ctx, input_dtype != DT_RESOURCE,
errors::InvalidArgument(
"Input tensor cannot be a resource variable handle."));
const Tensor& input_tensor = ctx->input(0);
Tensor* output = nullptr;
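    // Forward the input buffer to the output when possible; otherwise
    // make the output alias the input tensor.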
if (!ctx->forward_input_to_output_with_shape(0, 0, input_tensor.shape(),
&output)) {
ctx->set_output(0, input_tensor);
}
}
bool IsExpensive() override { return false; }
};
REGISTER_KERNEL_BUILDER(Name("GuaranteeConst").Device(DEVICE_CPU),
GuaranteeConstOp);
}
} | #include "tensorflow/core/framework/fake_input.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/kernels/ops_testutil.h"
#include "tensorflow/core/kernels/ops_util.h"
#include "tensorflow/core/kernels/variable_ops.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/lib/strings/strcat.h"
#include "tensorflow/core/platform/test.h"
namespace tensorflow {
namespace {
class GuaranteeConstOpTest : public OpsTestBase {
protected:
Status Init(DataType input_type) {
TF_CHECK_OK(NodeDefBuilder("op", "GuaranteeConst")
.Input(FakeInput(input_type))
.Finalize(node_def()));
return InitOp();
}
};
TEST_F(GuaranteeConstOpTest, Int32Success_6) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({6}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({6}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(GuaranteeConstOpTest, Int32Success_2_3) {
TF_ASSERT_OK(Init(DT_INT32));
AddInputFromArray<int32>(TensorShape({2, 3}), {1, 2, 3, 4, 5, 6});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_INT32, TensorShape({2, 3}));
test::FillValues<int32>(&expected, {1, 2, 3, 4, 5, 6});
test::ExpectTensorEqual<int32>(expected, *GetOutput(0));
}
TEST_F(GuaranteeConstOpTest, StringSuccess) {
TF_ASSERT_OK(Init(DT_STRING));
AddInputFromArray<tstring>(TensorShape({6}), {"A", "b", "C", "d", "E", "f"});
TF_ASSERT_OK(RunOpKernel());
Tensor expected(allocator(), DT_STRING, TensorShape({6}));
test::FillValues<tstring>(&expected, {"A", "b", "C", "d", "E", "f"});
test::ExpectTensorEqual<tstring>(expected, *GetOutput(0));
}
TEST_F(GuaranteeConstOpTest, ResourceInputError) {
TF_ASSERT_OK(Init(DT_RESOURCE));
AddResourceInput("", "resource", new Var(DT_INT32));
const auto status = RunOpKernel();
ASSERT_EQ(error::INVALID_ARGUMENT, status.code());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/guarantee_const_op.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/kernels/guarantee_const_op_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
6d7c5496-6c39-4052-b4be-06ee7b55571d | cpp | google/cel-cpp | matchers | eval/public/testing/matchers.cc | eval/public/testing/matchers_test.cc | #include "eval/public/testing/matchers.h"
#include <ostream>
#include <utility>
#include "google/protobuf/message.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/strings/string_view.h"
#include "eval/public/set_util.h"
#include "internal/casts.h"
namespace google::api::expr::runtime {
void PrintTo(const CelValue& value, std::ostream* os) {
*os << value.DebugString();
}
namespace test {
namespace {
using ::testing::_;
using ::testing::MatcherInterface;
using ::testing::MatchResultListener;
class CelValueEqualImpl : public MatcherInterface<CelValue> {
public:
explicit CelValueEqualImpl(const CelValue& v) : value_(v) {}
bool MatchAndExplain(CelValue arg,
MatchResultListener* listener) const override {
return CelValueEqual(arg, value_);
}
void DescribeTo(std::ostream* os) const override {
*os << value_.DebugString();
}
private:
const CelValue& value_;
};
template <typename UnderlyingType>
class CelValueMatcherImpl : public testing::MatcherInterface<const CelValue&> {
public:
explicit CelValueMatcherImpl(testing::Matcher<UnderlyingType> m)
: underlying_type_matcher_(std::move(m)) {}
bool MatchAndExplain(const CelValue& v,
testing::MatchResultListener* listener) const override {
UnderlyingType arg;
return v.GetValue(&arg) && underlying_type_matcher_.Matches(arg);
}
void DescribeTo(std::ostream* os) const override {
CelValue::Type type =
static_cast<CelValue::Type>(CelValue::IndexOf<UnderlyingType>::value);
*os << absl::StrCat("type is ", CelValue::TypeName(type), " and ");
underlying_type_matcher_.DescribeTo(os);
}
private:
const testing::Matcher<UnderlyingType> underlying_type_matcher_;
};
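// Specialization for protobuf messages: matches only CelValues that are
// backed by a full (non-lite) proto message.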
template <>
class CelValueMatcherImpl<const google::protobuf::Message*>
: public testing::MatcherInterface<const CelValue&> {
public:
explicit CelValueMatcherImpl(testing::Matcher<const google::protobuf::Message*> m)
: underlying_type_matcher_(std::move(m)) {}
bool MatchAndExplain(const CelValue& v,
testing::MatchResultListener* listener) const override {
CelValue::MessageWrapper arg;
return v.GetValue(&arg) && arg.HasFullProto() &&
underlying_type_matcher_.Matches(
cel::internal::down_cast<const google::protobuf::Message*>(
arg.message_ptr()));
}
void DescribeTo(std::ostream* os) const override {
*os << absl::StrCat("type is ",
CelValue::TypeName(CelValue::Type::kMessage), " and ");
underlying_type_matcher_.DescribeTo(os);
}
private:
const testing::Matcher<const google::protobuf::Message*> underlying_type_matcher_;
};
}
CelValueMatcher EqualsCelValue(const CelValue& v) {
return CelValueMatcher(new CelValueEqualImpl(v));
}
CelValueMatcher IsCelNull() {
return CelValueMatcher(new CelValueMatcherImpl<CelValue::NullType>(_));
}
CelValueMatcher IsCelBool(testing::Matcher<bool> m) {
return CelValueMatcher(new CelValueMatcherImpl<bool>(std::move(m)));
}
CelValueMatcher IsCelInt64(testing::Matcher<int64_t> m) {
return CelValueMatcher(new CelValueMatcherImpl<int64_t>(std::move(m)));
}
CelValueMatcher IsCelUint64(testing::Matcher<uint64_t> m) {
return CelValueMatcher(new CelValueMatcherImpl<uint64_t>(std::move(m)));
}
CelValueMatcher IsCelDouble(testing::Matcher<double> m) {
return CelValueMatcher(new CelValueMatcherImpl<double>(std::move(m)));
}
CelValueMatcher IsCelString(testing::Matcher<absl::string_view> m) {
return CelValueMatcher(new CelValueMatcherImpl<CelValue::StringHolder>(
testing::Property(&CelValue::StringHolder::value, m)));
}
CelValueMatcher IsCelBytes(testing::Matcher<absl::string_view> m) {
return CelValueMatcher(new CelValueMatcherImpl<CelValue::BytesHolder>(
testing::Property(&CelValue::BytesHolder::value, m)));
}
CelValueMatcher IsCelMessage(testing::Matcher<const google::protobuf::Message*> m) {
return CelValueMatcher(
new CelValueMatcherImpl<const google::protobuf::Message*>(std::move(m)));
}
CelValueMatcher IsCelDuration(testing::Matcher<absl::Duration> m) {
return CelValueMatcher(new CelValueMatcherImpl<absl::Duration>(std::move(m)));
}
CelValueMatcher IsCelTimestamp(testing::Matcher<absl::Time> m) {
return CelValueMatcher(new CelValueMatcherImpl<absl::Time>(std::move(m)));
}
CelValueMatcher IsCelError(testing::Matcher<absl::Status> m) {
return CelValueMatcher(
new CelValueMatcherImpl<const google::api::expr::runtime::CelError*>(
testing::AllOf(testing::NotNull(), testing::Pointee(m))));
}
}
} | #include "eval/public/testing/matchers.h"
#include "absl/status/status.h"
#include "absl/time/time.h"
#include "eval/public/containers/container_backed_list_impl.h"
#include "eval/public/structs/cel_proto_wrapper.h"
#include "eval/testutil/test_message.pb.h"
#include "internal/testing.h"
#include "testutil/util.h"
namespace google::api::expr::runtime::test {
namespace {
using ::testing::Contains;
using ::testing::DoubleEq;
using ::testing::DoubleNear;
using ::testing::ElementsAre;
using ::testing::Gt;
using ::testing::Lt;
using ::testing::Not;
using ::testing::UnorderedElementsAre;
using testutil::EqualsProto;
TEST(IsCelValue, EqualitySmoketest) {
EXPECT_THAT(CelValue::CreateBool(true),
EqualsCelValue(CelValue::CreateBool(true)));
EXPECT_THAT(CelValue::CreateInt64(-1),
EqualsCelValue(CelValue::CreateInt64(-1)));
EXPECT_THAT(CelValue::CreateUint64(2),
EqualsCelValue(CelValue::CreateUint64(2)));
EXPECT_THAT(CelValue::CreateDouble(1.25),
EqualsCelValue(CelValue::CreateDouble(1.25)));
EXPECT_THAT(CelValue::CreateStringView("abc"),
EqualsCelValue(CelValue::CreateStringView("abc")));
EXPECT_THAT(CelValue::CreateBytesView("def"),
EqualsCelValue(CelValue::CreateBytesView("def")));
EXPECT_THAT(CelValue::CreateDuration(absl::Seconds(2)),
EqualsCelValue(CelValue::CreateDuration(absl::Seconds(2))));
EXPECT_THAT(
CelValue::CreateTimestamp(absl::FromUnixSeconds(1)),
EqualsCelValue(CelValue::CreateTimestamp(absl::FromUnixSeconds(1))));
EXPECT_THAT(CelValue::CreateInt64(-1),
Not(EqualsCelValue(CelValue::CreateBool(true))));
EXPECT_THAT(CelValue::CreateUint64(2),
Not(EqualsCelValue(CelValue::CreateInt64(-1))));
EXPECT_THAT(CelValue::CreateDouble(1.25),
Not(EqualsCelValue(CelValue::CreateUint64(2))));
EXPECT_THAT(CelValue::CreateStringView("abc"),
Not(EqualsCelValue(CelValue::CreateDouble(1.25))));
EXPECT_THAT(CelValue::CreateBytesView("def"),
Not(EqualsCelValue(CelValue::CreateStringView("abc"))));
EXPECT_THAT(CelValue::CreateDuration(absl::Seconds(2)),
Not(EqualsCelValue(CelValue::CreateBytesView("def"))));
EXPECT_THAT(CelValue::CreateTimestamp(absl::FromUnixSeconds(1)),
Not(EqualsCelValue(CelValue::CreateDuration(absl::Seconds(2)))));
EXPECT_THAT(
CelValue::CreateBool(true),
Not(EqualsCelValue(CelValue::CreateTimestamp(absl::FromUnixSeconds(1)))));
}
TEST(PrimitiveMatchers, Smoketest) {
EXPECT_THAT(CelValue::CreateNull(), IsCelNull());
EXPECT_THAT(CelValue::CreateBool(false), Not(IsCelNull()));
EXPECT_THAT(CelValue::CreateBool(true), IsCelBool(true));
EXPECT_THAT(CelValue::CreateBool(false), IsCelBool(Not(true)));
EXPECT_THAT(CelValue::CreateInt64(1), IsCelInt64(1));
EXPECT_THAT(CelValue::CreateInt64(-1), IsCelInt64(Not(Gt(0))));
EXPECT_THAT(CelValue::CreateUint64(1), IsCelUint64(1));
EXPECT_THAT(CelValue::CreateUint64(2), IsCelUint64(Not(Lt(2))));
EXPECT_THAT(CelValue::CreateDouble(1.5), IsCelDouble(DoubleEq(1.5)));
EXPECT_THAT(CelValue::CreateDouble(1.0 + 0.8),
IsCelDouble(DoubleNear(1.8, 1e-5)));
EXPECT_THAT(CelValue::CreateStringView("abc"), IsCelString("abc"));
EXPECT_THAT(CelValue::CreateStringView("abcdef"),
IsCelString(testing::HasSubstr("def")));
EXPECT_THAT(CelValue::CreateBytesView("abc"), IsCelBytes("abc"));
EXPECT_THAT(CelValue::CreateBytesView("abcdef"),
IsCelBytes(testing::HasSubstr("def")));
EXPECT_THAT(CelValue::CreateDuration(absl::Seconds(2)),
IsCelDuration(Lt(absl::Minutes(1))));
EXPECT_THAT(CelValue::CreateTimestamp(absl::FromUnixSeconds(20)),
IsCelTimestamp(Lt(absl::FromUnixSeconds(30))));
}
TEST(PrimitiveMatchers, WrongType) {
EXPECT_THAT(CelValue::CreateBool(true), Not(IsCelInt64(1)));
EXPECT_THAT(CelValue::CreateInt64(1), Not(IsCelUint64(1)));
EXPECT_THAT(CelValue::CreateUint64(1), Not(IsCelDouble(1.0)));
EXPECT_THAT(CelValue::CreateDouble(1.5), Not(IsCelString("abc")));
EXPECT_THAT(CelValue::CreateStringView("abc"), Not(IsCelBytes("abc")));
EXPECT_THAT(CelValue::CreateBytesView("abc"),
Not(IsCelDuration(Lt(absl::Minutes(1)))));
EXPECT_THAT(CelValue::CreateDuration(absl::Seconds(2)),
Not(IsCelTimestamp(Lt(absl::FromUnixSeconds(30)))));
EXPECT_THAT(CelValue::CreateTimestamp(absl::FromUnixSeconds(20)),
Not(IsCelBool(true)));
}
TEST(SpecialMatchers, SmokeTest) {
auto status = absl::InternalError("error");
CelValue error = CelValue::CreateError(&status);
EXPECT_THAT(error, IsCelError(testing::Eq(
absl::Status(absl::StatusCode::kInternal, "error"))));
TestMessage proto_message;
proto_message.add_bool_list(true);
proto_message.add_bool_list(false);
proto_message.add_int64_list(1);
proto_message.add_int64_list(-1);
CelValue message = CelProtoWrapper::CreateMessage(&proto_message, nullptr);
EXPECT_THAT(message, IsCelMessage(EqualsProto(proto_message)));
}
TEST(ListMatchers, NotList) {
EXPECT_THAT(CelValue::CreateInt64(1),
Not(IsCelList(Contains(IsCelInt64(1)))));
}
TEST(ListMatchers, All) {
ContainerBackedListImpl list({
CelValue::CreateInt64(1),
CelValue::CreateInt64(2),
CelValue::CreateInt64(3),
CelValue::CreateInt64(4),
});
CelValue cel_list = CelValue::CreateList(&list);
EXPECT_THAT(cel_list, IsCelList(Contains(IsCelInt64(3))));
EXPECT_THAT(cel_list, IsCelList(Not(Contains(IsCelInt64(0)))));
EXPECT_THAT(cel_list, IsCelList(ElementsAre(IsCelInt64(1), IsCelInt64(2),
IsCelInt64(3), IsCelInt64(4))));
EXPECT_THAT(cel_list,
IsCelList(Not(ElementsAre(IsCelInt64(2), IsCelInt64(1),
IsCelInt64(3), IsCelInt64(4)))));
EXPECT_THAT(cel_list,
IsCelList(UnorderedElementsAre(IsCelInt64(2), IsCelInt64(1),
IsCelInt64(4), IsCelInt64(3))));
EXPECT_THAT(
cel_list,
IsCelList(Not(UnorderedElementsAre(IsCelInt64(2), IsCelInt64(1),
IsCelInt64(4), IsCelInt64(0)))));
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/testing/matchers.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/testing/matchers_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
686813ec-b120-46dd-8c6e-b174ae9ae39a | cpp | tensorflow/tensorflow | op_converter_registry | tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.cc | tensorflow/compiler/tf2tensorrt/convert/op_converter_registry_test.cc | #include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include <set>
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_cat.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mutex.h"
#include "tensorflow/core/util/env_var.h"
#if GOOGLE_CUDA && GOOGLE_TENSORRT
namespace tensorflow {
namespace tensorrt {
namespace convert {
struct OpConverterRegistration {
OpConverter converter;
int priority;
};
class OpConverterRegistry::Impl {
public:
~Impl() = default;
InitOnStartupMarker Register(const string& name, const int priority,
OpConverter converter) {
mutex_lock lock(mu_);
auto item = registry_.find(name);
if (item != registry_.end()) {
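      // A converter is already registered under this name; keep the one
      // with the higher priority.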
const int existing_priority = item->second.priority;
if (priority <= existing_priority) {
LOG(WARNING) << absl::StrCat(
"Ignoring TF->TRT ", name, " op converter with priority ",
existing_priority, " due to another converter with priority ",
priority);
return {};
} else {
LOG(WARNING) << absl::StrCat(
"Overwriting TF->TRT ", name, " op converter with priority ",
existing_priority, " using another converter with priority ",
priority);
registry_.erase(item);
}
}
registry_.insert({name, OpConverterRegistration{converter, priority}});
return {};
}
StatusOr<OpConverter> LookUp(string name) {
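    // Ops listed in the TF_TRT_OP_FAKELIST environment variable resolve to
    // the "FakeOp" converter, which deliberately fails TRT engine building;
    // this is intended only for graph-segmentation experiments.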
static const absl::flat_hash_set<string> tftrt_op_fakelist = [] {
string tftrt_op_fakelist_str;
TF_CHECK_OK(ReadStringFromEnvVar("TF_TRT_OP_FAKELIST",
"",
&tftrt_op_fakelist_str));
absl::flat_hash_set<string> tftrt_op_fakelist{};
for (const auto& x : str_util::Split(tftrt_op_fakelist_str, ",")) {
tftrt_op_fakelist.insert(x);
}
tftrt_op_fakelist.rehash(0);
return tftrt_op_fakelist;
}();
if (tftrt_op_fakelist.contains(name)) {
LOG_FIRST_N(INFO, 2) << "Emulating OP Converter: `" << name << "`. It "
<< "will cause TRT engine building to fail. This "
<< "feature is only intended to be used for "
<< "TF-TRT graph segmentation experiments. This "
<< "feature is controlled using: "
<< "`TF_TRT_OP_FAKELIST=OpName1,OpName2`.";
mutex_lock lock(mu_);
return registry_.find("FakeOp")->second.converter;
}
mutex_lock lock(mu_);
auto found = registry_.find(name);
if (found != registry_.end()) {
return found->second.converter;
}
return errors::NotFound("No converter for op ", name);
}
void Clear(const std::string& name) {
mutex_lock lock(mu_);
auto itr = registry_.find(name);
if (itr == registry_.end()) {
return;
}
registry_.erase(itr);
}
std::vector<std::string> ListRegisteredOps() const {
mutex_lock lock(mu_);
std::vector<std::string> result;
result.reserve(registry_.size());
for (const auto& item : registry_) {
result.push_back(item.first);
}
return result;
}
private:
mutable mutex mu_;
mutable std::unordered_map<std::string, OpConverterRegistration> registry_
TF_GUARDED_BY(mu_);
};
OpConverterRegistry::OpConverterRegistry() : impl_(std::make_unique<Impl>()) {}
StatusOr<OpConverter> OpConverterRegistry::LookUp(const string& name) {
return impl_->LookUp(name);
}
InitOnStartupMarker OpConverterRegistry::Register(const string& name,
const int priority,
OpConverter converter) {
return impl_->Register(name, priority, converter);
}
std::vector<std::string> OpConverterRegistry::ListRegisteredOps() const {
return impl_->ListRegisteredOps();
}
void OpConverterRegistry::Clear(const std::string& name) { impl_->Clear(name); }
OpConverterRegistry* GetOpConverterRegistry() {
static OpConverterRegistry* registry = new OpConverterRegistry();
return registry;
}
}
}
}
#endif | #if GOOGLE_CUDA && GOOGLE_TENSORRT
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h"
#include <gtest/gtest.h>
#include "tensorflow/compiler/tf2tensorrt/convert/op_converter.h"
namespace tensorflow {
namespace tensorrt {
namespace convert {
TEST(TestOpConverterRegistry, TestOpConverterRegistry) {
bool flag{false};
auto set_true_func = [&flag](const OpConverterParams*) -> Status {
flag = true;
return OkStatus();
};
auto set_false_func = [&flag](const OpConverterParams*) -> Status {
flag = false;
return OkStatus();
};
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority,
set_true_func);
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority - 1,
set_false_func);
auto func = GetOpConverterRegistry()->LookUp("FakeFunc");
EXPECT_TRUE(func.ok());
EXPECT_TRUE(((*func)(nullptr)).ok());
EXPECT_TRUE(flag);
GetOpConverterRegistry()->Register("FakeFunc", kDefaultConverterPriority + 1,
set_false_func);
func = GetOpConverterRegistry()->LookUp("FakeFunc");
EXPECT_TRUE(func.ok());
EXPECT_TRUE((*func)(nullptr).ok());
EXPECT_FALSE(flag);
GetOpConverterRegistry()->Clear("FakeFunc");
EXPECT_FALSE(GetOpConverterRegistry()->LookUp("FakeFunc").ok());
}
}
}
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/tf2tensorrt/convert/op_converter_registry_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
21b3659f-bb74-437c-825a-b25b1a5f2f54 | cpp | google/arolla | operator_repr_functions | arolla/expr/operator_repr_functions.cc | arolla/expr/operator_repr_functions_test.cc | #include "arolla/expr/operator_repr_functions.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/qtype/base_types.h"
#include "arolla/qtype/qtype_traits.h"
#include "arolla/qtype/unspecified_qtype.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "arolla/util/string.h"
#include "arolla/util/text.h"
namespace arolla::expr {
namespace {
struct InfixOp {
enum Kind : int8_t { kUnary, kBinary } kind;
ReprToken::Precedence precedence;
absl::string_view symbol;
};
static const auto* const kUnaryInfixOps =
new absl::flat_hash_map<absl::string_view, InfixOp>{
{"math.pos", {InfixOp::kUnary, {1, 1}, "+"}},
{"math.neg", {InfixOp::kUnary, {1, 1}, "-"}},
{"core.presence_not", {InfixOp::kUnary, {1, 1}, "~"}},
};
static const auto* const kBinaryInfixOps =
new absl::flat_hash_map<absl::string_view, InfixOp>{
{"math.pow", {InfixOp::kBinary, {1, 2}, " ** "}},
{"math.multiply", {InfixOp::kBinary, {3, 2}, " * "}},
{"math.divide", {InfixOp::kBinary, {3, 2}, " / "}},
{"math.floordiv", {InfixOp::kBinary, {3, 2}, "
{"math.mod", {InfixOp::kBinary, {3, 2}, " % "}},
{"math.add", {InfixOp::kBinary, {5, 4}, " + "}},
{"math.subtract", {InfixOp::kBinary, {5, 4}, " - "}},
{"core.presence_and", {InfixOp::kBinary, {7, 6}, " & "}},
{"core.presence_or", {InfixOp::kBinary, {9, 8}, " | "}},
{"core.less", {InfixOp::kBinary, {10, 10}, " < "}},
{"core.less_equal", {InfixOp::kBinary, {10, 10}, " <= "}},
{"core.equal", {InfixOp::kBinary, {10, 10}, " == "}},
{"core.not_equal", {InfixOp::kBinary, {10, 10}, " != "}},
{"core.greater_equal", {InfixOp::kBinary, {10, 10}, " >= "}},
{"core.greater", {InfixOp::kBinary, {10, 10}, " > "}},
};
std::vector<const ReprToken*> GetNodeDepsTokens(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
std::vector<const ReprToken*> inputs(node->node_deps().size());
for (size_t i = 0; i < node->node_deps().size(); ++i) {
inputs[i] = &node_tokens.at(node->node_deps()[i]->fingerprint());
}
return inputs;
}
std::optional<ReprToken> UnaryReprFn(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
auto it = kUnaryInfixOps->find(node->op()->display_name());
const auto inputs = GetNodeDepsTokens(node, node_tokens);
if (it == kUnaryInfixOps->end() || inputs.size() != 1) {
return std::nullopt;
}
const auto& infix_op = it->second;
ReprToken result;
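  // Parenthesize the operand unless it already binds more tightly than
  // this unary operator.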
if (inputs[0]->precedence.left < infix_op.precedence.right) {
result.str = absl::StrCat(infix_op.symbol, inputs[0]->str);
} else {
result.str = absl::StrCat(infix_op.symbol, "(", inputs[0]->str, ")");
}
result.precedence.left = infix_op.precedence.left;
result.precedence.right = infix_op.precedence.right;
return result;
}
std::optional<ReprToken> BinaryReprFn(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
auto it = kBinaryInfixOps->find(node->op()->display_name());
const auto inputs = GetNodeDepsTokens(node, node_tokens);
if (it == kBinaryInfixOps->end() || inputs.size() != 2) {
return std::nullopt;
}
const auto& infix_op = it->second;
ReprToken result;
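  // Parenthesize each operand whose precedence does not bind tightly
  // enough against this operator on the adjoining side.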
const bool left_precedence =
(inputs[0]->precedence.right < infix_op.precedence.left);
const bool right_precedence =
(inputs[1]->precedence.left < infix_op.precedence.right);
if (left_precedence && right_precedence) {
result.str = absl::StrCat(inputs[0]->str, infix_op.symbol, inputs[1]->str);
} else if (left_precedence && !right_precedence) {
result.str =
absl::StrCat(inputs[0]->str, infix_op.symbol, "(", inputs[1]->str, ")");
} else if (!left_precedence && right_precedence) {
result.str =
absl::StrCat("(", inputs[0]->str, ")", infix_op.symbol, inputs[1]->str);
} else {
result.str = absl::StrCat("(", inputs[0]->str, ")", infix_op.symbol, "(",
inputs[1]->str, ")");
}
result.precedence.left = infix_op.precedence.left;
result.precedence.right = infix_op.precedence.right;
return result;
}
std::optional<ReprToken> GetAttrReprFn(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
DCHECK_EQ(node->op()->display_name(), "core.getattr");
constexpr ReprToken::Precedence kGetAttrPrecedence{0, -1};
const auto& node_deps = node->node_deps();
if (node_deps.size() != 2 || !node_deps[1]->is_literal()) {
return std::nullopt;
}
const auto& attr = node_deps[1]->qvalue();
if (!attr.has_value() || attr->GetType() != GetQType<Text>() ||
!IsIdentifier(attr->UnsafeAs<Text>().view())) {
return std::nullopt;
}
ReprToken result;
const auto inputs = GetNodeDepsTokens(node, node_tokens);
DCHECK_EQ(inputs.size(), 2);
if (inputs[0]->precedence.right < kGetAttrPrecedence.left) {
result.str =
absl::StrCat(inputs[0]->str, ".", attr->UnsafeAs<Text>().view());
} else {
result.str =
absl::StrCat("(", inputs[0]->str, ").", attr->UnsafeAs<Text>().view());
}
result.precedence = kGetAttrPrecedence;
return result;
}
std::optional<std::string> MakeSliceRepr(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
if (!IsRegisteredOperator(node->op()) ||
node->op()->display_name() != "core.make_slice") {
return std::nullopt;
}
auto is_unspecified = [](const ExprNodePtr& node) {
return node->is_literal() && node->qtype() == GetUnspecifiedQType();
};
constexpr ReprToken::Precedence kSlicePrecedence{11, 11};
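  // Renders core.make_slice(start, stop, step) as Python-style
  // "start:stop:step", omitting operands given as the unspecified literal.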
const auto& node_deps = node->node_deps();
if (node_deps.size() != 3) {
return std::nullopt;
}
std::string result;
const auto inputs = GetNodeDepsTokens(node, node_tokens);
DCHECK_EQ(inputs.size(), 3);
if (is_unspecified(node_deps[0])) {
result = ":";
} else if (inputs[0]->precedence.right < kSlicePrecedence.left) {
result = absl::StrCat(inputs[0]->str, ":");
} else {
result = absl::StrCat("(", inputs[0]->str, "):");
}
if (!is_unspecified(node_deps[1])) {
if (inputs[1]->precedence.left < kSlicePrecedence.right &&
(inputs[1]->precedence.right < kSlicePrecedence.left ||
is_unspecified(node_deps[2]))) {
absl::StrAppend(&result, inputs[1]->str);
} else {
absl::StrAppend(&result, "(", inputs[1]->str, ")");
}
}
if (!is_unspecified(node_deps[2])) {
if (inputs[2]->precedence.left < kSlicePrecedence.right) {
absl::StrAppend(&result, ":", inputs[2]->str);
} else {
absl::StrAppend(&result, ":(", inputs[2]->str, ")");
}
}
return result;
}
std::optional<ReprToken> GetItemReprFn(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
DCHECK_EQ(node->op()->display_name(), "core.getitem");
constexpr ReprToken::Precedence kGetItemPrecedence{0, -1};
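  // A subscript that is itself a core.make_slice call is rendered with
  // slice syntax, e.g. x[a:b] instead of a nested operator call.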
if (node->node_deps().size() != 2) {
return std::nullopt;
}
const auto& lhs = node_tokens.at(node->node_deps()[0]->fingerprint());
const auto maybe_slice = MakeSliceRepr(node->node_deps()[1], node_tokens);
const std::string& rhs_str =
maybe_slice ? *maybe_slice
: node_tokens.at(node->node_deps()[1]->fingerprint()).str;
ReprToken result;
if (lhs.precedence.right < kGetItemPrecedence.left) {
result.str = absl::StrCat(lhs.str, "[", rhs_str, "]");
} else {
result.str = absl::StrCat("(", lhs.str, ")[", rhs_str, "]");
}
result.precedence = kGetItemPrecedence;
return result;
}
class OpReprRegistry {
public:
void Set(std::string key, OperatorReprFn op_repr_fn)
ABSL_LOCKS_EXCLUDED(mutex_) {
absl::MutexLock lock(&mutex_);
registry_[std::move(key)] = std::move(op_repr_fn);
}
OperatorReprFn Get(absl::string_view key) const ABSL_LOCKS_EXCLUDED(mutex_) {
absl::MutexLock lock(&mutex_);
if (const auto it = registry_.find(key); it != registry_.end()) {
return it->second;
}
return nullptr;
}
private:
mutable absl::Mutex mutex_;
absl::flat_hash_map<std::string, OperatorReprFn> registry_
ABSL_GUARDED_BY(mutex_);
};
OpReprRegistry* GetOpReprRegistryForRegisteredOp() {
static OpReprRegistry* result = []() {
auto* registry = new OpReprRegistry;
for (const auto& [key, _] : *kUnaryInfixOps) {
registry->Set(std::string(key), UnaryReprFn);
}
for (const auto& [key, _] : *kBinaryInfixOps) {
registry->Set(std::string(key), BinaryReprFn);
}
registry->Set("core.getattr", GetAttrReprFn);
registry->Set("core.getitem", GetItemReprFn);
return registry;
}();
return result;
}
std::optional<ReprToken> RegisteredOperatorReprFn(
const ExprNodePtr& expr_node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
DCHECK(expr_node->is_op() && IsRegisteredOperator(expr_node->op()));
if (auto op_repr_fn = GetOpReprRegistryForRegisteredOp()->Get(
expr_node->op()->display_name());
op_repr_fn != nullptr) {
return op_repr_fn(expr_node, node_tokens);
}
return std::nullopt;
}
OpReprRegistry* GetOpReprRegistryForQValueSpecialization() {
static OpReprRegistry* result = []() {
auto* registry = new OpReprRegistry;
registry->Set("::arolla::expr::RegisteredOperator",
RegisteredOperatorReprFn);
return registry;
}();
return result;
}
}
void RegisterOpReprFnByQValueSpecializationKey(
std::string qvalue_specialization_key, OperatorReprFn op_repr_fn) {
GetOpReprRegistryForQValueSpecialization()->Set(
std::move(qvalue_specialization_key), std::move(op_repr_fn));
}
void RegisterOpReprFnByByRegistrationName(std::string op_name,
OperatorReprFn op_repr_fn) {
GetOpReprRegistryForRegisteredOp()->Set(std::move(op_name),
std::move(op_repr_fn));
}
std::optional<ReprToken> FormatOperatorNodePretty(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
if (auto op_repr_fn = GetOpReprRegistryForQValueSpecialization()->Get(
node->op()->py_qvalue_specialization_key());
op_repr_fn != nullptr) {
if (auto res = op_repr_fn(node, node_tokens)) {
return *std::move(res);
}
}
return std::nullopt;
}
} | #include "arolla/expr/operator_repr_functions.h"
#include <memory>
#include <optional>
#include <string>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "arolla/expr/expr.h"
#include "arolla/expr/expr_attributes.h"
#include "arolla/expr/expr_node.h"
#include "arolla/expr/expr_operator_signature.h"
#include "arolla/expr/registered_expr_operator.h"
#include "arolla/expr/testing/test_operators.h"
#include "arolla/util/fingerprint.h"
#include "arolla/util/repr.h"
#include "arolla/util/testing/repr_token_eq.h"
namespace arolla::expr {
namespace {
using ::arolla::expr::testing::DummyOp;
using ::arolla::testing::ReprTokenEq;
using ::testing::Optional;
std::optional<ReprToken> AddRepr(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
const auto& x_token = node_tokens.at(node->node_deps()[0]->fingerprint());
const auto& y_token = node_tokens.at(node->node_deps()[1]->fingerprint());
return ReprToken{.str = absl::StrFormat("%s + %s", x_token.str, y_token.str),
.precedence = ReprToken::kSafeForSubscription};
}
std::optional<ReprToken> SubtractRepr(
const ExprNodePtr& node,
const absl::flat_hash_map<Fingerprint, ReprToken>& node_tokens) {
const auto& x_token = node_tokens.at(node->node_deps()[0]->fingerprint());
const auto& y_token = node_tokens.at(node->node_deps()[1]->fingerprint());
return ReprToken{.str = absl::StrFormat("%s - %s", x_token.str, y_token.str),
.precedence = ReprToken::kSafeForArithmetic};
}
TEST(OperatorReprFunctionsTest, OpClass) {
auto x = Leaf("x");
auto y = Leaf("y");
auto expr = ExprNode::UnsafeMakeOperatorNode(
std::make_shared<DummyOp>("custom.add",
ExprOperatorSignature({{"x"}, {"y"}})),
{x, y}, ExprAttributes());
absl::flat_hash_map<Fingerprint, ReprToken> node_tokens = {
{x->fingerprint(), ReprToken{.str = "L.x"}},
{y->fingerprint(), ReprToken{.str = "L.y"}},
};
absl::string_view specialization_key =
expr->op()->py_qvalue_specialization_key();
{
EXPECT_EQ(FormatOperatorNodePretty(expr, node_tokens), std::nullopt);
}
{
RegisterOpReprFnByQValueSpecializationKey(std::string(specialization_key),
AddRepr);
EXPECT_THAT(
FormatOperatorNodePretty(expr, node_tokens),
Optional(ReprTokenEq("L.x + L.y", ReprToken::kSafeForSubscription)));
}
{
RegisterOpReprFnByQValueSpecializationKey(std::string(specialization_key),
SubtractRepr);
EXPECT_THAT(
FormatOperatorNodePretty(expr, node_tokens),
Optional(ReprTokenEq("L.x - L.y", ReprToken::kSafeForArithmetic)));
}
}
TEST(OperatorReprFunctionsTest, RegisteredOp) {
auto x = Leaf("x");
auto y = Leaf("y");
auto expr = ExprNode::UnsafeMakeOperatorNode(
std::make_shared<RegisteredOperator>("test.add"), {x, y},
ExprAttributes());
absl::flat_hash_map<Fingerprint, ReprToken> node_tokens = {
{x->fingerprint(), ReprToken{.str = "L.x"}},
{y->fingerprint(), ReprToken{.str = "L.y"}},
};
{
EXPECT_EQ(FormatOperatorNodePretty(expr, node_tokens), std::nullopt);
}
{
RegisterOpReprFnByByRegistrationName("test.add", AddRepr);
EXPECT_THAT(
FormatOperatorNodePretty(expr, node_tokens),
Optional(ReprTokenEq("L.x + L.y", ReprToken::kSafeForSubscription)));
}
{
RegisterOpReprFnByByRegistrationName("test.add", SubtractRepr);
EXPECT_THAT(
FormatOperatorNodePretty(expr, node_tokens),
Optional(ReprTokenEq("L.x - L.y", ReprToken::kSafeForArithmetic)));
}
}
}
} | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_repr_functions.cc | https://github.com/google/arolla/blob/1ca990dbeca224035efdabffecc7f3738df6b52c/arolla/expr/operator_repr_functions_test.cc | 1ca990dbeca224035efdabffecc7f3738df6b52c |
0e5b51f5-887b-4bbd-86cf-cea50fe6fd3e | cpp | tensorflow/tensorflow | pjrt_future | third_party/xla/xla/pjrt/pjrt_future.cc | third_party/xla/xla/pjrt/pjrt_future_test.cc | #include "xla/pjrt/pjrt_future.h"
#include <atomic>
#include <cstdint>
#include <memory>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace {
struct State {
explicit State(int32_t size)
: pending_count(size), promise(PjRtFuture<>::CreatePromise()) {}
std::atomic<int32_t> pending_count;
PjRtFuture<>::Promise promise;
absl::Mutex mu;
absl::Status status ABSL_GUARDED_BY(&mu);
};
}
PjRtFuture<> JoinFutures(absl::Span<const PjRtFuture<>> futures) {
if (futures.empty()) {
return PjRtFuture<>(absl::OkStatus());
} else if (futures.size() == 1) {
return futures.front();
}
auto state = std::make_shared<State>(futures.size());
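  // Every future decrements the shared pending count when it completes;
  // the last one publishes the joined status. Status::Update keeps the
  // first non-OK status, so the earliest-reported error wins.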
for (const PjRtFuture<>& future : futures) {
future.OnReady([state](absl::Status status) {
if (!status.ok()) {
absl::MutexLock lock(&state->mu);
state->status.Update(status);
}
const int pending_count =
state->pending_count.fetch_sub(1, std::memory_order_acq_rel);
CHECK_GE(pending_count, 1) << "Pending count can't drop below 0";
if (pending_count == 1) {
absl::MutexLock lock(&state->mu);
state->promise.Set(std::move(state->status));
}
});
}
return PjRtFuture<>(state->promise);
}
} | #include "xla/pjrt/pjrt_future.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
TEST(PjRtFutureTest, StatelessFuture) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set();
EXPECT_TRUE(future.IsReady());
EXPECT_EQ(future.Await(), absl::OkStatus());
future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
}
TEST(PjRtFutureTest, CopyableFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
PjRtFuture<int32_t> copy_constructed(future);
PjRtFuture<int32_t> copy_assigned = future;
EXPECT_FALSE(copy_constructed.IsReady());
EXPECT_FALSE(copy_assigned.IsReady());
promise.Set(42);
EXPECT_TRUE(copy_constructed.IsReady());
EXPECT_TRUE(copy_assigned.IsReady());
}
TEST(PjRtFutureTest, MoveConstructedFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
PjRtFuture<std::unique_ptr<int32_t>> move_constructed(std::move(future));
EXPECT_FALSE(move_constructed.IsReady());
promise.Set(std::make_unique<int32_t>(42));
EXPECT_TRUE(move_constructed.IsReady());
}
TEST(PjRtFutureTest, MoveAssignedFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
PjRtFuture<std::unique_ptr<int32_t>> move_assigned = std::move(future);
EXPECT_FALSE(move_assigned.IsReady());
promise.Set(std::make_unique<int32_t>(42));
EXPECT_TRUE(move_assigned.IsReady());
}
TEST(PjRtFutureTest, AwaitMoveOnlyFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
promise.Set(std::make_unique<int32_t>(42));
EXPECT_EQ(**future.Await(), 42);
EXPECT_EQ(**std::move(future).Await(), 42);
}
TEST(PjRtFutureTest, OnReadyRvalueFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
promise.Set(42);
std::move(future).OnReady(
[](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
TEST(PjRtFutureTest, OnReadyMoveOnlyFuture) {
auto promise = PjRtFuture<std::unique_ptr<int32_t>>::CreatePromise();
PjRtFuture<std::unique_ptr<int32_t>> future(promise);
promise.Set(std::make_unique<int32_t>(42));
std::move(future).OnReady([](absl::StatusOr<std::unique_ptr<int32_t>> value) {
EXPECT_EQ(**value, 42);
});
}
TEST(PjRtFutureTest, StatelessError) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(absl::InternalError("test"));
EXPECT_TRUE(future.IsReady());
absl::Status status = future.Await();
EXPECT_EQ(status, absl::InternalError("test"));
future.OnReady([](absl::Status status) {
EXPECT_EQ(status, absl::InternalError("test"));
});
}
TEST(PjRtFutureTest, StatelessImmediate) {
PjRtFuture<> ok_future(absl::OkStatus());
PjRtFuture<> error_future(absl::InternalError("test"));
EXPECT_TRUE(ok_future.IsReady());
EXPECT_TRUE(error_future.IsReady());
EXPECT_EQ(ok_future.Await(), absl::OkStatus());
EXPECT_EQ(error_future.Await(), absl::InternalError("test"));
ok_future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
error_future.OnReady([](absl::Status status) {
EXPECT_EQ(status, absl::InternalError("test"));
});
}
TEST(PjRtFutureTest, StatefulFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(42);
EXPECT_TRUE(future.IsReady());
future.OnReady([](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
TEST(PjRtFutureTest, StatusFuture) {
auto promise = PjRtFuture<>::CreatePromise();
PjRtFuture<> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(absl::OkStatus());
EXPECT_TRUE(future.IsReady());
future.OnReady(
[](absl::Status status) { EXPECT_EQ(status, absl::OkStatus()); });
}
TEST(PjRtFutureTest, StatusOrFuture) {
auto promise = PjRtFuture<int32_t>::CreatePromise();
PjRtFuture<int32_t> future(promise);
EXPECT_FALSE(future.IsReady());
promise.Set(42);
EXPECT_TRUE(future.IsReady());
future.OnReady([](absl::StatusOr<int32_t> value) { EXPECT_EQ(*value, 42); });
}
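// Joining an empty span yields an immediately-ready OK future; otherwise the
// joined future becomes ready only once every input promise has been set.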
TEST(PjRtFutureTest, JoinFutures) {
auto empty_join = JoinFutures({});
EXPECT_TRUE(empty_join.IsReady());
EXPECT_EQ(empty_join.Await(), absl::OkStatus());
auto promise0 = PjRtFuture<>::CreatePromise();
auto promise1 = PjRtFuture<>::CreatePromise();
std::vector<PjRtFuture<>> futures0 = {PjRtFuture<>(promise0)};
std::vector<PjRtFuture<>> futures1 = {PjRtFuture<>(promise0),
PjRtFuture<>(promise1)};
auto join_one = JoinFutures(futures0);
EXPECT_FALSE(join_one.IsReady());
auto join_two = JoinFutures(futures1);
EXPECT_FALSE(join_two.IsReady());
promise0.Set();
EXPECT_TRUE(join_one.IsReady());
EXPECT_FALSE(join_two.IsReady());
EXPECT_EQ(join_one.Await(), absl::OkStatus());
promise1.Set();
EXPECT_TRUE(join_two.IsReady());
EXPECT_EQ(join_two.Await(), absl::OkStatus());
}
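// When several joined futures fail, the join reports the first error it
// observed: join_two resolves to "error #0" even after promise1 also fails.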
TEST(PjRtFutureTest, JoinErrors) {
auto empty_join = JoinFutures({});
EXPECT_TRUE(empty_join.IsReady());
EXPECT_EQ(empty_join.Await(), absl::OkStatus());
auto promise0 = PjRtFuture<>::CreatePromise();
auto promise1 = PjRtFuture<>::CreatePromise();
std::vector<PjRtFuture<>> futures0 = {PjRtFuture<>(promise0)};
std::vector<PjRtFuture<>> futures1 = {PjRtFuture<>(promise0),
PjRtFuture<>(promise1)};
auto join_one = JoinFutures(futures0);
EXPECT_FALSE(join_one.IsReady());
auto join_two = JoinFutures(futures1);
EXPECT_FALSE(join_two.IsReady());
promise0.Set(absl::InternalError("error #0"));
EXPECT_TRUE(join_one.IsReady());
EXPECT_FALSE(join_two.IsReady());
EXPECT_EQ(join_one.Await(), absl::InternalError("error #0"));
promise1.Set(absl::InternalError("error #1"));
EXPECT_TRUE(join_two.IsReady());
EXPECT_EQ(join_two.Await(), absl::InternalError("error #0"));
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_future.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/pjrt/pjrt_future_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8377d019-df08-42d2-95d1-41c6f8d85eb8 | cpp | google/tensorstore | global_initializer | tensorstore/internal/global_initializer.h | tensorstore/internal/global_initializer_test.cc | #ifndef TENSORSTORE_INTERNAL_GLOBAL_INITIALIZER_H_
#define TENSORSTORE_INTERNAL_GLOBAL_INITIALIZER_H_
#include "tensorstore/internal/preprocessor/cat.h"
#define TENSORSTORE_GLOBAL_INITIALIZER \
namespace { \
const struct TENSORSTORE_PP_CAT(TsGlobalInit, __LINE__) { \
TENSORSTORE_PP_CAT(TsGlobalInit, __LINE__) \
(); \
} TENSORSTORE_PP_CAT(tensorstore_global_init, __LINE__); \
} \
TENSORSTORE_PP_CAT(TsGlobalInit, __LINE__)::TENSORSTORE_PP_CAT( \
TsGlobalInit, __LINE__)()
#endif | #include "tensorstore/internal/global_initializer.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace {
std::vector<int> vec;
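// Initializers defined in the same translation unit run in definition order,
// which is exactly what the test below asserts.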
TENSORSTORE_GLOBAL_INITIALIZER { vec.push_back(1); }
TENSORSTORE_GLOBAL_INITIALIZER { vec.push_back(2); }
TENSORSTORE_GLOBAL_INITIALIZER { vec.push_back(3); }
TEST(GlobalInitializerTest, Ordering) {
EXPECT_THAT(vec, ::testing::ElementsAre(1, 2, 3));
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/global_initializer.h | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/internal/global_initializer_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
6475b107-a6ce-4c24-b567-5dba680f88ba | cpp | google/libphonenumber | phonenumbermatch | cpp/src/phonenumbers/phonenumbermatch.cc | cpp/test/phonenumbers/phonenumbermatch_test.cc | #include "phonenumbers/phonenumbermatch.h"
#include <string>
#include "phonenumbers/phonenumber.h"
#include "phonenumbers/phonenumber.pb.h"
#include "phonenumbers/stringutil.h"
namespace i18n {
namespace phonenumbers {
PhoneNumberMatch::PhoneNumberMatch(int start,
const string& raw_string,
const PhoneNumber& number)
: start_(start), raw_string_(raw_string), number_(number) {
}
PhoneNumberMatch::PhoneNumberMatch()
: start_(-1), raw_string_(""), number_(PhoneNumber::default_instance()) {
}
const PhoneNumber& PhoneNumberMatch::number() const {
return number_;
}
int PhoneNumberMatch::start() const {
return start_;
}
int PhoneNumberMatch::end() const {
return static_cast<int>(start_ + raw_string_.length());
}
int PhoneNumberMatch::length() const {
return static_cast<int>(raw_string_.length());
}
const string& PhoneNumberMatch::raw_string() const {
return raw_string_;
}
void PhoneNumberMatch::set_start(int start) {
start_ = start;
}
void PhoneNumberMatch::set_raw_string(const string& raw_string) {
raw_string_ = raw_string;
}
void PhoneNumberMatch::set_number(const PhoneNumber& number) {
number_.CopyFrom(number);
}
string PhoneNumberMatch::ToString() const {
return StrCat("PhoneNumberMatch [", start(), ",", end(), ") ",
raw_string_);
}
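// Matches are equal iff the parsed numbers are proto-identical and the raw
// string and start offset coincide.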
bool PhoneNumberMatch::Equals(const PhoneNumberMatch& match) const {
return ExactlySameAs(match.number_, number_) &&
match.raw_string_.compare(raw_string_) == 0 &&
match.start_ == start_;
}
void PhoneNumberMatch::CopyFrom(const PhoneNumberMatch& match) {
raw_string_ = match.raw_string();
start_ = match.start();
number_ = match.number();
}
}
} | #include "phonenumbers/phonenumber.h"
#include "phonenumbers/phonenumbermatch.h"
#include <gtest/gtest.h>
#include "phonenumbers/phonenumber.pb.h"
namespace i18n {
namespace phonenumbers {
TEST(PhoneNumberMatch, TestGetterMethods) {
PhoneNumber number;
const int start_index = 10;
const string raw_phone_number("1 800 234 45 67");
PhoneNumberMatch match1(start_index, raw_phone_number, number);
EXPECT_EQ(start_index, match1.start());
EXPECT_EQ(start_index + static_cast<int>(raw_phone_number.length()),
match1.end());
EXPECT_EQ(static_cast<int>(raw_phone_number.length()), match1.length());
EXPECT_EQ(raw_phone_number, match1.raw_string());
EXPECT_EQ("PhoneNumberMatch [10,25) 1 800 234 45 67", match1.ToString());
}
TEST(PhoneNumberMatch, TestEquals) {
PhoneNumber number;
PhoneNumberMatch match1(10, "1 800 234 45 67", number);
PhoneNumberMatch match2(10, "1 800 234 45 67", number);
match2.set_start(11);
ASSERT_FALSE(match1.Equals(match2));
match2.set_start(match1.start());
EXPECT_TRUE(match1.Equals(match2));
PhoneNumber number2;
number2.set_raw_input("123");
match2.set_number(number2);
ASSERT_FALSE(match1.Equals(match2));
match2.set_number(match1.number());
EXPECT_TRUE(ExactlySameAs(match1.number(), match2.number()));
EXPECT_TRUE(match1.Equals(match2));
match2.set_raw_string("123");
ASSERT_FALSE(match1.Equals(match2));
}
TEST(PhoneNumberMatch, TestAssignmentOverload) {
PhoneNumber number;
PhoneNumberMatch match1(10, "1 800 234 45 67", number);
PhoneNumberMatch match2;
ASSERT_FALSE(match1.Equals(match2));
match2.CopyFrom(match1);
ASSERT_TRUE(match1.Equals(match2));
PhoneNumberMatch match3;
PhoneNumberMatch match4;
match4.CopyFrom(match2);
match3.CopyFrom(match2);
ASSERT_TRUE(match3.Equals(match4));
ASSERT_TRUE(match4.Equals(match2));
}
TEST(PhoneNumberMatch, TestCopyConstructor) {
PhoneNumber number;
PhoneNumberMatch match1(10, "1 800 234 45 67", number);
PhoneNumberMatch match2;
match2.CopyFrom(match1);
ASSERT_TRUE(match1.Equals(match2));
}
}
} | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/src/phonenumbers/phonenumbermatch.cc | https://github.com/google/libphonenumber/blob/9aa9aaa39ad8098aef56071d2df4f6f8d251c98b/cpp/test/phonenumbers/phonenumbermatch_test.cc | 9aa9aaa39ad8098aef56071d2df4f6f8d251c98b |
539747d9-ddc4-43ed-b586-0406c8dbf82e | cpp | google/tensorstore | context | tensorstore/context.cc | tensorstore/context_test.cc | #include "tensorstore/context.h"
#include <stddef.h>
#include <algorithm>
#include <cassert>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/base/no_destructor.h"
#include "absl/base/thread_annotations.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context_impl.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/container/heterogeneous_container.h"
#include "tensorstore/internal/intrusive_ptr.h"
#include "tensorstore/internal/json/value_as.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/mutex.h"
#include "tensorstore/internal/riegeli/delimited.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/serialization/fwd.h"
#include "tensorstore/serialization/json.h"
#include "tensorstore/serialization/json_bindable.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/util/quote_string.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/str_cat.h"
namespace tensorstore {
namespace internal_context {
ResourceProviderImplBase::~ResourceProviderImplBase() = default;
ResourceOrSpecBase::~ResourceOrSpecBase() = default;
ResourceImplBase::~ResourceImplBase() = default;
ResourceSpecImplBase::~ResourceSpecImplBase() = default;
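// Returns the context that created `resource`, or null if that context has
// already been destroyed; the weak creator pointer is upgraded only while
// its reference count is still non-zero.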
ContextImplPtr GetCreator(ResourceImplBase& resource) {
absl::MutexLock lock(&resource.mutex_);
auto* creator_ptr = resource.weak_creator_;
if (!creator_ptr ||
!internal::IncrementReferenceCountIfNonZero(*creator_ptr)) {
return {};
}
return ContextImplPtr(creator_ptr, internal::adopt_object_ref);
}
void ResourceOrSpecPtrTraits::increment(ResourceOrSpecBase* p) {
intrusive_ptr_increment(p);
}
void ResourceOrSpecPtrTraits::decrement(ResourceOrSpecBase* p) {
intrusive_ptr_decrement(p);
}
void ResourceImplWeakPtrTraits::increment(ResourceOrSpecBase* p) {
intrusive_ptr_increment(p);
}
void ResourceImplWeakPtrTraits::decrement(ResourceOrSpecBase* p) {
intrusive_ptr_decrement(p);
}
void ResourceImplStrongPtrTraits::increment(ResourceImplBase* p) {
intrusive_ptr_increment(p);
p->spec_->provider_->AcquireContextReference(*p);
}
void ResourceImplStrongPtrTraits::decrement(ResourceImplBase* p) {
p->spec_->provider_->ReleaseContextReference(*p);
intrusive_ptr_decrement(p);
}
void intrusive_ptr_increment(ContextSpecImpl* p) {
intrusive_ptr_increment(
static_cast<internal::AtomicReferenceCount<ContextSpecImpl>*>(p));
}
void intrusive_ptr_decrement(ContextSpecImpl* p) {
intrusive_ptr_decrement(
static_cast<internal::AtomicReferenceCount<ContextSpecImpl>*>(p));
}
void intrusive_ptr_increment(ContextImpl* p) {
intrusive_ptr_increment(
static_cast<internal::AtomicReferenceCount<ContextImpl>*>(p));
}
void intrusive_ptr_decrement(ContextImpl* p) {
intrusive_ptr_decrement(
static_cast<internal::AtomicReferenceCount<ContextImpl>*>(p));
}
ContextImpl::ContextImpl() = default;
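// Detach this context from resources that still name it as their creator, so
// a later GetCreator() cannot return a dangling pointer.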
ContextImpl::~ContextImpl() {
for (const auto& resource_container : resources_) {
auto& result = resource_container->result_;
if (!result.ok()) continue;
auto& resource = **result;
absl::MutexLock lock(&resource.mutex_);
if (resource.weak_creator_ == this) {
resource.weak_creator_ = nullptr;
}
}
}
namespace {
struct ContextProviderRegistry {
absl::Mutex mutex_;
internal::HeterogeneousHashSet<
std::unique_ptr<const ResourceProviderImplBase>, std::string_view,
&ResourceProviderImplBase::id_>
providers_ ABSL_GUARDED_BY(mutex_);
};
static ContextProviderRegistry& GetRegistry() {
static absl::NoDestructor<ContextProviderRegistry> registrar;
return *registrar;
}
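// Brent-style cycle detection over the creation_blocked_on_ chain: returns a
// node on the cycle, or nullptr if the chain terminates.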
ResourceContainer* FindCycle(ResourceContainer* container) {
size_t power = 1;
size_t lambda = 1;
auto* tortoise = container;
auto* hare = container->creation_blocked_on_;
while (true) {
if (!hare) return nullptr;
if (tortoise == hare) return tortoise;
if (power == lambda) {
tortoise = hare;
power *= 2;
lambda = 0;
}
hare = hare->creation_blocked_on_;
lambda += 1;
}
}
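// Fails every node on the detected cycle with an error spelling out the chain
// of key:spec references, then wakes all waiters.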
void KillCycle(ResourceContainer* container) {
std::vector<std::string> parts;
auto* node = container;
do {
assert(node->spec_);
std::string part;
if (!node->spec_->key_.empty()) {
tensorstore::StrAppend(&part, QuoteString(node->spec_->key_), ":");
}
auto json_result = node->spec_->ToJson(IncludeDefaults{true});
if (json_result.has_value()) {
tensorstore::StrAppend(
&part,
json_result->dump(
-1, ' ', true,
::nlohmann::json::error_handler_t::ignore));
} else {
tensorstore::StrAppend(
&part, "unprintable spec for ",
tensorstore::QuoteString(node->spec_->provider_->id_));
}
parts.push_back(std::move(part));
node = node->creation_blocked_on_;
} while (node != container);
auto error = absl::InvalidArgumentError("Context resource reference cycle: " +
absl::StrJoin(parts, " -> "));
do {
auto* next = std::exchange(node->creation_blocked_on_, nullptr);
node->result_ = error;
node->condvar_.SignalAll();
node = next;
} while (node != container);
}
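// Blocks until `container` is ready. After a short grace period it checks
// whether the wait participates in a reference cycle and, if so, breaks the
// cycle instead of deadlocking.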
void WaitForCompletion(absl::Mutex* mutex, ResourceContainer* container,
ResourceContainer* trigger) {
if (trigger) {
assert(!trigger->creation_blocked_on_);
trigger->creation_blocked_on_ = container;
}
if (!container->ready()) {
container->condvar_.WaitWithTimeout(mutex, absl::Milliseconds(5));
if (!container->ready()) {
if (auto* cycle_node = FindCycle(container)) {
KillCycle(cycle_node);
}
while (!container->ready()) {
container->condvar_.Wait(mutex);
}
}
}
if (trigger) {
trigger->creation_blocked_on_ = nullptr;
}
}
Result<ResourceImplStrongPtr> CreateResource(ContextImpl& context,
ResourceSpecImplBase& spec,
ResourceContainer* trigger) {
std::unique_ptr<ResourceContainer> container(new ResourceContainer);
auto* container_ptr = container.get();
container->spec_.reset(&spec);
if (trigger) {
assert(!trigger->creation_blocked_on_);
trigger->creation_blocked_on_ = container.get();
}
context.resources_.insert(std::move(container));
Result<ResourceImplStrongPtr> result{};
{
internal::ScopedWriterUnlock unlock(context.root_->mutex_);
result = spec.CreateResource({&context, container_ptr});
if (result.ok()) {
auto& resource = **result;
if (resource.spec_.get() == &spec) {
absl::MutexLock lock(&resource.mutex_);
assert(resource.weak_creator_ == nullptr);
resource.weak_creator_ = &context;
}
}
}
container_ptr->result_ = std::move(result);
if (trigger) {
trigger->creation_blocked_on_ = nullptr;
}
container_ptr->condvar_.SignalAll();
return container_ptr->result_;
}
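// Keyed resources are deduplicated through the context's container map;
// resources with an empty key are anonymous and created fresh, bypassing the
// cache.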
Result<ResourceImplStrongPtr> GetOrCreateResourceStrongPtr(
ContextImpl& context, ResourceSpecImplBase& spec,
ResourceContainer* trigger) {
if (!spec.provider_) {
ABSL_LOG(FATAL) << "Context resource provider not registered for: "
<< QuoteString(spec.key_);
}
const std::string_view key = spec.key_;
if (key.empty()) {
ResourceContainer container;
container.spec_.reset(&spec);
if (trigger) {
absl::MutexLock lock(&context.root_->mutex_);
assert(!trigger->creation_blocked_on_);
trigger->creation_blocked_on_ = &container;
}
auto result = spec.CreateResource({&context, &container});
if (trigger) {
absl::MutexLock lock(&context.root_->mutex_);
trigger->creation_blocked_on_ = nullptr;
}
return result;
}
absl::MutexLock lock(&context.root_->mutex_);
assert(context.spec_);
#ifndef NDEBUG
{
auto it = context.spec_->resources_.find(key);
assert(it != context.spec_->resources_.end() && it->get() == &spec);
}
#endif
if (auto it = context.resources_.find(key); it != context.resources_.end()) {
auto* container = it->get();
WaitForCompletion(&context.root_->mutex_, container, trigger);
return container->result_;
}
return CreateResource(context, spec, trigger);
}
}
Result<ResourceImplWeakPtr> GetOrCreateResource(ContextImpl& context,
ResourceSpecImplBase& spec,
ResourceContainer* trigger) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto p, GetOrCreateResourceStrongPtr(context, spec, trigger));
p->spec_->provider_->ReleaseContextReference(*p);
return ResourceImplWeakPtr(p.release(), internal::adopt_object_ref);
}
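// Spec that refers to another context resource by key. Resolution walks up
// the context hierarchy; an empty referent denotes the provider's default
// resource.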
class ResourceReference : public ResourceSpecImplBase {
public:
ResourceReference(const std::string& referent) : referent_(referent) {}
void EncodeCacheKey(std::string* out) const override {
internal::EncodeCacheKey(out, ResourceSpecImplBase::kReference, referent_);
}
Result<ResourceImplStrongPtr> CreateResource(
const internal::ContextResourceCreationContext& creation_context)
override {
std::string_view referent = referent_;
auto* mutex = &creation_context.context_->root_->mutex_;
absl::MutexLock lock(mutex);
ContextImpl* c = creation_context.context_;
if (referent.empty()) {
assert(!key_.empty());
if (c->parent_) {
c = c->parent_.get();
referent = provider_->id_;
} else {
auto default_spec = MakeDefaultResourceSpec(*provider_, key_);
return internal_context::CreateResource(*c, *default_spec,
creation_context.trigger_);
}
}
while (true) {
if (auto it = c->resources_.find(referent); it != c->resources_.end()) {
ResourceContainer* container = it->get();
WaitForCompletion(mutex, container, creation_context.trigger_);
return container->result_;
}
auto* context_spec = c->spec_.get();
if (context_spec) {
if (auto it = context_spec->resources_.find(referent);
it != context_spec->resources_.end()) {
return internal_context::CreateResource(*c, **it,
creation_context.trigger_);
}
}
if (!c->parent_) {
if (referent != provider_->id_) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Resource not defined: ", QuoteString(referent)));
}
auto default_spec = MakeDefaultResourceSpec(*provider_, provider_->id_);
return internal_context::CreateResource(*c, *default_spec,
creation_context.trigger_);
}
c = c->parent_.get();
}
}
Result<::nlohmann::json> ToJson(Context::ToJsonOptions options) override {
if (referent_.empty()) return nullptr;
return referent_;
}
ResourceSpecImplPtr UnbindContext(
const internal::ContextSpecBuilder& spec_builder) final {
auto& builder_impl = *internal_context::Access::impl(spec_builder);
++builder_impl.ids_[referent_];
return ResourceSpecImplPtr(this);
}
std::string referent_;
};
void RegisterContextResourceProvider(
std::unique_ptr<const ResourceProviderImplBase> provider) {
auto& registry = GetRegistry();
absl::MutexLock lock(®istry.mutex_);
auto id = provider->id_;
if (!registry.providers_.insert(std::move(provider)).second) {
ABSL_LOG(FATAL) << "Provider " << QuoteString(id) << " already registered";
}
}
const ResourceProviderImplBase* GetProvider(std::string_view id) {
auto& registry = GetRegistry();
absl::ReaderMutexLock lock(®istry.mutex_);
auto it = registry.providers_.find(id);
if (it == registry.providers_.end()) return nullptr;
return it->get();
}
const ResourceProviderImplBase& GetProviderOrDie(std::string_view id) {
auto* provider = GetProvider(id);
if (!provider) {
ABSL_LOG(FATAL) << "Context resource provider " << QuoteString(id)
<< " not registered";
}
return *provider;
}
ResourceSpecImplPtr MakeDefaultResourceSpec(
const ResourceProviderImplBase& provider, std::string_view key) {
auto default_spec = provider.Default();
default_spec->provider_ = &provider;
default_spec->key_ = key;
default_spec->is_default_ = true;
return default_spec;
}
std::string_view ParseResourceProvider(std::string_view key) {
return key.substr(0, key.find('#'));
}
absl::Status ProviderNotRegisteredError(std::string_view key) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid context resource identifier: ", QuoteString(key)));
}
Result<ResourceSpecImplPtr> ResourceSpecFromJson(
const ResourceProviderImplBase& provider, const ::nlohmann::json& j,
JsonSerializationOptions options) {
ResourceSpecImplPtr impl;
if (j.is_null()) {
impl.reset(new ResourceReference(""));
} else if (auto* s = j.get_ptr<const std::string*>()) {
auto provider_id = ParseResourceProvider(*s);
if (provider_id != provider.id_) {
return absl::InvalidArgumentError(tensorstore::StrCat(
"Invalid reference to ", QuoteString(provider.id_),
" resource: ", QuoteString(*s)));
}
impl.reset(new ResourceReference(*s));
} else {
TENSORSTORE_ASSIGN_OR_RETURN(impl, provider.FromJson(j, options));
}
impl->provider_ = &provider;
return impl;
}
Result<ResourceSpecImplPtr> ResourceSpecFromJsonWithKey(
std::string_view key, const ::nlohmann::json& j,
Context::FromJsonOptions options) {
auto* provider = GetProvider(ParseResourceProvider(key));
ResourceSpecImplPtr impl;
if (!provider) {
return ProviderNotRegisteredError(key);
} else {
TENSORSTORE_ASSIGN_OR_RETURN(impl,
ResourceSpecFromJson(*provider, j, options));
}
impl->key_ = key;
return impl;
}
Result<ResourceSpecImplPtr> ResourceSpecFromJson(
std::string_view provider_id, const ::nlohmann::json& j,
Context::FromJsonOptions options) {
auto& provider = GetProviderOrDie(provider_id);
if (j.is_null()) {
return internal_json::ExpectedError(j, "non-null value");
}
return ResourceSpecFromJson(provider, j, options);
}
ResourceOrSpecPtr DefaultResourceSpec(std::string_view provider_id) {
return ToResourceOrSpecPtr(
ResourceSpecFromJson(provider_id, std::string(provider_id), {}).value());
}
}
Context Context::Default() {
Context context;
context.impl_.reset(new internal_context::ContextImpl);
context.impl_->root_ = context.impl_.get();
return context;
}
Context::Context(const Context::Spec& spec, Context parent)
: impl_(new internal_context::ContextImpl) {
impl_->spec_ = spec.impl_;
impl_->parent_ = std::move(parent.impl_);
if (impl_->parent_) {
impl_->root_ = impl_->parent_->root_;
} else {
impl_->root_ = impl_.get();
}
}
Result<Context> Context::FromJson(::nlohmann::json json_spec, Context parent,
FromJsonOptions options) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto spec, Spec::FromJson(std::move(json_spec), std::move(options)));
return Context(spec, std::move(parent));
}
Context::Spec Context::spec() const {
if (!impl_) return {};
Context::Spec spec;
internal_context::Access::impl(spec) = impl_->spec_;
return spec;
}
Context Context::parent() const {
if (!impl_) return {};
Context parent_context;
parent_context.impl_ = impl_->parent_;
return parent_context;
}
namespace jb = tensorstore::internal_json_binding;
TENSORSTORE_DEFINE_JSON_DEFAULT_BINDER(
Context::Spec,
jb::Compose<::nlohmann::json::object_t>([](auto is_loading,
const auto& options, auto* obj,
auto* j_obj) -> absl::Status {
if constexpr (is_loading) {
obj->impl_.reset(new internal_context::ContextSpecImpl);
obj->impl_->resources_.reserve(j_obj->size());
for (const auto& [key, value] : *j_obj) {
TENSORSTORE_ASSIGN_OR_RETURN(
auto resource, internal_context::ResourceSpecFromJsonWithKey(
key, value, options));
obj->impl_->resources_.insert(std::move(resource));
}
} else {
if (!obj->impl_) return absl::OkStatus();
for (const auto& resource_spec : obj->impl_->resources_) {
TENSORSTORE_ASSIGN_OR_RETURN(auto resource_spec_json,
resource_spec->ToJson(options));
assert(!resource_spec_json.is_discarded());
j_obj->emplace(resource_spec->key_, std::move(resource_spec_json));
}
}
return absl::OkStatus();
}))
namespace internal {
TENSORSTORE_DEFINE_JSON_BINDER(ContextSpecDefaultableJsonBinder,
[](auto is_loading, const auto& options,
auto* obj, auto* j) {
return jb::DefaultInitializedValue()(
is_loading, options, obj, j);
})
bool IsPartialBindingContext(const Context& context) {
return internal_context::Access::impl(context)->root_->bind_partial_;
}
void SetRecordBindingState(internal::ContextSpecBuilder& builder,
bool record_binding_state) {
auto& impl = internal_context::Access::impl(builder);
auto ptr = impl.release();
ptr.set_tag(record_binding_state);
impl.reset(ptr, internal::adopt_object_ref);
}
}
namespace internal_context {
Result<::nlohmann::json> BuilderResourceSpec::ToJson(
Context::ToJsonOptions options) {
::nlohmann::json json_spec;
if (!underlying_spec_->key_.empty()) {
return underlying_spec_->key_;
}
return underlying_spec_->ToJson(options);
}
Result<ResourceImplStrongPtr> BuilderResourceSpec::CreateResource(
const internal::ContextResourceCreationContext& creation_context) {
return underlying_spec_->CreateResource(creation_context);
}
ResourceSpecImplPtr BuilderResourceSpec::UnbindContext(
const internal::ContextSpecBuilder& spec_builder) {
if (!underlying_spec_->key_.empty() &&
!underlying_spec_->provider_->config_only_) {
return ResourceSpecImplPtr(new ResourceReference(underlying_spec_->key_));
}
return underlying_spec_->UnbindContext(spec_builder);
}
void BuilderResourceSpec::EncodeCacheKey(std::string* out) const {
underlying_spec_->EncodeCacheKey(out);
}
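// On destruction the builder assigns keys to the shared resources it
// collected: a unique original key is kept, otherwise a fresh
// "<provider-id>#<n>" key is generated before the spec joins the root context
// spec.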
BuilderImpl::~BuilderImpl() {
auto& ids = ids_;
using SharedEntry = std::pair<ResourceImplBase*, ResourceEntry*>;
std::vector<SharedEntry> shared_entries;
for (auto& p : resources_) {
const auto& key = p.first->spec_->key_;
if (!key.empty()) {
ids[key]++;
}
if (p.second.shared) {
shared_entries.emplace_back(p.first.get(), &p.second);
}
}
std::sort(shared_entries.begin(), shared_entries.end(),
[](const SharedEntry& a, const SharedEntry& b) {
return a.second->id < b.second->id;
});
for (auto [resource, entry] : shared_entries) {
std::string key = resource->spec_->key_;
if (key.empty() || ids.at(key) != 1) {
size_t i = 0;
while (true) {
key = tensorstore::StrCat(resource->spec_->provider_->id_, "#", i);
if (!ids.count(key)) break;
++i;
}
ids[key]++;
}
entry->spec->underlying_spec_->key_ = key;
root_->resources_.insert(entry->spec->underlying_spec_);
}
}
void BuilderImplPtrTraits::increment(BuilderImplTaggedPtr p) {
intrusive_ptr_increment(
static_cast<internal::AtomicReferenceCount<BuilderImpl>*>(p.get()));
}
void BuilderImplPtrTraits::decrement(BuilderImplTaggedPtr p) {
intrusive_ptr_decrement(
static_cast<internal::AtomicReferenceCount<BuilderImpl>*>(p.get()));
}
}
namespace internal {
ContextSpecBuilder ContextSpecBuilder::Make(ContextSpecBuilder parent,
Context::Spec existing_spec) {
ContextSpecBuilder builder;
if (existing_spec.impl_) {
if (existing_spec.impl_->use_count() != 1) {
existing_spec.impl_.reset(
new internal_context::ContextSpecImpl(*existing_spec.impl_));
}
}
if (parent.impl_) {
builder.impl_ = std::move(parent.impl_);
builder.spec_impl_ = std::move(existing_spec.impl_);
} else {
builder.impl_.reset(internal_context::BuilderImplTaggedPtr(
new internal_context::BuilderImpl, parent.impl_.get().tag()));
if (!existing_spec.impl_) {
builder.spec_impl_.reset(new internal_context::ContextSpecImpl);
} else {
builder.spec_impl_ = std::move(existing_spec.impl_);
}
builder.impl_->root_ = builder.spec_impl_;
}
if (builder.spec_impl_ && !builder.spec_impl_->resources_.empty()) {
auto& ids = builder.impl_->ids_;
for (const auto& resource_spec : builder.spec_impl_->resources_) {
ids[resource_spec->key_]++;
resource_spec->UnbindContext(builder);
}
}
return builder;
}
Context::Spec ContextSpecBuilder::spec() const {
Context::Spec spec;
spec.impl_ = spec_impl_;
return spec;
}
}
namespace internal_context {
ResourceSpecImplPtr ResourceImplBase::UnbindContext(
const internal::ContextSpecBuilder& spec_builder) {
auto spec = spec_->provider_->DoGetSpec(*this, spec_builder);
spec->provider_ = spec_->provider_;
spec->is_default_ = spec_->is_default_;
spec->key_ = spec_->key_;
return spec;
}
namespace {
internal_context::ResourceSpecImplPtr AddResource(
const internal::ContextSpecBuilder& builder,
internal_context::ResourceImplBase* resource) {
internal_context::ResourceImplWeakPtr resource_ptr(resource);
auto* impl = internal_context::Access::impl(builder).get().get();
auto& entry = impl->resources_[resource_ptr];
if (!entry.spec) {
entry.spec.reset(new internal_context::BuilderResourceSpec);
auto new_spec = entry.spec;
entry.spec->provider_ = resource->spec_->provider_;
entry.id = impl->next_id_++;
entry.shared =
(resource->spec_->is_default_ || !resource->spec_->key_.empty());
auto underlying_spec = resource->UnbindContext(builder);
new_spec->underlying_spec_ = std::move(underlying_spec);
return new_spec;
} else {
entry.shared = true;
return entry.spec;
}
}
}
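// Registers a bound resource with the builder (or unbinds a plain spec).
// When the builder records binding state, the result is tagged so that it
// still binds eagerly under partial binding.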
ResourceOrSpecPtr AddResourceOrSpec(const internal::ContextSpecBuilder& builder,
ResourceOrSpecTaggedPtr resource_or_spec) {
assert(internal_context::Access::impl(builder));
if (!resource_or_spec) {
resource_or_spec.set_tag<1>(false);
return ResourceOrSpecPtr(resource_or_spec);
}
if (!IsResource(resource_or_spec)) {
return ToResourceOrSpecPtr(
static_cast<ResourceSpecImplBase*>(resource_or_spec.get())
->UnbindContext(builder));
} else {
auto new_ptr = ToResourceOrSpecPtr(AddResource(
builder, static_cast<ResourceImplBase*>(resource_or_spec.get())));
if (internal::GetRecordBindingState(builder)) {
auto new_tagged_ptr = new_ptr.release();
new_tagged_ptr.set_tag<1>(true);
new_ptr = ResourceOrSpecPtr(new_tagged_ptr, internal::adopt_object_ref);
}
return new_ptr;
}
}
absl::Status ResourceSpecFromJsonWithDefaults(
std::string_view provider_id, const JsonSerializationOptions& options,
ResourceOrSpecPtr& spec, ::nlohmann::json* j) {
if (j->is_discarded()) {
spec = internal_context::DefaultResourceSpec(provider_id);
} else if (j->is_array()) {
const auto& arr = j->get_ref<const ::nlohmann::json::array_t&>();
if (arr.size() != 1) {
return internal_json::ExpectedError(*j, "single-element array");
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto spec_ptr, ResourceSpecFromJson(provider_id, arr[0], options));
spec = ToResourceOrSpecPtr(std::move(spec_ptr));
if (options.preserve_bound_context_resources_) {
auto tagged_ptr = spec.release();
tagged_ptr.set_tag<1>(true);
spec = ResourceOrSpecPtr(tagged_ptr, internal::adopt_object_ref);
}
} else {
TENSORSTORE_ASSIGN_OR_RETURN(auto spec_ptr,
internal_context::ResourceSpecFromJson(
provider_id, std::move(*j), options));
spec = ToResourceOrSpecPtr(std::move(spec_ptr));
}
return absl::OkStatus();
}
absl::Status ResourceSpecToJsonWithDefaults(
const JsonSerializationOptions& options, ResourceOrSpecTaggedPtr spec,
::nlohmann::json* j) {
if (!spec || IsResource(spec)) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
} else {
auto* spec_ptr = static_cast<ResourceSpecImplBase*>(spec.get());
TENSORSTORE_ASSIGN_OR_RETURN(*j, spec_ptr->ToJson(options));
if (options.preserve_bound_context_resources_ &&
IsImmediateBindingResourceSpec(spec)) {
::nlohmann::json::array_t arr(1);
arr[0] = std::move(*j);
*j = std::move(arr);
}
if (!IncludeDefaults(options).include_defaults() && j->is_string() &&
j->get_ref<const std::string&>() == spec_ptr->provider_->id_) {
*j = ::nlohmann::json(::nlohmann::json::value_t::discarded);
}
}
return absl::OkStatus();
}
absl::Status GetOrCreateResource(ContextImpl* context,
ResourceOrSpecTaggedPtr resource_or_spec,
ResourceContainer* trigger,
ResourceOrSpecPtr& resource) {
assert(context);
if (!resource_or_spec) {
resource.reset();
return absl::OkStatus();
}
if (IsResource(resource_or_spec)) {
resource.reset(resource_or_spec);
return absl::OkStatus();
}
if (context->root_->bind_partial_ &&
!IsImmediateBindingResourceSpec(resource_or_spec)) {
resource.reset(resource_or_spec);
return absl::OkStatus();
}
TENSORSTORE_ASSIGN_OR_RETURN(
auto resource_ptr,
internal_context::GetOrCreateResource(
*context, static_cast<ResourceSpecImplBase&>(*resource_or_spec),
trigger));
resource = ToResourceOrSpecPtr(std::move(resource_ptr));
return absl::OkStatus();
}
void StripContext(ResourceOrSpecPtr& spec) {
if (!spec) return;
spec = internal_context::DefaultResourceSpec(
IsResource(spec.get())
? static_cast<ResourceImplBase&>(*spec).spec_->provider_->id_
: static_cast<ResourceSpecImplBase&>(*spec).provider_->id_);
}
namespace {
[[nodiscard]] bool VerifyProviderIdMatch(serialization::DecodeSource& source,
std::string_view provider_id,
std::string_view key) {
if (internal_context::ParseResourceProvider(key) == provider_id) {
return true;
}
source.Fail(serialization::DecodeError(tensorstore::StrCat(
"Context resource key ", tensorstore::QuoteString(key),
" does not match expected provider ",
tensorstore::QuoteString(provider_id))));
return false;
}
struct ContextResourceSpecImplSerializer {
[[nodiscard]] static bool Encode(
serialization::EncodeSink& sink,
const internal_context::ResourceSpecImplPtr& value,
JsonSerializationOptions json_serialization_options = {}) {
if (!serialization::EncodeTuple(sink, value->is_default_, value->key_)) {
return false;
}
if (value->is_default_) return true;
::nlohmann::json json;
TENSORSTORE_ASSIGN_OR_RETURN(
json, value->ToJson(json_serialization_options), (sink.Fail(_), false));
assert(!json.is_discarded());
return serialization::Encode(sink, json);
}
[[nodiscard]] bool Decode(
serialization::DecodeSource& source,
internal_context::ResourceSpecImplPtr& value,
JsonSerializationOptions json_serialization_options = {}) {
bool is_default;
std::string_view key;
if (!serialization::DecodeTuple(source, is_default, key)) return false;
if (!key.empty() && !VerifyProviderIdMatch(source, provider_id, key)) {
return false;
}
if (is_default) {
auto& provider = internal_context::GetProviderOrDie(provider_id);
value = MakeDefaultResourceSpec(provider, key);
} else {
std::string key_copy(key);
::nlohmann::json json_spec;
if (!serialization::Decode(source, json_spec)) return false;
TENSORSTORE_ASSIGN_OR_RETURN(
value,
internal_context::ResourceSpecFromJson(provider_id, json_spec,
json_serialization_options),
(source.Fail(_), false));
value->key_ = std::move(key_copy);
}
return true;
}
std::string_view provider_id;
};
[[nodiscard]] bool EncodeContextSpecBuilder(
serialization::EncodeSink& sink, internal::ContextSpecBuilder&& builder);
[[nodiscard]] bool DecodeContextSpecBuilder(
serialization::DecodeSource& source,
internal_context::ContextImplPtr& context);
struct ContextResourceImplSerializer {
[[nodiscard]] static bool Encode(
serialization::EncodeSink& sink,
const internal_context::ResourceImplWeakPtr& value) {
auto creator = internal_context::GetCreator(*value);
if (!serialization::Encode(sink, creator)) return false;
if (creator) {
assert(!value->spec_->key_.empty());
return serialization::Encode(sink, value->spec_->key_);
}
auto builder = internal::ContextSpecBuilder::Make();
auto spec = value->UnbindContext(builder);
if (!internal_context::EncodeContextSpecBuilder(sink, std::move(builder))) {
return false;
}
return ContextResourceSpecImplSerializer::Encode(sink, spec);
}
[[nodiscard]] bool Decode(
serialization::DecodeSource& source,
internal_context::ResourceImplWeakPtr& value) const {
internal_context::ContextImplPtr creator;
if (!serialization::Decode(source, creator)) return false;
if (creator) {
return DecodeByReferenceToExistingContext(source, *creator, value);
}
internal_context::ContextImplPtr context_impl;
if (!internal_context::DecodeContextSpecBuilder(source, context_impl)) {
return false;
}
internal_context::ResourceSpecImplPtr resource_spec;
if (!ContextResourceSpecImplSerializer{provider_id}.Decode(source,
resource_spec)) {
return false;
}
std::string key;
std::swap(key, resource_spec->key_);
TENSORSTORE_ASSIGN_OR_RETURN(value,
internal_context::GetOrCreateResource(
*context_impl, *resource_spec, nullptr),
(source.Fail(_), false));
resource_spec->key_ = std::move(key);
return true;
}
[[nodiscard]] bool DecodeByReferenceToExistingContext(
serialization::DecodeSource& source,
internal_context::ContextImpl& creator,
internal_context::ResourceImplWeakPtr& value) const {
std::string_view key;
if (!serialization::Decode(source, key)) return false;
if (!VerifyProviderIdMatch(source, provider_id, key)) return false;
TENSORSTORE_ASSIGN_OR_RETURN(
auto spec, internal_context::ResourceSpecFromJson(provider_id, key, {}),
(source.Fail(_), false));
TENSORSTORE_ASSIGN_OR_RETURN(
value, internal_context::GetOrCreateResource(creator, *spec, nullptr),
(source.Fail(_), false));
return true;
}
std::string_view provider_id;
};
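// Writes the builder's accumulated dependencies as (key, resource) pairs so
// that a decoder can reconstruct the shared resources into a fresh context.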
bool EncodeContextSpecBuilder(serialization::EncodeSink& sink,
internal::ContextSpecBuilder&& builder) {
std::vector<
std::pair<internal_context::ResourceImplWeakPtr,
internal::IntrusivePtr<internal_context::BuilderResourceSpec>>>
deps;
auto& resources = internal_context::Access::impl(builder)->resources_;
deps.reserve(resources.size());
for (auto& [resource, entry] : resources) {
deps.emplace_back(resource, entry.spec);
entry.shared = true;
}
ABSL_CHECK_EQ(internal_context::Access::impl(builder)->use_count(), 1);
builder = internal::ContextSpecBuilder();
if (!serialization::WriteSize(sink.writer(), deps.size())) return false;
for (size_t i = 0; i < deps.size(); ++i) {
auto& [dep_resource, dep_spec] = deps[i];
if (!serialization::Encode(sink, dep_spec->underlying_spec_->key_)) {
return false;
}
if (!sink.Indirect(dep_resource, ContextResourceImplSerializer{})) {
return false;
}
}
return true;
}
[[nodiscard]] bool DecodeContextResourceInContextSpecBuilder(
serialization::DecodeSource& source,
internal_context::ContextImpl& context_impl) {
std::string key;
if (!serialization::Decode(source, key)) return false;
internal_context::ResourceImplWeakPtr resource;
std::string_view provider_id = internal_context::ParseResourceProvider(key);
if (!source.Indirect(resource, ContextResourceImplSerializer{provider_id})) {
return false;
}
if (resource->spec_->provider_->id_ != provider_id) {
source.Fail(serialization::DecodeError(tensorstore::StrCat(
"Context resource has provider id ",
tensorstore::QuoteString(resource->spec_->provider_->id_),
" but expected ", tensorstore::QuoteString(provider_id))));
return false;
}
auto container = std::make_unique<internal_context::ResourceContainer>();
if (resource->spec_->key_ != key) {
container->spec_.reset(new internal_context::BuilderResourceSpec);
container->spec_->provider_ = resource->spec_->provider_;
container->spec_->key_ = std::move(key);
static_cast<internal_context::BuilderResourceSpec&>(*container->spec_)
.underlying_spec_ = resource->spec_;
} else {
container->spec_ = resource->spec_;
}
container->result_.emplace(resource.get());
if (!context_impl.spec_->resources_.emplace(container->spec_).second) {
source.Fail(absl::DataLossError(
tensorstore::StrCat("Duplicate context resource key in Context spec ",
tensorstore::QuoteString(container->spec_->key_))));
return false;
}
[[maybe_unused]] bool inserted_resource =
context_impl.resources_.emplace(std::move(container)).second;
assert(inserted_resource);
return true;
}
bool DecodeContextSpecBuilder(serialization::DecodeSource& source,
internal_context::ContextImplPtr& context) {
size_t count;
if (!serialization::ReadSize(source.reader(), count)) return false;
internal_context::ContextImplPtr context_impl(
new internal_context::ContextImpl);
context_impl->spec_.reset(new internal_context::ContextSpecImpl);
context_impl->root_ = context_impl.get();
while (count--) {
if (!DecodeContextResourceInContextSpecBuilder(source, *context_impl)) {
return false;
}
}
context = std::move(context_impl);
return true;
}
[[nodiscard]] bool EncodeContextResource(
serialization::EncodeSink& sink,
const internal_context::ResourceImplWeakPtr& resource) {
return serialization::IndirectPointerSerializer<
internal_context::ResourceImplWeakPtr,
ContextResourceImplSerializer>()
.Encode(sink, resource);
}
[[nodiscard]] bool DecodeContextResource(
serialization::DecodeSource& source, std::string_view provider_id,
internal_context::ResourceImplWeakPtr& resource) {
return serialization::IndirectPointerSerializer<
internal_context::ResourceImplWeakPtr,
ContextResourceImplSerializer>{{provider_id}}
.Decode(source, resource);
}
}
bool EncodeContextResourceOrSpec(
serialization::EncodeSink& sink,
const internal_context::ResourceOrSpecPtr& resource) {
const bool is_resource = internal_context::IsResource(resource.get());
if (!serialization::Encode(sink, is_resource)) return false;
if (is_resource) {
return EncodeContextResource(
sink, internal_context::ResourceImplWeakPtr(
static_cast<internal_context::ResourceImplBase*>(
resource.get().get())));
} else {
return ContextResourceSpecImplSerializer::Encode(
sink, internal_context::ResourceSpecImplPtr(
static_cast<internal_context::ResourceSpecImplBase*>(
resource.get().get())));
}
}
bool DecodeContextResourceOrSpec(
serialization::DecodeSource& source, std::string_view provider_id,
internal_context::ResourceOrSpecPtr& resource) {
bool is_resource;
if (!serialization::Decode(source, is_resource)) return false;
if (is_resource) {
internal_context::ResourceImplWeakPtr resource_ptr;
if (!DecodeContextResource(source, provider_id, resource_ptr)) return false;
resource = internal_context::ToResourceOrSpecPtr(std::move(resource_ptr));
} else {
internal_context::ResourceSpecImplPtr spec_ptr;
if (!ContextResourceSpecImplSerializer{provider_id}.Decode(source,
spec_ptr)) {
return false;
}
resource = internal_context::ToResourceOrSpecPtr(std::move(spec_ptr));
}
return true;
}
bool ContextSpecImplPtrNonNullDirectSerializer::Encode(
serialization::EncodeSink& sink,
const internal_context::ContextSpecImplPtr& value) {
Context::Spec spec;
internal_context::Access::impl(spec) = value;
return serialization::JsonBindableSerializer<Context::Spec>::Encode(sink,
spec);
}
bool ContextSpecImplPtrNonNullDirectSerializer::Decode(
serialization::DecodeSource& source,
internal_context::ContextSpecImplPtr& value) {
Context::Spec spec;
if (!serialization::JsonBindableSerializer<Context::Spec>::Decode(source,
spec)) {
return false;
}
value = internal_context::Access::impl(spec);
return true;
}
bool ContextImplPtrNonNullDirectSerializer::Encode(
serialization::EncodeSink& sink,
const internal_context::ContextImplPtr& value) {
return serialization::EncodeTuple(sink, value->spec_, value->parent_);
}
bool ContextImplPtrNonNullDirectSerializer::Decode(
serialization::DecodeSource& source,
internal_context::ContextImplPtr& value) {
Context::Spec spec;
Context parent;
if (!serialization::DecodeTuple(source, spec, parent)) return false;
Context context(std::move(spec), std::move(parent));
value = std::move(internal_context::Access::impl(context));
return true;
}
bool UntypedContextResourceImplPtrNonNullDirectSerializer::Encode(
serialization::EncodeSink& sink,
const internal_context::ResourceImplWeakPtr& value) {
std::string_view provider_id = value->spec_->provider_->id_;
if (!serialization::Encode(sink, provider_id)) return false;
return ContextResourceImplSerializer{provider_id}.Encode(sink, value);
}
bool UntypedContextResourceImplPtrNonNullDirectSerializer::Decode(
serialization::DecodeSource& source,
internal_context::ResourceImplWeakPtr& value) {
std::string provider_id;
if (!serialization::Decode(source, provider_id)) return false;
if (!internal_context::GetProvider(provider_id)) {
source.Fail(internal_context::ProviderNotRegisteredError(provider_id));
return false;
}
return ContextResourceImplSerializer{provider_id}.Decode(source, value);
}
}
namespace serialization {
bool Serializer<Context::Spec>::Encode(EncodeSink& sink,
const Context::Spec& value) {
return serialization::Encode(sink, internal_context::Access::impl(value));
}
bool Serializer<Context::Spec>::Decode(DecodeSource& source,
Context::Spec& value) {
return serialization::Decode(source, internal_context::Access::impl(value));
}
bool Serializer<Context>::Encode(EncodeSink& sink, const Context& value) {
return serialization::Encode(sink, internal_context::Access::impl(value));
}
bool Serializer<Context>::Decode(DecodeSource& source, Context& value) {
return serialization::Decode(source, internal_context::Access::impl(value));
}
}
}
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::internal_context::ContextSpecImplPtr,
(tensorstore::serialization::IndirectPointerSerializer<
tensorstore::internal_context::ContextSpecImplPtr,
tensorstore::internal_context::
ContextSpecImplPtrNonNullDirectSerializer>{}))
TENSORSTORE_DEFINE_SERIALIZER_SPECIALIZATION(
tensorstore::internal_context::ContextImplPtr,
(tensorstore::serialization::IndirectPointerSerializer<
tensorstore::internal_context::ContextImplPtr,
tensorstore::internal_context::
ContextImplPtrNonNullDirectSerializer>{})) | #include "tensorstore/context.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include <nlohmann/json.hpp>
#include "tensorstore/context_impl.h"
#include "tensorstore/context_resource_provider.h"
#include "tensorstore/internal/cache_key/cache_key.h"
#include "tensorstore/internal/cache_key/std_optional.h"
#include "tensorstore/internal/json_binding/bindable.h"
#include "tensorstore/internal/json_binding/json_binding.h"
#include "tensorstore/internal/json_binding/std_optional.h"
#include "tensorstore/internal/json_gtest.h"
#include "tensorstore/internal/testing/concurrent.h"
#include "tensorstore/json_serialization_options.h"
#include "tensorstore/json_serialization_options_base.h"
#include "tensorstore/serialization/serialization.h"
#include "tensorstore/serialization/std_tuple.h"
#include "tensorstore/serialization/test_util.h"
#include "tensorstore/util/result.h"
#include "tensorstore/util/status.h"
#include "tensorstore/util/status_testutil.h"
namespace tensorstore {
template <typename Provider>
auto* GetRawPointer(const Context::Resource<Provider>& resource) {
return resource.get();
}
}
namespace {
namespace jb = tensorstore::internal_json_binding;
using ::tensorstore::Context;
using ::tensorstore::IncludeDefaults;
using ::tensorstore::MatchesJson;
using ::tensorstore::MatchesStatus;
using ::tensorstore::Result;
using ::tensorstore::internal::ContextResourceCreationContext;
using ::tensorstore::internal::ContextResourceRegistration;
using ::tensorstore::internal::ContextResourceTraits;
using ::tensorstore::internal::ContextSpecBuilder;
using ::tensorstore::internal_testing::TestConcurrent;
using ::tensorstore::serialization::SerializationRoundTrip;
struct IntResource : public ContextResourceTraits<IntResource> {
struct Spec {
std::int64_t value;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value);
};
};
using Resource = std::int64_t;
static constexpr char id[] = "int_resource";
static Spec Default() { return {42}; }
static constexpr auto JsonBinder() {
return jb::Object(jb::Member(
"value", jb::Projection(&Spec::value,
jb::DefaultValue([](auto* v) { *v = 42; }))));
}
static Result<Resource> Create(Spec v,
ContextResourceCreationContext context) {
return v.value;
}
static Spec GetSpec(Resource v, const ContextSpecBuilder& builder) {
return {v};
}
};
struct IntConfigResource : public ContextResourceTraits<IntConfigResource> {
constexpr static bool config_only = true;
struct Spec {
std::int64_t value;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value);
};
};
using Resource = std::int64_t;
static constexpr char id[] = "int_config_resource";
static Spec Default() { return {42}; }
static constexpr auto JsonBinder() { return jb::Projection(&Spec::value); }
static Result<Resource> Create(Spec v,
ContextResourceCreationContext context) {
return v.value;
}
static Spec GetSpec(Resource v, const ContextSpecBuilder& builder) {
return {v};
}
};
struct StrongRefResource : public ContextResourceTraits<StrongRefResource> {
struct Spec {
std::int64_t value;
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value);
};
};
struct Resource {
size_t num_strong_references = 0;
};
static constexpr char id[] = "strongref";
static Spec Default() { return Spec{42}; }
static constexpr auto JsonBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(jb::Member(
"value", jb::Projection(&Spec::value, jb::DefaultValue([](auto* obj) {
*obj = 7;
}))));
}
static Result<Resource> Create(Spec v,
ContextResourceCreationContext context) {
return Resource{};
}
static Spec GetSpec(const Resource& v, const ContextSpecBuilder& builder) {
return {42};
}
static void AcquireContextReference(Resource& v) {
++v.num_strong_references;
}
static void ReleaseContextReference(Resource& v) {
--v.num_strong_references;
}
};
struct OptionalResource : public ContextResourceTraits<OptionalResource> {
using Spec = std::optional<size_t>;
using Resource = Spec;
static constexpr char id[] = "optional_resource";
static Spec Default() { return {}; }
static constexpr auto JsonBinder() {
namespace jb = tensorstore::internal_json_binding;
return jb::Object(jb::Member(
"limit", jb::DefaultInitializedValue(jb::Optional(
jb::Integer<size_t>(1), [] { return "shared"; }))));
}
static Result<Resource> Create(Spec v,
ContextResourceCreationContext context) {
return v;
}
static Spec GetSpec(Resource v, const ContextSpecBuilder& builder) {
return v;
}
};
const ContextResourceRegistration<IntResource> int_resource_registration;
const ContextResourceRegistration<IntConfigResource>
int_config_resource_registration;
const ContextResourceRegistration<StrongRefResource>
strong_ref_resource_registration;
const ContextResourceRegistration<OptionalResource>
optional_resource_registration;
TEST(IntResourceTest, InvalidDirectSpec) {
EXPECT_THAT(Context::Resource<IntResource>::FromJson(nullptr),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected non-null value, but received: null"));
EXPECT_THAT(Context::Resource<IntResource>::FromJson(3),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Expected object, but received: 3"));
EXPECT_THAT(
Context::Resource<IntResource>::FromJson("foo"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid reference to \"int_resource\" resource: \"foo\""));
}
TEST(IntResourceTest, Default) {
auto context = Context::Default();
EXPECT_EQ(context, context);
EXPECT_FALSE(context.parent());
auto context2 = Context::Default();
EXPECT_NE(context, context2);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource2,
context.GetResource(resource_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource3,
context2.GetResource(resource_spec));
EXPECT_EQ(resource, resource);
EXPECT_EQ(resource, resource2);
EXPECT_NE(resource, resource3);
EXPECT_THAT(context.GetResource<IntResource>(),
::testing::Optional(resource));
EXPECT_THAT(context.GetResource<IntResource>("int_resource"),
::testing::Optional(resource));
EXPECT_THAT(resource, ::testing::Pointee(42));
EXPECT_THAT(context.GetResource<IntResource>({{"value", 50}}),
::testing::Optional(::testing::Pointee(50)));
}
TEST(IntResourceTest, ValidDirectSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson({{"value", 7}}));
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, ValidIndirectSpecDefaultId) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, Context::Spec::FromJson({{"int_resource", {{"value", 7}}}}));
auto context = Context(spec);
auto resource_spec = Context::Resource<IntResource>::DefaultSpec();
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, ContextFromJson) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, Context::FromJson({{"int_resource", {{"value", 7}}}}));
EXPECT_THAT(context.GetResource<IntResource>(),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, ValidIndirectSpecDefault) {
auto context = Context::Default();
auto resource_spec = Context::Resource<IntResource>::DefaultSpec();
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(42)));
}
TEST(IntResourceTest, ValidIndirectSpecIdentifier) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, Context::Spec::FromJson({{"int_resource#x", {{"value", 7}}}}));
auto context = Context(spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource#x"));
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(7)));
}
TEST(IntResourceTest, UndefinedIndirectReference) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource#x"));
EXPECT_THAT(context.GetResource(resource_spec),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Resource not defined: \"int_resource#x\""));
}
TEST(IntResourceTest, SimpleReference) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
Context::Spec::FromJson({
{"int_resource#x", {{"value", 7}}},
{"int_resource#y", "int_resource#x"},
}));
auto context = Context(spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource#y"));
EXPECT_THAT(context.GetResource(resource_spec),
::testing::Optional(::testing::Pointee(7)));
}
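// Self-referential resource definitions must surface a "reference cycle"
// error rather than deadlock during resolution.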
TEST(IntResourceTest, ReferenceCycle1) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, Context::Spec::FromJson({{"int_resource", "int_resource"}}));
auto context = Context(spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource"));
EXPECT_THAT(context.GetResource(resource_spec),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Context resource reference cycle: "
"\"int_resource\":\"int_resource\""));
}
TEST(IntResourceTest, ReferenceCycle2) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
Context::Spec::FromJson({
{"int_resource#a", "int_resource#b"},
{"int_resource#b", "int_resource#a"},
}));
auto context = Context(spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson("int_resource#a"));
EXPECT_THAT(context.GetResource(resource_spec),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Context resource reference cycle: "
"\"int_resource#b\":\"int_resource#a\" -> "
"\"int_resource#a\":\"int_resource#b\""));
}
TEST(IntResourceTest, Inherit) {
const ::nlohmann::json json_spec1{
{"int_resource", {{"value", 7}}},
{"int_resource#a", {{"value", 9}}},
{"int_resource#d", {{"value", 42}}},
{"int_resource#c", nullptr},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec1,
Context::Spec::FromJson(json_spec1));
EXPECT_THAT(spec1.ToJson(IncludeDefaults{false}),
::testing::Optional(MatchesJson({
{"int_resource", {{"value", 7}}},
{"int_resource#a", {{"value", 9}}},
{"int_resource#d", ::nlohmann::json::object_t{}},
{"int_resource#c", nullptr},
})));
EXPECT_THAT(spec1.ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson({
{"int_resource", {{"value", 7}}},
{"int_resource#a", {{"value", 9}}},
{"int_resource#d", {{"value", 42}}},
{"int_resource#c", nullptr},
})));
::nlohmann::json json_spec2{
{"int_resource", {{"value", 8}}},
{"int_resource#b", nullptr},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec2,
Context::Spec::FromJson(json_spec2));
auto context1 = Context(spec1);
auto context2 = Context(spec2, context1);
EXPECT_EQ(context1, context2.parent());
EXPECT_THAT(context1.spec().ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson(json_spec1)));
EXPECT_THAT(context2.spec().ToJson(),
::testing::Optional(MatchesJson(json_spec2)));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource1,
context2.GetResource(
Context::Resource<IntResource>::FromJson("int_resource").value()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2,
context2.GetResource(
Context::Resource<IntResource>::FromJson("int_resource#a").value()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource3,
context2.GetResource(
Context::Resource<IntResource>::FromJson("int_resource#b").value()));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource4,
context2.GetResource(
Context::Resource<IntResource>::FromJson("int_resource#c").value()));
EXPECT_EQ(8, *resource1);
EXPECT_EQ(9, *resource2);
EXPECT_EQ(7, *resource3);
EXPECT_EQ(42, *resource4);
}
TEST(IntResourceTest, Unknown) {
EXPECT_THAT(Context::Spec::FromJson({
{"foo", {{"value", 7}}},
}),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Invalid context resource identifier: \"foo\""));
}
TEST(IntConfigResourceTest, ContextSpec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, Context::FromJson({{"int_config_resource", 111}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource1,
context.GetResource<IntConfigResource>());
EXPECT_THAT(resource1, ::testing::Pointee(111));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource2,
context.GetResource<IntConfigResource>(222));
EXPECT_THAT(resource2, ::testing::Pointee(222));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource3,
context.GetResource<IntConfigResource>(222));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource4,
context.GetResource<IntConfigResource>(111));
std::string cache_key1, cache_key2, cache_key3, cache_key4;
tensorstore::internal::EncodeCacheKey(&cache_key1, resource1);
tensorstore::internal::EncodeCacheKey(&cache_key2, resource2);
tensorstore::internal::EncodeCacheKey(&cache_key3, resource3);
tensorstore::internal::EncodeCacheKey(&cache_key4, resource4);
EXPECT_EQ(cache_key1, cache_key4);
EXPECT_EQ(cache_key2, cache_key3);
}
TEST(StrongRefResourceTest, DirectSpec) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec, Context::Resource<StrongRefResource>::FromJson(
::nlohmann::json::object_t{}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_EQ(0, resource->num_strong_references);
}
TEST(StrongRefResourceTest, IndirectSpec) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec,
Context::Spec::FromJson({{"strongref", ::nlohmann::json::object_t{}}}));
auto context = Context(spec);
auto resource_spec = Context::Resource<StrongRefResource>::DefaultSpec();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
EXPECT_EQ(1, resource->num_strong_references);
context = Context();
EXPECT_EQ(0, resource->num_strong_references);
}
TEST(ContextSpecBuilderTest, Simple) {
auto spec =
Context::Spec::FromJson({{"int_resource", {{"value", 5}}}}).value();
auto context = Context(spec);
auto resource_spec = Context::Resource<IntResource>::DefaultSpec();
auto resource = context.GetResource(resource_spec).value();
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec = builder.AddResource(resource);
}
EXPECT_THAT(
new_spec.ToJson(),
::testing::Optional(MatchesJson({{"int_resource", {{"value", 5}}}})));
EXPECT_THAT(new_resource_spec.ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson("int_resource")));
auto new_context = Context(new_spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_resource,
new_context.GetResource(new_resource_spec));
EXPECT_EQ(5, *new_resource);
}
TEST(ContextSpecBuilderTest, Default) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource, Context::Default().GetResource<IntResource>());
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec = builder.AddResource(resource);
}
EXPECT_THAT(
jb::ToJson(new_spec,
tensorstore::internal::ContextSpecDefaultableJsonBinder,
IncludeDefaults{false}),
::testing::Optional(
MatchesJson({{"int_resource", ::nlohmann::json::object_t()}})));
EXPECT_THAT(
new_spec.ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson({{"int_resource", {{"value", 42}}}})));
EXPECT_THAT(new_resource_spec.ToJson(IncludeDefaults{true}),
::testing::Optional(MatchesJson("int_resource")));
auto new_context = Context(new_spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto new_resource,
new_context.GetResource(new_resource_spec));
EXPECT_THAT(new_resource, ::testing::Pointee(42));
}
TEST(ContextSpecBuilderTest, MultipleContexts) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec1, Context::Spec::FromJson({{"int_resource", {{"value", 5}}}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec2, Context::Spec::FromJson({{"int_resource", {{"value", 6}}}}));
auto context1 = Context(spec1);
auto context2 = Context(spec2);
auto resource_spec = Context::Resource<IntResource>::DefaultSpec();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource1,
context1.GetResource(resource_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource2,
context2.GetResource(resource_spec));
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec1;
Context::Resource<IntResource> new_resource_spec2;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec1 = builder.AddResource(resource1);
new_resource_spec2 = builder.AddResource(resource2);
}
EXPECT_THAT(new_spec.ToJson(), ::testing::Optional(MatchesJson({
{"int_resource#0", {{"value", 5}}},
{"int_resource#1", {{"value", 6}}},
})));
EXPECT_EQ("int_resource#0", new_resource_spec1.ToJson());
EXPECT_EQ("int_resource#1", new_resource_spec2.ToJson());
}
TEST(ContextSpecBuilderTest, Inline) {
auto context = Context::Default();
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<IntResource>::FromJson({{"value", 5}}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec = builder.AddResource(resource);
}
EXPECT_THAT(new_spec.ToJson(),
::testing::Optional(MatchesJson(::nlohmann::json::object_t())));
EXPECT_THAT(new_resource_spec.ToJson(),
::testing::Optional(MatchesJson({{"value", 5}})));
}
TEST(ContextSpecBuilderTest, InlineEqualToDefault) {
auto context = Context::Default();
auto resource_spec =
Context::Resource<IntResource>::FromJson({{"value", 42}}).value();
auto resource = context.GetResource(resource_spec).value();
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec = builder.AddResource(resource);
}
EXPECT_EQ(::nlohmann::json({}), new_spec.ToJson());
EXPECT_EQ(::nlohmann::json::object_t{},
new_resource_spec.ToJson(IncludeDefaults{false}));
}
TEST(ContextSpecBuilderTest, InlineShared) {
auto context = Context::Default();
auto resource_spec =
Context::Resource<IntResource>::FromJson({{"value", 5}}).value();
auto resource = context.GetResource(resource_spec).value();
Context::Spec new_spec;
Context::Resource<IntResource> new_resource_spec1;
Context::Resource<IntResource> new_resource_spec2;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec1 = builder.AddResource(resource);
new_resource_spec2 = builder.AddResource(resource);
}
EXPECT_EQ(::nlohmann::json({{"int_resource#0", {{"value", 5}}}}),
new_spec.ToJson());
EXPECT_EQ("int_resource#0", new_resource_spec1.ToJson());
EXPECT_EQ("int_resource#0", new_resource_spec2.ToJson());
}
TEST(ContextSpecBuilderTest, ExcludeDefaultsJson) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, Context::FromJson({
{"optional_resource", {{"limit", "shared"}}},
{"optional_resource#a", {{"limit", 5}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource1,
context.GetResource<OptionalResource>());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2,
context.GetResource<OptionalResource>("optional_resource#a"));
Context::Spec new_spec;
Context::Resource<OptionalResource> new_resource_spec1;
Context::Resource<OptionalResource> new_resource_spec2;
{
auto builder = ContextSpecBuilder::Make();
new_spec = builder.spec();
new_resource_spec1 = builder.AddResource(resource1);
new_resource_spec2 = builder.AddResource(resource2);
}
EXPECT_THAT(new_spec.ToJson(tensorstore::IncludeDefaults{false}),
::testing::Optional(MatchesJson({
{"optional_resource#a", {{"limit", 5}}},
{"optional_resource", ::nlohmann::json::object_t()},
})));
EXPECT_THAT(new_spec.ToJson(tensorstore::IncludeDefaults{true}),
::testing::Optional(MatchesJson({
{"optional_resource#a", {{"limit", 5}}},
{"optional_resource", {{"limit", "shared"}}},
})));
}
TEST(ContextTest, WeakCreator) {
using ::tensorstore::internal_context::Access;
using ::tensorstore::internal_context::GetCreator;
using ::tensorstore::internal_context::ResourceImplBase;
const ::nlohmann::json json_spec1{
{"int_resource", {{"value", 7}}},
{"int_resource#a", {{"value", 9}}},
{"int_resource#d", {{"value", 42}}},
{"int_resource#c", nullptr},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec1,
Context::Spec::FromJson(json_spec1));
::nlohmann::json json_spec2{
{"int_resource", {{"value", 8}}},
{"int_resource#b", nullptr},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec2,
Context::Spec::FromJson(json_spec2));
auto context1 = Context(spec1);
auto context2 = Context(spec2, context1);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource1,
context1.GetResource<IntResource>());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource2,
context2.GetResource<IntResource>());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2_a, context2.GetResource<IntResource>("int_resource#a"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2_b, context2.GetResource<IntResource>("int_resource#b"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource2_c, context2.GetResource<IntResource>("int_resource#c"));
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource1))));
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2_a))));
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2_b))));
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2_c))));
EXPECT_EQ(
Access::impl(context2),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2))));
context2 = Context();
EXPECT_EQ(
Access::impl(context1),
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource1))));
EXPECT_FALSE(
GetCreator(static_cast<ResourceImplBase&>(*Access::impl(resource2))));
}
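// NestedResource demonstrates a resource whose spec embeds a
// Context::Resource referring to another instance of the same resource type.
// The tests below exercise nested binding and reference-cycle detection.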
struct NestedResource : public ContextResourceTraits<NestedResource> {
struct Spec {
int value;
Context::Resource<NestedResource> parent;
int GetTotal() const {
int total = value;
if (parent.has_resource()) total += parent->GetTotal();
return total;
}
constexpr static auto ApplyMembers = [](auto&& x, auto f) {
return f(x.value, x.parent);
};
};
using Resource = Spec;
static constexpr char id[] = "nested_resource";
static Spec Default() { return {42}; }
static constexpr auto JsonBinder() {
return jb::Object(
jb::Member("value",
jb::Projection(&Spec::value,
jb::DefaultValue([](auto* v) { *v = 42; }))),
jb::Member(
"parent",
jb::Projection(
&Spec::parent,
jb::DefaultInitializedPredicate<jb::kNeverIncludeDefaults>(
[](auto* obj) { return !obj->valid(); }))));
}
static Result<Resource> Create(const Spec& spec,
ContextResourceCreationContext context) {
Resource resource = spec;
TENSORSTORE_RETURN_IF_ERROR(resource.parent.BindContext(context));
return resource;
}
static Spec GetSpec(const Resource& resource,
const ContextSpecBuilder& builder) {
Spec spec = resource;
UnbindContext(spec, builder);
return spec;
}
static void UnbindContext(Spec& spec, const ContextSpecBuilder& builder) {
spec.parent.UnbindContext(builder);
}
};
const ContextResourceRegistration<NestedResource> nested_resource_registration;
TEST(NestedResourceTest, Basic) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context, Context::FromJson({
{"nested_resource#a", {{"value", 1}}},
{"nested_resource#b",
{{"value", 3}, {"parent", "nested_resource#a"}}},
{"nested_resource#c",
{{"value", 5}, {"parent", "nested_resource#b"}}},
{"nested_resource#d",
{{"value", 10}, {"parent", "nested_resource#e"}}},
{"nested_resource#e",
{{"value", 15}, {"parent", "nested_resource#d"}}},
}));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto a, context.GetResource<NestedResource>("nested_resource#a"));
EXPECT_FALSE(a->parent.valid());
EXPECT_EQ(1, a->GetTotal());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto b, context.GetResource<NestedResource>("nested_resource#b"));
EXPECT_EQ(a, b->parent);
EXPECT_EQ(4, b->GetTotal());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto c, context.GetResource<NestedResource>("nested_resource#c"));
EXPECT_EQ(b, c->parent);
EXPECT_EQ(9, c->GetTotal());
EXPECT_THAT(
context.GetResource<NestedResource>("nested_resource#d"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Context resource reference cycle: "
"\"nested_resource#d\" -> "
"\"nested_resource#d\":"
"\\{\"parent\":\"nested_resource#e\",\"value\":10\\} -> "
"\"nested_resource#e\" -> "
"\"nested_resource#e\":"
"\\{\"parent\":\"nested_resource#d\",\"value\":15\\}"));
EXPECT_THAT(context.GetResource<NestedResource>("nested_resource#e"),
MatchesStatus(absl::StatusCode::kInvalidArgument,
"Context resource reference cycle: .*"));
}
TEST(ContextSpecBuilderTest, PartiallyBound) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto context_spec, Context::Spec::FromJson({
{"nested_resource#a", {{"value", 2}}},
{"nested_resource#b",
{{"value", 3}, {"parent", "nested_resource#a"}}},
}));
auto context = Context(context_spec);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto resource_spec,
Context::Resource<NestedResource>::FromJson("nested_resource#b"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto resource,
context.GetResource(resource_spec));
Context::Spec new_spec;
Context::Resource<NestedResource> new_resource_spec1;
Context::Resource<NestedResource> new_resource_spec2;
{
auto builder = ContextSpecBuilder::Make({}, context_spec);
new_spec = builder.spec();
new_resource_spec1 = builder.AddResource(resource_spec);
new_resource_spec2 = builder.AddResource(resource);
}
EXPECT_THAT(new_spec.ToJson(),
::testing::Optional(MatchesJson({
{"nested_resource#a", {{"value", 2}}},
{"nested_resource#b",
{{"value", 3}, {"parent", "nested_resource#a"}}},
{"nested_resource#1", {{"value", 2}}},
{"nested_resource#0",
{{"value", 3}, {"parent", "nested_resource#1"}}},
})));
EXPECT_THAT(new_resource_spec1.ToJson(),
::testing::Optional(MatchesJson("nested_resource#b")));
EXPECT_THAT(new_resource_spec2.ToJson(),
::testing::Optional(MatchesJson("nested_resource#0")));
}
TEST(ContextSpecSerializationTest, Empty) {
Context::Spec spec;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec_copy,
SerializationRoundTrip(spec));
  EXPECT_THAT(spec_copy.ToJson(),
              ::testing::Optional(MatchesJson(::nlohmann::json::object_t())));
}
TEST(ContextSpecSerializationTest, NonEmpty) {
::nlohmann::json json_spec{
{"int_resource", ::nlohmann::json::object_t()},
{"int_resource#a", "int_resource"},
{"int_resource#b", ::nlohmann::json::object_t()},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
Context::Spec::FromJson(json_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec_copy,
SerializationRoundTrip(spec));
  EXPECT_THAT(spec_copy.ToJson(), ::testing::Optional(MatchesJson(json_spec)));
}
TEST(ContextSerializationTest, Null) {
Context context;
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto context_copy,
SerializationRoundTrip(context));
  EXPECT_FALSE(context_copy);
}
TEST(ContextSerializationTest, NonNull) {
::nlohmann::json parent_json_spec{
{"int_resource#c", ::nlohmann::json::object_t()},
};
::nlohmann::json child_json_spec{
{"int_resource", ::nlohmann::json::object_t()},
{"int_resource#a", "int_resource"},
{"int_resource#b", ::nlohmann::json::object_t()},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto parent_spec,
Context::Spec::FromJson(parent_json_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto child_spec,
Context::Spec::FromJson(child_json_spec));
Context parent_context(parent_spec);
Context child_context(child_spec, parent_context);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto child_context_copy,
SerializationRoundTrip(child_context));
EXPECT_THAT(child_context_copy.spec().ToJson(),
::testing::Optional(child_json_spec));
EXPECT_THAT(child_context_copy.parent().spec().ToJson(),
::testing::Optional(parent_json_spec));
EXPECT_FALSE(child_context_copy.parent().parent());
}
TEST(ContextSerializationTest, Shared) {
::nlohmann::json parent_json_spec{
{"int_resource#c", {{"value", 7}}},
};
::nlohmann::json child_json_spec{
{"int_resource", {{"value", 5}}},
{"int_resource#a", "int_resource"},
{"int_resource#b", {{"value", 6}}},
};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto parent_spec,
Context::Spec::FromJson(parent_json_spec));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto child_spec,
Context::Spec::FromJson(child_json_spec));
Context parent_context(parent_spec);
Context child_context(child_spec, parent_context);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto res_parent,
parent_context.GetResource<IntResource>());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto res_child,
child_context.GetResource<IntResource>());
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto res_a, child_context.GetResource<IntResource>("int_resource#a"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto res_b, child_context.GetResource<IntResource>("int_resource#b"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto res_c_child,
child_context.GetResource<IntResource>("int_resource#c"));
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto res_c_parent,
parent_context.GetResource<IntResource>("int_resource#c"));
EXPECT_EQ(res_child, res_a);
EXPECT_EQ(res_c_child, res_c_parent);
EXPECT_NE(res_child, res_parent);
EXPECT_NE(res_a, res_b);
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto copy, SerializationRoundTrip(std::make_tuple(
parent_context, child_context, res_parent, res_child,
res_a, res_b, res_c_child, res_c_parent)));
auto [copy_parent_context, copy_child_context, copy_res_parent,
copy_res_child, copy_res_a, copy_res_b, copy_res_c_child,
copy_res_c_parent] = copy;
EXPECT_EQ(copy_parent_context, copy_child_context.parent());
EXPECT_THAT(copy_child_context.GetResource<IntResource>("int_resource#a"),
::testing::Optional(copy_res_a));
EXPECT_THAT(copy_child_context.GetResource<IntResource>("int_resource#b"),
::testing::Optional(copy_res_b));
EXPECT_THAT(copy_child_context.GetResource<IntResource>("int_resource#c"),
::testing::Optional(copy_res_c_child));
EXPECT_THAT(copy_parent_context.GetResource<IntResource>("int_resource#c"),
::testing::Optional(copy_res_c_parent));
}
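// The remaining tests create a Context and request resources from several
// threads at once (via TestConcurrent) to exercise the thread safety of
// resource creation.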
TEST(ContextTest, ConcurrentCreateSingleResource) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, Context::Spec::FromJson({{"int_resource", {{"value", 5}}}}));
Context context;
TestConcurrent<3>(
100,
[&] { context = Context(spec); },
[&] {},
[&](auto i) {
TENSORSTORE_EXPECT_OK(context.GetResource<IntResource>());
});
}
TEST(ContextTest, ConcurrentCreateMultipleResources) {
std::vector<std::string> resource_keys{"int_resource#a", "int_resource#b"};
TENSORSTORE_ASSERT_OK_AND_ASSIGN(auto spec,
Context::Spec::FromJson({
{resource_keys[0], {{"value", 5}}},
{resource_keys[1], {{"value", 6}}},
}));
Context context;
TestConcurrent<4>(
100,
[&] { context = Context(spec); },
[&] {},
[&](auto i) {
TENSORSTORE_EXPECT_OK(context.GetResource<IntResource>(
resource_keys[i % resource_keys.size()]));
});
}
TEST(ContextTest, ConcurrentCreateInParent) {
TENSORSTORE_ASSERT_OK_AND_ASSIGN(
auto spec, Context::Spec::FromJson({{"int_resource", {{"value", 5}}}}));
Context context;
TestConcurrent<3>(
100,
[&] { context = Context(spec); },
[&] {},
[&](auto i) {
Context child({}, context);
TENSORSTORE_EXPECT_OK(child.GetResource<IntResource>());
});
}
} | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/context.cc | https://github.com/google/tensorstore/blob/4f887a6430414cd6088e1743555015b10f116d50/tensorstore/context_test.cc | 4f887a6430414cd6088e1743555015b10f116d50 |
4ada8775-bd63-45fb-95ee-e5da7caaac97 | cpp | tensorflow/tensorflow | span | tensorflow/core/tfrt/mlrt/bytecode/span.h | tensorflow/core/tfrt/mlrt/bytecode/span_test.cc | #ifndef TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_SPAN_H_
#define TENSORFLOW_CORE_TFRT_MLRT_BYTECODE_SPAN_H_
#include <cstdint>
#include <vector>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
namespace mlrt {
namespace bc {
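// Span<T> is a non-owning, read-only view over `size_` consecutive elements
// of type T encoded at `data_` in a bytecode buffer. Elements are decoded on
// access through ReadIterator<T>, so operator[] returns by value.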
template <typename T>
class Span {
public:
using value_type = T;
using iterator = ReadIterator<T>;
using const_iterator = iterator;
Span() = default;
Span(const char* data, size_t size) : data_(data), size_(size) {}
template <typename SizeType>
Span(const Vector<T, SizeType>& vec)
: Span(vec.data(), vec.size()) {}
Span(const String& vec)
: Span(vec.data(), vec.size()) {}
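  // Note: this constructor reinterprets the contiguous storage of a
  // std::vector<T> as encoded bytes, which is only valid when the in-memory
  // representation of T matches its bytecode encoding (e.g. trivial types).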
Span(const std::vector<T>& vec)
: Span(reinterpret_cast<const char*>(vec.data()), vec.size()) {}
const char* data() const { return data_; }
const char* data(size_t index) const { return data_ + index * sizeof(T); }
iterator begin() const { return iterator(data_); }
iterator end() const { return iterator(data_ + size_ * sizeof(T)); }
T back() const {
DCHECK_GT(size_, 0);
return *iterator(data_ + (size_ - 1) * sizeof(T));
}
T operator[](size_t index) const {
DCHECK_LT(index, size());
auto iter = begin();
iter += index;
return *iter;
}
size_t size() const { return size_; }
bool empty() const { return size_ == 0; }
  Span drop_front(size_t num = 1) const {
    DCHECK_GE(size(), num);
    auto beg = begin();
    beg += num;
    return Span(beg.data(), size() - num);
  }
Span drop_back(size_t num = 1) const {
DCHECK_GE(size(), num);
return Span(data(), size() - num);
}
private:
const char* data_ = nullptr;
size_t size_ = 0;
};
}
}
#endif | #include "tensorflow/core/tfrt/mlrt/bytecode/span.h"
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/core/tfrt/mlrt/bytecode/bytecode.h"
namespace mlrt {
namespace bc {
namespace {
TEST(SpanTest, SpanOfTrivial) {
Buffer buffer;
Allocator alloc(&buffer);
auto ctor = New<Vector<uint32_t>>(&alloc, 4);
for (int i = 0; i < 4; ++i) {
ctor.ConstructAt(i, i);
}
Vector<uint32_t> vec(buffer.Get(ctor.address()));
Span<uint32_t> span(vec);
ASSERT_EQ(span.size(), 4);
EXPECT_EQ(span[0], 0);
EXPECT_EQ(span[1], 1);
EXPECT_EQ(span[2], 2);
EXPECT_EQ(span[3], 3);
EXPECT_THAT(span, testing::ElementsAreArray({0, 1, 2, 3}));
}
TEST(SpanTest, SpanOfVector) {
Buffer buffer;
Allocator alloc(&buffer);
using T = Vector<uint32_t>;
using V = Vector<T>;
auto vctor = New<V>(&alloc, 3);
{
auto tctor = vctor.ConstructAt(0, 2);
tctor.ConstructAt(0, 0);
tctor.ConstructAt(1, 1);
}
{
auto tctor = vctor.ConstructAt(1, 1);
tctor.ConstructAt(0, 2);
}
vctor.ConstructAt(2, 0);
V v(buffer.Get(vctor.address()));
Span<T> span(v);
T t0 = span[0];
ASSERT_EQ(t0.size(), 2);
EXPECT_EQ(t0[0], 0);
EXPECT_EQ(t0[1], 1);
EXPECT_THAT(t0, testing::ElementsAreArray({0, 1}));
T t1 = span[1];
ASSERT_EQ(t1.size(), 1);
EXPECT_EQ(t1[0], 2);
EXPECT_THAT(t1, testing::ElementsAreArray({2}));
T t2 = span[2];
ASSERT_EQ(t2.size(), 0);
}
TEST(SpanTest, SpanOfStdVectorTrivial) {
std::vector<uint32_t> vec = {0, 1, 2, 3};
Span<uint32_t> span(vec);
EXPECT_THAT(span, testing::ElementsAreArray({0, 1, 2, 3}));
}
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/span.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/tfrt/mlrt/bytecode/span_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
c6506a74-564b-4421-9121-be913bba8ef3 | cpp | tensorflow/tensorflow | dfs_hlo_visitor_with_default | third_party/xla/xla/hlo/ir/dfs_hlo_visitor_with_default.h | third_party/xla/xla/service/dfs_hlo_visitor_with_default_test.cc | #ifndef XLA_HLO_IR_DFS_HLO_VISITOR_WITH_DEFAULT_H_
#define XLA_HLO_IR_DFS_HLO_VISITOR_WITH_DEFAULT_H_
#include <memory>
#include <utility>
#include "absl/base/optimization.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "tsl/platform/status.h"
namespace xla {
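// DfsHloVisitorWithDefaultBase forwards every Handle* method to the pure
// virtual DefaultAction, so subclasses only need to override the handlers
// for the opcodes they care about.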
template <typename HloInstructionPtr>
class DfsHloVisitorWithDefaultBase
: public DfsHloVisitorBase<HloInstructionPtr> {
public:
DfsHloVisitorWithDefaultBase() = default;
~DfsHloVisitorWithDefaultBase() override = default;
virtual absl::Status DefaultAction(HloInstructionPtr hlo_instruction) = 0;
absl::Status HandleElementwiseUnary(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleElementwiseBinary(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleBatchNormTraining(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleBatchNormInference(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleBatchNormGrad(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleClamp(HloInstructionPtr clamp) override {
return DefaultAction(clamp);
}
absl::Status HandleConcatenate(HloInstructionPtr concatenate) override {
return DefaultAction(concatenate);
}
absl::Status HandleSelect(HloInstructionPtr select) override {
return DefaultAction(select);
}
absl::Status HandleDot(HloInstructionPtr dot) override {
return DefaultAction(dot);
}
absl::Status HandleConvolution(HloInstructionPtr convolution) override {
return DefaultAction(convolution);
}
absl::Status HandleFft(HloInstructionPtr fft) override {
return DefaultAction(fft);
}
absl::Status HandleTriangularSolve(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCholesky(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleOptimizationBarrier(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleAllGather(HloInstructionPtr crs) override {
return DefaultAction(crs);
}
absl::Status HandleAllGatherStart(HloInstructionPtr crs) override {
return DefaultAction(crs);
}
absl::Status HandleAllGatherDone(HloInstructionPtr crs) override {
return DefaultAction(crs);
}
absl::Status HandleAllReduce(HloInstructionPtr crs) override {
return DefaultAction(crs);
}
absl::Status HandleReduceScatter(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleAllReduceStart(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleAllReduceDone(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleAllToAll(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCollectiveBroadcast(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCollectivePermute(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCollectivePermuteStart(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleCollectivePermuteDone(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleReplicaId(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandlePartitionId(HloInstructionPtr hlo) override {
return DefaultAction(hlo);
}
absl::Status HandleRng(HloInstructionPtr random) override {
return DefaultAction(random);
}
absl::Status HandleRngBitGenerator(HloInstructionPtr random) override {
return DefaultAction(random);
}
absl::Status HandleRngGetAndUpdateState(HloInstructionPtr random) override {
return DefaultAction(random);
}
absl::Status HandleInfeed(HloInstructionPtr infeed) override {
return DefaultAction(infeed);
}
absl::Status HandleOutfeed(HloInstructionPtr outfeed) override {
return DefaultAction(outfeed);
}
absl::Status HandleReverse(HloInstructionPtr reverse) override {
return DefaultAction(reverse);
}
absl::Status HandleSort(HloInstructionPtr sort) override {
return DefaultAction(sort);
}
absl::Status HandleConstant(HloInstructionPtr constant) override {
return DefaultAction(constant);
}
absl::Status HandleIota(HloInstructionPtr iota) override {
return DefaultAction(iota);
}
absl::Status HandleGetTupleElement(
HloInstructionPtr get_tuple_element) override {
return DefaultAction(get_tuple_element);
}
absl::Status HandleParameter(HloInstructionPtr parameter) override {
return DefaultAction(parameter);
}
absl::Status HandleFusion(HloInstructionPtr fusion) override {
return DefaultAction(fusion);
}
absl::Status HandleCall(HloInstructionPtr call) override {
return DefaultAction(call);
}
absl::Status HandleCustomCall(HloInstructionPtr custom_call) override {
return DefaultAction(custom_call);
}
absl::Status HandleSlice(HloInstructionPtr slice) override {
return DefaultAction(slice);
}
absl::Status HandleDynamicSlice(HloInstructionPtr dynamic_slice) override {
return DefaultAction(dynamic_slice);
}
absl::Status HandleDynamicUpdateSlice(
HloInstructionPtr dynamic_update_slice) override {
return DefaultAction(dynamic_update_slice);
}
absl::Status HandleTuple(HloInstructionPtr tuple) override {
return DefaultAction(tuple);
}
absl::Status HandleMap(HloInstructionPtr map) override {
return DefaultAction(map);
}
absl::Status HandleReduce(HloInstructionPtr reduce) override {
return DefaultAction(reduce);
}
absl::Status HandleReduceWindow(HloInstructionPtr reduce_window) override {
return DefaultAction(reduce_window);
}
absl::Status HandleSelectAndScatter(
HloInstructionPtr select_and_scatter) override {
return DefaultAction(select_and_scatter);
}
absl::Status HandleBitcast(HloInstructionPtr bitcast) override {
return DefaultAction(bitcast);
}
absl::Status HandleBroadcast(HloInstructionPtr broadcast) override {
return DefaultAction(broadcast);
}
absl::Status HandlePad(HloInstructionPtr pad) override {
return DefaultAction(pad);
}
absl::Status HandleDynamicReshape(
HloInstructionPtr dynamic_reshape) override {
return DefaultAction(dynamic_reshape);
}
absl::Status HandleReshape(HloInstructionPtr reshape) override {
return DefaultAction(reshape);
}
absl::Status HandleTranspose(HloInstructionPtr transpose) override {
return DefaultAction(transpose);
}
absl::Status HandleWhile(HloInstructionPtr xla_while) override {
return DefaultAction(xla_while);
}
absl::Status HandleConditional(HloInstructionPtr conditional) override {
return DefaultAction(conditional);
}
absl::Status HandleAsyncStart(HloInstructionPtr async_start) override {
return DefaultAction(async_start);
}
absl::Status HandleAsyncUpdate(HloInstructionPtr async_update) override {
return DefaultAction(async_update);
}
absl::Status HandleAsyncDone(HloInstructionPtr async_done) override {
return DefaultAction(async_done);
}
absl::Status HandleCopyStart(HloInstructionPtr copy_start) override {
return DefaultAction(copy_start);
}
absl::Status HandleCopyDone(HloInstructionPtr copy_done) override {
return DefaultAction(copy_done);
}
absl::Status HandleRecv(HloInstructionPtr recv) override {
return DefaultAction(recv);
}
absl::Status HandleRecvDone(HloInstructionPtr recv_done) override {
return DefaultAction(recv_done);
}
absl::Status HandleSend(HloInstructionPtr send) override {
return DefaultAction(send);
}
absl::Status HandleTopK(HloInstructionPtr topk) override {
return DefaultAction(topk);
}
absl::Status HandleSendDone(HloInstructionPtr send_done) override {
return DefaultAction(send_done);
}
absl::Status HandleGather(HloInstructionPtr gather) override {
return DefaultAction(gather);
}
absl::Status HandleScatter(HloInstructionPtr scatter) override {
return DefaultAction(scatter);
}
absl::Status HandleAfterAll(HloInstructionPtr token) override {
return DefaultAction(token);
}
absl::Status HandleGetDimensionSize(HloInstructionPtr get_size) override {
return DefaultAction(get_size);
}
absl::Status HandleSetDimensionSize(HloInstructionPtr get_size) override {
return DefaultAction(get_size);
}
absl::Status HandleAddDependency(HloInstructionPtr add_dependency) override {
return DefaultAction(add_dependency);
}
  absl::Status FinishVisit(HloInstructionPtr /*root*/) override {
return absl::OkStatus();
}
private:
DfsHloVisitorWithDefaultBase(const DfsHloVisitorWithDefaultBase&) = delete;
DfsHloVisitorWithDefaultBase& operator=(const DfsHloVisitorWithDefaultBase&) =
delete;
};
using DfsHloVisitorWithDefault = DfsHloVisitorWithDefaultBase<HloInstruction*>;
using ConstDfsHloVisitorWithDefault =
DfsHloVisitorWithDefaultBase<const HloInstruction*>;
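// DfsHloRewriteVisitor is a DfsHloVisitorWithDefault that tracks whether any
// instruction was replaced. RunOnModule visits every non-fusion computation
// on the given execution threads and returns the accumulated changed() flag.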
class DfsHloRewriteVisitor : public DfsHloVisitorWithDefault {
public:
absl::StatusOr<bool> RunOnModule(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads = {}) {
absl::Status status;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
status = computation->Accept(this);
if (ABSL_PREDICT_FALSE(!status.ok())) return status;
}
return changed();
}
  absl::Status DefaultAction(HloInstruction* /*hlo_instruction*/) override {
return absl::OkStatus();
}
bool changed() const { return changed_; }
protected:
absl::Status ReplaceWithNewInstruction(
HloInstruction* old_instruction,
std::unique_ptr<HloInstruction> new_instruction) {
VLOG(3) << "Replacing instruction:" << "\n old: "
<< old_instruction->ToString()
<< "\n new: " << new_instruction->ToString();
absl::Status status = old_instruction->parent()->ReplaceWithNewInstruction(
old_instruction, std::move(new_instruction));
if (ABSL_PREDICT_TRUE(status.ok())) {
changed_ = true;
}
return status;
}
absl::StatusOr<bool> ReplaceInstruction(HloInstruction* old_instruction,
HloInstruction* new_instruction,
bool preserve_sharding) {
VLOG(3) << "Replacing instruction:" << "\n old: "
<< old_instruction->ToString()
<< "\n new: " << new_instruction->ToString();
absl::StatusOr<bool> changed_or =
old_instruction->parent()->ReplaceInstruction(
old_instruction, new_instruction, preserve_sharding);
if (ABSL_PREDICT_TRUE(changed_or.ok())) {
changed_ |= changed_or.value();
}
return changed_or;
}
absl::Status ReplaceInstruction(HloInstruction* old_instruction,
HloInstruction* new_instruction) {
absl::StatusOr<bool> changed_or =
ReplaceInstruction(old_instruction, new_instruction,
                           /*preserve_sharding=*/false);
if (ABSL_PREDICT_TRUE(changed_or.ok())) {
DCHECK(changed_or.value());
}
return changed_or.status();
}
void MarkAsChanged() { changed_ = true; }
private:
bool changed_ = false;
};
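// FunctionVisitorBase adapts a std::function into a visitor: the function is
// invoked as DefaultAction, and therefore runs for every visited instruction
// regardless of opcode.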
template <typename HloInstructionPtr>
class FunctionVisitorBase
: public DfsHloVisitorWithDefaultBase<HloInstructionPtr> {
public:
explicit FunctionVisitorBase(
std::function<absl::Status(HloInstructionPtr)> visitor_func)
: visitor_func_(std::move(visitor_func)) {}
absl::Status DefaultAction(HloInstructionPtr hlo_instruction) override {
return visitor_func_(hlo_instruction);
}
private:
FunctionVisitorBase(const FunctionVisitorBase&) = delete;
FunctionVisitorBase& operator=(const FunctionVisitorBase&) = delete;
std::function<absl::Status(HloInstructionPtr)> visitor_func_;
};
using FunctionVisitor = FunctionVisitorBase<HloInstruction*>;
using ConstFunctionVisitor = FunctionVisitorBase<const HloInstruction*>;
}
#endif | #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
class DfsHloVisitorWithDefaultTest : public HloTestBase {};
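// Verifies that elementwise unary/binary instructions are dispatched to
// HandleElementwiseUnary/HandleElementwiseBinary rather than DefaultAction.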
TEST_F(DfsHloVisitorWithDefaultTest, DefaultElementwiseTest) {
class ElementwiseTestVisitor : public DfsHloVisitorWithDefault {
public:
absl::Status DefaultAction(HloInstruction* hlo) override {
TF_RET_CHECK(!(hlo->IsElementwise() && hlo->operand_count() == 2))
<< hlo->ToString();
TF_RET_CHECK(!(hlo->IsElementwise() && hlo->operand_count() == 1))
<< hlo->ToString();
return absl::OkStatus();
}
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override {
TF_RET_CHECK(hlo->IsElementwise() && hlo->operand_count() == 2)
<< hlo->ToString();
return absl::OkStatus();
}
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override {
TF_RET_CHECK(hlo->IsElementwise() && hlo->operand_count() == 1)
<< hlo->ToString();
return absl::OkStatus();
}
};
const std::string& hlo_string = R"(
HloModule TestModule
ENTRY TestComputation {
arg = f32[] parameter(0)
tuple = (f32[]) tuple(arg)
gte = f32[] get-tuple-element(tuple), index=0
abs = f32[] abs(arg)
add = f32[] add(arg, gte)
broadcast = f32[42] broadcast(add), dimensions={}
slice = f32[1] slice(broadcast), slice={[1:2]}
copy = f32[] copy(arg)
eq = pred[] compare(arg, gte), direction=EQ
neg = f32[] negate(arg)
ROOT convert = f64[] convert(f32[] arg)
})";
std::unique_ptr<HloModule> module =
ParseAndReturnVerifiedModule(hlo_string).value();
ElementwiseTestVisitor visitor;
TF_EXPECT_OK(module->entry_computation()->Accept(&visitor));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/dfs_hlo_visitor_with_default.h | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dfs_hlo_visitor_with_default_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
ac9dcc13-70b4-4bb8-b8e3-89f9de54e3a5 | cpp | tensorflow/tensorflow | nccl_manager | tensorflow/core/nccl/nccl_manager.cc | tensorflow/core/nccl/nccl_manager_test.cc | #include "tensorflow/core/nccl/nccl_manager.h"
#include <utility>
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include "absl/base/call_once.h"
#include "tensorflow/core/framework/types.h"
#include "tensorflow/core/lib/core/refcount.h"
#include "tensorflow/core/lib/core/threadpool.h"
#include "tensorflow/core/platform/blocking_counter.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
#include "tensorflow/core/profiler/lib/annotated_traceme.h"
#include "tensorflow/core/profiler/lib/connected_traceme.h"
#include "tensorflow/core/profiler/lib/traceme.h"
#if GOOGLE_CUDA
#include "xla/stream_executor/gpu/scoped_activate_context.h"
#elif TENSORFLOW_USE_ROCM
#include "tensorflow/core/platform/rocm.h"
#endif
namespace tensorflow {
using stream_executor::gpu::ScopedActivateContext;
#if TENSORFLOW_USE_ROCM
#define cudaError_t hipError_t
#define cudaStream_t hipStream_t
#define cudaGetErrorString hipGetErrorString
#define cudaGetDevice hipGetDevice
#define cudaSetDevice hipSetDevice
#define cudaSuccess hipSuccess
int NcclManager::instance_count = 0;
#endif
#define NCCL_RETURN_IF_ERROR(...) \
do { \
ncclResult_t nccl_status = (__VA_ARGS__); \
if (nccl_status != ncclSuccess) { \
return errors::Internal("NCCL: ", ncclGetErrorString(nccl_status), \
". Set NCCL_DEBUG=WARN for detail."); \
} \
} while (0)
#define CUDA_RETURN_IF_ERROR(...) \
do { \
cudaError_t cuda_status = (__VA_ARGS__); \
if (cuda_status != cudaSuccess) { \
return errors::Internal("CUDA: ", cudaGetErrorString(cuda_status)); \
} \
} while (0)
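// An NcclStream is a dedicated GPU stream used only for NCCL kernels. Each
// stream owns a queue of pending (collective, participant) launches that is
// drained by a background thread running LoopKernelLaunches until
// shutdown_requested is set.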
struct NcclManager::NcclStream : public core::RefCounted {
public:
NcclStream() = default;
~NcclStream() = default;
se::StreamExecutor* executor = nullptr;
#if TENSORFLOW_USE_ROCM
se::Stream* stream = nullptr;
#else
std::unique_ptr<se::Stream> stream;
#endif
mutex mu;
condition_variable cv;
std::deque<std::pair<Collective*, int>> pending_launches_ TF_GUARDED_BY(mu);
bool shutdown_requested TF_GUARDED_BY(mu) = false;
};
struct NcclManager::CommunicatorMember {
public:
CommunicatorMember() {}
~CommunicatorMember() {
if (nccl_comm != nullptr) ncclCommDestroy(nccl_comm);
}
ncclComm_t nccl_comm = nullptr;
NcclStream* nccl_stream = nullptr;
};
struct NcclManager::Communicator {
public:
explicit Communicator(std::vector<CommunicatorMember> members,
const string& key)
: num_devices(members.size()), members(std::move(members)), key(key) {}
const int num_devices;
std::vector<CommunicatorMember> members;
const string key;
};
namespace {
static constexpr DataTypeSet kValidDataTypes =
ToSet(DT_HALF) | ToSet(DT_FLOAT) | ToSet(DT_DOUBLE) | ToSet(DT_INT32) |
ToSet(DT_INT64);
ncclDataType_t ToNcclType(DataType t) {
switch (t) {
case DT_HALF:
return ncclHalf;
case DT_FLOAT:
return ncclFloat;
case DT_DOUBLE:
return ncclDouble;
case DT_INT32:
return ncclInt;
case DT_INT64:
return ncclInt64;
default:
return ncclFloat;
}
}
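// Copies a serialized unique id back into an ncclUniqueId. Ids of the wrong
// length are silently ignored, leaving `nccl_id` unmodified.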
void StringToNcclUniqueId(const string& str_id, ncclUniqueId* nccl_id) {
if (str_id.size() == NCCL_UNIQUE_ID_BYTES) {
memcpy(nccl_id->internal, str_id.data(), NCCL_UNIQUE_ID_BYTES);
}
}
}
struct NcclManager::Collective : public core::RefCounted {
Collective(const string& collective_key_in, DataType data_type_in,
CollectiveType type_in, ncclRedOp_t reduction_op_in,
int num_local_devices_in, int num_global_devices_in,
const string& communicator_key_in)
: collective_key(collective_key_in),
data_type(data_type_in),
type(type_in),
reduction_op(reduction_op_in),
num_local_devices(num_local_devices_in),
num_global_devices(num_global_devices_in),
single_node(num_local_devices_in == num_global_devices_in),
communicator_key(communicator_key_in) {
participants.reserve(num_local_devices_in);
#if TENSORFLOW_USE_ROCM
if (NcclManager::instance_count > 1) {
status = errors::Internal(
"ROCm cannot use multi-node NCCL collectives on a single node");
}
#endif
}
const string collective_key;
const DataType data_type;
const CollectiveType type;
const ncclRedOp_t reduction_op;
const int num_local_devices;
const int num_global_devices;
const bool single_node;
const string communicator_key;
Communicator* communicator = nullptr;
std::vector<std::unique_ptr<Participant>> participants;
int root_rank = -1;
int available_participants = 0;
bool multi_node_ready = false;
uint64 trace_context = 0;
Status status;
};
NcclManager::NcclManager() {
VLOG(2) << "New NcclManager " << this;
#if TENSORFLOW_USE_ROCM
++instance_count;
#endif
}
NcclManager::~NcclManager() {
VLOG(2) << "~NcclManager " << this;
#if TENSORFLOW_USE_ROCM
--instance_count;
#endif
for (auto& it : device_to_comm_streams_) {
for (NcclStream* nccl_stream : it.second) {
{
mutex_lock l(nccl_stream->mu);
nccl_stream->shutdown_requested = true;
nccl_stream->cv.notify_all();
}
nccl_stream->Unref();
}
}
}
NcclManager* NcclManager::instance() {
static NcclManager* instance = new NcclManager();
#if TENSORFLOW_USE_ROCM
static absl::once_flag once;
absl::call_once(once, [] { --NcclManager::instance_count; });
#endif
return instance;
}
string NcclManager::GenerateCommunicatorKey() {
ncclUniqueId nccl_id;
ncclGetUniqueId(&nccl_id);
return string(nccl_id.internal, NCCL_UNIQUE_ID_BYTES);
}
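// Returns (via `communicator`) a communicator for the participants of
// `collective`. Participants are first sorted into a canonical device order;
// an existing communicator is reused when one matches the device set (or the
// collective's communicator_key), otherwise new NCCL communicators are
// created, spawning one kernel-launch thread per newly created NcclStream.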
Status NcclManager::GetCommunicator(NcclManager::Collective* collective,
NcclManager::Communicator** communicator) {
std::sort(collective->participants.begin(), collective->participants.end(),
[](const std::unique_ptr<Participant>& a,
const std::unique_ptr<Participant>& b) {
if (a->gpu_device_id != b->gpu_device_id) {
return a->gpu_device_id < b->gpu_device_id;
}
if (a->executor != b->executor) {
return a->executor < b->executor;
}
return a->global_rank < b->global_rank;
});
mutex_lock l(mu_);
if (!status_.ok()) {
return status_;
}
if (collective->communicator_key.empty()) {
for (auto& comm : communicators_) {
if (comm->num_devices == collective->num_global_devices) {
int i;
for (i = 0; i < collective->num_local_devices; ++i) {
if (comm->members[i].nccl_stream->executor !=
collective->participants[i]->executor) {
break;
}
}
if (i == collective->num_local_devices) {
*communicator = comm.get();
return OkStatus();
}
}
}
} else {
#if NCCL_MAJOR < 2
return errors::Internal(
"Cannot use multi-node NCCL collectives with NCCL 1.x");
#endif
if (collective->communicator_key.size() != NCCL_UNIQUE_ID_BYTES) {
return errors::Internal("Expected communicator_key of size ",
NCCL_UNIQUE_ID_BYTES, " but found size ",
collective->communicator_key.size());
}
for (auto& comm : communicators_) {
if (comm->key == collective->communicator_key) {
*communicator = comm.get();
return OkStatus();
}
}
}
auto* env = Env::Default();
std::set<NcclStream*> used_streams;
std::vector<CommunicatorMember> members(collective->num_local_devices);
std::vector<int> devices(collective->num_local_devices);
for (int i = 0; i < collective->num_local_devices; ++i) {
auto* executor = collective->participants[i]->executor;
auto& streams = device_to_comm_streams_[executor];
NcclStream* nccl_stream = nullptr;
for (const auto& s : streams) {
if (used_streams.insert(s).second) {
nccl_stream = s;
break;
}
}
if (nccl_stream == nullptr) {
nccl_stream = new NcclStream();
nccl_stream->executor = executor;
#if TENSORFLOW_USE_ROCM
nccl_stream->stream = collective->participants[i]->context->nccl_stream();
#else
TF_ASSIGN_OR_RETURN(auto stream, executor->CreateStream());
nccl_stream->stream = std::move(stream);
#endif
streams.emplace_back(nccl_stream);
used_streams.insert(nccl_stream);
nccl_stream->Ref();
env->SchedClosure([this, nccl_stream]() {
LoopKernelLaunches(nccl_stream);
nccl_stream->Unref();
});
}
members[i].nccl_stream = nccl_stream;
devices[i] = collective->participants[i]->gpu_device_id;
}
std::vector<ncclComm_t> nccl_comms(collective->num_local_devices);
VLOG(2) << "Created nccl Communicator with "
<< "num_global_devices = " << collective->num_global_devices
<< " num_local_devices = " << collective->num_local_devices
<< " communicator_key ="
<< absl::StrJoin(
std::vector<int>{collective->communicator_key.begin(),
collective->communicator_key.end()},
" ");
#if NCCL_MAJOR >= 2
ncclUniqueId nccl_id;
if (collective->single_node) {
NCCL_RETURN_IF_ERROR(ncclGetUniqueId(&nccl_id));
} else {
StringToNcclUniqueId(collective->communicator_key, &nccl_id);
}
int saved_device = 0;
CUDA_RETURN_IF_ERROR(cudaGetDevice(&saved_device));
NCCL_RETURN_IF_ERROR(ncclGroupStart());
for (int i = 0; i < collective->num_local_devices; ++i) {
const int rank = collective->participants[i]->global_rank >= 0
? collective->participants[i]->global_rank
: i;
CUDA_RETURN_IF_ERROR(cudaSetDevice(devices[i]));
NCCL_RETURN_IF_ERROR(ncclCommInitRank(
nccl_comms.data() + i, collective->num_global_devices, nccl_id, rank));
}
NCCL_RETURN_IF_ERROR(ncclGroupEnd());
CUDA_RETURN_IF_ERROR(cudaSetDevice(saved_device));
#else
NCCL_RETURN_IF_ERROR(ncclCommInitAll(
nccl_comms.data(), collective->num_local_devices, devices.data()));
#endif
for (int i = 0; i < collective->num_local_devices; ++i) {
members[i].nccl_comm = nccl_comms[i];
}
communicators_.emplace_back(
new Communicator(std::move(members), collective->communicator_key));
*communicator = communicators_.back().get();
return OkStatus();
}
void NcclManager::AddToAllReduce(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kAllReduce, reduction_op);
}
void NcclManager::AddToAllGather(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kAllGather,
                 ncclSum /*unused*/);
}
void NcclManager::AddToReduceScatter(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kReduceScatter, reduction_op);
}
void NcclManager::AddToAllToAll(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kAllToAll,
                 ncclSum /*unused*/);
}
void NcclManager::AddBroadcastSend(std::unique_ptr<Participant> participant,
const Context& context) {
participant->root = true;
AddParticipant(std::move(participant), context, kBroadcast,
                 ncclSum /*unused*/);
}
void NcclManager::AddBroadcastRecv(std::unique_ptr<Participant> participant,
const Context& context) {
AddParticipant(std::move(participant), context, kBroadcast,
                 ncclSum /*unused*/);
}
void NcclManager::AddReduceSend(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
AddParticipant(std::move(participant), context, kReduce, reduction_op);
}
void NcclManager::AddReduceRecv(std::unique_ptr<Participant> participant,
const Context& context,
ncclRedOp_t reduction_op) {
participant->root = true;
AddParticipant(std::move(participant), context, kReduce, reduction_op);
}
void NcclManager::SignalMultiNodeReady(const string& collective_key) {
Collective* to_run = nullptr;
{
mutex_lock l(mu_);
auto collective_it = collectives_.find(collective_key);
if (collective_it != collectives_.end()) {
Collective* collective = collective_it->second;
collective->multi_node_ready = true;
if (CheckReady(collective_key, collective)) {
to_run = collective;
}
VLOG(2) << "SignalMultiNodeReady collective " << collective_key
<< " to_run " << to_run;
}
}
if (to_run != nullptr) RunCollective(to_run);
}
void NcclManager::AddParticipant(std::unique_ptr<Participant> participant,
const Context& context,
CollectiveType collective_type,
ncclRedOp_t reduction_op) {
Collective* to_run = nullptr;
DataType data_type;
Status nccl_manager_status;
if (participant->input != nullptr) {
data_type = participant->input->dtype();
} else {
data_type = participant->output->dtype();
}
{
mutex_lock l(mu_);
nccl_manager_status = status_;
if (nccl_manager_status.ok()) {
auto collective_it = collectives_.find(context.collective_key);
Collective* collective = nullptr;
if (collective_it == collectives_.end()) {
collective = new Collective(
context.collective_key, data_type, collective_type, reduction_op,
context.num_local_devices, context.num_global_devices,
context.communicator_key);
collectives_.emplace(context.collective_key, collective);
} else {
collective = collective_it->second;
}
if (collective->status.ok() && !collective->single_node &&
collective->communicator_key.empty()) {
collective->status = errors::Internal(
"Collective ", reduction_op,
" is multi node with num_local_devices=",
collective->num_local_devices,
" and num_global_devices=", collective->num_global_devices,
" but has an empty communicator_key");
}
if (collective->status.ok() && collective->communicator_key.size() !=
context.communicator_key.size()) {
collective->status =
errors::Internal("Collective ", reduction_op,
" mismatch in member communicator_key with size ",
collective->communicator_key.size(),
" and arg communicator_key with size ",
context.communicator_key.size());
}
if (collective->status.ok() && collective->type != collective_type) {
collective->status = errors::Internal(
"Collective ", reduction_op, " previously initialized with type ",
collective->type, " but now got type ", collective_type);
}
if (collective->status.ok() &&
collective->num_global_devices != context.num_global_devices) {
collective->status =
errors::Internal("Collective ", reduction_op,
" previously initialized with num_global_devices ",
collective->num_global_devices, " but now got ",
context.num_global_devices);
}
if (collective->status.ok() &&
collective->num_local_devices != context.num_local_devices) {
collective->status =
errors::Internal("Collective ", reduction_op,
"previously initialized with num_local_devices ",
collective->num_local_devices, " but now got ",
context.num_local_devices);
}
if (collective->status.ok() &&
collective->participants.size() >= collective->num_local_devices) {
collective->status = errors::Internal(
"Collective ", reduction_op, " expected ",
collective->num_local_devices, " participants but now has ",
collective->participants.size(),
" with one more participant being added");
}
if (collective->status.ok() && collective->root_rank >= 0 &&
context.source_rank >= 0 &&
collective->root_rank != context.source_rank) {
collective->status = errors::Internal(
"Collective ", collective->collective_key,
" already has root_rank ", collective->root_rank,
" but new participant has root_rank ", context.source_rank);
}
if (collective->status.ok() &&
!kValidDataTypes.Contains(collective->data_type)) {
collective->status = errors::Internal(
"Collective ", collective->collective_key,
" expected data types compatible with NCCL but instead got ",
DataTypeString(collective->data_type));
}
if (context.source_rank >= 0) {
collective->root_rank = context.source_rank;
}
collective->participants.emplace_back(std::move(participant));
++collective->available_participants;
if (CheckReady(context.collective_key, collective)) {
to_run = collective;
}
}
}
if (!nccl_manager_status.ok()) {
participant->done_callback(nccl_manager_status);
return;
}
if (to_run != nullptr) RunCollective(to_run);
}
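// Returns true (and removes the collective from `collectives_`) once all
// local participants have been added and, for multi-node collectives, the
// multi-node ready signal has been received. Must be called with `mu_` held.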
bool NcclManager::CheckReady(const string& collective_key,
Collective* collective) {
if (collective->available_participants == collective->num_local_devices) {
if (collective->num_global_devices == collective->num_local_devices ||
collective->multi_node_ready) {
collectives_.erase(collective_key);
return true;
}
}
return false;
}
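// Runs `collective`: obtains a communicator, makes each NCCL stream wait on
// the participant's producing tensor stream, validates the root rank, and
// then enqueues one launch per participant. A single static mutex serializes
// the enqueueing so that launches from different collectives are not
// interleaved across streams.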
void NcclManager::RunCollective(Collective* collective) {
tensorflow::profiler::TraceMeProducer traceme("Schedule Collective");
collective->trace_context = traceme.GetContextId();
static mutex collective_mu(LINKER_INITIALIZED);
Status status = collective->status;
if (status.ok()) {
status = GetCommunicator(collective, &collective->communicator);
}
for (int i = 0; status.ok() && i < collective->num_local_devices; ++i) {
Participant* p = collective->participants[i].get();
NcclStream* nccl_stream = collective->communicator->members[i].nccl_stream;
CHECK(nccl_stream != nullptr);
const int rank = p->global_rank >= 0 ? p->global_rank : i;
if (p->input != nullptr) {
status = nccl_stream->stream->WaitFor(p->tensor_stream);
}
if (p->root) {
if (collective->root_rank == -1) {
collective->root_rank = rank;
} else if (collective->root_rank != rank) {
status = errors::Internal(
"Inconsistent root rank ", collective->root_rank, " and GPU id ",
p->gpu_device_id, " rank ", rank, " also marked as root.");
}
}
VLOG(2) << "RunCollective rank " << rank << " global_rank "
<< p->global_rank << " root_rank " << collective->root_rank;
}
if (status.ok() && collective->type == kBroadcast &&
collective->root_rank < 0) {
status = errors::Internal("Root rank not indicated for collective ",
collective->collective_key);
}
if (!status.ok()) {
for (int i = 0; i < collective->num_local_devices; ++i) {
collective->participants[i]->done_callback(status);
}
collective->Unref();
return;
}
{
mutex_lock l(collective_mu);
for (int i = 0; i < collective->num_local_devices; ++i) {
NcclStream* nccl_stream =
collective->communicator->members[i].nccl_stream;
mutex_lock l(nccl_stream->mu);
nccl_stream->pending_launches_.push_front(std::make_pair(collective, i));
collective->Ref();
nccl_stream->cv.notify_all();
}
}
collective->Unref();
}
namespace {
size_t ComputeBufferSize(const NcclManager::Participant* p,
DataType data_type) {
size_t num_elements = 0;
if (p->output) {
num_elements += p->output->NumElements();
} else if (p->input) {
num_elements += p->input->NumElements();
}
return num_elements * DataTypeSize(data_type);
}
}
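// Per-stream worker loop: blocks until a launch is queued (or shutdown is
// requested), pops it, issues the corresponding NCCL call on the stream, and
// arranges for the participant's done_callback to run via the EventMgr once
// the kernel completes.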
void NcclManager::LoopKernelLaunches(NcclStream* nccl_stream) {
#if TENSORFLOW_USE_ROCM
se::Stream* comm_stream = nccl_stream->stream;
#else
se::Stream* comm_stream = nccl_stream->stream.get();
#endif
ScopedActivateContext scoped_context(nccl_stream->executor);
cudaStream_t cu_stream = reinterpret_cast<cudaStream_t>(
comm_stream->platform_specific_handle().stream);
while (true) {
std::pair<Collective*, int> next_launch;
{
VLOG(3) << "Locking mutex nccl_stream " << nccl_stream;
mutex_lock l(nccl_stream->mu);
while (nccl_stream->pending_launches_.empty()) {
if (nccl_stream->shutdown_requested) {
return;
}
nccl_stream->cv.wait(l);
}
next_launch = nccl_stream->pending_launches_.back();
nccl_stream->pending_launches_.pop_back();
}
Collective* collective = next_launch.first;
tensorflow::profiler::TraceMeConsumer traceme("Run Collective",
collective->trace_context);
ncclDataType_t data_type = ToNcclType(collective->data_type);
int p_idx = next_launch.second;
Participant* p = collective->participants[p_idx].get();
auto nccl_comm = collective->communicator->members[p_idx].nccl_comm;
ncclResult_t nccl_result = ncclSuccess;
switch (collective->type) {
case kAllReduce: {
const void* sendbuff = p->input->tensor_data().data();
void* recvbuff = const_cast<char*>(p->output->tensor_data().data());
VLOG(2) << "call NcclAllReduce collective_key "
<< collective->collective_key << " participant " << p_idx
<< " num_participants " << collective->participants.size()
<< " sendbuff " << sendbuff << " recvbuff " << recvbuff
<< " nccl_comm " << nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclAllReduce",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "all_reduce"}});
});
nccl_result = ncclAllReduce(sendbuff, recvbuff, p->input->NumElements(),
data_type, collective->reduction_op,
nccl_comm, cu_stream);
break;
}
case kBroadcast: {
const void* sendbuff = nullptr;
void* recvbuff = nullptr;
int num_elements = -1;
if (p->input) {
sendbuff = p->input->tensor_data().data();
num_elements = p->input->NumElements();
}
if (p->output) {
recvbuff = const_cast<char*>(p->output->tensor_data().data());
num_elements = p->output->NumElements();
} else {
recvbuff = const_cast<void*>(sendbuff);
}
if (num_elements < 0) {
p->done_callback(errors::Internal(
"Both input and output are null in ncclBroadcast"));
collective->Unref();
continue;
}
VLOG(2) << "call NcclBroadcast collective_key "
<< collective->collective_key << " participant " << p_idx
<< " sendbuff " << sendbuff << " recvbuff " << recvbuff
<< " nccl_comm " << nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclBroadcast",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "broadcast"}});
});
nccl_result =
ncclBroadcast(sendbuff, recvbuff, num_elements, data_type,
collective->root_rank, nccl_comm, cu_stream);
break;
}
case kReduce: {
const void* sendbuff = p->input->tensor_data().data();
void* recvbuff =
p->output ? const_cast<char*>(p->output->tensor_data().data())
: nullptr;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"buffer_size",
{{"output_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "reduce"}});
});
nccl_result = ncclReduce(sendbuff, recvbuff, p->input->NumElements(),
data_type, collective->reduction_op,
collective->root_rank, nccl_comm, cu_stream);
break;
}
case kAllGather: {
const void* sendbuff = p->input->tensor_data().data();
void* recvbuff = const_cast<char*>(p->output->tensor_data().data());
VLOG(2) << "call NcclAllGather collective_key "
<< collective->collective_key << " participant " << p_idx
<< " sendbuff " << sendbuff << " sendcount "
<< p->input->NumElements() << " recvbuff " << recvbuff
<< " recvcount " << p->output->NumElements() << " nccl_comm "
<< nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclAllGather",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "all_gather"}});
});
nccl_result = ncclAllGather(sendbuff, recvbuff, p->input->NumElements(),
data_type, nccl_comm, cu_stream);
break;
}
case kReduceScatter: {
const void* sendbuff = p->input->tensor_data().data();
void* recvbuff = const_cast<char*>(p->output->tensor_data().data());
VLOG(2) << "call NcclReduceScatter collective_key "
<< collective->collective_key << " participant " << p_idx
<< " num_participants " << collective->participants.size()
<< " sendbuff " << sendbuff << " recvbuff " << recvbuff
<< " nccl_comm " << nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclReduceScatter",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "reduce_scatter"}});
});
nccl_result = ncclReduceScatter(
sendbuff, recvbuff, p->output->NumElements(), data_type,
collective->reduction_op, nccl_comm, cu_stream);
break;
}
case kAllToAll: {
const char* sendbuff = p->input->tensor_data().data();
char* recvbuff = const_cast<char*>(p->output->tensor_data().data());
size_t count =
p->input->NumElements() / collective->participants.size();
size_t rank_offset = count * DataTypeSize(collective->data_type);
VLOG(2) << "call Nccl All to All collective_key "
<< collective->collective_key << " participant " << p_idx
<< " num_participants " << collective->participants.size()
<< " sendbuff " << static_cast<const void*>(sendbuff)
<< " recvbuff " << static_cast<void*>(recvbuff) << " nccl_comm "
<< nccl_comm << " comm_stream " << comm_stream
<< " cuda_stream " << cu_stream;
profiler::AnnotatedTraceMe traceme([&] {
return profiler::TraceMeEncode(
"ncclAllToAll",
{{"buffer_size", ComputeBufferSize(p, collective->data_type)},
{"collective_type", "all_to_all"}});
});
ncclGroupStart();
for (int i = 0; i < collective->participants.size(); ++i) {
ncclSend(sendbuff + i * rank_offset, count, data_type,
collective->participants[i]->global_rank, nccl_comm,
cu_stream);
ncclRecv(recvbuff + i * rank_offset, count, data_type,
collective->participants[i]->global_rank, nccl_comm,
cu_stream);
}
nccl_result = ncclGroupEnd();
break;
}
}
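    // Report the launch result to this participant once the work enqueued on
    // comm_stream has completed; each participant holds one reference on the
    // collective, released here.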
auto done_callback = [collective, p_idx, nccl_result]() {
VLOG(2) << "done Nccl kernel collective_key "
<< collective->collective_key << " participant " << p_idx
<< " ncclResult " << nccl_result;
if (nccl_result == ncclSuccess) {
collective->participants[p_idx]->done_callback(OkStatus());
} else {
collective->participants[p_idx]->done_callback(errors::Unknown(
"Error invoking NCCL: ", ncclGetErrorString(nccl_result)));
}
collective->Unref();
};
p->event_mgr->ThenExecute(comm_stream, done_callback);
}
}
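// Fails every pending collective with `s` and aborts all NCCL communicators.
// The aborts are issued in parallel on a temporary work queue, since
// ncclCommAbort may block.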
void NcclManager::StartAbort(const Status& s) {
absl::flat_hash_map<string, Collective*> collectives;
std::vector<std::unique_ptr<Communicator>> communicators;
{
mutex_lock l(mu_);
if (!status_.ok()) {
LOG(WARNING)
<< "NcclManager already aborted, ignoring subsequent StartAbort with "
<< s;
return;
}
status_ = s;
collectives.swap(collectives_);
communicators.swap(communicators_);
}
VLOG(2) << "Aborted NcclManager " << this << " with " << collectives.size()
<< " collectives and " << communicators.size()
<< " comms with status " << s;
for (const auto& item : collectives) {
for (const std::unique_ptr<Participant>& p : item.second->participants) {
p->done_callback(s);
}
item.second->Unref();
}
UnboundedWorkQueue queue(Env::Default(), "nccl_abort");
int num_comms = 0;
for (std::unique_ptr<Communicator>& communicator : communicators) {
num_comms += communicator->members.size();
}
BlockingCounter pending(num_comms);
for (std::unique_ptr<Communicator>& communicator : communicators) {
for (CommunicatorMember& member : communicator->members) {
queue.Schedule([&member, &pending]() {
ncclCommAbort(member.nccl_comm);
member.nccl_comm = nullptr;
pending.DecrementCount();
});
}
}
pending.Wait();
}
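// Clears the sticky error status set by StartAbort so the manager can be
// reused.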
void NcclManager::Reset() {
mutex_lock l(mu_);
status_ = Status();
VLOG(2) << "Reset NcclManager " << this;
}
}
#endif | #include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM
#include <algorithm>
#include <random>
#include <vector>
#include "tensorflow/core/common_runtime/device_factory.h"
#include "tensorflow/core/common_runtime/gpu/gpu_device.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/nccl/nccl_manager.h"
#include "tensorflow/core/platform/test.h"
#include "tensorflow/core/platform/unbounded_work_queue.h"
namespace tensorflow {
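// Returns all visible GPU devices, taking ownership from the device factory.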
static std::vector<std::unique_ptr<BaseGPUDevice>> GetGPUDevices() {
std::vector<std::unique_ptr<Device>> devices;
TF_CHECK_OK(DeviceFactory::GetFactory(DEVICE_GPU)
->AddDevices(SessionOptions(), "", &devices));
std::vector<std::unique_ptr<BaseGPUDevice>> gpus;
for (std::unique_ptr<Device>& device : devices) {
if (device->device_type() == "GPU") {
gpus.emplace_back(static_cast<BaseGPUDevice*>(device.release()));
}
}
return gpus;
}
template <typename Scalar>
class NcclManagerTest : public ::testing::Test {
public:
struct TestCase {
TestCase(int num_nodes, int num_ranks_per_node)
: num_nodes(num_nodes), num_ranks_per_node(num_ranks_per_node) {}
std::vector<Tensor> ins;
std::vector<Tensor> outs;
Tensor expected;
const int num_nodes;
const int num_ranks_per_node;
mutex mu;
Status final_status;
int num_completed TF_GUARDED_BY(mu) = 0;
condition_variable done_cv;
};
static void SetUpTestSuite() {
setenv("NCCL_DEBUG", "INFO", 1 );
setenv("NCCL_LAUNCH_MODE", "PARALLEL", 1 );
devices_ = new std::vector<std::unique_ptr<BaseGPUDevice>>(GetGPUDevices());
VLOG(1) << "Running test with " << devices_->size() << " gpus";
if (devices_->size() <= 1) {
LOG(FATAL) << "Cannot run NCCL test without multiple GPUs";
}
work_queue_ = new UnboundedWorkQueue(Env::Default(), "nccl_manager_test");
}
void SetUp() override {
ASSERT_GT(devices_->size(), 0) << "No GPUs found";
ASSERT_NE(work_queue_, nullptr);
}
static int32 NumGPUs() { return static_cast<int32>(devices_->size()); }
static void PopulateMultiNodeParams(int* num_nodes, int* num_ranks_per_node) {
const auto num_gpus = NumGPUs();
CHECK_GT(num_gpus, 1);
*num_nodes = 2;
if (num_gpus % 2 == 0) {
*num_ranks_per_node = num_gpus / 2;
} else {
*num_ranks_per_node = (num_gpus - 1) / 2;
}
}
static void TearDownTestSuite() {
delete devices_;
delete work_queue_;
}
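  // Builds per-rank input tensors on each GPU and accumulates the expected
  // reduction result on the host for later verification.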
TestCase* MakeReductionTestCase(int num_nodes, int num_ranks_per_node,
ncclRedOp_t reduction_op, TensorShape shape,
float value_offset) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, shape);
if (reduction_op == ncclProd) {
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(1); });
} else if (reduction_op == ncclSum) {
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(0); });
} else if (reduction_op == ncclMax) {
test::FillFn<Scalar>(&test_case->expected, [](int) { return -max_; });
} else if (reduction_op == ncclMin) {
test::FillFn<Scalar>(&test_case->expected, [](int) { return max_; });
} else {
LOG(FATAL) << "Invalid reduction_op " << reduction_op;
}
float value_scale = 0.01;
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
Tensor in_cpu(data_type_, shape);
test::FillFn<Scalar>(&in_cpu, [&](int index) {
return static_cast<Scalar>((index + 1) * value_scale + value_offset);
});
for (int j = 0; j < shape.num_elements(); ++j) {
auto in_val = in_cpu.flat<Scalar>()(j);
auto out_expr = test_case->expected.template flat<Scalar>();
if (reduction_op == ncclProd) {
out_expr(j) = out_expr(j) * in_val;
} else if (reduction_op == ncclSum) {
out_expr(j) = out_expr(j) + in_val;
} else if (reduction_op == ncclMax) {
if (in_val > out_expr(j)) {
out_expr(j) = in_val;
}
} else if (reduction_op == ncclMin) {
if (in_val < out_expr(j)) {
out_expr(j) = in_val;
}
}
}
value_scale *= 10;
test_case->ins.emplace_back(GpuAllocator(device), data_type_, shape);
test_case->outs.emplace_back(GpuAllocator(device), data_type_, shape);
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
}
}
return test_case;
}
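  // Builds per-rank inputs and an expected output in which each rank's input
  // lands at that rank's offset in the gathered result.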
TestCase* MakeGatherTestCase(int num_nodes, int num_ranks_per_node,
TensorShape in_shape, TensorShape out_shape) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, out_shape);
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(0); });
float value_scale = 0.01;
for (int node = 0; node < num_nodes; ++node) {
for (int i = 0; i < num_ranks_per_node; ++i) {
auto* device = GetDevice(num_ranks_per_node, node, i);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
Tensor in_cpu(data_type_, in_shape);
test::FillFn<Scalar>(&in_cpu, [&](int index) {
return static_cast<Scalar>((index + 1) * value_scale);
});
int32_t gather_idx =
(node * num_ranks_per_node + i) * in_shape.num_elements();
for (int j = 0; j < in_shape.num_elements(); ++j) {
auto in_val = in_cpu.flat<Scalar>()(j);
auto out_expr = test_case->expected.template flat<Scalar>();
out_expr(gather_idx + j) = in_val;
}
value_scale *= 10;
test_case->ins.emplace_back(GpuAllocator(device), data_type_, in_shape);
test_case->outs.emplace_back(GpuAllocator(device), data_type_,
out_shape);
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
}
}
return test_case;
}
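  // Only the source rank gets a real input (filled with ones); other ranks
  // get an empty input tensor. With in_place, the source's output aliases its
  // input buffer.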
TestCase* MakeBroadcastTestCase(int num_nodes, int num_ranks_per_node,
TensorShape shape, int src_node, int src_rank,
bool in_place) {
TestCase* test_case = new TestCase(num_nodes, num_ranks_per_node);
test_case->expected = Tensor(data_type_, shape);
test::FillFn<Scalar>(&test_case->expected,
[](int) { return static_cast<Scalar>(1); });
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
if (node == src_node && local_rank == src_rank) {
test_case->ins.emplace_back(GpuAllocator(device), data_type_, shape);
if (in_place) {
test_case->outs.emplace_back(test_case->ins.back());
} else {
test_case->outs.emplace_back(GpuAllocator(device), data_type_,
shape);
}
Tensor in_cpu(data_type_, shape);
test::FillFn<Scalar>(&in_cpu,
[](int) { return static_cast<Scalar>(1); });
const Tensor& in_gpu = test_case->ins.back();
auto in_gpu_mem = AsDeviceMemory(in_gpu.flat<Scalar>().data());
auto* stream = device->tensorflow_accelerator_device_info()->stream;
TF_CHECK_OK(stream->Memcpy(&in_gpu_mem, in_cpu.flat<Scalar>().data(),
in_cpu.TotalBytes()));
} else {
test_case->ins.emplace_back(Tensor());
test_case->outs.emplace_back(GpuAllocator(device), data_type_, shape);
}
}
}
return test_case;
}
void WaitForTestCompletion(TestCase* test_case) {
mutex_lock l(test_case->mu);
while (test_case->num_completed != test_case->outs.size()) {
test_case->done_cv.wait(l);
}
}
void VerifyResults(TestCase* test_case) {
WaitForTestCompletion(test_case);
TF_ASSERT_OK(test_case->final_status);
for (int node = 0; node < test_case->num_nodes; ++node) {
for (int local_rank = 0; local_rank < test_case->num_ranks_per_node;
++local_rank) {
auto* device =
GetDevice(test_case->num_ranks_per_node, node, local_rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(test_case->num_ranks_per_node, node, local_rank);
const Tensor& out_gpu = test_case->outs[global_rank];
Tensor out_cpu(data_type_, out_gpu.shape());
auto out_gpu_mem = AsDeviceMemory(out_gpu.flat<Scalar>().data());
TF_CHECK_OK(stream->Memcpy(out_cpu.flat<Scalar>().data(), out_gpu_mem,
out_cpu.TotalBytes()));
TF_ASSERT_OK(stream->BlockHostUntilDone());
VLOG(1) << "Verifying rank " << global_rank << " expected shape "
<< test_case->expected.shape() << " out shape "
<< out_cpu.shape();
test::ExpectClose(test_case->expected, out_cpu);
}
}
}
void VerifyError(TestCase* test_case) {
WaitForTestCompletion(test_case);
LOG(INFO) << test_case->final_status;
EXPECT_EQ(test_case->final_status.code(), error::INTERNAL);
}
NcclManager::DoneCallback CreateDoneCallback(TestCase* test_case) {
return [this, test_case](Status s) {
mutex_lock l(test_case->mu);
test_case->final_status.Update(s);
if (++test_case->num_completed == test_case->outs.size()) {
test_case->done_cv.notify_one();
}
};
}
struct NodeState {
NcclManager nccl_manager;
std::atomic<int> launched{0};
};
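  // Emulates multiple nodes in one process by giving each "node" its own
  // NcclManager; each node's ranks are driven by one task on the shared work
  // queue.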
void RunMultiNodeAllReduceTest(const int num_nodes,
const int num_ranks_per_node) {
std::vector<NodeState> node_states(num_nodes);
RunMultiNodeAllReduceTest(node_states, num_ranks_per_node);
}
void RunMultiNodeAllReduceTest(std::vector<NodeState>& node_states,
const int num_ranks_per_node) {
const int num_nodes = node_states.size();
const int num_global_ranks = num_nodes * num_ranks_per_node;
const string collective_key = "allreduce";
const string communicator_key =
node_states[0].nccl_manager.GenerateCommunicatorKey();
for (int op = 0; op < 4; ++op) {
ncclRedOp_t reduction_op = static_cast<ncclRedOp_t>(op);
std::unique_ptr<TestCase> test_case(
this->MakeReductionTestCase(num_nodes, num_ranks_per_node,
reduction_op, TensorShape({2, 3}), 0.0f));
for (int node = 0; node < num_nodes; ++node) {
auto node_fn = [this, node, num_ranks_per_node, num_global_ranks,
&node_states, &communicator_key, &collective_key,
reduction_op, &test_case] {
for (int local_rank = 0; local_rank < num_ranks_per_node;
++local_rank) {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(num_ranks_per_node, node, local_rank);
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[global_rank],
&test_case->outs[global_rank], global_rank,
this->CreateDoneCallback(test_case.get()));
node_states[node].nccl_manager.AddToAllReduce(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, -1},
reduction_op);
VLOG(1) << "AddToAllReduce node " << node << " global_rank "
<< global_rank;
}
node_states[node].nccl_manager.SignalMultiNodeReady(collective_key);
};
this->work_queue_->Schedule(node_fn);
}
VLOG(2) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
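  // Schedules one task per rank: the source rank registers a broadcast send,
  // all other ranks register receives, and the last rank launched on each
  // node signals multi-node readiness.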
void RunMultiNodeBroadcastTest(const int num_nodes,
const int num_ranks_per_node,
const int src_node, const int src_local_rank,
const bool in_place) {
const int num_global_ranks = num_nodes * num_ranks_per_node;
const int src_global_rank = src_node * num_ranks_per_node + src_local_rank;
const string collective_key = "broadcast";
std::vector<NodeState> node_states(num_nodes);
const string communicator_key =
node_states[0].nccl_manager.GenerateCommunicatorKey();
std::unique_ptr<TestCase> test_case(this->MakeBroadcastTestCase(
num_nodes, num_ranks_per_node, TensorShape({5, 6}), src_node,
src_local_rank, in_place));
for (int node = 0; node < num_nodes; ++node) {
for (int local_rank = 0; local_rank < num_ranks_per_node; ++local_rank) {
auto rank_fn = [this, node, num_ranks_per_node, num_global_ranks,
src_global_rank, local_rank, &node_states,
&collective_key, &communicator_key, &test_case]() {
auto* device = GetDevice(num_ranks_per_node, node, local_rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
const int global_rank =
GlobalRank(num_ranks_per_node, node, local_rank);
auto* input = global_rank == src_global_rank
? &test_case->ins[global_rank]
: nullptr;
auto* output = test_case->outs[global_rank].NumElements() == 0
? nullptr
: &test_case->outs[global_rank];
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, input, output, global_rank,
this->CreateDoneCallback(test_case.get()));
if (global_rank == src_global_rank) {
node_states[node].nccl_manager.AddBroadcastSend(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, src_global_rank});
} else {
node_states[node].nccl_manager.AddBroadcastRecv(
std::move(participant),
{collective_key, num_ranks_per_node, num_global_ranks,
communicator_key, src_global_rank});
}
if (++node_states[node].launched == num_ranks_per_node) {
node_states[node].nccl_manager.SignalMultiNodeReady(collective_key);
}
};
this->work_queue_->Schedule(std::move(rank_fn));
}
}
VLOG(2) << "Verifying results";
this->VerifyResults(test_case.get());
}
static int GlobalRank(int num_ranks_per_node, int node, int local_rank) {
return node * num_ranks_per_node + local_rank;
}
static BaseGPUDevice* GetDevice(int num_ranks_per_node, int node,
int local_rank) {
const int device_idx = GlobalRank(num_ranks_per_node, node, local_rank);
CHECK_LT(device_idx, devices_->size());
return (*devices_)[device_idx].get();
}
static UnboundedWorkQueue* work_queue_;
private:
static Allocator* GpuAllocator(BaseGPUDevice* device) {
return device->GetAllocator(AllocatorAttributes());
}
static se::DeviceMemory<Scalar> AsDeviceMemory(const Scalar* cuda_memory) {
se::DeviceMemoryBase wrapped(const_cast<Scalar*>(cuda_memory));
se::DeviceMemory<Scalar> typed(wrapped);
return typed;
}
static std::vector<std::unique_ptr<BaseGPUDevice>>* devices_;
static const DataType data_type_;
static const Scalar max_;
};
template <typename Scalar>
std::vector<std::unique_ptr<BaseGPUDevice>>* NcclManagerTest<Scalar>::devices_ =
nullptr;
template <typename Scalar>
const DataType NcclManagerTest<Scalar>::data_type_ =
DataTypeToEnum<Scalar>::value;
template <typename Scalar>
const Scalar NcclManagerTest<Scalar>::max_ =
Eigen::NumTraits<Scalar>::highest();
template <typename Scalar>
UnboundedWorkQueue* NcclManagerTest<Scalar>::work_queue_ = nullptr;
using TypeList = ::testing::Types<float, double>;
TYPED_TEST_SUITE(NcclManagerTest, TypeList);
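// Single-node all-reduce across all GPUs, once per supported reduction op.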
TYPED_TEST(NcclManagerTest, BasicSumReduction) {
const int num_ranks = this->NumGPUs();
for (int op = 0; op < 4; ++op) {
ncclRedOp_t reduction_op = static_cast<ncclRedOp_t>(op);
std::unique_ptr<typename TestFixture::TestCase> test_case(
        this->MakeReductionTestCase(/*num_nodes=*/1, num_ranks, reduction_op,
                                    TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
VLOG(2) << "rank " << rank << " device " << device->name();
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(
std::move(participant),
{"allreduce", num_ranks,
num_ranks, "",
-1},
reduction_op);
}
LOG(INFO) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
TYPED_TEST(NcclManagerTest, MultipleCallers) {
const int num_ranks = this->NumGPUs();
const int num_collectives_per_iteration = 10;
const int time_limit_micros = 1 * 1000 * 1000;
int64_t start = Env::Default()->NowMicros();
srand(Env::Default()->NowMicros());
for (;;) {
std::vector<std::pair<int, int>> case_and_rank;
std::vector<std::unique_ptr<typename TestFixture::TestCase>> test_cases;
for (int i = 0; i < num_collectives_per_iteration; ++i) {
      test_cases.emplace_back(this->MakeReductionTestCase(
          /*num_nodes=*/1, num_ranks, ncclSum,
          TensorShape({100, i % 5 + 1, i % 3 + 1}), 1.1f * i));
for (int j = 0; j < num_ranks; ++j) {
case_and_rank.emplace_back(i, j);
}
}
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* stream = device->tensorflow_accelerator_device_info()->stream;
TF_ASSERT_OK(stream->BlockHostUntilDone());
}
std::shuffle(case_and_rank.begin(), case_and_rank.end(),
std::mt19937(std::random_device()()));
mutex mu;
const int to_schedule = case_and_rank.size();
for (int i = 0; i < to_schedule; ++i) {
auto fn = [&]() {
int rank;
int test_num;
{
mutex_lock l(mu);
test_num = case_and_rank.back().first;
rank = case_and_rank.back().second;
case_and_rank.pop_back();
}
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
typename TestFixture::TestCase* test_case = test_cases[test_num].get();
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case));
NcclManager::instance()->AddToAllReduce(
std::move(participant),
          {strings::StrCat("allreduce", test_num),
           /*num_local_devices=*/num_ranks,
           /*num_global_devices=*/num_ranks,
           /*communicator_key=*/"", /*source_rank=*/-1},
ncclSum);
};
this->work_queue_->Schedule(fn);
}
VLOG(2) << "Verifying results for " << num_collectives_per_iteration
<< " collectives";
for (int i = 0; i < test_cases.size(); ++i) {
this->VerifyResults(test_cases[i].get());
}
int64_t delta = Env::Default()->NowMicros() - start;
if (delta > time_limit_micros) {
LOG(INFO) << "Ran for " << delta << " microsecs, now quitting";
break;
}
}
}
TYPED_TEST(NcclManagerTest, BasicAllGather) {
const int num_ranks = this->NumGPUs();
for (int i = 0; i < num_ranks; ++i) {
std::unique_ptr<typename TestFixture::TestCase> test_case(
        this->MakeGatherTestCase(/*num_nodes=*/1, num_ranks,
                                 /*in_shape=*/TensorShape({2, 3}),
                                 /*out_shape=*/TensorShape({2 * num_ranks, 3})));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
VLOG(2) << "rank " << rank << " device " << device->name();
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllGather(
std::move(participant),
{"allgather", num_ranks,
num_ranks, "",
-1});
}
LOG(INFO) << "Verifying results";
this->VerifyResults(test_case.get());
}
}
TYPED_TEST(NcclManagerTest, BasicBroadcast) {
  this->RunMultiNodeBroadcastTest(/*num_nodes=*/1,
                                  /*num_ranks_per_node=*/this->NumGPUs(),
                                  /*src_node=*/0, /*src_local_rank=*/0,
                                  /*in_place=*/false);
}
TYPED_TEST(NcclManagerTest, InPlaceBroadcast) {
  this->RunMultiNodeBroadcastTest(/*num_nodes=*/1,
                                  /*num_ranks_per_node=*/this->NumGPUs(),
                                  /*src_node=*/0, /*src_local_rank=*/0,
                                  /*in_place=*/true);
}
TYPED_TEST(NcclManagerTest, BroadcastWithDifferentRanks) {
for (int num_ranks = 1; num_ranks <= this->NumGPUs(); ++num_ranks) {
const int src_rank = static_cast<int>(random::New64() % num_ranks);
for (int in_place_idx = 0; in_place_idx <= 1; ++in_place_idx) {
const bool in_place = in_place_idx == 0;
      this->RunMultiNodeBroadcastTest(/*num_nodes=*/1, num_ranks,
                                      /*src_node=*/0, src_rank, in_place);
}
}
}
TEST(NcclManagerTest, CommunicatorKey) {
const string communicator_key =
NcclManager::instance()->GenerateCommunicatorKey();
EXPECT_EQ(communicator_key.size(), NCCL_UNIQUE_ID_BYTES);
}
#if !TENSORFLOW_USE_ROCM
TYPED_TEST(NcclManagerTest, MultiNode) {
int num_nodes;
int num_ranks_per_node;
this->PopulateMultiNodeParams(&num_nodes, &num_ranks_per_node);
VLOG(1) << "Calling RunMultiNodeAllReduceTest with num_nodes=" << num_nodes
<< " and num_ranks_per_node=" << num_ranks_per_node;
this->RunMultiNodeAllReduceTest(num_nodes, num_ranks_per_node);
}
#endif
TYPED_TEST(NcclManagerTest, MultiNodeSingle) {
  this->RunMultiNodeAllReduceTest(/*num_nodes=*/1,
                                  /*num_ranks_per_node=*/this->NumGPUs());
}
#if !TENSORFLOW_USE_ROCM
TYPED_TEST(NcclManagerTest, MultiNodeBroadcast) {
int num_nodes;
int num_ranks_per_node;
this->PopulateMultiNodeParams(&num_nodes, &num_ranks_per_node);
VLOG(1) << "Calling RunMultiNodeBroadcastTest with num_nodes=" << num_nodes
<< " and num_ranks_per_node=" << num_ranks_per_node;
  this->RunMultiNodeBroadcastTest(num_nodes, num_ranks_per_node,
                                  /*src_node=*/0, /*src_local_rank=*/0,
                                  /*in_place=*/true);
}
#endif
TYPED_TEST(NcclManagerTest, ConsistentCollectiveType) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
      this->MakeReductionTestCase(/*num_nodes=*/1, num_ranks, ncclSum,
                                  TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
if (rank == 0) {
NcclManager::instance()->AddToAllReduce(std::move(participant),
{"bad_coll_type",
num_ranks,
num_ranks,
"",
-1},
ncclSum);
} else {
NcclManager::instance()->AddBroadcastSend(
std::move(participant),
{"bad_coll_type",
num_ranks,
num_ranks,
"", -1});
}
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, ConsistentCommunicatorKey) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
      this->MakeReductionTestCase(/*num_nodes=*/1, num_ranks, ncclSum,
                                  TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(
std::move(participant),
{"bad_coll_type",
num_ranks,
num_ranks,
rank == 0 ? "" : NcclManager::instance()->GenerateCommunicatorKey(),
-1},
ncclSum);
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, ConsistentNumberOfDevices) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
      this->MakeReductionTestCase(/*num_nodes=*/1, num_ranks, ncclSum,
                                  TensorShape({2, 3}), 0.0f));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
int num_devices = rank == 0 ? num_ranks : num_ranks + 1;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[rank],
&test_case->outs[rank], -1,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddToAllReduce(std::move(participant),
{"bad_coll_type",
num_devices,
num_devices,
"",
-1},
ncclSum);
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, BroadcastNoSource) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
      this->MakeBroadcastTestCase(/*num_nodes=*/1, num_ranks,
                                  TensorShape({2, 3}), /*src_node=*/-1,
                                  /*src_rank=*/-1, /*in_place=*/false));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, nullptr, &test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddBroadcastRecv(std::move(participant),
{"bcast_no_send",
num_ranks,
num_ranks,
"",
-1});
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, BroadcastMultipleSends) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
      this->MakeBroadcastTestCase(/*num_nodes=*/1, num_ranks,
                                  TensorShape({2, 3}), /*src_node=*/-1,
                                  /*src_rank=*/-1, /*in_place=*/false));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->outs[rank],
&test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddBroadcastSend(std::move(participant),
{"bcast_multiple_send",
num_ranks,
num_ranks,
"",
-1});
}
this->VerifyError(test_case.get());
}
TYPED_TEST(NcclManagerTest, BroadcastInconsistentSource) {
const int num_ranks = 2;
std::unique_ptr<typename TestFixture::TestCase> test_case(
      this->MakeBroadcastTestCase(/*num_nodes=*/1, num_ranks,
                                  TensorShape({2, 3}), /*src_node=*/-1,
                                  /*src_rank=*/-1, /*in_place=*/false));
for (int rank = 0; rank < num_ranks; ++rank) {
auto* device = this->GetDevice(num_ranks, 0, rank);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->outs[rank],
&test_case->outs[rank], rank,
this->CreateDoneCallback(test_case.get()));
NcclManager::instance()->AddBroadcastRecv(std::move(participant),
{"bcast_inconsistent_source",
num_ranks,
num_ranks,
"",
rank});
}
this->VerifyError(test_case.get());
}
#if !TENSORFLOW_USE_ROCM
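// Runs a successful all-reduce, then aborts a collective that is stuck
// waiting for a participant that never arrives, resets every manager, and
// verifies that a fresh collective completes afterwards.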
TYPED_TEST(NcclManagerTest, AbortThenReset) {
using NodeState = typename TestFixture::NodeState;
using TestCase = typename TestFixture::TestCase;
const int num_nodes = 2;
std::vector<NodeState> nodes(num_nodes);
this->RunMultiNodeAllReduceTest(nodes, 1);
const string collective_key = "allreduce";
ncclRedOp_t reduction_op = static_cast<ncclRedOp_t>(0);
auto node_fn = [&](TestCase* test_case, int node,
const string& communicator_key) {
    auto* device =
        this->GetDevice(/*num_ranks_per_node=*/1, node, /*local_rank=*/0);
auto* info = device->tensorflow_accelerator_device_info();
auto* stream = device->tensorflow_accelerator_device_info()->stream;
auto participant = absl::make_unique<NcclManager::Participant>(
device->executor(), stream, info, &test_case->ins[node],
&test_case->outs[node], node,
this->CreateDoneCallback(test_case));
nodes[node].nccl_manager.AddToAllReduce(
std::move(participant),
        {collective_key, /*num_local_devices=*/1,
         /*num_global_devices=*/num_nodes, communicator_key,
         /*source_rank=*/-1},
reduction_op);
nodes[node].nccl_manager.SignalMultiNodeReady(collective_key);
};
string communicator_key = nodes[0].nccl_manager.GenerateCommunicatorKey();
{
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(
            num_nodes, /*num_ranks_per_node=*/1, reduction_op,
TensorShape({2, 3}), 0.0f));
for (int i = 0; i < num_nodes; ++i) {
this->work_queue_->Schedule(
[&node_fn, &test_case, i, communicator_key]() {
node_fn(test_case.get(), i, communicator_key);
});
}
this->VerifyResults(test_case.get());
}
ASSERT_GT(num_nodes, 1);
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(
            num_nodes, /*num_ranks_per_node=*/1, reduction_op,
TensorShape({2, 3}), 0.0f));
node_fn(test_case.get(), 0, communicator_key);
Env::Default()->SleepForMicroseconds(1000000);
for (auto& node : nodes) {
node.nccl_manager.StartAbort(errors::Unavailable("peer down"));
}
{
mutex_lock l(test_case->mu);
while (test_case->num_completed != 1) {
test_case->done_cv.wait(l);
}
}
for (auto& node : nodes) {
node.nccl_manager.Reset();
}
communicator_key = nodes[0].nccl_manager.GenerateCommunicatorKey();
{
std::unique_ptr<typename TestFixture::TestCase> test_case(
this->MakeReductionTestCase(
            num_nodes, /*num_ranks_per_node=*/1, reduction_op,
TensorShape({2, 3}), 0.0f));
for (int i = 0; i < num_nodes; ++i) {
this->work_queue_->Schedule(
[&node_fn, &test_case, i, communicator_key]() {
node_fn(test_case.get(), i, communicator_key);
});
}
this->VerifyResults(test_case.get());
}
}
#endif
}
#endif | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/nccl/nccl_manager.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/nccl/nccl_manager_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
02979f14-d915-4bdb-9b1d-3ff75a0e9139 | cpp | tensorflow/tensorflow | stablehlo_op_quant_spec | tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc | tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc | #include "tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.h"
#include <memory>
#include "absl/status/statusor.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OpDefinition.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/Value.h"
#include "mlir/Support/LLVM.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/lift_as_function_call.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/stablehlo/quantization_config.pb.h"
#include "tensorflow/compiler/mlir/quantization/tensorflow/quantization_options.pb.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tsl/platform/protobuf.h"
#define DEBUG_TYPE "stablehlo_opt_quant_spec"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::stablehlo::DotGeneralOp;
using ::stablehlo::quantization::Method;
using ::stablehlo::quantization::StaticRangePtq;
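// Returns true for lifted functions whose quantization method is explicitly
// `no_quantization`, which excludes them from quantization.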
bool IsDenylistedLiftedFunction(Operation* op) {
if (auto xla_call_module_op = dyn_cast_or_null<TF::XlaCallModuleOp>(op);
xla_call_module_op != nullptr) {
absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op);
if (method.ok() && method->has_no_quantization()) {
return true;
}
}
return false;
}
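// For a static-range PTQ method, copies each operand's per-channel
// quantization dimension (when specified) into `spec.coeff_op_quant_dim`.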
void PopulateCoeffOpQuantDimIfPerChannelQuantized(
TF::XlaCallModuleOp xla_call_module_op, OpQuantSpec& spec) {
absl::StatusOr<Method> method = GetQuantizationMethod(xla_call_module_op);
if (method.ok() && method->has_static_range_ptq()) {
const StaticRangePtq& static_range_ptq_spec = method->static_range_ptq();
for (const auto& [operand_idx, quantized_type] :
static_range_ptq_spec.input_quantized_types()) {
if (quantized_type.has_dimension_specs()) {
spec.coeff_op_quant_dim[operand_idx] =
quantized_type.dimension_specs().dimension();
}
}
}
}
}
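// Builds the quantization spec for an op. Currently only populated for
// lifted composite functions (conv / dot_general) invoked via XlaCallModule.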
std::unique_ptr<OpQuantSpec> GetStableHloOpQuantSpec(Operation* op) {
auto spec = std::make_unique<OpQuantSpec>();
if (auto call_op = dyn_cast_or_null<TF::XlaCallModuleOp>(op)) {
auto entry_function =
call_op->getAttrOfType<FlatSymbolRefAttr>("_entry_function");
StringRef function_name = entry_function.getValue();
if (!function_name.starts_with("composite_")) {
return spec;
}
if (function_name.contains("conv")) {
PopulateCoeffOpQuantDimIfPerChannelQuantized(call_op, *spec);
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
} else if (function_name.contains("dot_general")) {
const auto module_op = call_op->getParentOfType<ModuleOp>();
const SymbolTable symbol_table(module_op);
auto entry_func_op =
dyn_cast_or_null<func::FuncOp>(symbol_table.lookup(function_name));
auto dot_general_op = *entry_func_op.getOps<DotGeneralOp>().begin();
if (auto optional_dim = GetDotGeneralQuantizationDim(dot_general_op);
optional_dim) {
spec->coeff_op_quant_dim[1] = optional_dim.value();
} else {
spec->coeff_op_quant_dim[1] = -1;
}
if (function_name.contains("with_bias")) {
spec->biases_params[2] = {{0, 1},
quant::GetUniformQuantizedTypeForBias};
}
}
for (const auto [operand_idx, per_channel_dim] : spec->coeff_op_quant_dim) {
spec->quantizable_operands.insert(operand_idx);
}
}
return spec;
}
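// Ops listed here must use the same scale for operands and results; a subset
// additionally requires identical operand and result element types.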
std::unique_ptr<OpQuantScaleSpec> GetStableHloQuantConstraints(Operation* op) {
auto scale_spec = std::make_unique<OpQuantScaleSpec>();
if (llvm::isa<mlir::stablehlo::BroadcastInDimOp,
mlir::stablehlo::ConcatenateOp,
mlir::stablehlo::DynamicReshapeOp,
mlir::stablehlo::DynamicSliceOp, mlir::stablehlo::GatherOp,
mlir::stablehlo::PadOp, mlir::stablehlo::ReduceWindowOp,
mlir::stablehlo::ReshapeOp, mlir::stablehlo::SelectOp,
mlir::stablehlo::SliceOp, mlir::stablehlo::TransposeOp>(op)) {
scale_spec->has_same_scale_requirement = true;
}
if (llvm::isa<mlir::stablehlo::DynamicSliceOp, mlir::stablehlo::GatherOp,
mlir::stablehlo::PadOp, mlir::stablehlo::SliceOp>(op)) {
scale_spec->has_same_operand_and_result_type_requirement = true;
}
return scale_spec;
}
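// An op is quantizable if it is a constant, has a same-scale requirement, or
// is explicitly marked fully quantizable; terminators, quantize/dequantize
// casts, and denylisted lifted functions are not.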
bool IsOpQuantizableStableHlo(Operation* op) {
if (isa<func::ConstantOp, mlir::stablehlo::ConstantOp>(op)) {
return true;
} else if (op->hasTrait<OpTrait::IsTerminator>() ||
isa<quantfork::QuantizeCastOp, quantfork::DequantizeCastOp>(op)) {
return false;
}
if (IsDenylistedLiftedFunction(op)) {
LLVM_DEBUG(llvm::errs() << "Denylisted quantizable unit: \n" << op << "\n");
return false;
}
if (GetStableHloQuantConstraints(op)->has_same_scale_requirement) {
return true;
}
const bool attr_enforced_quantizable =
op->hasAttrOfType<StringAttr>(kQuantTraitAttrName) &&
op->getAttrOfType<StringAttr>(kQuantTraitAttrName).getValue().str() ==
QuantTraitValues[QuantizationTrait::FullyQuantizable];
return attr_enforced_quantizable;
}
} | #include "tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/Support/LogicalResult.h"
#include "stablehlo/dialect/StablehloOps.h"
#include "tensorflow/compiler/mlir/lite/quantization/ir/QuantOps.h"
#include "tensorflow/compiler/mlir/quantization/common/attrs_and_constraints.h"
#include "tensorflow/compiler/mlir/quantization/common/func.h"
#include "tensorflow/compiler/mlir/quantization/common/quantization_lib/quantization_utils.h"
#include "tensorflow/compiler/mlir/quantization/common/test_base.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/core/platform/test.h"
namespace mlir::quant::stablehlo {
namespace {
using ::mlir::stablehlo::GatherOp;
using ::testing::IsEmpty;
using ::testing::IsTrue;
using ::testing::NotNull;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
using IsOpQuantizableStableHloTest = ::mlir::quant::QuantizationTestBase;
constexpr absl::string_view kModuleConstantAdd = R"mlir(
module {
func.func @constant_add() -> (tensor<3x2xf32>) {
%cst1 = stablehlo.constant dense<2.4> : tensor<3x2xf32>
%cst2 = stablehlo.constant dense<5.7> : tensor<3x2xf32>
%add = stablehlo.add %cst1, %cst2 : (tensor<3x2xf32>, tensor<3x2xf32>) -> tensor<3x2xf32>
func.return %add : tensor<3x2xf32>
}
}
)mlir";
constexpr absl::string_view kModuleCompositeSameScale = R"mlir(
module {
func.func @same_scale_after_composite() -> tensor<3x1xf32> {
%0 = "tf.XlaCallModule"() {Sout = [#tf_type.shape<1x3>], _entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _stablehlo_module_attrs = {}, _tfl_quant_trait = "fully_quantizable", device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : () -> tensor<1x3xf32>
%1 = "quantfork.qcast"(%0) {volatile} : (tensor<1x3xf32>) -> tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
%2 = "quantfork.dcast"(%1) : (tensor<1x3x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<1x3xf32>
%3 = stablehlo.reshape %2 : (tensor<1x3xf32>) -> tensor<3x1xf32>
%4 = "quantfork.qcast"(%3) {volatile} : (tensor<3x1xf32>) -> tensor<3x1x!quant.uniform<i8:f32, 0.13170163023705575:-1>>
%5 = "quantfork.dcast"(%4) : (tensor<3x1x!quant.uniform<i8:f32, 0.13170163023705575:-1>>) -> tensor<3x1xf32>
return %5 : tensor<3x1xf32>
}
}
)mlir";
constexpr absl::string_view kModuleCompositeNoAttr = R"mlir(
module {
func.func @composite_without_attr() -> tensor<1x3xf32> {
%0 = "tf.XlaCallModule"() {Sout = [#tf_type.shape<1x3>], _entry_function = @non_quantizable_composite, _original_entry_function = "non_quantizable_composite", _stablehlo_module_attrs = {}, device = "", dim_args_spec = [], disabled_checks = [], has_token_input_output = false, module = "", platforms = [], version = 5 : i64} : () -> tensor<1x3xf32>
return %0 : tensor<1x3xf32>
}
}
)mlir";
TEST_F(IsOpQuantizableStableHloTest, ConstantOpQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto constant_op =
FindOperationOfType<mlir::stablehlo::ConstantOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(constant_op));
}
TEST_F(IsOpQuantizableStableHloTest, TerminatorOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto return_op = FindOperationOfType<func::ReturnOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(return_op));
}
TEST_F(IsOpQuantizableStableHloTest, SameScaleOpQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto reshape_op = FindOperationOfType<mlir::stablehlo::ReshapeOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(reshape_op));
}
TEST_F(IsOpQuantizableStableHloTest, NonSameScaleOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleConstantAdd);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>("constant_add");
ASSERT_THAT(test_func, NotNull());
auto add_op = FindOperationOfType<mlir::stablehlo::AddOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(add_op));
}
TEST_F(IsOpQuantizableStableHloTest, ValidXlaCallModuleOpQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, InvalidXlaCallModuleOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleCompositeNoAttr);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("composite_without_attr");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, QuantizeDequantizeOpNotQuantizable) {
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleCompositeSameScale);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("same_scale_after_composite");
ASSERT_THAT(test_func, NotNull());
auto quantize_op = FindOperationOfType<quantfork::QuantizeCastOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(quantize_op));
auto dequantize_op =
FindOperationOfType<quantfork::DequantizeCastOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(dequantize_op));
}
TEST_F(IsOpQuantizableStableHloTest,
XlaCallModuleOpQuantizableWhenNotDenylisted) {
constexpr absl::string_view
kModuleXlaCallModuleOpWithDefaultQuantizationMethod = R"mlir(
func.func @xla_call_module_default_quantization_method(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _quantization_method = "", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}, _tfl_quant_trait = "fully_quantizable"} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleXlaCallModuleOpWithDefaultQuantizationMethod);
ASSERT_TRUE(module_op);
auto test_func = module_op->lookupSymbol<func::FuncOp>(
"xla_call_module_default_quantization_method");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_TRUE(IsOpQuantizableStableHlo(xla_call_module_op));
}
TEST_F(IsOpQuantizableStableHloTest, DenylistedXlaCallModuleOpNotQuantizable) {
constexpr absl::string_view kModuleDenylistedXlaCallModuleOp = R"mlir(
func.func @xla_call_module_denylisted(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}> {_entry_function = @composite_dot_general_fn_1, _original_entry_function = "composite_dot_general_fn_1", _quantization_method = "no_quantization {}", _stablehlo_module_attrs = {jax.uses_shape_polymorphism = true}, _tfl_quant_trait = "fully_quantizable"} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kModuleDenylistedXlaCallModuleOp);
ASSERT_TRUE(module_op);
auto test_func =
module_op->lookupSymbol<func::FuncOp>("xla_call_module_denylisted");
ASSERT_THAT(test_func, NotNull());
auto xla_call_module_op = FindOperationOfType<TF::XlaCallModuleOp>(test_func);
EXPECT_FALSE(IsOpQuantizableStableHlo(xla_call_module_op));
}
using GetStableHloOpQuantSpecTest = ::mlir::quant::QuantizationTestBase;
TEST_F(GetStableHloOpQuantSpecTest,
EmptyCoeffOpQuantDimForPerTensorQuantizedConvolution) {
constexpr absl::string_view
kXlaCallModuleOpWithPerTensorQuantizedConvolution = R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_conv_fn_1,
_original_entry_function = "composite_conv_fn_1",
_quantization_method = "static_range_ptq {}",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true},
_tfl_quant_trait = "fully_quantizable"
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithPerTensorQuantizedConvolution);
ASSERT_TRUE(module_op);
const FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const std::unique_ptr<OpQuantSpec> op_quant_spec =
GetStableHloOpQuantSpec(*xla_call_module_op);
ASSERT_THAT(op_quant_spec, NotNull());
EXPECT_THAT(op_quant_spec->coeff_op_quant_dim, IsEmpty());
}
TEST_F(GetStableHloOpQuantSpecTest,
EmptyCoeffOpQuantDimForPerChannelQuantizedConvolution) {
constexpr absl::string_view
kXlaCallModuleOpWithPerChannelQuantizedConvolution = R"mlir(
func.func @main(%arg0: tensor<1x1x3xf32>, %arg1: tensor<3x4xf32>) -> tensor<1x1x4xf32> {
%0 = "tf.XlaCallModule"(%arg0, %arg1) <{Sout = [#tf_type.shape<1x1x4>], dim_args_spec = [], disabled_checks = [], function_list = [], has_token_input_output = false, module = "", platforms = ["CPU"], version = 9 : i64}>
{
_entry_function = @composite_conv_fn_1,
_original_entry_function = "composite_conv_fn_1",
_quantization_method = "static_range_ptq {input_quantized_types {key: 1, value {dimension_specs {dimension: 3}}}}",
_stablehlo_module_attrs = {jax.uses_shape_polymorphism = true},
_tfl_quant_trait = "fully_quantizable"
} : (tensor<1x1x3xf32>, tensor<3x4xf32>) -> tensor<1x1x4xf32>
return %0 : tensor<1x1x4xf32>
}
)mlir";
const OwningOpRef<ModuleOp> module_op =
ParseModuleOpString(kXlaCallModuleOpWithPerChannelQuantizedConvolution);
ASSERT_TRUE(module_op);
const FailureOr<TF::XlaCallModuleOp> xla_call_module_op =
FindFirstOpFromMainFunc<TF::XlaCallModuleOp>(*module_op);
ASSERT_TRUE(succeeded(xla_call_module_op));
const std::unique_ptr<OpQuantSpec> op_quant_spec =
GetStableHloOpQuantSpec(*xla_call_module_op);
ASSERT_THAT(op_quant_spec, NotNull());
EXPECT_THAT(op_quant_spec->coeff_op_quant_dim,
UnorderedElementsAre(Pair(1, 3)));
}
using GetStableHloQuantConstraintsTest = ::mlir::quant::QuantizationTestBase;
TEST_F(GetStableHloQuantConstraintsTest,
HasSameOperandAndResultTypeRequirementSucceeds) {
constexpr absl::string_view kModuleGather = R"mlir(
module {
func.func @main() -> (tensor<2x3x2x2xf32>) {
%0 = stablehlo.constant dense<1.0> : tensor<3x4x2xf32>
%1 = stablehlo.constant dense<2> : tensor<2x3x2xi64>
%2 = "stablehlo.gather"(%0, %1) {
dimension_numbers = #stablehlo.gather<
offset_dims = [2, 3],
collapsed_slice_dims = [0],
start_index_map = [1, 0],
index_vector_dim = 2>,
slice_sizes = array<i64: 1, 2, 2>,
indices_are_sorted = false
} : (tensor<3x4x2xf32>, tensor<2x3x2xi64>) -> tensor<2x3x2x2xf32>
func.return %2 : tensor<2x3x2x2xf32>
}
}
)mlir";
OwningOpRef<ModuleOp> module_op = ParseModuleOpString(kModuleGather);
ASSERT_TRUE(module_op);
func::FuncOp main_fn = FindMainFuncOp(*module_op);
ASSERT_THAT(main_fn, NotNull());
Operation* gather_op = FindOperationOfType<GatherOp>(main_fn);
const auto spec = GetStableHloQuantConstraints(gather_op);
EXPECT_THAT(spec, NotNull());
EXPECT_THAT(spec->has_same_operand_and_result_type_requirement, IsTrue());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/quantization/stablehlo/ops/stablehlo_op_quant_spec_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
8515d29c-d1da-488d-9277-faa53268774a | cpp | tensorflow/tensorflow | dialect | tensorflow/core/ir/types/dialect.cc | tensorflow/core/ir/types/dialect_test.cc | #include "tensorflow/core/ir/types/dialect.h"
#include <cstdint>
#include <optional>
#include <string>
#include "absl/strings/escaping.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Traits.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/OperationSupport.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Support/LogicalResult.h"
#define GET_ATTRDEF_CLASSES
#include "tensorflow/core/ir/types/attributes.cc.inc"
#include "tensorflow/core/ir/types/attributes_enum.cc.inc"
#define GET_TYPEDEF_CLASSES
#include "tensorflow/core/ir/types/types.cc.inc"
#include "tensorflow/core/ir/types/dialect.cpp.inc"
namespace mlir {
namespace tf_type {
void TFTypeDialect::initialize() {
addAttributes<
#define GET_ATTRDEF_LIST
#include "tensorflow/core/ir/types/attributes.cc.inc"
>();
addTypes<ControlType, OpaqueTensorType,
#define HANDLE_TF_TYPE(tftype, enumerant, name) tftype##Type,
#define HANDLE_LAST_TF_TYPE(tftype, enumerant, name) tftype##Type
#include "tensorflow/core/ir/types/types.def"
>();
}
namespace {
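// Parses a type that may carry subtypes, e.g. `resource<tensor<f32>>`; when
// no `<...>` list is present, the type has unconstrained subtypes.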
template <typename TypeWithSubtype>
Type ParseTypeWithSubtype(MLIRContext* context, DialectAsmParser& parser) {
if (failed(parser.parseOptionalLess())) return TypeWithSubtype::get(context);
SmallVector<TensorType, 1> subtypes;
do {
TensorType tensor_ty;
if (parser.parseType(tensor_ty)) return Type();
if (!IsValidTFTensorType(tensor_ty)) {
parser.emitError(parser.getNameLoc()) << "invalid subtype: " << tensor_ty;
return Type();
}
subtypes.push_back(tensor_ty);
} while (succeeded(parser.parseOptionalComma()));
if (parser.parseGreater()) return Type();
return TypeWithSubtype::get(subtypes, context);
}
template <typename TypeWithSubtype>
void PrintTypeWithSubtype(StringRef type, TypeWithSubtype ty,
DialectAsmPrinter& os) {
os << type;
ArrayRef<TensorType> subtypes = ty.getSubtypes();
if (subtypes.empty()) return;
os << "<";
interleaveComma(subtypes, os);
os << ">";
}
Type ParseResourceType(MLIRContext* context, DialectAsmParser& parser) {
return ParseTypeWithSubtype<ResourceType>(context, parser);
}
void PrintResourceType(ResourceType ty, DialectAsmPrinter& os) {
return PrintTypeWithSubtype("resource", ty, os);
}
Type ParseVariantType(MLIRContext* context, DialectAsmParser& parser) {
return ParseTypeWithSubtype<VariantType>(context, parser);
}
void PrintVariantType(VariantType ty, DialectAsmPrinter& os) {
return PrintTypeWithSubtype("variant", ty, os);
}
}
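// Tries the tablegen-generated parser first, then the simple TF types from
// types.def, then the resource/variant types that carry subtypes.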
Type TFTypeDialect::parseType(DialectAsmParser& parser) const {
StringRef type_tag;
llvm::SMLoc loc = parser.getNameLoc();
Type genType;
auto parse_result = generatedTypeParser(parser, &type_tag, genType);
if (parse_result.has_value()) return genType;
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (type_tag == name) return tftype##Type::get(getContext());
#define HANDLE_CUSTOM_TF_TYPE(tftype, enumerant, name)
#include "tensorflow/core/ir/types/types.def"
if (type_tag.starts_with("resource")) {
Type ret = ParseResourceType(getContext(), parser);
if (!ret) parser.emitError(loc, "invalid resource type");
return ret;
}
if (type_tag.starts_with("variant")) {
Type ret = ParseVariantType(getContext(), parser);
if (!ret) parser.emitError(loc, "invalid variant type");
return ret;
}
parser.emitError(parser.getNameLoc(),
"unknown type in TF graph dialect: " + type_tag);
return {};
}
void TFTypeDialect::printType(Type type, DialectAsmPrinter& printer) const {
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (auto derived_ty = type.dyn_cast<tftype##Type>()) { \
printer << name; \
return; \
}
#define HANDLE_CUSTOM_TF_TYPE(tftype, enumerant, name) \
if (auto derived_ty = type.dyn_cast<tftype##Type>()) { \
Print##tftype##Type(derived_ty, printer); \
return; \
}
#include "tensorflow/core/ir/types/types.def"
if (failed(generatedTypePrinter(type, printer)))
llvm::report_fatal_error("unexpected tensorflow graph type kind");
}
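// Parses `<producer = N, min_consumer = M[, bad_consumers = [...]]>`.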
Attribute VersionAttr::parse(AsmParser& parser, Type) {
if (failed(parser.parseLess())) return {};
int32_t producer, min_consumer;
if (parser.parseKeyword("producer", " in tf_type version") ||
parser.parseEqual() || parser.parseInteger(producer) ||
parser.parseComma() ||
parser.parseKeyword("min_consumer", " in tf_type version") ||
parser.parseEqual() || parser.parseInteger(min_consumer))
return {};
SmallVector<int32_t, 4> bad_consumers;
if (!parser.parseOptionalComma()) {
if (parser.parseKeyword("bad_consumers", " in tf_type version") ||
parser.parseEqual() || parser.parseLSquare())
return {};
do {
int32_t bad_consumer;
if (parser.parseInteger(bad_consumer)) return {};
bad_consumers.push_back(bad_consumer);
} while (!parser.parseOptionalComma());
if (parser.parseRSquare()) return {};
}
if (failed(parser.parseGreater())) return {};
return VersionAttr::get(parser.getContext(), producer, min_consumer,
bad_consumers);
}
void VersionAttr::print(AsmPrinter& printer) const {
llvm::raw_ostream& os = printer.getStream();
os << "<producer = " << getProducer()
<< ", min_consumer = " << getMinConsumer();
ArrayRef<int32_t> badConsumers = getBadConsumers();
if (!badConsumers.empty()) {
os << ", bad_consumers = [";
llvm::interleaveComma(badConsumers, os);
os << "]";
}
os << ">";
}
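// Parses a raw full-type attribute: a type_id keyword, an optional
// angle-bracketed list of nested full-type arguments (parsed recursively via
// this same function), and an optional trailing attribute.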
FailureOr<FullTypeAttr> RawFullTypeAttrParser(AsmParser& parser) {
SmallVector<FullTypeAttr> args;
llvm::StringRef type_id_str;
if (failed(parser.parseKeyword(&type_id_str))) {
parser.emitError(
parser.getCurrentLocation(),
"failed to parse TFType_FullTypeAttr parameter keyword for "
"'type_id'");
return failure();
}
std::optional<FullTypeId> type_id = symbolizeFullTypeId(type_id_str);
if (!type_id) {
parser.emitError(parser.getCurrentLocation(),
"failed to parse TFType_FullTypeAttr parameter "
"'type_id'");
return failure();
}
if (parser.parseCommaSeparatedList(AsmParser::Delimiter::OptionalLessGreater,
[&]() {
FailureOr<tf_type::FullTypeAttr> arg =
RawFullTypeAttrParser(parser);
if (failed(arg)) return failure();
args.push_back(*arg);
return success();
}))
return failure();
Attribute attr;
parser.parseOptionalAttribute(attr);
return FullTypeAttr::get(
parser.getContext(),
mlir::IntegerAttr::get(mlir::IntegerType::get(parser.getContext(), 32),
static_cast<int32_t>(*type_id)),
args, attr);
}
Attribute FullTypeAttr::parse(AsmParser& parser, Type odsType) {
if (failed(parser.parseLess())) return {};
FailureOr<tf_type::FullTypeAttr> ret = RawFullTypeAttrParser(parser);
if (succeeded(ret) && failed(parser.parseGreater())) return {};
return ret.value_or(FullTypeAttr());
}
static void RawFullTypeAttrPrint(FullTypeAttr tfattr, AsmPrinter& printer) {
printer << stringifyFullTypeId(
tf_type::FullTypeId(tfattr.getTypeId().getInt()));
if (!tfattr.getArgs().empty()) {
printer << "<";
llvm::interleaveComma(tfattr.getArgs(), printer, [&](Attribute arg) {
if (auto t = mlir::dyn_cast<FullTypeAttr>(arg))
RawFullTypeAttrPrint(t, printer);
else
printer << "<<INVALID ARG>>";
});
printer << ">";
}
if (tfattr.getAttr()) {
printer << ' ';
printer.printStrippedAttrOrType(tfattr.getAttr());
}
}
void FullTypeAttr::print(AsmPrinter& printer) const {
printer << "<";
RawFullTypeAttrPrint(*this, printer);
printer << ">";
}
void FuncAttr::print(AsmPrinter& os) const {
if (getName().getRootReference().getValue().empty())
os << "<\"\", " << getAttrs() << ">";
else
os << "<" << getName() << ", " << getAttrs() << ">";
}
Attribute FuncAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
llvm::SMLoc loc = parser.getCurrentLocation();
Attribute name, dict;
if (failed(parser.parseAttribute(name))) {
parser.emitError(loc) << "expected symbol while parsing tf.func attribute";
return {};
}
if (auto func_name_str = mlir::dyn_cast<StringAttr>(name)) {
if (!func_name_str.getValue().empty()) {
parser.emitError(loc)
<< "expected empty string or symbol while parsing tf.func "
"attribute";
return {};
}
name = SymbolRefAttr::get(parser.getContext(), "");
}
if (!mlir::isa<SymbolRefAttr>(name)) {
parser.emitError(loc) << "expected symbol while parsing tf.func attribute";
return {};
}
if (failed(parser.parseComma())) return {};
loc = parser.getCurrentLocation();
if (failed(parser.parseAttribute(dict)) || !mlir::isa<DictionaryAttr>(dict)) {
parser.emitError(loc)
<< "expected Dictionary attribute while parsing tf.func attribute";
return {};
}
if (failed(parser.parseGreater())) return {};
return FuncAttr::get(parser.getContext(), mlir::cast<SymbolRefAttr>(name),
mlir::cast<DictionaryAttr>(dict));
}
void PlaceholderAttr::print(AsmPrinter& os) const {
os << "<" << StringAttr::get(getContext(), getValue()) << ">";
}
Attribute PlaceholderAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
std::string content;
if (failed(parser.parseOptionalString(&content))) {
parser.emitError(parser.getCurrentLocation())
<< "expected string while parsing tf.placeholder attribute";
return {};
}
if (failed(parser.parseGreater())) return {};
return PlaceholderAttr::get(parser.getContext(), content);
}
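// Prints a #tf_type.shape attribute, e.g. `<1x?x3>` for a ranked shape with a
// dynamic middle dimension, or `<*>` for an unranked shape.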
void ShapeAttr::print(AsmPrinter& os) const {
os << "<";
if (hasRank()) {
auto print_dim = [&](int64_t dim) {
if (dim != ShapedType::kDynamic)
os << dim;
else
os << "?";
};
llvm::interleave(getShape(), os, print_dim, "x");
} else {
os << "*";
}
os << ">";
}
Attribute ShapeAttr::parse(AsmParser& parser, Type type) {
if (failed(parser.parseLess())) return {};
if (succeeded(parser.parseOptionalStar())) {
if (failed(parser.parseGreater())) {
parser.emitError(parser.getCurrentLocation())
<< "expected `>` after `*` when parsing a tf.shape "
"attribute";
return {};
}
return ShapeAttr::get(parser.getContext(), std::nullopt);
}
SmallVector<int64_t> shape;
if (failed(parser.parseOptionalGreater())) {
auto parse_element = [&]() {
shape.emplace_back();
llvm::SMLoc loc = parser.getCurrentLocation();
if (succeeded(parser.parseOptionalQuestion())) {
shape.back() = ShapedType::kDynamic;
} else if (failed(parser.parseDecimalInteger(shape.back()))) {
parser.emitError(loc)
<< "expected an integer or `?` when parsing a tf.shape attribute";
return failure();
}
return success();
};
if (failed(parse_element())) return {};
while (failed(parser.parseOptionalGreater())) {
if (failed(parser.parseXInDimensionList()) || failed(parse_element()))
return {};
}
}
return ShapeAttr::get(parser.getContext(), llvm::ArrayRef(shape));
}
ShapeAttr ShapeAttr::get(MLIRContext* context,
std::optional<ArrayRef<int64_t>> shape) {
if (shape) return Base::get(context, *shape, false);
return Base::get(context, ArrayRef<int64_t>(), true);
}
ShapeAttr ShapeAttr::get(MLIRContext* context, ShapedType shaped_type) {
if (shaped_type.hasRank())
return Base::get(context, shaped_type.getShape(), false);
return Base::get(context, ArrayRef<int64_t>(), true);
}
std::optional<ArrayRef<int64_t>> ShapeAttr::getValue() const {
if (hasRank()) return getShape();
return std::nullopt;
}
bool ShapeAttr::hasRank() const { return !getImpl()->unranked; }
int64_t ShapeAttr::getRank() const {
assert(hasRank());
return getImpl()->shape.size();
}
bool ShapeAttr::hasStaticShape() const {
if (!hasRank()) return false;
for (auto dim : getShape()) {
if (dim < 0) return false;
}
return true;
}
namespace {
std::optional<ArrayRef<int64_t>> GetShape(Value value) {
auto shaped_type = mlir::cast<ShapedType>(value.getType());
if (shaped_type.hasRank()) return shaped_type.getShape();
return std::nullopt;
}
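// Merges two shapes of equal rank dimension-wise: a dynamic dimension is
// refined to the other side's size, while mismatched static sizes make the
// shapes incompatible (returns false).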
bool GetCastCompatibleShape(ArrayRef<int64_t> a_shape,
ArrayRef<int64_t> b_shape,
SmallVectorImpl<int64_t>* refined_shape) {
if (a_shape.size() != b_shape.size()) return false;
int64_t rank = a_shape.size();
refined_shape->reserve(rank);
for (auto dims : llvm::zip(a_shape, b_shape)) {
int64_t dim1 = std::get<0>(dims);
int64_t dim2 = std::get<1>(dims);
if (ShapedType::isDynamic(dim1)) {
refined_shape->push_back(dim2);
continue;
}
if (ShapedType::isDynamic(dim2)) {
refined_shape->push_back(dim1);
continue;
}
if (dim1 == dim2) {
refined_shape->push_back(dim1);
continue;
}
return false;
}
return true;
}
}  // namespace
OperandShapeIterator::OperandShapeIterator(Operation::operand_iterator it)
: llvm::mapped_iterator<Operation::operand_iterator,
std::optional<ArrayRef<int64_t>> (*)(Value)>(
it, &GetShape) {}
ResultShapeIterator::ResultShapeIterator(Operation::result_iterator it)
: llvm::mapped_iterator<Operation::result_iterator,
std::optional<ArrayRef<int64_t>> (*)(Value)>(
it, &GetShape) {}
bool TensorFlowType::classof(Type type) {
return llvm::isa<TFTypeDialect>(type.getDialect());
}
bool TensorFlowRefType::classof(Type type) {
return mlir::isa<
#define HANDLE_TF_TYPE(tftype, enumerant, name)
#define HANDLE_TF_REF_TYPE(tftype, enumerant, name) tftype##Type,
#define HANDLE_LAST_TF_TYPE(tftype, enumerant, name) tftype##Type
#include "tensorflow/core/ir/types/types.def"
>(type);
}
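// Returns the TensorFlow ref type corresponding to a base element type,
// e.g. f32 -> FloatRefType, i1 -> BoolRefType. Float, complex, and integer
// widths are dispatched explicitly; remaining TF types fall through to the
// types.def-generated mapping.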
TensorFlowType TensorFlowRefType::get(Type type) {
MLIRContext* ctx = type.getContext();
type = getElementTypeOrSelf(type);
if (type.isF16()) {
return HalfRefType::get(ctx);
} else if (type.isF32()) {
return FloatRefType::get(ctx);
} else if (type.isF64()) {
return DoubleRefType::get(ctx);
} else if (type.isBF16()) {
return Bfloat16RefType::get(ctx);
} else if (type.isFloat8E4M3FN()) {
return Float8E4M3FNRefType::get(ctx);
} else if (type.isFloat8E5M2()) {
return Float8E5M2RefType::get(ctx);
} else if (auto complex_type = mlir::dyn_cast<ComplexType>(type)) {
Type etype = complex_type.getElementType();
if (etype.isF32()) {
return Complex64RefType::get(ctx);
} else if (etype.isF64()) {
return Complex128RefType::get(ctx);
}
llvm_unreachable("unexpected complex type");
} else if (auto itype = mlir::dyn_cast<IntegerType>(type)) {
switch (itype.getWidth()) {
case 1:
return BoolRefType::get(ctx);
case 4:
return itype.isUnsigned() ? TensorFlowType(Uint4RefType::get(ctx))
: Int4RefType::get(ctx);
case 8:
return itype.isUnsigned() ? TensorFlowType(Uint8RefType::get(ctx))
: Int8RefType::get(ctx);
case 16:
return itype.isUnsigned() ? TensorFlowType(Uint16RefType::get(ctx))
: Int16RefType::get(ctx);
case 32:
return itype.isUnsigned() ? TensorFlowType(Uint32RefType::get(ctx))
: Int32RefType::get(ctx);
case 64:
return itype.isUnsigned() ? TensorFlowType(Uint64RefType::get(ctx))
: Int64RefType::get(ctx);
default:
llvm_unreachable("unexpected integer type");
}
}
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (auto derived_ty = type.dyn_cast<tftype##Type>()) \
return tftype##RefType::get(ctx);
#define HANDLE_TF_REF_TYPE(tftype, enumerant, name)
#include "tensorflow/core/ir/types/types.def"
llvm_unreachable("unexpected type kind");
}
Type TensorFlowRefType::RemoveRef() {
MLIRContext* ctx = getContext();
if (mlir::isa<HalfRefType>(*this)) return FloatType::getF16(ctx);
if (mlir::isa<FloatRefType>(*this)) return FloatType::getF32(ctx);
if (mlir::isa<DoubleRefType>(*this)) return FloatType::getF64(ctx);
if (mlir::isa<Bfloat16RefType>(*this)) return FloatType::getBF16(ctx);
  if (mlir::isa<Float8E4M3FNRefType>(*this))
    return FloatType::getFloat8E4M3FN(ctx);
  if (mlir::isa<Float8E5M2RefType>(*this)) return FloatType::getFloat8E5M2(ctx);
if (mlir::isa<BoolRefType>(*this)) return IntegerType::get(ctx, 1);
if (mlir::isa<Int4RefType>(*this))
return IntegerType::get(ctx, 4, IntegerType::Signed);
if (mlir::isa<Int8RefType>(*this)) return IntegerType::get(ctx, 8);
if (mlir::isa<Int16RefType>(*this)) return IntegerType::get(ctx, 16);
if (mlir::isa<Int32RefType>(*this)) return IntegerType::get(ctx, 32);
if (mlir::isa<Int64RefType>(*this)) return IntegerType::get(ctx, 64);
if (mlir::isa<Uint4RefType>(*this))
return IntegerType::get(ctx, 4, IntegerType::Unsigned);
if (mlir::isa<Uint8RefType>(*this))
return IntegerType::get(ctx, 8, IntegerType::Unsigned);
if (mlir::isa<Uint16RefType>(*this))
return IntegerType::get(ctx, 16, IntegerType::Unsigned);
if (mlir::isa<Uint32RefType>(*this))
return IntegerType::get(ctx, 32, IntegerType::Unsigned);
if (mlir::isa<Uint64RefType>(*this))
return IntegerType::get(ctx, 64, IntegerType::Unsigned);
if (mlir::isa<Complex64RefType>(*this))
return ComplexType::get(FloatType::getF32(ctx));
if (mlir::isa<Complex128RefType>(*this))
return ComplexType::get(FloatType::getF64(ctx));
#define HANDLE_TF_TYPE(tftype, enumerant, name) \
if (isa<tftype##RefType>()) return tftype##Type::get(ctx);
#define HANDLE_TF_REF_TYPE(tftype, enumerant, name)
#include "tensorflow/core/ir/types/types.def"
llvm_unreachable("unexpected tensorflow ref type kind");
}
bool TensorFlowTypeWithSubtype::classof(Type type) {
return mlir::isa<ResourceType, VariantType>(type);
}
Type TensorFlowTypeWithSubtype::RemoveSubtypes() {
MLIRContext* ctx = getContext();
if (mlir::isa<VariantType>(*this)) return VariantType::get(ctx);
if (mlir::isa<ResourceType>(*this)) return ResourceType::get(ctx);
llvm_unreachable("unexpected tensorflow type with subtypes kind");
}
TensorFlowTypeWithSubtype TensorFlowTypeWithSubtype::clone(
ArrayRef<TensorType> new_subtypes) {
MLIRContext* ctx = getContext();
if (mlir::isa<VariantType>(*this))
return mlir::cast<TensorFlowTypeWithSubtype>(
VariantType::get(new_subtypes, ctx));
if (mlir::isa<ResourceType>(*this))
return mlir::cast<TensorFlowTypeWithSubtype>(
ResourceType::get(new_subtypes, ctx));
llvm_unreachable("unexpected tensorflow type with subtypes kind");
}
ArrayRef<TensorType> TensorFlowTypeWithSubtype::GetSubtypes() {
if (auto variant_type = mlir::dyn_cast<VariantType>(*this))
return variant_type.getSubtypes();
if (auto resource_type = mlir::dyn_cast<ResourceType>(*this))
return resource_type.getSubtypes();
llvm_unreachable("unexpected tensorflow type with subtypes kind");
}
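// Returns true if each pair of types in `lhs`/`rhs` is broadcast-compatible:
// element types must match after dropping ref types (with recursive checks on
// resource/variant subtypes), and ranked shapes must admit a broadcasted
// shape.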
bool BroadcastCompatible(TypeRange lhs, TypeRange rhs) {
if (lhs.size() != rhs.size()) return false;
for (auto types : llvm::zip(lhs, rhs)) {
auto lhs_type = DropRefType(std::get<0>(types));
auto rhs_type = DropRefType(std::get<1>(types));
auto lhs_tt = mlir::dyn_cast<TensorType>(lhs_type);
auto rhs_tt = mlir::dyn_cast<TensorType>(rhs_type);
if (!lhs_tt || !rhs_tt) {
if (lhs_type != rhs_type) return false;
continue;
}
auto lhs_et = lhs_tt.getElementType();
auto rhs_et = rhs_tt.getElementType();
if (lhs_et != rhs_et) {
auto lhs_wst = mlir::dyn_cast<TensorFlowTypeWithSubtype>(lhs_et);
auto rhs_wst = mlir::dyn_cast<TensorFlowTypeWithSubtype>(rhs_et);
if (!lhs_wst || !rhs_wst) return false;
auto lhs_wst_st = lhs_wst.GetSubtypes();
auto rhs_wst_st = rhs_wst.GetSubtypes();
if (!lhs_wst_st.empty() && !rhs_wst_st.empty()) {
for (auto subtypes : llvm::zip(lhs_wst_st, rhs_wst_st)) {
if (!BroadcastCompatible(std::get<0>(subtypes),
std::get<1>(subtypes)))
return false;
}
}
}
auto lhs_rt = mlir::dyn_cast<RankedTensorType>(lhs_type);
auto rhs_rt = mlir::dyn_cast<RankedTensorType>(rhs_type);
if (!lhs_rt || !rhs_rt) return true;
SmallVector<int64_t, 4> shape;
return OpTrait::util::getBroadcastedShape(lhs_rt.getShape(),
rhs_rt.getShape(), shape);
}
return true;
}
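// Returns the most refined type that both `a` and `b` can be cast to, or
// nullptr if the types are incompatible. For tensors this refines the element
// type and shape; for resource/variant types it refines subtypes recursively.
// If `may_ignore_ref_type_a` is set, a ref type on `a` may first be stripped.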
Type GetCastCompatibleType(Type a, Type b, bool may_ignore_ref_type_a) {
if (a == b) return b;
auto a_tt = mlir::dyn_cast<TensorType>(a);
auto b_tt = mlir::dyn_cast<TensorType>(b);
if (static_cast<bool>(a_tt) ^ static_cast<bool>(b_tt)) return nullptr;
if (!a_tt && !b_tt) {
if (may_ignore_ref_type_a) {
if (auto ref_type = mlir::dyn_cast<TensorFlowRefType>(a)) {
a = ref_type.RemoveRef();
if (a == b) return a;
}
}
if (a.getTypeID() != b.getTypeID()) return nullptr;
auto a_wst = mlir::dyn_cast<TensorFlowTypeWithSubtype>(a);
auto b_wst = mlir::dyn_cast<TensorFlowTypeWithSubtype>(b);
if (!a_wst || !b_wst) return nullptr;
if (mlir::isa<VariantType>(a)) return a;
if (mlir::isa<VariantType>(b)) return b;
auto a_wst_st = a_wst.GetSubtypes();
auto b_wst_st = b_wst.GetSubtypes();
if (a_wst_st.empty()) return b;
if (b_wst_st.empty()) return a;
if (a_wst_st.size() != b_wst_st.size()) return nullptr;
SmallVector<TensorType, 4> refined_subtypes;
for (auto subtypes : llvm::zip(a_wst_st, b_wst_st)) {
Type refined_st =
GetCastCompatibleType(std::get<0>(subtypes), std::get<1>(subtypes),
false);
if (!refined_st) return nullptr;
refined_subtypes.push_back(mlir::cast<TensorType>(refined_st));
}
return ResourceType::get(refined_subtypes, a.getContext());
}
Type refined_element_ty = GetCastCompatibleType(
a_tt.getElementType(), b_tt.getElementType(), may_ignore_ref_type_a);
if (!refined_element_ty) return nullptr;
if (!a_tt.hasRank() && !b_tt.hasRank()) {
return UnrankedTensorType::get(refined_element_ty);
}
if (!a_tt.hasRank()) {
return RankedTensorType::get(b_tt.getShape(), refined_element_ty);
}
if (!b_tt.hasRank()) {
return RankedTensorType::get(a_tt.getShape(), refined_element_ty);
}
SmallVector<int64_t, 4> refined_shape;
if (!GetCastCompatibleShape(a_tt.getShape(), b_tt.getShape(), &refined_shape))
return nullptr;
return RankedTensorType::get(refined_shape, refined_element_ty);
}
bool HasCompatibleElementTypes(Type lhs, Type rhs,
bool may_ignore_ref_type_lhs) {
return GetCastCompatibleType(lhs, rhs, may_ignore_ref_type_lhs) != nullptr;
}
bool AreCastCompatible(TypeRange types) {
Type common = types.front();
for (auto type : types.drop_front()) {
Type refined_type =
GetCastCompatibleType(common, type, false);
if (!refined_type) return false;
common = refined_type;
}
return true;
}
bool ArraysAreCastCompatible(TypeRange lhs, TypeRange rhs) {
if (lhs.size() != rhs.size()) return false;
for (auto pair : llvm::zip(lhs, rhs)) {
auto lhs_i = std::get<0>(pair);
auto rhs_i = std::get<1>(pair);
if (!AreCastCompatible({lhs_i, rhs_i})) return false;
}
return true;
}
static Type GetDefaultTypeOf(TensorFlowRefType type) {
return type.RemoveRef();
}
template <typename ComposedType>
Type DropTypeHelper(Type ty) {
Type element_ty = getElementTypeOrSelf(ty);
auto composed_type = mlir::dyn_cast<ComposedType>(element_ty);
if (!composed_type) return ty;
Type default_ty = GetDefaultTypeOf(composed_type);
if (auto ranked_ty = mlir::dyn_cast<RankedTensorType>(ty)) {
return RankedTensorType::get(ranked_ty.getShape(), default_ty);
} else if (mlir::dyn_cast<UnrankedTensorType>(ty)) {
return UnrankedTensorType::get(default_ty);
} else {
return default_ty;
}
}
Type DropSubTypes(Type ty) {
return DropTypeHelper<TensorFlowTypeWithSubtype>(ty);
}
Type DropRefType(Type ty) { return DropTypeHelper<TensorFlowRefType>(ty); }
Type DropRefAndSubTypes(Type ty) { return DropRefType(DropSubTypes(ty)); }
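// Parses a #tf_type.tensor_proto attribute whose payload is a hex string
// ("0x"-prefixed) holding a serialized TensorProto.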
Attribute TensorProtoAttr::parse(AsmParser& parser, Type type) {
if (parser.parseColon()) {
return nullptr;
}
std::string data;
if (parser.parseString(&data)) {
return nullptr;
}
if (data.size() < 2 || data.substr(0, 2) != "0x") {
parser.emitError(parser.getNameLoc(), "Hex string doesn't start with `0x`");
return nullptr;
}
auto shapedType = mlir::dyn_cast<ShapedType>(type);
if (!shapedType) return nullptr;
std::string bytes_data = absl::HexStringToBytes(data.substr(2));
return TensorProtoAttr::get(shapedType, bytes_data);
}
void TensorProtoAttr::print(mlir::AsmPrinter& printer) const {
StringRef bytes_str = getValue();
printer << " : \"0x" << llvm::toHex(bytes_str) << "\"";
}
}  // namespace tf_type
} | #include "tensorflow/core/ir/types/dialect.h"
#include <cstdint>
#include <limits>
#include <gmock/gmock.h>
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Support/LLVM.h"
#include "tensorflow/core/platform/test.h"
namespace mlir {
namespace tfg {
namespace {
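// Verifies that symbol references nested inside #tf_type.func (both the
// function name and symbols inside the attribute dictionary) are visible to
// SymbolTable::replaceAllSymbolUses.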
TEST(TFTypesDialect, TestFuncAttrSubElement) {
const char *const code = R"mlir(
"test.op"() {func = #tf_type.func<@foo, {bar = @foo}>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
Builder b(&context);
StringAttr baz = b.getStringAttr("baz");
ASSERT_TRUE(succeeded(SymbolTable::replaceAllSymbolUses(
b.getStringAttr("foo"), baz, test_op.getParentRegion())));
auto func_attr = mlir::dyn_cast<tf_type::FuncAttr>(test_op.getAttr("func"));
ASSERT_TRUE(func_attr);
auto sym_ref = FlatSymbolRefAttr::get(baz);
EXPECT_TRUE(func_attr.getName() == sym_ref);
auto bar_ref = func_attr.getAttrs().get("bar");
EXPECT_TRUE(bar_ref == sym_ref);
}
TEST(TFTypesDialect, ParsesDimensionListWithZero) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<0x128>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(), testing::ElementsAre(0, 128));
}
TEST(TFTypesDialect, ParsesDimensionListWithQuestionMark) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<0x?x2>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(),
testing::ElementsAre(0, std::numeric_limits<int64_t>::min(), 2));
}
TEST(TFTypesDialect, ParsesDimensionListWithNegativeOne) {
const char *const code = R"mlir(
"test.op"() {shape = #tf_type.shape<0x-1x2>} : () -> ()
)mlir";
MLIRContext context;
context.allowUnregisteredDialects();
context.getOrLoadDialect<tf_type::TFTypeDialect>();
OwningOpRef<mlir::ModuleOp> module =
mlir::parseSourceString<mlir::ModuleOp>(code, &context);
Operation &test_op = module->front();
auto shape_attr =
mlir::dyn_cast<tf_type::ShapeAttr>(test_op.getAttr("shape"));
ASSERT_TRUE(shape_attr);
EXPECT_THAT(shape_attr.getShape(), testing::ElementsAre(0, -1, 2));
}
}  // namespace
}  // namespace tfg
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/types/dialect.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/core/ir/types/dialect_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
9bda38a9-493d-4316-85e9-9599ccb99c9a | cpp | tensorflow/tensorflow | dynamic_padder | third_party/xla/xla/service/dynamic_padder.cc | third_party/xla/xla/service/dynamic_padder_test.cc | #include "xla/service/dynamic_padder.h"
#include <cstdint>
#include <functional>
#include <iterator>
#include <set>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_window_utils.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/tsl/lib/monitoring/gauge.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
auto* dynamic_padding_gauge = tsl::monitoring::Gauge<bool, 0>::New(
"/tensorflow/core/use_dynamic_padding_gauge",
"Tracks if dynamic padder is used.");
absl::StatusOr<HloInstruction*> ChooseIdentityValue(HloInstruction* inst,
int64_t operand_number) {
if (inst->IsElementwise()) {
return nullptr;
}
if (inst->opcode() == HloOpcode::kSelectAndScatter ||
inst->IsCustomCall("DynamicSelectAndScatterSamePadding")) {
if (operand_number == 1) {
return inst->mutable_operand(2);
}
TF_RET_CHECK(operand_number == 0);
HloComputation* select = inst->called_computations()[0];
if (Match(select->root_instruction(),
match::Compare(match::Parameter(), match::Parameter())
.WithComparisonDirection(ComparisonDirection::kGe))) {
return inst->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::MinValue(inst->operand(0)->shape().element_type())));
} else {
return Unimplemented(
"Only select and scatter with `max` as select function is "
"supported, got %s",
select->ToString());
}
}
switch (inst->opcode()) {
case HloOpcode::kReduce: {
auto* reduce = Cast<HloReduceInstruction>(inst);
TF_RET_CHECK(operand_number < reduce->input_count())
<< "Only data operand with dynamic dimension is valid.";
int64_t init_value_index = reduce->input_count() + operand_number;
return inst->mutable_operand(init_value_index);
}
case HloOpcode::kReduceWindow: {
auto* reduce_window = Cast<HloReduceWindowInstruction>(inst);
TF_RET_CHECK(operand_number < reduce_window->input_count())
<< "Only data operand with dynamic dimension is valid.";
int64_t init_value_index = reduce_window->input_count() + operand_number;
return inst->mutable_operand(init_value_index);
}
case HloOpcode::kConvolution:
case HloOpcode::kDot: {
PrimitiveType ptype = inst->operand(0)->shape().element_type();
return inst->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(ptype)));
}
case HloOpcode::kPad:
return inst->mutable_operand(1);
case HloOpcode::kScatter: {
if (operand_number != 1) {
return nullptr;
}
PrimitiveType indices_ptype =
inst->operand(operand_number)->shape().element_type();
return inst->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::MaxValue(indices_ptype)));
}
case HloOpcode::kParameter:
case HloOpcode::kGather:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGetDimensionSize:
case HloOpcode::kSetDimensionSize:
case HloOpcode::kConcatenate:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kTuple:
case HloOpcode::kAllReduce:
case HloOpcode::kReduceScatter:
case HloOpcode::kBroadcast:
case HloOpcode::kTranspose:
case HloOpcode::kSort:
case HloOpcode::kSlice:
case HloOpcode::kDomain:
return nullptr;
case HloOpcode::kCustomCall:
return nullptr;
default:
return UnimplementedStrCat("Unimplemented padding for instruction: ",
inst->ToString());
}
}
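// Replaces a get-dimension-size instruction with the tracked dynamic size if
// the dimension is dynamic, or with an S32 constant holding the static size
// otherwise.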
absl::StatusOr<bool> ReplaceGetSize(
HloInstruction* instr,
DynamicDimensionInference* dynamic_dimension_inference) {
if (instr->opcode() != HloOpcode::kGetDimensionSize) {
return false;
}
HloComputation* computation = instr->parent();
TF_ASSIGN_OR_RETURN(auto legal_shape,
ShapeInference::InferGetDimensionSizeShape(
instr->operand(0)->shape(), instr->dimension()));
TF_RET_CHECK(ShapeUtil::Equal(instr->shape(), legal_shape))
<< "instr->shape() " << instr->shape().ToString() << " , "
<< "legal_shape " << legal_shape.ToString();
TF_RET_CHECK(ShapeUtil::HasPrimitiveType(instr->shape(), S32));
HloInstruction* operand = instr->mutable_operand(0);
int64_t dim = instr->dimension();
HloInstruction* dynamic_size =
dynamic_dimension_inference->GetDynamicSize(operand, {}, dim);
if (dynamic_size != nullptr) {
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(dynamic_size));
dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith(
instr, dynamic_size);
} else {
int32_t size = instr->operand(0)->shape().dimensions(dim);
HloInstruction* new_instr = computation->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(size)));
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(new_instr));
dynamic_dimension_inference->ReplaceAllDynamicDimensionUsesWith(instr,
new_instr);
}
return true;
}
absl::StatusOr<bool> ReplaceSetSize(HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kSetDimensionSize) {
return false;
}
TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()(
instr->shape(), instr->operand(0)->shape()))
<< "instr->shape() " << instr->shape().ToString() << " , "
<< "instruction operand shape " << instr->operand(0)->shape();
HloInstruction* operand = instr->mutable_operand(0);
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(operand));
return true;
}
absl::StatusOr<bool> ReplaceSetBound(HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kCustomCall ||
instr->custom_call_target() != "SetBound") {
return false;
}
TF_RET_CHECK(Shape::Equal().IgnoreDynamicDimension()(
instr->shape(), instr->operand(0)->shape()))
<< "instr->shape() " << instr->shape().ToString() << " , "
<< "instruction operand shape " << instr->operand(0)->shape();
HloInstruction* operand = instr->mutable_operand(0);
TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(operand));
return true;
}
bool ShouldSkipPadOnOperand(
const HloInstruction* inst, int64_t operand_num, int64_t dimension,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
switch (inst->opcode()) {
case HloOpcode::kConvolution: {
if (operand_num == 0) {
if (dimension ==
inst->convolution_dimension_numbers().input_batch_dimension()) {
return true;
}
const auto& spatial_dims =
inst->convolution_dimension_numbers().input_spatial_dimensions();
for (int64_t spatial_dim = 0; spatial_dim < spatial_dims.size();
++spatial_dim) {
if (spatial_dims[spatial_dim] == dimension &&
inst->window().dimensions(spatial_dim).size() == 1) {
return true;
}
}
}
return operand_num == 1 &&
(dimension == inst->convolution_dimension_numbers()
.kernel_output_feature_dimension());
}
case HloOpcode::kDot: {
if (operand_num == 0) {
return !absl::c_linear_search(
inst->dot_dimension_numbers().lhs_contracting_dimensions(),
dimension);
}
return !absl::c_linear_search(
inst->dot_dimension_numbers().rhs_contracting_dimensions(),
dimension);
}
case HloOpcode::kReduce:
return !absl::c_linear_search(inst->dimensions(), dimension);
case HloOpcode::kSelectAndScatter:
case HloOpcode::kReduceWindow:
return inst->window().dimensions(dimension).size() == 1;
case HloOpcode::kAsyncStart:
if (!HloInstruction::IsThreadIncluded(inst->async_execution_thread(),
execution_threads)) {
return true;
}
return false;
default:
return false;
}
}
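// Masks the padded area of `inst` along `dim` with `padding_scalar`: an iota
// over `dim` is compared against the broadcast dynamic size, and positions at
// or beyond the dynamic size are overwritten. Illustrative example with bound
// 4, dynamic size 2, and padding scalar s: [a, b, x, y] -> [a, b, s, s].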
HloInstruction* PadWithScalar(HloInstruction* inst, int64_t dim,
HloInstruction* dynamic_size,
HloInstruction* padding_scalar) {
CHECK(inst != nullptr && dynamic_size != nullptr &&
padding_scalar != nullptr);
const Shape mask_shape =
ShapeUtil::MakeShape(xla::S32, inst->shape().dimensions());
const Shape pred_shape =
ShapeUtil::MakeShape(xla::PRED, inst->shape().dimensions());
HloInstruction* iota =
inst->AddInstruction(HloInstruction::CreateIota(mask_shape, dim));
HloInstruction* broadcasted_effective_size = inst->AddInstruction(
HloInstruction::CreateBroadcast(mask_shape, dynamic_size, {}));
HloInstruction* pred = inst->AddInstruction(HloInstruction::CreateCompare(
pred_shape, iota, broadcasted_effective_size, ComparisonDirection::kLt));
HloInstruction* broadcasted_identity_value =
inst->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::MakeStaticShape(inst->shape()), padding_scalar, {}));
HloInstruction* padded = inst->AddInstruction(HloInstruction::CreateTernary(
ShapeUtil::MakeStaticShape(inst->shape()), HloOpcode::kSelect, pred, inst,
broadcasted_identity_value));
return padded;
}
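// Builds an S32 0/1 mask over the dimension being split (or combined): 1
// where the element's multi-dimensional coordinate lies within every dynamic
// bound, 0 in the padded region. Returns nullptr when only the major-most
// dimension is dynamic, since no data movement is required in that case.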
HloInstruction* GenerateBinaryMask(
HloInstruction* reshape, int64_t input_dim,
absl::Span<const int64_t> output_dims,
absl::Span<HloInstruction*> output_dynamic_dims, HloInstruction* one,
HloInstruction* zero, bool split_input) {
Shape input_shape =
split_input ? reshape->operand(0)->shape() : reshape->shape();
Shape output_shape =
split_input ? reshape->shape() : reshape->operand(0)->shape();
const Shape mask_input_shape =
ShapeUtil::MakeShape(xla::S32, {input_shape.dimensions(input_dim)});
const Shape pred_input_shape =
ShapeUtil::MakeShape(xla::PRED, {input_shape.dimensions(input_dim)});
HloInstruction* pred_true = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<bool>(true)));
HloInstruction* input_shape_pred_mask = reshape->AddInstruction(
HloInstruction::CreateBroadcast(pred_input_shape, pred_true, {}));
bool need_rewrite = false;
HloInstruction* iota =
reshape->AddInstruction(HloInstruction::CreateIota(mask_input_shape, 0));
for (int64_t i = 1; i < output_dims.size(); ++i) {
if (output_dynamic_dims[output_dims[i]] != nullptr) {
need_rewrite = true;
break;
}
}
if (!need_rewrite) {
return nullptr;
}
for (int64_t i = output_dims.size() - 1; i > 0; i--) {
const int64_t output_dim = output_dims[i];
HloInstruction* dynamic_size = output_dynamic_dims[output_dim];
HloInstruction* static_output_dim_size = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
output_shape.dimensions(output_dim))));
HloInstruction* broadcasted_static_output_dim_size =
reshape->AddInstruction(HloInstruction::CreateBroadcast(
mask_input_shape, static_output_dim_size, {}));
if (dynamic_size != nullptr) {
HloInstruction* dim_index =
reshape->AddInstruction(HloInstruction::CreateBinary(
mask_input_shape, HloOpcode::kRemainder, iota,
broadcasted_static_output_dim_size));
HloInstruction* broadcasted_effective_size = reshape->AddInstruction(
HloInstruction::CreateBroadcast(mask_input_shape, dynamic_size, {}));
HloInstruction* selected =
reshape->AddInstruction(HloInstruction::CreateCompare(
pred_input_shape, dim_index, broadcasted_effective_size,
ComparisonDirection::kLt));
input_shape_pred_mask = reshape->AddInstruction(
HloInstruction::CreateBinary(pred_input_shape, HloOpcode::kAnd,
input_shape_pred_mask, selected));
}
iota = reshape->AddInstruction(
HloInstruction::CreateBinary(mask_input_shape, HloOpcode::kDivide, iota,
broadcasted_static_output_dim_size));
}
HloInstruction* broadcasted_one = reshape->AddInstruction(
HloInstruction::CreateBroadcast(mask_input_shape, one, {}));
HloInstruction* broadcasted_zero = reshape->AddInstruction(
HloInstruction::CreateBroadcast(mask_input_shape, zero, {}));
return reshape->AddInstruction(HloInstruction::CreateTernary(
mask_input_shape, HloOpcode::kSelect, input_shape_pred_mask,
broadcasted_one, broadcasted_zero));
}
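// Rewrites a reshape that splits one dynamic input dimension into several
// output dimensions by moving the padding into place before the reshape.
// Illustrative example: reshaping [<=6] (dynamic size 4, data [a,b,c,d,P,P])
// into [2,<=3] (dynamic size 2 per row) needs input layout [a,b,*,c,d,*]. The
// validity mask over output coordinates, [1,1,0,1,1,0], is cumsum'd (minus
// one) into gather indices [0,1,1,2,3,3]; gathering from the compacted input
// yields [a,b,b,c,d,d], whose stale slots are dead once the dynamic sizes are
// set on the result.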
absl::StatusOr<bool> RewriteDynamicReshapeSplitInput(
HloInstruction* reshape, int64_t input_dim,
absl::Span<const int64_t> output_dims,
absl::Span<HloInstruction*> output_dynamic_dims,
DynamicDimensionInference* dynamic_dimension_inference) {
VLOG(2) << "Reshaping input dim " << input_dim << " to "
<< VectorString(output_dims);
const Shape operand_shape = reshape->operand(0)->shape();
TF_RET_CHECK(output_dims.size() > 1);
const Shape mask_input_shape =
ShapeUtil::MakeShape(xla::S32, {operand_shape.dimensions(input_dim)});
const Shape pred_input_shape =
ShapeUtil::MakeShape(xla::PRED, {operand_shape.dimensions(input_dim)});
HloInstruction* zero = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
HloInstruction* one = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
  HloInstruction* input_shape_binary_mask =
      GenerateBinaryMask(reshape, input_dim, output_dims, output_dynamic_dims,
                         one, zero, /*split_input=*/true);
if (input_shape_binary_mask == nullptr) {
VLOG(2) << "No need to rewrite";
return false;
}
auto embedded_builder = HloComputation::Builder("add");
{
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(S32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(S32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
}
HloComputation* add =
reshape->GetModule()->AddEmbeddedComputation(embedded_builder.Build());
Window cumsum_window;
WindowDimension* dim = cumsum_window.add_dimensions();
dim->set_size(operand_shape.dimensions(input_dim));
dim->set_stride(1);
dim->set_padding_low(operand_shape.dimensions(input_dim) - 1);
dim->set_padding_high(0);
dim->set_window_dilation(1);
dim->set_base_dilation(1);
HloInstruction* cumsum =
reshape->AddInstruction(HloInstruction::CreateReduceWindow(
mask_input_shape, input_shape_binary_mask, zero, cumsum_window, add));
HloInstruction* broadcast_ones = reshape->AddInstruction(
HloInstruction::CreateBroadcast(mask_input_shape, one, {}));
cumsum = reshape->AddInstruction(HloInstruction::CreateBinary(
mask_input_shape, HloOpcode::kSubtract, cumsum, broadcast_ones));
GatherDimensionNumbers gather_dim_numbers;
for (int64_t i = 0; i < operand_shape.dimensions_size(); ++i) {
if (i != input_dim) {
gather_dim_numbers.add_offset_dims(i);
}
}
gather_dim_numbers.add_start_index_map(input_dim);
gather_dim_numbers.set_index_vector_dim(1);
gather_dim_numbers.add_collapsed_slice_dims(input_dim);
HloInstruction* operand_static_dim_size =
reshape->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(operand_shape.dimensions(input_dim))));
HloInstruction* operand_static =
reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
operand_shape, reshape->mutable_operand(0), operand_static_dim_size,
input_dim));
std::vector<int64_t> slice_sizes(operand_shape.dimensions().begin(),
operand_shape.dimensions().end());
slice_sizes[input_dim] = 1;
  HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather(
      ShapeUtil::MakeShape(operand_shape.element_type(),
                           operand_shape.dimensions()),
      operand_static, cumsum, gather_dim_numbers, slice_sizes,
      /*indices_are_sorted=*/true));
TF_RETURN_IF_ERROR(reshape->ReplaceOperandWith(0, gather));
HloInstruction* reshape_dynamic = reshape;
auto users = reshape->users();
for (int64_t output_dim : output_dims) {
HloInstruction* output_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim);
if (output_dynamic_size != nullptr) {
reshape_dynamic =
reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
reshape->shape(), reshape_dynamic, output_dynamic_size,
output_dim));
}
}
for (auto* user : users) {
TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, reshape_dynamic));
}
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reshape, reshape_dynamic, {}));
return true;
}
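// Rewrites a reshape that combines several input dimensions (some dynamic)
// into one output dimension by compacting valid elements to the front. A 0/1
// validity mask over the output dimension is stably sorted in descending
// order alongside an iota, and the permuted iota serves as gather indices.
// Illustrative example: input [2,<=3] (dynamic size 2) flattens to
// [a,b,*,c,d,*]; mask [1,1,0,1,1,0] sorted with iota [0..5] gives indices
// [0,1,3,4,2,5], and the gather produces [a,b,c,d,*,*].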
absl::StatusOr<bool> RewriteDynamicReshapeCombineInput(
HloInstruction* reshape, absl::Span<const int64_t> input_dims,
int64_t output_dim, absl::Span<HloInstruction*> input_dynamic_dims,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* zero = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
HloInstruction* one = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
const Shape output_shape = reshape->shape();
const Shape input_shape = reshape->operand(0)->shape();
const Shape mask_output_shape =
ShapeUtil::MakeShape(xla::S32, {output_shape.dimensions(output_dim)});
  HloInstruction* output_shape_binary_mask =
      GenerateBinaryMask(reshape, output_dim, input_dims, input_dynamic_dims,
                         one, zero, /*split_input=*/false);
if (output_shape_binary_mask == nullptr) {
VLOG(2) << "No need to rewrite";
return false;
}
HloInstruction* iota =
reshape->AddInstruction(HloInstruction::CreateIota(mask_output_shape, 0));
HloComputation::Builder comp_builder("compare");
HloInstruction* lhs_key =
comp_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeScalarShape(S32), "lhs_key"));
HloInstruction* rhs_key =
comp_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeScalarShape(S32), "rhs_key"));
comp_builder.AddInstruction(HloInstruction::CreateParameter(
2, ShapeUtil::MakeScalarShape(S32), "lhs_value"));
comp_builder.AddInstruction(HloInstruction::CreateParameter(
3, ShapeUtil::MakeScalarShape(S32), "rhs_value"));
comp_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), lhs_key,
rhs_key, ComparisonDirection::kGt));
HloComputation* compare =
reshape->GetModule()->AddEmbeddedComputation(comp_builder.Build());
  HloInstruction* sort = reshape->AddInstruction(HloInstruction::CreateSort(
      ShapeUtil::MakeTupleShape({mask_output_shape, mask_output_shape}),
      /*dimension=*/0, {output_shape_binary_mask, iota}, compare,
      /*is_stable=*/true));
HloInstruction* gather_indices = reshape->AddInstruction(
HloInstruction::CreateGetTupleElement(mask_output_shape, sort, 1));
GatherDimensionNumbers gather_dim_numbers;
for (int64_t i = 0; i < output_shape.dimensions_size(); ++i) {
if (i != output_dim) {
gather_dim_numbers.add_offset_dims(i);
}
}
gather_dim_numbers.add_start_index_map(output_dim);
gather_dim_numbers.set_index_vector_dim(1);
gather_dim_numbers.add_collapsed_slice_dims(output_dim);
HloInstruction* static_dim_size = reshape->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
reshape->shape().dimensions(output_dim))));
Shape reshape_static_shape = reshape->shape();
reshape_static_shape.set_dynamic_dimension(output_dim, false);
HloInstruction* reshape_static =
reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
reshape_static_shape, reshape, static_dim_size, output_dim));
std::vector<int64_t> gather_slice_sizes(output_shape.dimensions().begin(),
output_shape.dimensions().end());
gather_slice_sizes[output_dim] = 1;
  HloInstruction* gather = reshape->AddInstruction(HloInstruction::CreateGather(
      output_shape, reshape_static, gather_indices, gather_dim_numbers,
      gather_slice_sizes, /*indices_are_sorted=*/true));
HloInstruction* output_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reshape, {}, output_dim);
TF_RET_CHECK(output_dynamic_size != nullptr);
gather = reshape->AddInstruction(HloInstruction::CreateSetDimensionSize(
gather->shape(), gather, output_dynamic_size, output_dim));
auto users = reshape->users();
for (auto* user : users) {
if (user != reshape_static && user != output_dynamic_size) {
TF_RETURN_IF_ERROR(reshape->ReplaceUseWith(user, gather));
}
}
if (reshape == reshape->parent()->root_instruction()) {
reshape->parent()->set_root_instruction(gather);
}
TF_RETURN_IF_ERROR(
dynamic_dimension_inference->ForwardDynamicSize(reshape, gather, {}));
return true;
}
absl::StatusOr<bool> RewriteDynamicReshapeSingleGroup(
HloInstruction* reshape, absl::Span<const int64_t> input_dims,
absl::Span<const int64_t> output_dims,
absl::Span<HloInstruction*> input_dynamic_dims,
absl::Span<HloInstruction*> output_dynamic_dims,
DynamicDimensionInference* dynamic_dimension_inference) {
VLOG(2) << "Rewriting dynamic reshape " << reshape->ToString()
<< " input dims: " << VectorString(input_dims)
<< " output dims: " << VectorString(output_dims);
const Shape operand_shape = reshape->operand(0)->shape();
const Shape output_shape = reshape->shape();
if (input_dims.size() == 1) {
int64_t input_dim = input_dims[0];
if (operand_shape.dimensions()[input_dim] == 1) {
return false;
}
return RewriteDynamicReshapeSplitInput(reshape, input_dim, output_dims,
output_dynamic_dims,
dynamic_dimension_inference);
}
if (output_dims.size() == 1) {
int64_t output_dim = output_dims[0];
if (output_shape.dimensions()[output_dim] == 1) {
return false;
}
return RewriteDynamicReshapeCombineInput(reshape, input_dims, output_dim,
input_dynamic_dims,
dynamic_dimension_inference);
}
TF_RET_CHECK(false);
return false;
}
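// Rewrites a reverse over dynamic dimensions. Reversing the full static bound
// moves the padding to the front, so the result is padded on the high side
// and then dynamic-sliced at offset `bound - dynamic_size` to realign the
// data. Illustrative example with bound 4, dynamic size 3:
// reverse([a,b,c,P]) = [P,c,b,a]; slicing [P,c,b,a,0,0,0,0] at offset 1
// yields [c,b,a,0].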
absl::StatusOr<bool> RewriteReverse(
HloInstruction* reverse,
DynamicDimensionInference* dynamic_dimension_inference) {
auto reverse_dims = reverse->dimensions();
const Shape& reverse_shape = reverse->shape();
std::set<int64_t> dynamic_reverse_dims;
for (int64_t reverse_dim : reverse_dims) {
HloInstruction* dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reverse, {}, reverse_dim);
if (dynamic_size == nullptr) {
continue;
}
dynamic_reverse_dims.insert(reverse_dim);
}
if (dynamic_reverse_dims.empty()) {
return false;
}
PaddingConfig padding;
Shape pad_shape = reverse_shape;
for (int i = 0; i < reverse_shape.rank(); ++i) {
auto dimension = padding.add_dimensions();
if (dynamic_reverse_dims.count(i) > 0) {
dimension->set_edge_padding_low(0);
dimension->set_edge_padding_high(reverse_shape.dimensions(i));
dimension->set_interior_padding(0);
pad_shape.set_dimensions(i, 2 * pad_shape.dimensions(i));
}
}
HloInstruction* cloned_reverse = reverse->AddInstruction(reverse->Clone());
HloInstruction* zero = reverse->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(pad_shape.element_type())));
HloInstruction* pad = reverse->AddInstruction(
HloInstruction::CreatePad(pad_shape, cloned_reverse, zero, padding));
std::vector<HloInstruction*> start_indices;
start_indices.reserve(reverse_shape.rank());
for (int i = 0; i < reverse_shape.rank(); ++i) {
if (dynamic_reverse_dims.count(i) > 0) {
HloInstruction* bound_size =
reverse->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(reverse_shape.dimensions(i))));
HloInstruction* dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reverse, {}, i);
HloInstruction* start_offset =
reverse->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kSubtract, bound_size,
dynamic_size));
start_indices.push_back(start_offset);
} else {
HloInstruction* zero = reverse->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
start_indices.push_back(zero);
}
}
HloInstruction* dynamic_reverse =
reverse->AddInstruction(HloInstruction::CreateDynamicSlice(
reverse_shape, pad, start_indices, reverse_shape.dimensions()));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reverse, dynamic_reverse, {}));
TF_RETURN_IF_ERROR(reverse->ReplaceAllUsesWith(dynamic_reverse));
return true;
}
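// Converts SAME padding on a dynamically sized input into explicit padding
// plus a dynamic-slice: the input is edge-padded for the worst-case window
// placement (with base dilation materialized as interior padding), then
// dynamic-sliced at `edge_padding_low - padding_before` so the window
// alignment matches what SAME padding would produce at the runtime size.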
HloInstruction* RewriteInputWithDynamicPadding(
HloInstruction* conv, HloInstruction* input, HloInstruction* padding_value,
absl::Span<HloInstruction*> padding_before, Window* input_window,
absl::FunctionRef<int64_t(int64_t)> window_dim_to_shape_dim) {
HloInstruction* zero_s32 = conv->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::Zero(S32)));
Shape padded_shape = input->shape();
PaddingConfig padding_configs;
for (int64_t i = 0; i < input->shape().rank(); ++i) {
PaddingConfig::PaddingConfigDimension padding_dim;
*padding_configs.add_dimensions() = padding_dim;
}
std::vector<HloInstruction*> start_indices(input->shape().rank(), zero_s32);
for (int64_t dim_index = 0; dim_index < input_window->dimensions_size();
++dim_index) {
if (padding_before[dim_index] == nullptr) {
continue;
}
int64_t shape_dim = window_dim_to_shape_dim(dim_index);
WindowDimension* window_dim = input_window->mutable_dimensions(dim_index);
auto* padding_dim = padding_configs.mutable_dimensions(shape_dim);
const int64_t dilated_window_size = window_util::DilatedBound(
window_dim->size(), window_dim->window_dilation());
padding_dim->set_edge_padding_low(dilated_window_size);
padding_dim->set_edge_padding_high(window_dim->padding_high() +
window_dim->padding_low());
padding_dim->set_interior_padding(window_dim->base_dilation() - 1);
HloInstruction* slicing_start =
conv->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kSubtract,
conv->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
padding_dim->edge_padding_low()))),
padding_before[dim_index]));
start_indices[shape_dim] = slicing_start;
padded_shape.mutable_dimensions()[shape_dim] =
window_dim->padding_low() +
window_util::DilatedBound(padded_shape.dimensions(shape_dim),
window_dim->base_dilation()) +
window_dim->padding_high();
window_dim->clear_padding_high();
window_dim->clear_padding_low();
window_dim->set_base_dilation(1);
input->mutable_shape()->set_dynamic_dimension(shape_dim, false);
}
HloInstruction* pad =
MakePadHlo(input, padding_value, padding_configs).value();
input = conv->AddInstruction(HloInstruction::CreateDynamicSlice(
padded_shape, pad, start_indices, padded_shape.dimensions()));
return input;
}
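// Rewrites a DynamicConvolutionInputGrad custom call. Operand 0 carries the
// runtime input sizes, operand 1 the output gradients, and operand 2 the
// (static) kernel. Gradients are zero-masked along dynamic spatial
// dimensions, SAME padding is made explicit, and the custom call is lowered
// to an ordinary static convolution.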
absl::StatusOr<bool> RewriteDynamicConvolutionInputGrad(
HloInstruction* custom_call_conv,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* grad = custom_call_conv->mutable_operand(1);
HloInstruction* kernel = custom_call_conv->mutable_operand(2);
TF_RET_CHECK(kernel->shape().is_static());
auto dnums = custom_call_conv->convolution_dimension_numbers();
Window window = custom_call_conv->window();
HloInstruction* zero =
custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(custom_call_conv->shape().element_type())));
std::vector<HloInstruction*> padding_before(
dnums.input_spatial_dimensions_size(), nullptr);
for (int64_t spatial_dim_index = 0;
spatial_dim_index < dnums.input_spatial_dimensions_size();
++spatial_dim_index) {
int64_t input_spatial_dim =
dnums.input_spatial_dimensions(spatial_dim_index);
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(1), {}, input_spatial_dim);
if (operand_dynamic_size == nullptr) {
continue;
}
grad = PadWithScalar(grad, input_spatial_dim, operand_dynamic_size, zero);
HloInstruction* slice =
custom_call_conv->AddInstruction(HloInstruction::CreateSlice(
ShapeUtil::MakeShape(S32, {1}),
custom_call_conv->mutable_operand(0), {input_spatial_dim},
{input_spatial_dim + 1}, {1}));
HloInstruction* dynamic_input_size = custom_call_conv->AddInstruction(
HloInstruction::CreateReshape(ShapeUtil::MakeScalarShape(S32), slice));
const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
    DynamicWindowDims dynamic_window_dims = GetWindowedInputGradSize(
        dynamic_input_size, /*window_size=*/window_dim.size(),
        /*window_dilation=*/window_dim.window_dilation(),
        /*window_stride=*/window_dim.base_dilation(),
        custom_call_conv->padding_type());
padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
}
if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
grad = RewriteInputWithDynamicPadding(
custom_call_conv, grad, zero, absl::MakeSpan(padding_before), &window,
[&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
}
PrecisionConfig precision_config;
if (custom_call_conv->precision_config().operand_precision_size() == 3) {
*precision_config.mutable_operand_precision() = {
custom_call_conv->precision_config().operand_precision().begin() + 1,
custom_call_conv->precision_config().operand_precision().end()};
}
  HloInstruction* static_conv =
      custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
          custom_call_conv->shape(), grad, kernel,
          custom_call_conv->feature_group_count(),
          custom_call_conv->batch_group_count(), window,
          custom_call_conv->convolution_dimension_numbers(),
          // Use the precision config with the input_sizes entry dropped, since
          // the static convolution has only two operands.
          precision_config));
TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
custom_call_conv, static_conv, {}));
return true;
}
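// Rewrites a DynamicConvolutionForward custom call: dynamic spatial and
// input-feature dimensions are zero-masked (so padded elements contribute
// nothing to the contraction), SAME padding is made explicit, and the custom
// call is replaced by a regular static convolution.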
absl::StatusOr<bool> RewriteDynamicConvolutionForward(
HloInstruction* custom_call_conv,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* input = custom_call_conv->mutable_operand(0);
HloInstruction* kernel = custom_call_conv->mutable_operand(1);
Window window = custom_call_conv->window();
auto dnums = custom_call_conv->convolution_dimension_numbers();
HloInstruction* zero =
custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(custom_call_conv->shape().element_type())));
std::vector<HloInstruction*> padding_before(
dnums.input_spatial_dimensions_size(), nullptr);
for (int64_t spatial_dim_index = 0;
spatial_dim_index < dnums.input_spatial_dimensions_size();
++spatial_dim_index) {
int64_t input_spatial_dim =
dnums.input_spatial_dimensions(spatial_dim_index);
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(0), {}, input_spatial_dim);
if (operand_dynamic_size == nullptr) {
continue;
}
input = PadWithScalar(input, input_spatial_dim, operand_dynamic_size, zero);
const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), custom_call_conv->padding_type());
padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
}
const int64_t input_feature_dim = dnums.input_feature_dimension();
if (HloInstruction* input_feature_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(0), {}, input_feature_dim)) {
input = PadWithScalar(input, input_feature_dim, input_feature_dynamic_size,
zero);
}
if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
input = RewriteInputWithDynamicPadding(
custom_call_conv, input, zero, absl::MakeSpan(padding_before), &window,
[&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
}
HloInstruction* static_conv =
custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
custom_call_conv->shape(), input, kernel,
custom_call_conv->feature_group_count(),
custom_call_conv->batch_group_count(), window,
custom_call_conv->convolution_dimension_numbers(),
custom_call_conv->precision_config()));
TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
custom_call_conv, static_conv, {}));
return true;
}
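// Rewrites a DynamicConvolutionKernelGrad custom call: both activations and
// gradients are zero-masked along their dynamic dimensions (which must be
// dynamic in tandem per spatial dimension), SAME padding is made explicit,
// and the custom call is lowered to a static convolution.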
absl::StatusOr<bool> RewriteDynamicConvolutionKernelGrad(
HloInstruction* custom_call_conv,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* activations = custom_call_conv->mutable_operand(0);
HloInstruction* gradients = custom_call_conv->mutable_operand(1);
TF_RET_CHECK(dynamic_dimension_inference->HasDynamicDimension(activations));
TF_RET_CHECK(dynamic_dimension_inference->HasDynamicDimension(gradients));
Window window = custom_call_conv->window();
auto dnums = custom_call_conv->convolution_dimension_numbers();
HloInstruction* zero =
custom_call_conv->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(custom_call_conv->shape().element_type())));
std::vector<HloInstruction*> padding_before(
dnums.input_spatial_dimensions_size(), nullptr);
for (int64_t spatial_dim_index = 0;
spatial_dim_index < dnums.input_spatial_dimensions_size();
++spatial_dim_index) {
int64_t input_spatial_dim =
dnums.input_spatial_dimensions(spatial_dim_index);
int64_t kernel_spatial_dim =
dnums.kernel_spatial_dimensions(spatial_dim_index);
HloInstruction* activations_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(0), {}, input_spatial_dim);
if (activations_dynamic_size != nullptr) {
activations = PadWithScalar(activations, input_spatial_dim,
activations_dynamic_size, zero);
}
HloInstruction* gradients_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(1), {}, kernel_spatial_dim);
if (gradients_dynamic_size != nullptr) {
gradients = PadWithScalar(gradients, kernel_spatial_dim,
gradients_dynamic_size, zero);
}
if (activations_dynamic_size == nullptr ||
gradients_dynamic_size == nullptr) {
TF_RET_CHECK(activations_dynamic_size == nullptr &&
gradients_dynamic_size == nullptr);
continue;
}
int64_t output_spatial_dim =
dnums.output_spatial_dimensions(spatial_dim_index);
const WindowDimension& window_dim = window.dimensions(spatial_dim_index);
    DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
        activations_dynamic_size, /*window_size=*/
        custom_call_conv->shape().dimensions(output_spatial_dim),
        /*window_dilation=*/window_dim.stride(),
        /*window_stride=*/window_dim.window_dilation(),
        custom_call_conv->padding_type());
padding_before[spatial_dim_index] = dynamic_window_dims.padding_before;
}
const int64_t input_feature_dim = dnums.input_feature_dimension();
if (HloInstruction* input_feature_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(
custom_call_conv->mutable_operand(0), {}, input_feature_dim)) {
activations = PadWithScalar(activations, input_feature_dim,
input_feature_dynamic_size, zero);
}
if (custom_call_conv->padding_type() == PaddingType::PADDING_SAME) {
activations = RewriteInputWithDynamicPadding(
custom_call_conv, activations, zero, absl::MakeSpan(padding_before),
&window,
[&](int64_t dim) { return dnums.input_spatial_dimensions(dim); });
}
HloInstruction* static_conv =
custom_call_conv->AddInstruction(HloInstruction::CreateConvolve(
custom_call_conv->shape(), activations, gradients,
custom_call_conv->feature_group_count(),
custom_call_conv->batch_group_count(), window,
custom_call_conv->convolution_dimension_numbers(),
custom_call_conv->precision_config()));
TF_RETURN_IF_ERROR(custom_call_conv->ReplaceAllUsesWith(static_conv));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
custom_call_conv, static_conv, {}));
return true;
}
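// Rewrites a reduce-window with SAME padding over dynamic dimensions: padded
// elements are replaced with the init value (the reduction's identity), and
// the SAME padding is made explicit via RewriteInputWithDynamicPadding.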
absl::StatusOr<bool> RewriteDynamicReduceWindowSamePadding(
HloInstruction* hlo,
DynamicDimensionInference* dynamic_dimension_inference) {
if (hlo->shape().IsTuple()) {
return Unimplemented("DynamicReduceWindowSamePadding not yet supported.");
}
HloInstruction* input = hlo->mutable_operand(0);
HloInstruction* init = hlo->mutable_operand(1);
int64_t rank = hlo->shape().rank();
Window window = hlo->window();
std::vector<HloInstruction*> padding_before(hlo->shape().rank(), nullptr);
for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(0), {},
dim_index);
if (operand_dynamic_size == nullptr) {
continue;
}
const WindowDimension& window_dim = window.dimensions(dim_index);
if (window_util::IsTrivialWindowDimension(window_dim)) {
continue;
}
input = PadWithScalar(input, dim_index, operand_dynamic_size, init);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), PaddingType::PADDING_SAME);
padding_before[dim_index] = dynamic_window_dims.padding_before;
}
input = RewriteInputWithDynamicPadding(
hlo, input, init, absl::MakeSpan(padding_before), &window,
[](int64_t dim) { return dim; });
HloInstruction* rewritten =
hlo->AddInstruction(HloInstruction::CreateReduceWindow(
hlo->shape(), input, init, window, hlo->called_computations()[0]));
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rewritten));
TF_RETURN_IF_ERROR(
dynamic_dimension_inference->ForwardDynamicSize(hlo, rewritten, {}));
return true;
}
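// Rewrites a "DynamicSelectAndScatterSamePadding" custom call into a static
// select-and-scatter: the input is padded with the select function's identity
// value, the source with the init value, and the result is dynamic-sliced
// back to the original shape to strip the padding.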
absl::StatusOr<bool> RewriteDynamicSelectAndScatterSamePadding(
HloInstruction* hlo,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* input = hlo->mutable_operand(0);
HloInstruction* source = hlo->mutable_operand(1);
HloInstruction* init = hlo->mutable_operand(2);
TF_ASSIGN_OR_RETURN(HloInstruction * input_padding_value,
ChooseIdentityValue(hlo, 0));
int64_t rank = hlo->shape().rank();
Window window = hlo->window();
  std::vector<HloInstruction*> padding_before(rank, nullptr);
for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
const WindowDimension& window_dim = window.dimensions(dim_index);
if (window_util::IsTrivialWindowDimension(window_dim)) {
continue;
}
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(0), {},
dim_index);
if (operand_dynamic_size == nullptr) {
continue;
}
input = PadWithScalar(input, dim_index, operand_dynamic_size,
input_padding_value);
HloInstruction* source_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(hlo->mutable_operand(1), {},
dim_index);
if (source_dynamic_size == nullptr) {
continue;
}
source = PadWithScalar(source, dim_index, source_dynamic_size, init);
DynamicWindowDims dynamic_window_dims = GetWindowedOutputSize(
operand_dynamic_size, window_dim.size(), window_dim.window_dilation(),
window_dim.stride(), PaddingType::PADDING_SAME);
padding_before[dim_index] = dynamic_window_dims.padding_before;
}
input = RewriteInputWithDynamicPadding(
hlo, input, input_padding_value, absl::MakeSpan(padding_before), &window,
[](int64_t dim) { return dim; });
HloInstruction* rewritten =
hlo->AddInstruction(HloInstruction::CreateSelectAndScatter(
input->shape(), input, hlo->called_computations()[0], window, source,
init, hlo->called_computations()[1]));
std::vector<HloInstruction*> start_indices(
input->shape().rank(), hlo->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::Zero(S32))));
PaddingConfig padding_configs;
for (int64_t dim_index = 0; dim_index < rank; ++dim_index) {
PaddingConfig::PaddingConfigDimension padding_dim;
if (padding_before[dim_index] != nullptr) {
const WindowDimension& window_dim = window.dimensions(dim_index);
const int64_t dilated_window_size = window_util::DilatedBound(
window_dim.size(), window_dim.window_dilation());
padding_dim.set_edge_padding_high(dilated_window_size);
start_indices[dim_index] = padding_before[dim_index];
}
*padding_configs.add_dimensions() = padding_dim;
}
HloInstruction* padded = MakePadHlo(rewritten, init, padding_configs).value();
rewritten = hlo->AddInstruction(HloInstruction::CreateDynamicSlice(
hlo->shape(), padded, start_indices, hlo->shape().dimensions()));
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWith(rewritten));
TF_RETURN_IF_ERROR(
dynamic_dimension_inference->ForwardDynamicSize(hlo, rewritten, {}));
return true;
}
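// Rewrites a concatenate whose concat dimension is dynamic into a chain of
// dynamic-update-slices, placing each operand at a running offset that is the
// sum of the (dynamic or static) concat-dimension sizes of the preceding
// operands.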
absl::StatusOr<bool> RewriteDynamicConcat(
HloInstruction* concat,
DynamicDimensionInference* dynamic_dimension_inference) {
const int64_t concat_dim = concat->concatenate_dimension();
if (dynamic_dimension_inference->GetDynamicSize(concat, {}, concat_dim) ==
nullptr) {
return false;
}
std::vector<HloInstruction*> offsets;
offsets.reserve(concat->shape().dimensions_size());
for (int64_t i = 0; i < concat->shape().dimensions_size(); ++i) {
offsets.push_back(concat->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(0))));
}
HloInstruction* rewritten_concat = concat;
auto prev_users = concat->users();
for (int64_t i = 0; i < concat->operand_count(); ++i) {
HloInstruction* operand = concat->mutable_operand(i);
rewritten_concat =
concat->AddInstruction(HloInstruction::CreateDynamicUpdateSlice(
rewritten_concat->shape(), rewritten_concat, operand, offsets));
HloInstruction* dynamic_size =
dynamic_dimension_inference->GetDynamicSize(operand, {}, concat_dim);
if (dynamic_size == nullptr) {
HloInstruction* static_size = concat->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
operand->shape().dimensions(concat_dim))));
offsets[concat_dim] = concat->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kAdd, offsets[concat_dim],
static_size));
} else {
offsets[concat_dim] = concat->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(S32), HloOpcode::kAdd, offsets[concat_dim],
dynamic_size));
}
}
TF_RETURN_IF_ERROR(concat->ReplaceUsesWith(prev_users, rewritten_concat));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
concat, rewritten_concat, {}));
return true;
}
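// Rewrites a sort with a dynamic sort dimension by appending an "in bounds"
// predicate operand (iota < dynamic_size) and augmenting the comparator so
// that out-of-bounds (padding) elements always compare last.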
absl::StatusOr<bool> RewriteDynamicSort(
HloInstruction* hlo,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* dynamic_size = nullptr;
HloSortInstruction* sort = Cast<HloSortInstruction>(hlo);
int64_t sort_dim = sort->sort_dimension();
for (auto* operand : sort->operands()) {
if (dynamic_size == nullptr) {
dynamic_size =
dynamic_dimension_inference->GetDynamicSize(operand, {}, sort_dim);
}
}
if (dynamic_size == nullptr) {
return false;
}
Shape operand_shape =
ShapeUtil::ChangeElementType(sort->operand(0)->shape(), S32);
Shape broadcast_shape = ShapeUtil::MakeStaticShape(operand_shape);
HloInstruction* iota = hlo->AddInstruction(
HloInstruction::CreateIota(broadcast_shape, sort_dim));
HloInstruction* dynamic_size_broadcasted = hlo->AddInstruction(
HloInstruction::CreateBroadcast(broadcast_shape, dynamic_size, {}));
HloInstruction* lt = hlo->AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::ChangeElementType(broadcast_shape, PRED), iota,
dynamic_size_broadcasted, ComparisonDirection::kLt));
sort->AppendOperand(lt);
const int64_t param_number_before_rewritten =
sort->called_computations()[0]->num_parameters();
auto new_param_0 = HloInstruction::CreateParameter(
param_number_before_rewritten, ShapeUtil::MakeScalarShape(PRED),
"inbound_lhs");
auto new_param_1 = HloInstruction::CreateParameter(
param_number_before_rewritten + 1, ShapeUtil::MakeScalarShape(PRED),
"inbound_rhs");
std::vector<const HloInstruction*> extra_parameters{new_param_0.get(),
new_param_1.get()};
HloComputation* sort_comp = sort->GetModule()->AddEmbeddedComputation(
sort->called_computations()[0]->CloneWithReplacements(
nullptr, extra_parameters));
auto inbound_lhs =
sort_comp->parameter_instruction(param_number_before_rewritten);
auto inbound_rhs =
sort_comp->parameter_instruction(param_number_before_rewritten + 1);
sort->ReplaceCalledComputations(
[&](HloComputation* comp) { return sort_comp; });
auto out_of_bound_rhs = sort_comp->AddInstruction(HloInstruction::CreateUnary(
ShapeUtil::MakeScalarShape(PRED), HloOpcode::kNot, inbound_rhs));
auto sort_comp_or_out_of_bound_rhs =
sort_comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(PRED), HloOpcode::kOr,
sort_comp->root_instruction(), out_of_bound_rhs));
auto new_root = sort_comp->AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeScalarShape(PRED), HloOpcode::kAnd, inbound_lhs,
sort_comp_or_out_of_bound_rhs));
sort_comp->set_root_instruction(new_root);
if (sort->shape().IsTuple()) {
*sort->mutable_shape()->add_tuple_shapes() =
ShapeUtil::ChangeElementType(operand_shape, PRED);
} else {
auto sort_users = sort->users();
auto sort_clone = hlo->AddInstruction(sort->Clone());
*sort_clone->mutable_shape() = ShapeUtil::MakeTupleShape(
{sort->shape(), ShapeUtil::ChangeElementType(operand_shape, PRED)});
auto rewritten_sort = hlo->AddInstruction(
HloInstruction::CreateGetTupleElement(sort->shape(), sort_clone, 0));
for (HloInstruction* user : sort_users) {
TF_RETURN_IF_ERROR(sort->ReplaceUseWith(user, rewritten_sort));
}
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
sort, rewritten_sort, {}));
if (hlo->parent()->root_instruction() == sort) {
hlo->parent()->set_root_instruction(rewritten_sort);
}
}
return true;
}
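// Rewrites an elementwise binary op whose operands disagree on a dynamic
// dimension. When one side's dynamic size is 1, it is implicitly broadcast at
// runtime: a size-1 slice is extracted, broadcast back to the full static
// shape, and selected into the operand under a runtime predicate.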
absl::StatusOr<bool> RewriteDynamicBinaryOp(
HloInstruction* binary,
DynamicDimensionInference* dynamic_dimension_inference) {
HloInstruction* operand_0 = binary->mutable_operand(0);
HloInstruction* operand_1 = binary->mutable_operand(1);
TF_RET_CHECK(operand_0->shape().rank() == operand_1->shape().rank());
auto dims_0 = dynamic_dimension_inference->GetDynamicSizes(operand_0, {});
auto dims_1 = dynamic_dimension_inference->GetDynamicSizes(operand_1, {});
bool changed = false;
for (int64_t i = 0; i < dims_0.size(); ++i) {
HloInstruction* dim_0 = dims_0[i];
HloInstruction* dim_1 = dims_1[i];
if (dims_0[i] != dims_1[i] && dims_0[i] != nullptr &&
dims_1[i] != nullptr) {
changed = true;
auto rewrite_operand = [&](HloInstruction* pred,
HloInstruction* operand) -> HloInstruction* {
Shape static_shape = ShapeUtil::MakeStaticShape(operand->shape());
pred = binary->AddInstruction(HloInstruction::CreateBroadcast(
ShapeUtil::ChangeElementType(static_shape, PRED), pred, {}));
Shape slice_shape = static_shape;
slice_shape.set_dimensions(i, 1);
std::vector<int64_t> start_indices(slice_shape.rank(), 0);
std::vector<int64_t> strides(slice_shape.rank(), 1);
HloInstruction* slice = binary->AddInstruction(
HloInstruction::CreateSlice(slice_shape, operand, start_indices,
slice_shape.dimensions(), strides));
Shape reshape_shape = ShapeUtil::DeleteDimension(i, slice_shape);
HloInstruction* reshape = binary->AddInstruction(
HloInstruction::CreateReshape(reshape_shape, slice));
std::vector<int64_t> broadcast_dims;
broadcast_dims.reserve(static_shape.rank() - 1);
for (int64_t j = 0; j < static_shape.rank(); ++j) {
if (j != i) {
broadcast_dims.push_back(j);
}
}
HloInstruction* broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateBroadcast(static_shape, reshape,
broadcast_dims),
"implicit_broadcast");
HloInstruction* select =
binary->AddInstruction(HloInstruction::CreateTernary(
static_shape, HloOpcode::kSelect, pred, broadcast, operand));
return select;
};
HloInstruction* one = binary->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::One(S32)));
auto operand_0_needs_broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_0,
dim_1, ComparisonDirection::kLt),
"lhs_less_than_rhs");
auto is_one = binary->parent()->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_0,
one, ComparisonDirection::kEq),
"lhs_is_one");
operand_0_needs_broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),
HloOpcode::kAnd, is_one,
operand_0_needs_broadcast),
"lhs_needs_implicit_broadcast");
operand_0 = rewrite_operand(operand_0_needs_broadcast, operand_0);
auto operand_1_needs_broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_1,
dim_0, ComparisonDirection::kLt),
"rhs_less_than_lhs");
is_one = binary->parent()->AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), dim_1,
one, ComparisonDirection::kEq),
"rhs_is_one");
operand_1_needs_broadcast = binary->parent()->AddInstruction(
HloInstruction::CreateBinary(ShapeUtil::MakeShape(PRED, {}),
HloOpcode::kAnd, is_one,
operand_1_needs_broadcast),
"lhs_needs_implicit_broadcast");
operand_1 = rewrite_operand(operand_1_needs_broadcast, operand_1);
}
}
if (changed) {
TF_RETURN_IF_ERROR(binary->ReplaceOperandWith(0, operand_0));
TF_RETURN_IF_ERROR(binary->ReplaceOperandWith(1, operand_1));
}
return changed;
}
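// Rewrites a dynamic-update-slice whose update has dynamic dimensions smaller
// than the base: the padded region of the update is replaced element-wise
// with the corresponding slice of the base so that padding does not clobber
// valid base data.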
absl::StatusOr<bool> RewriteDynamicUpdateSlice(
HloInstruction* hlo,
DynamicDimensionInference* dynamic_dimension_inference) {
HloDynamicUpdateSliceInstruction* dus =
Cast<HloDynamicUpdateSliceInstruction>(hlo);
HloInstruction* update = dus->mutable_operand(1);
HloInstruction* base = dus->mutable_operand(0);
std::vector<HloInstruction*> dynamic_dims_in_partial_update(
update->shape().rank(), nullptr);
bool needs_rewrite = false;
for (int64_t i = 0; i < update->shape().rank(); ++i) {
if (update->shape().dimensions(i) < base->shape().dimensions(i)) {
HloInstruction* dynamic_dim =
dynamic_dimension_inference->GetDynamicSize(update, {}, i);
if (dynamic_dim != nullptr) {
dynamic_dims_in_partial_update[i] = dynamic_dim;
needs_rewrite = true;
}
}
}
if (!needs_rewrite) {
return false;
}
std::vector<HloInstruction*> indices;
indices.reserve(dus->operand_count() - 2);
for (int64_t i = 2; i < dus->operand_count(); ++i) {
indices.push_back(dus->mutable_operand(i));
}
HloInstruction* base_slice =
dus->AddInstruction(HloInstruction::CreateDynamicSlice(
update->shape(), base, indices, update->shape().dimensions()));
for (int64_t i = 0; i < dynamic_dims_in_partial_update.size(); ++i) {
HloInstruction* dynamic_dim = dynamic_dims_in_partial_update[i];
if (dynamic_dim != nullptr) {
Shape mask_shape_int = ShapeUtil::ChangeElementType(update->shape(), S32);
Shape mask_shape_pred =
ShapeUtil::ChangeElementType(update->shape(), PRED);
HloInstruction* iota =
dus->AddInstruction(HloInstruction::CreateIota(mask_shape_int, i));
HloInstruction* broadcast_dim = dus->AddInstruction(
HloInstruction::CreateBroadcast(mask_shape_int, dynamic_dim, {}));
HloInstruction* pred = dus->AddInstruction(HloInstruction::CreateCompare(
mask_shape_pred, iota, broadcast_dim, ComparisonDirection::kLt));
update = dus->AddInstruction(HloInstruction::CreateTernary(
update->shape(), HloOpcode::kSelect, pred, update, base_slice));
}
}
TF_RETURN_IF_ERROR(dus->ReplaceOperandWith(1, update));
return true;
}
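// Rewrites a reshape involving dynamic dimensions. Input and output
// dimensions are grouped via CommonFactors; a many-to-many group forces a
// decomposition into a flatten-unflatten pair, otherwise each group is
// rewritten individually.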
absl::StatusOr<bool> RewriteDynamicReshape(
HloInstruction* reshape,
DynamicDimensionInference* dynamic_dimension_inference) {
bool changed = false;
HloInstruction* operand = reshape->mutable_operand(0);
std::vector<HloInstruction*> input_dynamic_dims;
input_dynamic_dims.reserve(operand->shape().dimensions_size());
for (int64_t dim = 0; dim < operand->shape().dimensions_size(); ++dim) {
input_dynamic_dims.push_back(
dynamic_dimension_inference->GetDynamicSize(operand, {}, dim));
}
std::vector<HloInstruction*> output_dynamic_dims;
output_dynamic_dims.reserve(reshape->shape().dimensions_size());
for (int64_t dim = 0; dim < reshape->shape().dimensions_size(); ++dim) {
output_dynamic_dims.push_back(
dynamic_dimension_inference->GetDynamicSize(reshape, {}, dim));
}
auto common_factors = CommonFactors(operand->shape().dimensions(),
reshape->shape().dimensions());
bool need_flatten_unflatten = false;
auto is_dynamic_dimension = [&](int64_t dim) {
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference->GetDynamicSize(reshape, {}, dim);
return operand_dynamic_size != nullptr ||
reshape->shape().is_dynamic_dimension(dim);
};
  auto should_skip_common_factor_group =
      [&](const DimensionVector& input_dims,
          const DimensionVector& output_dims) {
if (input_dims.empty() || output_dims.empty()) {
return true;
}
if (absl::c_none_of(output_dims, is_dynamic_dimension)) {
VLOG(2) << "All dimensions are static in this common factor group";
return true;
}
if (input_dims.size() == 1 && output_dims.size() == 1) {
return true;
}
return false;
};
for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
auto start = common_factors[i];
auto end = common_factors[i + 1];
DimensionVector input_dims;
DimensionVector output_dims;
for (int64_t dim = start.first; dim < end.first; ++dim) {
input_dims.push_back(dim);
}
for (int64_t dim = start.second; dim < end.second; ++dim) {
output_dims.push_back(dim);
}
if (should_skip_common_factor_group(input_dims, output_dims)) {
continue;
}
if (input_dims.size() > 1 && output_dims.size() > 1) {
need_flatten_unflatten = true;
break;
}
}
if (need_flatten_unflatten) {
VLOG(2) << "Rewrite dynamic reshape to flatten-unflatten pair. "
<< reshape->ToString();
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flattened_shape =
ShapeUtil::MakeShape(operand->shape().element_type(), {num_elements});
HloInstruction* flatten = operand->parent()->AddInstruction(
HloInstruction::CreateReshape(flattened_shape, operand),
absl::StrCat(reshape->name(), ".flatten"));
HloInstruction* dynamic_size =
operand->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(num_elements)));
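    // Compute the dynamic element count of the flattened dimension: start
    // from the static element count and, for each dynamic dimension, divide
    // out its static extent and multiply in its dynamic size.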
for (int64_t i = 0; i < operand->shape().rank(); i++) {
HloInstruction* dynamic_dim_size =
dynamic_dimension_inference->GetDynamicSize(operand, {}, i);
if (dynamic_dim_size != nullptr) {
HloInstruction* static_dim_size = operand->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(
operand->shape().dimensions(i))));
dynamic_size = operand->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kDivide, dynamic_size,
static_dim_size));
dynamic_size = operand->AddInstruction(HloInstruction::CreateBinary(
dynamic_size->shape(), HloOpcode::kMultiply, dynamic_size,
dynamic_dim_size));
}
}
dynamic_dimension_inference->SetDynamicSize(flatten, {}, 0, dynamic_size);
Shape unflattened_shape = ShapeUtil::MakeStaticShape(reshape->shape());
HloInstruction* unflatten = reshape->parent()->AddInstruction(
HloInstruction::CreateReshape(unflattened_shape, flatten),
absl::StrCat(reshape->name(), ".unflatten"));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reshape, unflatten, {}));
TF_ASSIGN_OR_RETURN(
bool changed_unused,
RewriteDynamicReshape(flatten, dynamic_dimension_inference));
TF_ASSIGN_OR_RETURN(
changed_unused,
RewriteDynamicReshape(unflatten, dynamic_dimension_inference));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reshape, unflatten, {}));
TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(unflatten));
return true;
}
for (int64_t i = 0; i < common_factors.size() - 1; ++i) {
auto start = common_factors[i];
auto end = common_factors[i + 1];
DimensionVector input_dims;
DimensionVector output_dims;
for (int64_t dim = start.first; dim < end.first; ++dim) {
input_dims.push_back(dim);
}
for (int64_t dim = start.second; dim < end.second; ++dim) {
output_dims.push_back(dim);
}
VLOG(2) << "input_dims: " << VectorString(input_dims);
VLOG(2) << "output_dims: " << VectorString(output_dims);
if (should_skip_common_factor_group(input_dims, output_dims)) {
continue;
}
if (input_dims.size() > 1 && output_dims.size() > 1) {
return Internal(
"Should be handled by decomposing reshape into "
"flatten-unflatten pair. %s",
reshape->ToString());
}
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicReshapeSingleGroup(
reshape, input_dims, output_dims,
absl::MakeSpan(input_dynamic_dims),
absl::MakeSpan(output_dynamic_dims),
dynamic_dimension_inference));
changed |= c;
}
if (reshape->opcode() == HloOpcode::kDynamicReshape) {
auto* static_reshape =
reshape->AddInstruction(HloInstruction::CreateReshape(
reshape->shape(), reshape->mutable_operand(0)));
TF_RETURN_IF_ERROR(reshape->ReplaceAllUsesWith(static_reshape));
TF_RETURN_IF_ERROR(dynamic_dimension_inference->ForwardDynamicSize(
reshape, static_reshape, {}));
changed = true;
}
return changed;
}
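// After padding, walks a computation and removes dynamic shapes where
// possible, inserting "SliceToDynamic" conversions only where an op (or the
// entry root, when dynamic output is required) actually needs dynamic shapes.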
class DynamicShapeRemovingVisitor : public DfsHloRewriteVisitor {
public:
explicit DynamicShapeRemovingVisitor(
const OpSupportsDynamismHandler& op_supports_dynamism_handler,
DynamicDimensionInference* dynamic_dimension_inference,
const absl::flat_hash_set<absl::string_view>& execution_threads)
: op_supports_dynamism_handler_(op_supports_dynamism_handler),
dynamic_dimension_inference_(dynamic_dimension_inference),
execution_threads_(execution_threads) {}
absl::Status DefaultAction(HloInstruction* hlo) override;
absl::Status HandleCustomCall(HloInstruction* hlo) override;
absl::Status HandleTuple(HloInstruction* hlo) override;
absl::Status HandleGetTupleElement(HloInstruction* hlo) override;
absl::Status HandleParameter(HloInstruction* hlo) override;
absl::Status HandleInfeed(HloInstruction* hlo) override;
absl::Status HandleAsyncStart(HloInstruction* hlo) override;
absl::Status HandleAsyncUpdate(HloInstruction* hlo) override;
absl::Status HandleAsyncDone(HloInstruction* hlo) override;
absl::Status HandleWhile(HloInstruction* hlo) override;
absl::Status HandleConditional(HloInstruction* hlo) override;
absl::Status HandleGetDimensionSize(HloInstruction* hlo) override;
absl::Status HandleSetDimensionSize(HloInstruction* hlo) override;
static absl::StatusOr<bool> Run(
HloComputation* computation,
const OpSupportsDynamismHandler& op_supports_dynamism_handler,
DynamicDimensionInference* dynamic_shape_inference,
const absl::flat_hash_set<absl::string_view>& execution_threads,
bool require_dynamic_output) {
DynamicShapeRemovingVisitor visitor(op_supports_dynamism_handler,
dynamic_shape_inference,
execution_threads);
TF_RETURN_IF_ERROR(computation->Accept(&visitor));
if (require_dynamic_output) {
HloInstruction* root = computation->root_instruction();
if (dynamic_shape_inference->HasDynamicDimension(root)) {
TF_ASSIGN_OR_RETURN(HloInstruction * new_root,
visitor.ConvertToDynamic(root));
computation->set_root_instruction(new_root);
}
}
return visitor.changed();
}
private:
absl::StatusOr<HloInstruction*> ConvertToDynamic(HloInstruction* inst);
absl::Status ConvertOperandsToDynamic(HloInstruction* inst);
const OpSupportsDynamismHandler& op_supports_dynamism_handler_;
DynamicDimensionInference* dynamic_dimension_inference_;
absl::flat_hash_set<absl::string_view> execution_threads_;
};
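// Converts the output of `inst` to dynamic form by wrapping each array leaf
// that has a dynamic dimension in a "SliceToDynamic" custom call.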
absl::StatusOr<HloInstruction*> DynamicShapeRemovingVisitor::ConvertToDynamic(
HloInstruction* inst) {
  if (!dynamic_dimension_inference_->HasDynamicDimension(inst)) {
    // Nothing to convert. Constructing a StatusOr<HloInstruction*> from an OK
    // status is invalid, so return the instruction unchanged instead.
    return inst;
  }
MarkAsChanged();
Shape shape = dynamic_dimension_inference_->GetDynamicShape(inst);
auto gtes = TupleUtil::DisassembleTupleInstruction(inst);
gtes.ForEachMutableElement([&](const ShapeIndex& index,
HloInstruction** element) {
const Shape& subshape = ShapeUtil::GetSubshape(shape, index);
if (!subshape.IsArray()) {
return;
}
if (!dynamic_dimension_inference_->HasDynamicDimension(inst, index)) {
return;
}
std::vector<HloInstruction*> slice_operand;
slice_operand.push_back(*element);
for (int64_t i = 0; i < subshape.dimensions_size(); ++i) {
auto dimension_size =
dynamic_dimension_inference_->GetDynamicSize(inst, index, i);
if (dimension_size == nullptr) {
dimension_size = inst->AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR0<int32_t>(subshape.dimensions(i))));
}
slice_operand.push_back(dimension_size);
}
*element = inst->AddInstruction(HloInstruction::CreateCustomCall(
subshape, slice_operand, "SliceToDynamic"));
});
return TupleUtil::AssembleTupleInstruction(inst->parent(), std::move(gtes));
}
absl::Status DynamicShapeRemovingVisitor::ConvertOperandsToDynamic(
HloInstruction* inst) {
for (int64_t i = 0; i < inst->operand_count(); ++i) {
auto operand = inst->mutable_operand(i);
if (dynamic_dimension_inference_->HasDynamicDimension(operand)) {
TF_ASSIGN_OR_RETURN(auto dynamic_operand,
ConvertToDynamic(inst->mutable_operand(i)));
TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(i, dynamic_operand));
MarkAsChanged();
}
}
return absl::OkStatus();
}
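// Ops that require dynamism get dynamic operands; all other ops must not see
// dynamic inputs at this point.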
absl::Status DynamicShapeRemovingVisitor::DefaultAction(HloInstruction* hlo) {
OpDynamismSupport op_support = OpDynamismSupport::kNoSupport;
if (op_supports_dynamism_handler_) {
op_support = op_supports_dynamism_handler_(hlo);
}
if (op_support == OpDynamismSupport::kRequired) {
VLOG(1) << "op doesn't support static tensor: " << hlo->ToString();
return ConvertOperandsToDynamic(hlo);
}
const bool input_is_dynamic = absl::c_any_of(
hlo->operands(),
[](const HloInstruction* hlo) { return hlo->shape().is_dynamic(); });
if (!input_is_dynamic) {
return absl::OkStatus();
}
TF_RET_CHECK(op_support != OpDynamismSupport::kNoSupport)
<< "Dynamic input unexpectedly found for unsupported instruction: "
<< hlo->ToString();
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleGetTupleElement(
HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleTuple(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleInfeed(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleParameter(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleCustomCall(
HloInstruction* hlo) {
if (hlo->custom_call_target() == "SliceToDynamic" ||
hlo->custom_call_target() == "PadToStatic") {
return absl::OkStatus();
}
return DefaultAction(hlo);
}
absl::Status DynamicShapeRemovingVisitor::HandleAsyncStart(
HloInstruction* hlo) {
if (HloInstruction::IsThreadIncluded(hlo->async_execution_thread(),
execution_threads_)) {
return absl::OkStatus();
}
return ConvertOperandsToDynamic(hlo);
}
absl::Status DynamicShapeRemovingVisitor::HandleAsyncUpdate(
HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleAsyncDone(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleWhile(HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleConditional(
HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleGetDimensionSize(
HloInstruction* hlo) {
return absl::OkStatus();
}
absl::Status DynamicShapeRemovingVisitor::HandleSetDimensionSize(
HloInstruction* hlo) {
*hlo->mutable_shape() = hlo->operand(0)->shape();
hlo->mutable_shape()->set_dynamic_dimension(hlo->dimension(), false);
return absl::OkStatus();
}
}
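// Entry point of the pass: runs DCE and dynamic dimension inference, applies
// the op-specific rewrites above, pads all remaining dynamic operand
// dimensions with identity values, strips dynamic shapes from ops that can
// run statically, and finally replaces get/set-dimension-size ops.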
absl::StatusOr<bool> DynamicPadder::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(2) << "Pre DynamicPadder HLO:";
XLA_VLOG_LINES(2, module->ToString());
HloDCE dce;
TF_ASSIGN_OR_RETURN(bool changed, dce.Run(module, execution_threads));
TF_ASSIGN_OR_RETURN(
DynamicDimensionInference dynamic_dimension_inference,
DynamicDimensionInference::Run(
module, options_.op_supports_dynamism_handler,
options_.custom_call_handler, options_.shape_check_mode,
options_.assertion_generator, execution_threads));
changed |= dynamic_dimension_inference.changed();
std::vector<HloComputation*> computations =
module->MakeComputationPostOrder(execution_threads);
for (HloComputation* computation : computations) {
for (HloInstruction* inst : computation->MakeInstructionPostOrder()) {
OpDynamismSupport has_dynamism_support = OpDynamismSupport::kNoSupport;
if (options_.op_supports_dynamism_handler != nullptr) {
has_dynamism_support = options_.op_supports_dynamism_handler(inst);
}
if (has_dynamism_support != OpDynamismSupport::kNoSupport) {
continue;
}
if (inst->opcode() == HloOpcode::kConcatenate) {
TF_ASSIGN_OR_RETURN(
bool c, RewriteDynamicConcat(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->opcode() == HloOpcode::kReverse) {
TF_ASSIGN_OR_RETURN(bool c,
RewriteReverse(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->opcode() == HloOpcode::kSort) {
TF_ASSIGN_OR_RETURN(
bool c, RewriteDynamicSort(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->opcode() == HloOpcode::kReshape ||
inst->opcode() == HloOpcode::kDynamicReshape) {
TF_ASSIGN_OR_RETURN(
bool c, RewriteDynamicReshape(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsElementwiseBinary()) {
TF_ASSIGN_OR_RETURN(
bool c, RewriteDynamicBinaryOp(inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->opcode() == HloOpcode::kDynamicUpdateSlice) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicUpdateSlice(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicConvolutionInputGrad")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionInputGrad(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicConvolutionForward")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionForward(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicConvolutionKernelGrad")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicConvolutionKernelGrad(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicReduceWindowSamePadding")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicReduceWindowSamePadding(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
if (inst->IsCustomCall("DynamicSelectAndScatterSamePadding")) {
TF_ASSIGN_OR_RETURN(bool c, RewriteDynamicSelectAndScatterSamePadding(
inst, &dynamic_dimension_inference));
changed |= c;
continue;
}
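      // Generic path: pad every dynamic operand dimension with this op's
      // identity value (if one exists) so the op can run on static shapes.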
for (int64_t operand_num = 0; operand_num < inst->operand_count();
++operand_num) {
HloInstruction* original_operand = inst->mutable_operand(operand_num);
HloInstruction* operand = original_operand;
if (!operand->shape().IsArray()) {
continue;
}
for (int64_t input_dim = 0; input_dim < operand->shape().rank();
++input_dim) {
HloInstruction* operand_dynamic_size =
dynamic_dimension_inference.GetDynamicSize(original_operand, {},
input_dim);
if (operand_dynamic_size == nullptr) {
continue;
}
VLOG(2) << "Has dynamic dimension of operand" << operand_num << " @"
<< input_dim;
if (ShouldSkipPadOnOperand(inst, operand_num, input_dim,
execution_threads)) {
continue;
}
TF_ASSIGN_OR_RETURN(HloInstruction * identity_value,
ChooseIdentityValue(inst, operand_num));
if (identity_value == nullptr) {
continue;
}
HloInstruction* padded = PadWithScalar(
operand, input_dim, operand_dynamic_size, identity_value);
TF_RETURN_IF_ERROR(inst->ReplaceOperandWith(operand_num, padded));
operand = inst->mutable_operand(operand_num);
changed = true;
}
}
}
}
auto call_graph = CallGraph::Build(module, execution_threads);
computations = module->MakeComputationPostOrder(execution_threads);
for (auto it = computations.rbegin(); it != computations.rend(); ++it) {
HloComputation* computation = *it;
if (!call_graph->CanReach(module->entry_computation(), computation)) {
continue;
}
bool require_dynamic_output = options_.slice_dynamic_output &&
computation == module->entry_computation();
changed |= require_dynamic_output;
TF_ASSIGN_OR_RETURN(bool c,
DynamicShapeRemovingVisitor::Run(
computation, options_.op_supports_dynamism_handler,
&dynamic_dimension_inference, execution_threads,
require_dynamic_output));
changed |= c;
}
if (changed) {
dynamic_padding_gauge->GetCell()->Set(changed);
module->set_is_dynamic(true);
}
for (auto* computation : module->computations(execution_threads)) {
if (!call_graph->CanReach(module->entry_computation(), computation)) {
continue;
}
for (auto instruction : computation->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(
bool c, ReplaceGetSize(instruction, &dynamic_dimension_inference));
changed |= c;
}
}
for (auto* computation : module->computations(execution_threads)) {
if (!call_graph->CanReach(module->entry_computation(), computation)) {
continue;
}
for (auto instruction : computation->MakeInstructionPostOrder()) {
TF_ASSIGN_OR_RETURN(bool c, ReplaceSetSize(instruction));
changed |= c;
TF_ASSIGN_OR_RETURN(c, ReplaceSetBound(instruction));
changed |= c;
}
}
if (changed) {
HloDCE dce;
TF_ASSIGN_OR_RETURN(bool c, dce.Run(module, execution_threads));
changed |= c;
}
VLOG(2) << "Post DynamicPadder HLO:";
XLA_VLOG_LINES(2, module->ToString());
return changed;
}
} | #include "xla/service/dynamic_padder.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_replace.h"
#include "absl/types/span.h"
#include "xla/error_spec.h"
#include "xla/hlo/builder/xla_builder.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/dynamic_dimension_simplifier.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/llvm_irgen_test_base.h"
#include "xla/tests/test_macros.h"
#include "xla/tsl/lib/core/status_test_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test_benchmark.h"
#include "tsl/protobuf/error_codes.pb.h"
namespace xla {
namespace {
namespace m = ::xla::match;
namespace op = xla::testing::opcode_matchers;
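// Test-only dynamism handler: the "OpWithDynamicLowering" custom call is the
// one op that requires dynamic input/output shapes.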
OpDynamismSupport OpHasDynamismSupport(HloInstruction* hlo) {
if (hlo->opcode() != HloOpcode::kCustomCall) {
return OpDynamismSupport::kNoSupport;
}
if (hlo->custom_call_target() == "OpWithDynamicLowering") {
return OpDynamismSupport::kRequired;
}
return OpDynamismSupport::kNoSupport;
}
absl::Status CustomCallDynamicDimensionInference(
HloInstruction* hlo, DynamicDimensionInference* inferencer) {
if (hlo->custom_call_target() == "OpWithDynamicLowering") {
if (hlo->shape().IsTuple()) {
HloInstruction* dynamic_size =
inferencer->GetDynamicSize(hlo->mutable_operand(0), {1}, 0);
inferencer->SetDynamicSize(hlo, {1}, 0, dynamic_size);
} else {
HloInstruction* dynamic_size =
inferencer->GetDynamicSize(hlo->mutable_operand(0), {}, 0);
inferencer->SetDynamicSize(hlo, {}, 0, dynamic_size);
}
}
return absl::OkStatus();
}
class DynamicPadderTest : public HloTestBase {
protected:
DynamicPadderTest() : HloTestBase() { module_ = CreateNewVerifiedModule(); }
std::unique_ptr<HloModule> GetHloModule(const std::string& hlo_text) {
std::unique_ptr<HloModule> module =
ParseAndReturnVerifiedModule(hlo_text).value();
return module;
}
absl::StatusOr<bool> RunPadder(
bool slice_dynamic_output = false,
OpSupportsDynamismHandler op_supports_dynamism_handler =
OpHasDynamismSupport,
DynamicDimensionInference::CustomCallInferenceHandler
custom_call_handler = CustomCallDynamicDimensionInference) {
DynamicPadderOptions options;
options.slice_dynamic_output = slice_dynamic_output;
options.op_supports_dynamism_handler =
std::move(op_supports_dynamism_handler);
options.custom_call_handler = std::move(custom_call_handler);
DynamicPadder padder(std::move(options));
TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&padder, module_.get()));
if (!changed) return false;
TupleSimplifier tuple_simplifier;
TF_RETURN_IF_ERROR(RunHloPass(&tuple_simplifier, module_.get()).status());
AlgebraicSimplifier alg_simplifier(AlgebraicSimplifierOptions{});
TF_RETURN_IF_ERROR(RunHloPass(&alg_simplifier, module_.get()).status());
return true;
}
void ExpectPadded(const HloInstruction* inst) {
EXPECT_THAT(inst,
op::Select(op::Lt(op::Iota(), op::Broadcast(op::Parameter())),
::testing::_, op::Broadcast()));
}
HloComputation* GetScalarAddComputation() {
auto embedded_builder = HloComputation::Builder("add");
auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {}), "lhs"));
auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {}), "rhs"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs));
return module_->AddEmbeddedComputation(embedded_builder.Build());
}
std::unique_ptr<HloModule> module_;
const Shape scalar_shape_ = ShapeUtil::MakeShape(S32, {});
};
class MemoryAlignmentTest : public HloTestBase {};
TEST_F(MemoryAlignmentTest, DISABLED_ON_CPU(TestDataTypeFP16)) {
const std::string hlo_text = R"(
HloModule TestDataTypeFP16
update_add (p0: f16[], p1: f16[]) -> f16[] {
p0 = f16[] parameter(0)
p1 = f16[] parameter(1)
ROOT out = f16[] add(p0, p1)
}
ENTRY main () -> f16[<=1,1] {
c1 = s32[1]{0} constant({1})
c2 = f16[1,1]{1,0} constant({ {0.099976} })
shape = s32[] reshape(s32[1]{0} c1)
dim_size = f16[<=1,1]{1,0} set-dimension-size(f16[1,1]{1,0} c2, s32[] shape),
dimensions={0}
ROOT out = f16[<=1,1]{1,0} scatter(f16[<=1,1]{1,0} dim_size, s32[1]{0} c1, f16[1,1]{1,0} c2),
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
to_apply=update_add
}
)";
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5}));
}
TEST_F(DynamicPadderTest, ReduceTest) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {1, 2, 2});
auto reduce_shape = ShapeUtil::MakeShape(F32, {2});
auto dynamic_shape =
ShapeUtil::MakeShape(F32, {1, 2, 2}, {false, false, true});
auto data_param = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "data_param"));
auto* size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
data_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, data_param, size_param, 2));
auto negate = builder.AddInstruction(HloInstruction::CreateUnary(
dynamic_shape, HloOpcode::kNegate, data_param));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
auto reduce = builder.AddInstruction(HloInstruction::CreateReduce(
reduce_shape, negate, init, {0, 2}, GetScalarAddComputation()));
EXPECT_FALSE(module_->is_dynamic());
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunPadder().status());
ExpectPadded(reduce->operand(0));
EXPECT_TRUE(module_->is_dynamic());
}
TEST_F(DynamicPadderTest, DynamicLoweringTest) {
const std::string hlo_text = R"(
HloModule DynamicLowering
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const),
dimensions={0}
custom-call.1 = s32[<=5] custom-call(param_padded),
custom_call_target="OpWithDynamicLowering"
custom-call.2 = s32[<=5] custom-call(custom-call.1),
custom_call_target="OpWithDynamicLowering"
ROOT negate = s32[<=5] negate(custom-call.2)
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
auto custom_call_1 =
module_->entry_computation()->GetInstructionWithName("custom-call.1");
auto custom_call_2 =
module_->entry_computation()->GetInstructionWithName("custom-call.2");
HloInstruction* slice_to_dynamic = custom_call_1->mutable_operand(0);
  ASSERT_EQ(slice_to_dynamic->opcode(), HloOpcode::kCustomCall);
  ASSERT_EQ(slice_to_dynamic->custom_call_target(), "SliceToDynamic");
ASSERT_EQ(custom_call_2->user_count(), 1);
HloInstruction* pad_to_static = custom_call_2->users()[0];
  ASSERT_EQ(pad_to_static->opcode(), HloOpcode::kCustomCall);
  ASSERT_EQ(pad_to_static->custom_call_target(), "PadToStatic");
slice_to_dynamic = module_->entry_computation()->root_instruction();
  ASSERT_EQ(slice_to_dynamic->opcode(), HloOpcode::kCustomCall);
  ASSERT_EQ(slice_to_dynamic->custom_call_target(), "SliceToDynamic");
}
TEST_F(DynamicPadderTest, DynamicLoweringTestTupleInput) {
const std::string hlo_text = R"(
HloModule DynamicLowering
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const),
dimensions={0}
tuple_arg = (s32[], s32[<=5]) tuple(const, param_padded)
custom-call.1 = (s32[], s32[<=5]) custom-call(tuple_arg),
custom_call_target="OpWithDynamicLowering"
custom-call.2 = (s32[], s32[<=5]) custom-call(custom-call.1),
custom_call_target="OpWithDynamicLowering"
data = s32[<=5]{0} get-tuple-element(custom-call.2), index=1
ROOT negate = s32[<=5] negate(data)
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
auto* root = module_->entry_computation()->root_instruction();
EXPECT_THAT(root, op::CustomCall(
{"SliceToDynamic"}, op::Negate(),
op::GetTupleElement(op::CustomCall({"PadToStatic"}))));
HloInstruction* negate = root->mutable_operand(0);
EXPECT_THAT(
negate,
op::Negate(op::GetTupleElement(op::CustomCall(
{"PadToStatic"}, op::GetTupleElement(op::CustomCall(
{"OpWithDynamicLowering"}, ::testing::_))))));
auto custom_call_1 =
module_->entry_computation()->GetInstructionWithName("custom-call.1");
EXPECT_THAT(custom_call_1,
op::CustomCall({"OpWithDynamicLowering"},
op::Tuple(op::Constant(),
op::CustomCall({"SliceToDynamic"}))));
}
TEST_F(DynamicPadderTest, DynamicOutputNestedTuple) {
const std::string hlo_text = R"(
HloModule DynamicLowering
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
const2 = s32[] constant(4)
param_padded = s32[<=5] set-dimension-size(param, const),
dimensions={0}
tuple0 = (s32[], s32[<=5]) tuple(const, param_padded)
ROOT tuple1 = (s32[], (s32[], s32[<=5])) tuple(const2, tuple0)
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
TF_ASSERT_OK(TupleSimplifier().Run(module_.get()).status());
XLA_LOG_LINES(INFO, module_->ToString());
auto* root = module_->entry_computation()->root_instruction();
EXPECT_THAT(root, op::Tuple(op::Constant(), op::Tuple()));
HloInstruction* nested_tuple = root->mutable_operand(1);
EXPECT_THAT(nested_tuple,
op::Tuple(op::Constant(), op::CustomCall({"SliceToDynamic"})));
}
TEST_F(DynamicPadderTest, ConvolutionTest) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim});
auto xy_shape_dynamic =
ShapeUtil::MakeShape(F32, {xdim, ydim}, {false, true});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
dnums.set_kernel_input_feature_dimension(0);
dnums.set_kernel_output_feature_dimension(1);
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(1);
dnums.set_output_feature_dimension(0);
Window window;
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
xy_shape_dynamic, a_param, size_param, 1));
auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
zx_shape, a_param, b_param, 1,
1, window, dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunPadder().status());
ExpectPadded(conv->operand(0));
}
TEST_F(DynamicPadderTest, ConvolutionNoPad) {
auto builder = HloComputation::Builder(TestName());
constexpr int xdim = 3;
constexpr int ydim = 2;
constexpr int zdim = 1;
auto xy_shape = ShapeUtil::MakeShape(F32, {xdim, ydim});
auto yz_shape = ShapeUtil::MakeShape(F32, {ydim, zdim});
auto zx_shape = ShapeUtil::MakeShape(F32, {zdim, xdim}, {false, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {xdim, ydim}, {true, false});
auto* a_param = builder.AddInstruction(HloInstruction::CreateParameter(
0, xy_shape, "A"));
auto* b_param = builder.AddInstruction(HloInstruction::CreateParameter(
1, yz_shape, "B"));
auto* size_param = builder.AddInstruction(HloInstruction::CreateParameter(
2, scalar_shape_, "size_param"));
a_param = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, a_param, size_param, 0));
auto dnums = XlaBuilder::CreateDefaultConvDimensionNumbers(0);
dnums.set_kernel_input_feature_dimension(0);
dnums.set_kernel_output_feature_dimension(1);
dnums.set_input_batch_dimension(0);
dnums.set_output_batch_dimension(1);
dnums.set_output_feature_dimension(0);
Window window;
auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
zx_shape, a_param, b_param, 1,
1, window, dnums,
HloTestBase::DefaultPrecisionConfig(2)));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunPadder().status());
EXPECT_THAT(conv->operand(0), op::Parameter());
}
TEST_F(DynamicPadderTest, ReduceWindowNoPadForTrivialWindow) {
auto builder = HloComputation::Builder(TestName());
auto input_shape = ShapeUtil::MakeShape(F32, {4, 5});
auto reduce_shape = ShapeUtil::MakeShape(F32, {3, 5}, {false, true});
auto dynamic_shape = ShapeUtil::MakeShape(F32, {4, 5}, {false, true});
auto input = builder.AddInstruction(
HloInstruction::CreateParameter(0, input_shape, "input"));
auto* size_param = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape_, "size_param"));
input = builder.AddInstruction(HloInstruction::CreateSetDimensionSize(
dynamic_shape, input, size_param, 1));
auto init = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0)));
TF_ASSERT_OK_AND_ASSIGN(Window window, ParseWindow("size=2x1 pad=0_0x0_0"));
auto output = builder.AddInstruction(HloInstruction::CreateReduceWindow(
reduce_shape, input, init, window, GetScalarAddComputation()));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(RunPadder().status());
EXPECT_THAT(output->operand(0), op::Parameter());
}
TEST_F(DynamicPadderTest, VariadicReduceWindowNoPadForTrivialWindow) {
const std::string hlo_text = R"(
HloModule VariadicReduceWindowNoPadForTrivialWindow
add_f32 (a: f32[], b: s32[], c: f32[], d: s32[]) -> (f32[], s32[]) {
a = f32[] parameter(0)
b = s32[] parameter(1)
c = f32[] parameter(2)
d = s32[] parameter(3)
add.0 = f32[] add(a, c)
add.1 = s32[] add(b, d)
ROOT out = tuple(add.0, add.1)
}
ENTRY main {
input.0 = f32[4, 5] parameter(0)
input.1 = s32[4, 5] parameter(1)
size_param.0 = s32[] parameter(2)
size_param.1 = s32[] parameter(3)
input_dynamic.0 = f32[4,<=5] set-dimension-size(input.0, size_param.0), dimensions={1}
input_dynamic.1 = s32[4,<=5] set-dimension-size(input.1, size_param.0), dimensions={1}
init.0 = f32[] constant(0.0)
init.1 = s32[] constant(0)
ROOT output = (f32[3, <=5], s32[3, <=5]) reduce-window(input_dynamic.0, input_dynamic.1, init.0, init.1), window={size=2x1 pad=0_0x0_0}, to_apply=add_f32
}
)";
const int kNumParams = 2;
module_ = ParseAndReturnVerifiedModule(hlo_text).value();
TF_ASSERT_OK(RunPadder().status());
for (int i = 0; i < kNumParams; ++i) {
EXPECT_THAT(module_->entry_computation()->root_instruction()->operand(i),
op::Parameter());
}
}
TEST_F(DynamicPadderTest, PadS8ToS32Dot) {
const std::string hlo_text = R"(
HloModule test
ENTRY test {
a = s8[<=16,32] parameter(0)
b = s8[32,64] parameter(1)
ROOT root = s32[<=16,64] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
EXPECT_THAT(module_->entry_computation()->root_instruction(),
GmockMatch(m::CustomCall({"SliceToDynamic"},
m::Dot(m::Op().WithShape(S8, {16, 32}),
m::Op().WithShape(S8, {32, 64}))
.WithShape(S32, {16, 64}),
m::Op(), m::Op())));
}
TEST_F(DynamicPadderTest, PadToStaticForCustomCall) {
const std::string hlo_text = R"(
HloModule test
ENTRY test {
a = f32[64] parameter(0)
ROOT c = f32[<=128] custom-call(a),
custom_call_target="UnknownOp"
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
EXPECT_THAT(module_->entry_computation()->root_instruction(),
GmockMatch(m::CustomCall({"UnknownOp"})));
}
TEST_F(DynamicPadderTest, WhileLoopDynamicShapeChangeToStatic) {
const std::string hlo_text = R"(
HloModule WhileLoopDynamicShapeChangeToStatic
%cond_wrapper.19447 {
param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0)
%get-tuple-element.184 = s32[] get-tuple-element(param), index=0
%get-tuple-element.185 = s32[] get-tuple-element(param), index=1
ROOT %compare.28 = pred[] compare(s32[] %get-tuple-element.184, s32[] %get-tuple-element.185), direction=LT
}
%while_body_78894_grad_83711__.18882 {
param = (s32[], s32[], f32[], f32[<=32,216]{1,0}) parameter(0)
%get-tuple-element.184 = s32[] get-tuple-element(param), index=0
%get-tuple-element.185 = s32[] get-tuple-element(param), index=1
%add.1 = s32[] add(get-tuple-element.184, get-tuple-element.184)
%gte.2 = f32[] get-tuple-element(param), index=2
%broadcast.19389 = f32[32,216]{1,0} broadcast(f32[] %gte.2), dimensions={}
%constant.32 = s32[] constant(32)
%set-dimension-size = f32[<=32,216]{1,0} set-dimension-size(f32[32,216]{1,0} %broadcast.19389, s32[] %constant.32), dimensions={0}
ROOT tuple = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(add.1, %get-tuple-element.185, %gte.2, %set-dimension-size)
}
ENTRY main {
param = f32[] parameter(0)
param.1 = f32[<=32,216]{1,0} parameter(1)
const = s32[] constant(3)
const2 = s32[] constant(4)
%tuple.18877 = (s32[], s32[], f32[], f32[<=32,216]{1,0}) tuple(const, const2, param, param.1)
%while.19451 = (s32[], s32[], f32[], f32[<=32,216]{1,0})
while((s32[], s32[], f32[], f32[<=32,216]{1,0})
%tuple.18877), condition=%cond_wrapper.19447, body=%while_body_78894_grad_83711__.18882
ROOT result = f32[<=32,216]{1,0} get-tuple-element(while.19451), index=3
}
)";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
XLA_LOG_LINES(INFO, module_->ToString());
auto* root = module_->entry_computation()->root_instruction();
EXPECT_EQ(root->shape(), ShapeUtil::MakeShape(F32, {32, 216}, {true, false}));
HloInstruction* while_inst = nullptr;
for (HloInstruction* inst :
module_->entry_computation()->MakeInstructionPostOrder()) {
if (inst->opcode() == HloOpcode::kWhile) {
ASSERT_EQ(while_inst, nullptr)
<< "while_inst: " << while_inst->name() << ", inst: " << inst->name();
while_inst = inst;
}
}
EXPECT_EQ(while_inst->shape(),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeScalarShape(S32),
ShapeUtil::MakeScalarShape(S32),
ShapeUtil::MakeScalarShape(F32),
ShapeUtil::MakeShape(F32, {32, 216}),
ShapeUtil::MakeScalarShape(S32)}));
}
TEST_F(DynamicPadderTest, WhileLoopCarriesRequiredDynamicShape) {
const std::string hlo_text = R"(
HloModule WhileLoopCarriesRequiredDynamicShape
%cond {
param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0)
current = s32[] get-tuple-element(param), index=5
last = s32[] get-tuple-element(param), index=6
ROOT result = pred[] compare(current, last), direction=LT
}
%body {
param = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) parameter(0)
var = f32[1024] get-tuple-element(param), index=0
input0 = f32[<=64] get-tuple-element(param), index=1
grad0 = f32[32] get-tuple-element(param), index=2
input1 = f32[<=64] get-tuple-element(param), index=3
act1 = f32[32] get-tuple-element(param), index=4
grad1 = f32[32] custom-call(act1), custom_call_target="ComputeGradients"
var1 = f32[1024] custom-call(var, input0, grad0), custom_call_target="ApplyGradients", output_to_operand_aliasing={{}: (0, {})}
token2 = token[] get-tuple-element(param), index=7
infeed2 = (f32[<=64], token[]) infeed(token2)
input2 = f32[<=64] get-tuple-element(infeed2), index=0
act2 = f32[32] custom-call(var1, input2), custom_call_target="ComputeActivations"
current = s32[] get-tuple-element(param), index=5
constant1 = s32[] constant(1)
add = s32[] add(current, constant1)
last = s32[] get-tuple-element(param), index=6
token3 = token[] get-tuple-element(infeed2), index=1
ROOT result = (f32[1024], f32[<=64], f32[32], f32[<=64], f32[32], s32[], s32[], token[]) tuple(var1, input1, grad1, input2, act2, add, last, token3)
}
ENTRY main {
last = s32[] parameter(0)
var = f32[1024] parameter(1)
token0 = token[] after-all()
infeed0 = (f32[<=64], token[]) infeed(token0)
input0 = f32[<=64] get-tuple-element(infeed0), index=0
act0 = f32[32] custom-call(var, input0), custom_call_target="ComputeActivations"
grad0 = f32[32] custom-call(act0), custom_call_target="ComputeGradients"
token1 = token[] get-tuple-element(infeed0), index=1
infeed1 = (f32[<=64], token[]) infeed(token1)
input1 = f32[<=64] get-tuple-element(infeed1), index=0
act1 = f32[32] custom-call(var, input1), custom_call_target="ComputeActivations"
token2 = token[] get-tuple-element(infeed1), index=1
zero = s32[] constant(0)
tuple = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) tuple(var, input0, grad0, input1, act1, zero, last, token2)
while = (f32[1024], f32[<=64], f32[32]{0}, f32[<=64], f32[32]{0}, s32[], s32[], token[]) while(tuple), condition=%cond, body=%body
ROOT result = f32[1024] get-tuple-element(while), index=0
}
)";
module_ = GetHloModule(hlo_text);
auto op_supports_dynamism = [](HloInstruction* hlo) {
if (hlo->opcode() != HloOpcode::kCustomCall) {
return OpDynamismSupport::kNoSupport;
}
if (hlo->custom_call_target() == "ComputeActivations" ||
hlo->custom_call_target() == "ApplyGradients") {
return OpDynamismSupport::kRequired;
}
return OpDynamismSupport::kNoSupport;
};
auto custom_call_handler = [](HloInstruction* hlo,
DynamicDimensionInference* inference) {
return absl::OkStatus();
};
TF_ASSERT_OK(
RunPadder(
true,
std::move(op_supports_dynamism),
std::move(custom_call_handler))
.status());
XLA_VLOG_LINES(1, module_->ToString());
for (HloComputation* computation : module_->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kCustomCall) {
EXPECT_NE(instruction->custom_call_target(), "PadToStatic");
EXPECT_NE(instruction->custom_call_target(), "SliceToDynamic");
if (instruction->custom_call_target() == "ComputeActivations") {
EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic());
} else if (instruction->custom_call_target() == "ApplyGradients") {
EXPECT_TRUE(instruction->operand(1)->shape().is_dynamic());
}
} else if (instruction->opcode() == HloOpcode::kWhile) {
const Shape& shape = instruction->shape();
EXPECT_TRUE(shape.tuple_shapes(1).is_dynamic());
EXPECT_TRUE(shape.tuple_shapes(3).is_dynamic());
}
}
}
}
TEST_F(DynamicPadderTest, HandleReshapeCheckPastReshape) {
auto hlo_text = R"(
HloModule ReshapeDynamicDimension
ENTRY main {
p0 = f32[4,511,432]{2,1,0} parameter(0)
p1 = s32[] parameter(1)
p2 = f32[432,337]{1,0:T(8,128)} parameter(2)
p0_dynamic = f32[<=4,511,432] set-dimension-size(p0, p1), dimensions={0}
reshape.4179 = f32[<=2044,432]{1,0} reshape(p0_dynamic)
dot.4180 = f32[<=2044,337]{1,0} dot(reshape.4179, p2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
transpose.4181 = f32[<=2044,337]{1,0} transpose(dot.4180), dimensions={0,1}
ROOT reshape.4183 = f32[<=4,511,337]{2,1,0} reshape(transpose.4181)
})";
module_ = GetHloModule(hlo_text);
TF_ASSERT_OK(RunPadder(true).status());
VLOG(3) << module_->ToString();
CHECK(module_->is_dynamic());
CHECK(module_->entry_computation()
->root_instruction()
->shape()
.is_dynamic_dimension(0));
}
class ExecutionTest : public HloTestBase {
protected:
std::unique_ptr<HloModule> GetHloModule(const std::string& hlo_text) {
std::unique_ptr<HloModule> module =
ParseAndReturnVerifiedModule(hlo_text).value();
return module;
}
absl::StatusOr<Literal> PadAndExecute(std::unique_ptr<HloModule> module,
absl::Span<Literal* const> arguments,
bool slice_dynamic_output = true) {
if (!slice_dynamic_output) {
auto new_config = module->config();
new_config.mutable_entry_computation_layout()
->mutable_result_layout()
->ClearDynamicShape();
module->set_config(new_config);
}
DynamicPadderOptions options;
options.slice_dynamic_output = slice_dynamic_output;
DynamicPadder padder(options);
TF_CHECK_OK(padder.Run(module.get()).status());
HloDCE dce;
TF_CHECK_OK(dce.Run(module.get()).status());
    return Execute(std::move(module), arguments);
}
};
XLA_TEST_F(ExecutionTest, ScatterUpdate) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[3,3] parameter(0)
indices = s32[INDICES_BOUND] parameter(1)
updates = s32[INDICES_BOUND,3] parameter(2)
dynamic_size = s32[] parameter(3)
indices_dynamic = s32[<=INDICES_BOUND] set-dimension-size(indices, dynamic_size), dimensions={0}
updates_dynamic = s32[<=INDICES_BOUND,3] set-dimension-size(updates, dynamic_size), dimensions={0}
ROOT scatter = s32[3,3] scatter(operand, indices_dynamic, updates_dynamic),
to_apply=update_s32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
const std::string hlo_text_not_padded =
absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "2"}});
auto module_not_padded = GetHloModule(hlo_text_not_padded);
Literal operand =
LiteralUtil::CreateR2<int32_t>({{1, 2, 3}, {4, 5, 6}, {7, 8, 9}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<int32_t>({{10, 20, 30}, {70, 80, 90}});
Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(2);
Literal not_padded =
ExecuteAndTransfer(std::move(module_not_padded),
{&operand, &scatter_indices, &updates, &dynamic_size});
const std::string hlo_text_padded =
absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "4"}});
auto module_padded = GetHloModule(hlo_text_padded);
Literal scatter_indices_padded = LiteralUtil::CreateR1<int32_t>({0, 2, 0, 4});
Literal updates_padded = LiteralUtil::CreateR2<int32_t>(
{{10, 20, 30}, {70, 80, 90}, {30, 22, 11}, {-1, 20, -1}});
DynamicPadder padder;
TF_CHECK_OK(padder.Run(module_padded.get()).status());
TF_ASSERT_OK_AND_ASSIGN(Literal padded,
PadAndExecute(std::move(module_padded),
{&operand, &scatter_indices_padded,
&updates_padded, &dynamic_size}));
EXPECT_EQ(padded, not_padded);
}
XLA_TEST_F(ExecutionTest, ScatterUpdateWindowDim) {
const std::string hlo_text = R"(
HloModule ScatterUpdateWindowDim
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
operand = s32[1,2,3] parameter(0)
indices = s32[1] parameter(1)
updates = s32[2,3,1] parameter(2)
dynamic_size = s32[] constant(1)
operand_dynamic = s32[1, <=2, 3] set-dimension-size(operand, dynamic_size),
dimensions={1}
updates_dynamic = s32[<=2, 3, 1] set-dimension-size(updates, dynamic_size),
dimensions={0}
ROOT scatter = s32[1, <=2, 3] scatter(operand_dynamic, indices, updates_dynamic),
to_apply=update_s32,
update_window_dims={0, 1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
auto hlo_module = GetHloModule(hlo_text);
Literal operand = LiteralUtil::CreateR3<int32_t>({{{0, 0, 0}, {0, 0, 0}}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0});
Literal updates =
LiteralUtil::CreateR3<int32_t>({{{10}, {20}, {30}}, {{70}, {80}, {90}}});
TF_ASSERT_OK_AND_ASSIGN(
Literal padded,
PadAndExecute(std::move(hlo_module),
{&operand, &scatter_indices, &updates}, false));
Literal expected =
LiteralUtil::CreateR3<int32_t>({{{10, 20, 30}, {70, 80, 90}}});
EXPECT_EQ(padded, expected);
}
XLA_TEST_F(ExecutionTest, ScatterUpdateF32) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_f32 (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
ROOT rhs = f32[] parameter(1)
}
ENTRY main {
operand = f32[3,3] parameter(0)
indices = s32[2] parameter(1)
updates = f32[2,3] parameter(2)
dynamic_size = s32[] parameter(3)
indices_dynamic = s32[<=2] set-dimension-size(indices, dynamic_size), dimensions={0}
updates_dynamic = f32[<=2,3] set-dimension-size(updates, dynamic_size), dimensions={0}
ROOT scatter = f32[3,3] scatter(operand, indices_dynamic, updates_dynamic),
to_apply=update_f32,
update_window_dims={1},
inserted_window_dims={0},
scatter_dims_to_operand_dims={0},
index_vector_dim=1
}
)";
auto module_not_padded = GetHloModule(hlo_text);
Literal operand = LiteralUtil::CreateR2<float>(
{{1.0, 2.0, 3.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}});
Literal scatter_indices = LiteralUtil::CreateR1<int32_t>({0, 2});
Literal updates =
LiteralUtil::CreateR2<float>({{10.0, 20.0, 30.0}, {70.0, 80.0, 90.0}});
Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(1);
auto module_padded = GetHloModule(hlo_text);
DynamicPadder padder;
TF_CHECK_OK(padder.Run(module_padded.get()).status());
TF_ASSERT_OK_AND_ASSIGN(
Literal not_padded,
PadAndExecute(std::move(module_padded),
{&operand, &scatter_indices, &updates, &dynamic_size}));
EXPECT_EQ(LiteralUtil::CreateR2<float>(
{{10.0, 20.0, 30.0}, {4.0, 5.0, 6.0}, {7.0, 8.0, 9.0}}),
not_padded);
}
XLA_TEST_F(ExecutionTest, WholeDimensionGather) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[3, 2, 1] parameter(0)
size = s32[] constant(1)
param_padded = s32[3, <=2, 1] set-dimension-size(param, size), dimensions={1}
index = s32[] constant(1)
gather = s32[<=2,1]{1,0} gather(param_padded, index),
offset_dims={0,1},
collapsed_slice_dims={0},
start_index_map={0},
index_vector_dim=0,
slice_sizes={1,2,1}
init = s32[] constant(0)
ROOT reduce = s32[] reduce(gather, init),
dimensions={0, 1},
to_apply=update_s32
}
)";
Literal operand =
LiteralUtil::CreateR3<int32_t>({{{1}, {2}}, {{3}, {4}}, {{5}, {6}}});
auto module = GetHloModule(hlo_text);
DynamicPadder padder;
TF_CHECK_OK(padder.Run(module.get()).status());
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(3);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, TwoDimensionReduce) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[INDICES_BOUND, INDICES_BOUND] parameter(0)
dynamic_size = s32[] parameter(1)
param_0 = s32[<=INDICES_BOUND,INDICES_BOUND] set-dimension-size(param, dynamic_size), dimensions={0}
param_1 = s32[<=INDICES_BOUND,INDICES_BOUND] set-dimension-size(param_0, dynamic_size), dimensions={1}
const = s32[] constant(0)
ROOT reduce = s32[] reduce(param_1, const),
dimensions={0, 1},
to_apply=update_s32
}
)";
const std::string hlo_text_not_padded =
absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "2"}});
auto module_not_padded = GetHloModule(hlo_text_not_padded);
Literal operand = LiteralUtil::CreateR2<int32_t>({{1, 2}, {4, 5}});
Literal dynamic_size = LiteralUtil::CreateR0<int32_t>(2);
Literal not_padded = ExecuteAndTransfer(std::move(module_not_padded),
{&operand, &dynamic_size});
const std::string hlo_text_padded =
absl::StrReplaceAll(hlo_text, {{"INDICES_BOUND", "4"}});
auto module_padded = GetHloModule(hlo_text_padded);
Literal operand_padded = LiteralUtil::CreateR2<int32_t>(
{{1, 2, 3, 4}, {4, 5, 6, 7}, {1, 2, 3, 4}, {4, 5, 6, 7}});
DynamicPadder padder;
TF_CHECK_OK(padder.Run(module_padded.get()).status());
TF_ASSERT_OK_AND_ASSIGN(Literal padded,
PadAndExecute(std::move(module_padded),
{&operand_padded, &dynamic_size}));
EXPECT_EQ(padded, not_padded);
}
XLA_TEST_F(ExecutionTest, DynamicDimensionClamp) {
const std::string hlo_text = R"(
HloModule TensorFlowTenaryV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}
clamp = s32[<=5] clamp(param_padded, param_padded, param_padded)
init = s32[] constant(0)
ROOT reduce = s32[] reduce(clamp, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(6);
EXPECT_EQ(result, expected);
}
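// Concatenation drops the padded tails: operands 0 and 2 contribute only
// their two live elements while the fully static operand 1 contributes all
// three, giving a result with dynamic size 7.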
XLA_TEST_F(ExecutionTest, DynamicConcat) {
const std::string hlo_text = R"(
HloModule DynamicConcat
ENTRY main {
param_0 = s32[3] parameter(0)
param_1 = s32[3] parameter(1)
param_2 = s32[3] parameter(2)
size = s32[] constant(2)
param_padded_0 = s32[<=3] set-dimension-size(param_0, size), dimensions={0}
param_padded_2 = s32[<=3] set-dimension-size(param_2, size), dimensions={0}
ROOT %concatenate = s32[<=9]
concatenate(s32[<=3] param_padded_0, s32[<=3] param_1, s32[<=3] param_padded_2),
dimensions={0}
}
)";
Literal operand_0 =
LiteralUtil::CreateR1<int32_t>({1, 2, -1});
Literal operand_1 =
LiteralUtil::CreateR1<int32_t>({3, 4, 5});
Literal operand_2 =
LiteralUtil::CreateR1<int32_t>({6, 7, -1});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
PadAndExecute(std::move(module), {&operand_0, &operand_1, &operand_2},
false));
result.SetDynamicSize(0, 7);
Literal expected = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5, 6, 7});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReverseSingleDim) {
const std::string hlo_text = R"(
HloModule DynamicConcat
ENTRY main {
param_0 = s32[3] parameter(0)
size = s32[] constant(2)
param_padded_0 = s32[<=3] set-dimension-size(param_0, size), dimensions={0}
ROOT %reverse = s32[<=3]
reverse(s32[<=3] param_padded_0),
dimensions={0}
}
)";
Literal operand_0 =
LiteralUtil::CreateR1<int32_t>({1, 2, -1});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(
Literal result, PadAndExecute(std::move(module), {&operand_0}, false));
result.SetDynamicSize(0, 2);
Literal expected = LiteralUtil::CreateR1<int32_t>({2, 1});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReverseMultiDims) {
const std::string hlo_text = R"(
HloModule DynamicConcat
ENTRY main {
param_0 = s32[3, 3] parameter(0)
size = s32[] constant(2)
param_padded_0 = s32[<=3, 3] set-dimension-size(param_0, size), dimensions={0}
param_padded_1 = s32[<=3, <=3] set-dimension-size(param_padded_0, size),
dimensions={1}
ROOT %reverse = s32[<=3, <=3]
reverse(s32[<=3, <=3] param_padded_1),
dimensions={0, 1}
}
)";
Literal operand_0 = LiteralUtil::CreateR2<int32_t>(
{{1, 2, -1}, {3, 4, -1}, {-1, -1, -1}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(
Literal result, PadAndExecute(std::move(module), {&operand_0}, false));
result.SetDynamicSize(0, 2);
result.SetDynamicSize(1, 2);
Literal expected = LiteralUtil::CreateR2<int32_t>({{4, 3}, {2, 1}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicDimensionReduce) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}
init = s32[] constant(0)
ROOT reduce = s32[] reduce(param_padded, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3, 4, 5});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(6);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, InputMinorDimensionReshape) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[1, 2, 5, 1] parameter(0)
const = s32[] constant(3)
param_padded = s32[1, 2, <=5, 1] set-dimension-size(param, const), dimensions={2}
reshaped = s32[<=10] reshape(param_padded)
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR4<int32_t>(
{{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(18);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, SliceSingleElement) {
const std::string hlo_text = R"(
HloModule Slicing
ENTRY main {
param = s32[5] parameter(0)
const = s32[] constant(3)
param_padded = s32[<=5] set-dimension-size(param, const), dimensions={0}
ROOT slice = s32[1]{0} slice(param_padded), slice={[0:1]}
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 1, 2, 3, 4});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR1<int32_t>({0});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshape) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[12] parameter(0)
const = s32[] constant(8)
param_padded = s32[<=12] set-dimension-size(param, const), dimensions={0}
reshaped = s32[2, <=3, 2] reshape(param_padded), inferred_dimension=1
init = s32[] constant(0)
ROOT reduce = s32[2, 2] reduce(reshaped, init),
dimensions={1},
to_apply=update_s32
}
)";
Literal operand =
LiteralUtil::CreateR1<int32_t>({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR2<int32_t>({{2, 4}, {10, 12}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshapeWithUnchangedDimMajor) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[2, 6] parameter(0)
const = s32[] constant(4)
param_padded = s32[2, <=6] set-dimension-size(param, const), dimensions={1}
reshaped = s32[2, 2, <=3] reshape(param_padded), inferred_dimension=2
init = s32[] constant(0)
ROOT reduce = s32[2, 2] reduce(reshaped, init),
dimensions={2},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR2<int32_t>(
{{0, 1, 2, 3, 4, 5}, {6, 7, 8, 9, 10, 11}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR2<int32_t>({{1, 5}, {13, 17}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, OutputMinorDimensionReshapeWithUnchangedDimMinor) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[6, 2] parameter(0)
const = s32[] constant(4)
param_padded = s32[<=6, 2] set-dimension-size(param, const), dimensions={0}
reshaped = s32[2, <=3, 2] reshape(param_padded), inferred_dimension=1
init = s32[] constant(0)
ROOT reduce = s32[2, 2] reduce(reshaped, init),
dimensions={1},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR2<int32_t>(
{{0, 1}, {2, 3}, {4, 5}, {6, 7}, {8, 9}, {10, 11}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR2<int32_t>({{2, 4}, {10, 12}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicInputFeature) {
const std::string hlo_text = R"(
HloModule DynamicInputFeature
ENTRY main {
param = f32[1, 1, 5] parameter(0)
const = s32[] constant(5)
one = f32[] constant(1)
kernel = f32[1,5,1]{2,1,0} broadcast(f32[] one), dimensions={}
param_dynamic = f32[1,1,<=5] set-dimension-size(param, const), dimensions={2}
ROOT conv = f32[1, 1, 1]{2,1,0} custom-call(f32[1, 1, <=5] param_dynamic, f32[1,<=5,1]{2,1,0} kernel),
window={size=1 pad=0_0},
dim_labels=b0f_0io->b0f,
padding_type=PADDING_VALID,
custom_call_target="DynamicConvolutionForward"
}
)";
Literal operand = LiteralUtil::CreateR3<float>({{{1, 2, 3, 4, 5}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR3<float>({{{15}}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(LlvmIrGenTestBase, LargeDynamicInput) {
#ifndef XLA_TEST_BACKEND_GPU
GTEST_SKIP();
#endif
const std::string hlo_text = R"(
HloModule LargeDynamicInput
add (lhs: f32[], rhs: f32[]) -> f32[] {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY main {
param = f32[<=20,<=20,<=20,<=20,<=20,<=20,<=20,<=20] parameter(0)
zero = f32[] constant(0)
ROOT out = reduce(param, zero), to_apply=add, dimensions={0,1,2,3,4,5,6,7}
}
)";
CompileAndVerifyIr(hlo_text, R"(
CHECK: ret void
)",
/*match_optimized_ir=*/true);
}
XLA_TEST_F(ExecutionTest, DynamicDimensionReshapeUnchanged) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[1, 2, 5, 1] parameter(0)
const = s32[] constant(3)
param_padded = s32[1, 2, <=5, 1] set-dimension-size(param, const), dimensions={2}
reshaped = s32[2, <=5] reshape(param_padded)
init = s32[] constant(0)
ROOT reduce = s32[2] reduce(reshaped, init),
dimensions={1},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR4<int32_t>(
{{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR1<int32_t>({6, 12});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DegeneratedDimension) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[1, 2, 5, 1] parameter(0)
size = s32[] constant(0)
param_padded = s32[<=1, 2, 5, 1] set-dimension-size(param, size),
dimensions={0}
reshaped = s32[<=10] reshape(param_padded)
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR4<int32_t>(
{{{{1}, {2}, {3}, {4}, {5}}, {{2}, {4}, {6}, {7}, {8}}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(0);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, ReshapeSplitCombineSameTime) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[4, 2, 2] parameter(0)
two = s32[] constant(2)
one = s32[] constant(1)
param_padded_partial = s32[<=4, 2, 2] set-dimension-size(param, two),
dimensions={0}
param_padded_dynamic = s32[<=4, 2, <=2] set-dimension-size(param_padded_partial,
one),
dimensions={2}
reshaped = s32[2, <=2, <=4] reshape(param_padded_dynamic),
inferred_dimension=1
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0, 1, 2},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>({{{0, -1}, {1, -1}},
{{2, -1}, {3, -1}},
{{-1, -1}, {-1, -1}},
{{-1, -1}, {-1, -1}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(6);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, ReshapeComplicated) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[2, 4, 4] parameter(0)
two = s32[] constant(2)
param_padded_dynamic = s32[2, <=4, 4] set-dimension-size(param, two),
dimensions={1}
reshaped = s32[<=16, 2] reshape(param_padded_dynamic), inferred_dimension=0
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0, 1},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{1, 2, 3, 4}, {5, 6, 7, 8}, {-1, -1, -1, -1}, {-1, -1, -1, -1}},
{{9, 10, 11, 12},
{13, 14, 15, 16},
{-1, -1, -1, -1},
{-1, -1, -1, -1}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(136);
EXPECT_EQ(result, expected);
}
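// Emulates a stack backed by a dynamically sized buffer: each iteration
// broadcasts the current stack size into a new row, grows the dynamic size by
// one, and dynamic-update-slices the row in at the old size. Three pushes
// write rows of 0s, 1s, and 2s, so reducing over the dynamic dimension yields
// {3, 3}.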
XLA_TEST_F(ExecutionTest, WhileLoopStack) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
body {
stack = (s32[<=4,2]) parameter(0)
stack_buffer = s32[<=4, 2] get-tuple-element(stack), index=0
stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0}
zero = s32[] constant(0)
one = s32[] constant(1)
new_data = s32[1, 2] broadcast(s32[] stack_size), dimensions={}
new_stack_size = s32[] add(stack_size, one)
new_stack_buffer = s32[<=4, 2] set-dimension-size(stack_buffer, new_stack_size), dimensions={0}
new_stack = s32[<=4, 2] dynamic-update-slice(new_stack_buffer, new_data, stack_size, zero)
ROOT new_stack_tuple = (s32[<=4,2]) tuple(new_stack)
}
condition {
stack = (s32[<=4,2]) parameter(0)
stack_buffer = s32[<=4, 2] get-tuple-element(stack), index=0
stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0}
three = s32[] constant(3)
ROOT less-than = pred[] compare(s32[] stack_size, s32[] three), direction=LT
}
ENTRY entry {
zero = s32[] constant(0)
pad = s32[] constant(-1)
stack_buffer_input = s32[4, 2] broadcast(s32[] pad), dimensions={}
stack_buffer_input_dynamic = s32[<=4, 2] set-dimension-size(stack_buffer_input, zero), dimensions={0}
input_tuple = (s32[<=4 ,2]) tuple(stack_buffer_input_dynamic)
while = (s32[<=4, 2]) while(input_tuple), body=body, condition=condition
stack_buffer = s32[<=4, 2] get-tuple-element(while), index=0
ROOT reduce = s32[2] reduce(stack_buffer, zero),
dimensions={0},
to_apply=update_s32
}
)";
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));
Literal expected = LiteralUtil::CreateR1<int32_t>({3, 3});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicAddWithImplicitBroadcast) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY entry {
zero = s32[] constant(0)
one = s32[] constant(1)
two = s32[] constant(2)
three = s32[] constant(3)
input1 = s32[4, 2] iota(), iota_dimension=0
ones = s32[4, 2] broadcast(one), dimensions={}
input1_added = s32[4, 2] add(input1, ones)
input1_dynamic = s32[<=4, 2] set-dimension-size(input1_added, one), dimensions={0}
input2 = s32[4, 2] broadcast(two), dimensions={}
input2_dynamic = s32[<=4, 2] set-dimension-size(input2, three), dimensions={0}
add = s32[<=4, 2] add(input1_dynamic, input2_dynamic)
ROOT reduce = s32[2] reduce(add, zero),
dimensions={0},
to_apply=update_s32
}
)";
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));
Literal expected = LiteralUtil::CreateR1<int32_t>({9, 9});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicAddWithImplicitSlice) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY entry {
zero = s32[] constant(0)
one = s32[] constant(1)
two = s32[] constant(2)
three = s32[] constant(3)
input1 = s32[4, 2] broadcast(one), dimensions={}
input1_dynamic = s32[<=4, 2] set-dimension-size(input1, three), dimensions={0}
input2 = s32[4, 2] broadcast(two), dimensions={}
input2_dynamic = s32[<=4, 2] set-dimension-size(input2, two), dimensions={0}
add = s32[<=4, 2] add(input1_dynamic, input2_dynamic)
ROOT reduce = s32[2] reduce(add, zero),
dimensions={0},
to_apply=update_s32
}
)";
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));
Literal expected = LiteralUtil::CreateR1<int32_t>({6, 6});
EXPECT_EQ(result, expected);
}
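// The inverse of the stack-push test: each iteration only shrinks the dynamic
// size (a "pop"), going from 4 down to 2 before the loop exits. The buffer is
// all ones, so reducing over the two remaining rows yields {2, 2}.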
XLA_TEST_F(ExecutionTest, DynamicStackPop) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
body {
param_tuple = (s32[<=4,2]) parameter(0)
param = s32[<=4, 2] get-tuple-element(param_tuple), index=0
one = s32[] constant(1)
size = s32[] get-dimension-size(param), dimensions={0}
new_size = s32[] subtract(size, one)
output = s32[<=4, 2] set-dimension-size(param, new_size), dimensions={0}
ROOT root = (s32[<=4, 2]) tuple(output)
}
condition {
stack = (s32[<=4,2]) parameter(0)
stack_buffer = s32[<=4,2] get-tuple-element(stack), index=0
stack_size = s32[] get-dimension-size(stack_buffer), dimensions={0}
two = s32[] constant(2)
ROOT greater-than = pred[] compare(s32[] stack_size, s32[] two), direction=GT
}
ENTRY entry {
one = s32[] constant(1)
zero = s32[] constant(0)
four = s32[] constant(4)
stack_buffer_input = s32[4, 2] broadcast(s32[] one), dimensions={}
stack_buffer_dynamic = s32[<=4, 2] set-dimension-size(stack_buffer_input, four), dimensions={0}
input_tuple = (s32[<=4, 2]) tuple(stack_buffer_dynamic)
while = (s32[<=4, 2]) while(input_tuple), body=body, condition=condition
stack_buffer = s32[<=4, 2] get-tuple-element(while), index=0
ROOT reduce = s32[2] reduce(stack_buffer, zero),
dimensions={0},
to_apply=update_s32
}
)";
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result, PadAndExecute(std::move(module), {}));
Literal expected = LiteralUtil::CreateR1<int32_t>({2, 2});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DoubleDynamicDimension) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[2, 3, 3] parameter(0)
size = s32[] constant(2)
param_padded_partial = s32[2, <=3, 3] set-dimension-size(param, size),
dimensions={1}
param_padded = s32[2, 3, <=3] set-dimension-size(param_padded_partial, size),
dimensions={2}
reshaped = s32[<=18] reshape(param_padded)
init = s32[] constant(0)
ROOT reduce = s32[] reduce(reshaped, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}, {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(16);
EXPECT_EQ(result, expected);
}
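// dynamic-reshape, unlike plain reshape, takes the output dynamic sizes as
// explicit operands; here a [2,<=3,<=3] input with both dynamic sizes set to
// 2 collapses into a rank-1 result with 8 live elements.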
XLA_TEST_F(ExecutionTest, DynamicReshapeDoubleDynamicDimensions) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
ENTRY main {
param = s32[2, 3, 3] parameter(0)
size = s32[] constant(2)
param_padded_partial = s32[2, <=3, 3] set-dimension-size(param, size),
dimensions={1}
param_padded = s32[2, <=3, <=3] set-dimension-size(param_padded_partial, size),
dimensions={2}
result_size = s32[] constant(8)
ROOT reshaped = s32[<=18] dynamic-reshape(param_padded, result_size)
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}, {{0, 1, 2}, {3, 4, 5}, {6, 7, 8}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}, false));
result.SetDynamicSize(0, 8);
Literal expected = LiteralUtil::CreateR1<int32_t>({0, 1, 3, 4, 0, 1, 3, 4});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReshapeOutputDoubleDynamicDimensions) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
ENTRY main {
param = s32[18] parameter(0)
eight = s32[] constant(8)
param_dynamic = s32[<=18] set-dimension-size(param, eight), dimensions={0}
two = s32[] constant(2)
ROOT reshaped = s32[2, <=3, <=3] dynamic-reshape(param_dynamic, two, two, two)
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>(
{0, 1, 3, 4, 0, 1, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}, false));
VLOG(1) << " result: " << result.ToString();
result.SetDynamicSize(1, 2);
result.SetDynamicSize(2, 2);
Literal expected =
LiteralUtil::CreateR3<int32_t>({{{0, 1}, {3, 4}}, {{0, 1}, {3, 4}}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicReshapeComplicated) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
ENTRY main {
param = s32[3, 4, 4] parameter(0)
two = s32[] constant(2)
param_dynamic = s32[<=3, 4, 4] set-dimension-size(param, two), dimensions={0}
three = s32[] constant(3)
param_dynamic1 = s32[<=3, <=4, 4] set-dimension-size(param_dynamic, three), dimensions={1}
param_dynamic2 = s32[<=3, <=4, <=4] set-dimension-size(param_dynamic1, three), dimensions={2}
six = s32[] constant(6)
ROOT reshaped = s32[<=6, <=8] dynamic-reshape(param_dynamic2, three, six)
}
)";
Literal operand = LiteralUtil::CreateR3<int32_t>(
{{{0, 1, 2, -1}, {3, 4, 5, -1}, {6, 7, 8, -1}, {-1, -1, -1, -1}},
{{9, 8, 7, -1}, {6, 5, 4, -1}, {3, 2, 1, -1}, {-1, -1, -1, -1}},
{{-1, -1, -1, -1},
{-1, -1, -1, -1},
{-1, -1, -1, -1},
{-1, -1, -1, -1}}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}, false));
result.SetDynamicSize(0, 3);
result.SetDynamicSize(1, 6);
Literal expected = LiteralUtil::CreateR2<int32_t>(
{{0, 1, 2, 3, 4, 5}, {6, 7, 8, 9, 8, 7}, {6, 5, 4, 3, 2, 1}});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, SetGetDimensionSize) {
const std::string hlo_text = R"(
HloModule TensorFlowScatterV1
ENTRY main {
param = s32[3] parameter(0)
size = s32[] constant(2)
param_dynamic_size = s32[3] set-dimension-size(param, size),
dimensions={0}
ROOT gds = s32[] get-dimension-size(param_dynamic_size),
dimensions={0}
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 2, 3});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand}));
Literal expected = LiteralUtil::CreateR0<int32_t>(2);
EXPECT_EQ(result, expected);
}
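// Sorting a dynamic array: only the three live elements participate in the
// descending sort, while the padded fourth element keeps its original value
// and position.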
XLA_TEST_F(ExecutionTest, DynamicSort) {
const std::string hlo_text = R"(
HloModule TEST
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
%compare-greater-than (lhs: s32[], rhs: s32[]) -> pred[] {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
ROOT %compare = pred[] compare(s32[] %lhs, s32[] %rhs), direction=GT
}
ENTRY main {
param = s32[4] parameter(0)
size = s32[] constant(3)
param_dynamic_size = s32[<=4] set-dimension-size(param, size),
dimensions={0}
ROOT sort = s32[<=4]{0} sort(s32[4]{0} %param_dynamic_size),
dimensions={0}, is_stable=false, to_apply=%compare-greater-than
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 2});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
false));
Literal expected = LiteralUtil::CreateR1<int32_t>({4, 3, 1, 2});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicPad) {
const std::string hlo_text = R"(
HloModule TEST
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[4] parameter(0)
size = s32[] constant(3)
padding = s32[] constant(2)
param_dynamic = s32[<=4] set-dimension-size(param, size),
dimensions={0}
pad = s32[<=6] pad(param_dynamic, padding), padding=1_1
init = s32[] constant(0)
ROOT reduce = s32[] reduce(pad, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 5});
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
false));
Literal expected = LiteralUtil::CreateR0<int32_t>(12);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicPadInteriorPadding) {
const std::string hlo_text = R"(
HloModule TEST
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[4] parameter(0)
size = s32[] constant(3)
padding = s32[] constant(2)
param_dynamic = s32[<=4] set-dimension-size(param, size),
dimensions={0}
pad = s32[<=7] pad(param_dynamic, padding), padding=0_0_1
init = s32[] constant(0)
ROOT reduce = s32[] reduce(pad, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({1, 4, 3, 5});
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text));
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
false));
Literal expected = LiteralUtil::CreateR0<int32_t>(12);
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicConditionalDimension) {
const std::string hlo_text = R"(
HloModule module
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
true_branch {
true_param = (s32[<=3,2]) parameter(0)
param = s32[<=3, 2] get-tuple-element(true_param), index=0
add = s32[<=3,2] add(param, param)
ROOT true_tuple = (s32[<=3,2], s32[<=3,2]) tuple(add, add)
}
false_branch {
false_param = (s32[<=3,2]) parameter(0)
param = s32[<=3, 2] get-tuple-element(false_param), index=0
add = s32[<=3,2] add(param, param)
ROOT false_tuple = (s32[<=3,2], s32[<=3,2]) tuple(add, add)
}
ENTRY entry {
param0 = s32[3,2] parameter(0)
size = s32[] constant(2)
branch = pred[] constant(false)
param_dynamic = s32[<=3, 2] set-dimension-size(param0, size), dimensions={0}
param_tuple = (s32[<=3 ,2]) tuple(param_dynamic)
conditional = (s32[<=3, 2], s32[<=3, 2]) conditional(branch, param_tuple, param_tuple),
true_computation=true_branch, false_computation=false_branch
gte0 = s32[<=3,2] get-tuple-element(conditional), index=1
init = s32[] constant(0)
ROOT reduce = s32[2] reduce(gte0, init),
dimensions={0},
to_apply=update_s32
}
)";
Literal operand = LiteralUtil::CreateR2<int32_t>({{0, 1}, {2, 3}, {4, 5}});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
false));
Literal expected = LiteralUtil::CreateR1<int32_t>({4, 8});
EXPECT_EQ(result, expected);
}
XLA_TEST_F(ExecutionTest, DynamicTupleSort) {
const std::string hlo_text = R"(
HloModule TEST
%compare-greater-than (lhs: s32[], rhs: s32[], lhs_2: s32[], rhs_2: s32[]) -> pred[] {
%lhs = s32[] parameter(0)
%rhs = s32[] parameter(1)
%lhs_2 = s32[] parameter(2)
%rhs_2 = s32[] parameter(3)
ROOT %compare = pred[] compare(s32[] %lhs, s32[] %rhs), direction=GT
}
update_s32 (lhs: s32[], rhs: s32[]) -> s32[] {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY main {
param = s32[3] parameter(0)
size = s32[] constant(2)
param_dynamic_size = s32[<=3] set-dimension-size(param, size),
dimensions={0}
sort = (s32[<=3]{0}, s32[<=3]{0}) sort(s32[<=3]{0} %param_dynamic_size,
s32[<=3]{0} %param_dynamic_size),
dimensions={0}, is_stable=true, to_apply=%compare-greater-than
ROOT get-tuple-element = s32[<=3]{0} get-tuple-element((s32[<=3]{0}, s32[<=3]{0}) %sort),
index=0
}
)";
Literal operand = LiteralUtil::CreateR1<int32_t>({0, 4, 2});
auto module = GetHloModule(hlo_text);
TF_ASSERT_OK_AND_ASSIGN(Literal result,
PadAndExecute(std::move(module), {&operand},
false));
Literal expected = LiteralUtil::CreateR1<int32_t>({4, 0, 2});
EXPECT_EQ(result, expected);
}
namespace op = xla::testing::opcode_matchers;
class HloDimensionSizeLegalizerTest : public HloTestBase {
protected:
HloDimensionSizeLegalizerTest() {}
};
TEST_F(HloDimensionSizeLegalizerTest, Ok) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule _
ENTRY gds {
p = s32[3,4] parameter(0)
size0 = s32[] get-dimension-size(p), dimensions={0}
size1 = s32[] get-dimension-size(p), dimensions={1}
ROOT mul = s32[] multiply(size0, size1)
})")
.value();
DynamicPadder pass;
EXPECT_TRUE(pass.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Constant(), op::Constant()));
}
TEST_F(HloDimensionSizeLegalizerTest, GetSetSetDimensionSizeRewriter) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule _
ENTRY gds {
p = s32[3,4] parameter(0)
size0 = s32[] get-dimension-size(p), dimensions={0}
p_copy = s32[3,4] copy(p)
p_copy_dynamic = s32[<=3, 4] set-dimension-size(p_copy, size0), dimensions={0}
size1 = s32[] get-dimension-size(p_copy_dynamic), dimensions={0}
ROOT mul = s32[] multiply(size0, size1)
})")
.value();
DynamicPadder pass;
EXPECT_TRUE(pass.Run(module.get()).value());
EXPECT_THAT(module->entry_computation()->root_instruction(),
op::Multiply(op::Constant(), op::Constant()));
}
TEST_F(HloDimensionSizeLegalizerTest, IllegalType) {
auto module = ParseAndReturnUnverifiedModule(R"(
HloModule _
ENTRY gds {
p = s32[3]{0} parameter(0)
ROOT gds = s64[] get-dimension-size(p), dimensions={0}
})")
.value();
DynamicPadder pass;
EXPECT_FALSE(pass.Run(module.get()).ok());
}
TEST_F(HloDimensionSizeLegalizerTest, IllegalDimension) {
auto module = ParseAndReturnUnverifiedModule(R"(
HloModule _
ENTRY gds {
p = f32[2,5] parameter(0)
ROOT gds = s32[] get-dimension-size(p), dimensions={2}
})")
.value();
DynamicPadder pass;
EXPECT_FALSE(pass.Run(module.get()).ok());
}
class SizeCheckTest : public HloTestBase {
protected:
SizeCheckTest() {}
};
TEST_F(SizeCheckTest, CompileTimeCheckBinaryOpFail) {
auto module = ParseAndReturnUnverifiedModule(R"(
HloModule _
ENTRY gds {
size_0 = s32[] parameter(0)
size_1 = s32[] parameter(1)
arg = s32[4]{0} parameter(2)
dynamic_arg_0 = s32[<=4] set-dimension-size(arg, size_0), dimensions={0}
dynamic_arg_1 = s32[<=4] set-dimension-size(arg, size_1), dimensions={0}
ROOT add = s32[<=4] add(dynamic_arg_0, dynamic_arg_1)
})")
.value();
auto options = DynamicPadderOptions();
options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kCompileTime;
DynamicPadder pass(options);
auto status = pass.Run(module.get()).status();
EXPECT_THAT(status.code(), tsl::error::INVALID_ARGUMENT);
}
TEST_F(SizeCheckTest, CompileTimeCheckBinaryOpPass) {
auto module = ParseAndReturnUnverifiedModule(R"(
HloModule _
ENTRY gds {
size_0 = s32[] parameter(0)
size_0_reshape = s32[1] reshape(size_0)
size_1 = s32[] reshape(size_0_reshape)
arg = s32[4]{0} parameter(1)
dynamic_arg_0 = s32[<=4] set-dimension-size(arg, size_0), dimensions={0}
dynamic_arg_1 = s32[<=4] set-dimension-size(arg, size_1), dimensions={0}
ROOT add = s32[<=4] add(dynamic_arg_0, dynamic_arg_1)
})")
.value();
auto options = DynamicPadderOptions();
options.shape_check_mode =
DynamicDimensionInference::ShapeCheckMode::kCompileTime;
DynamicDimensionSimplifier simplifier;
EXPECT_TRUE(simplifier.Run(module.get()).ok());
DynamicPadder pass(options);
auto status = pass.Run(module.get()).status();
EXPECT_TRUE(status.ok());
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_padder.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/dynamic_padder_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
2b3d12ac-68d3-49d6-91a9-b5472a2b113e | cpp | google/cel-cpp | duration_value | common/values/duration_value.cc | common/values/duration_value_test.cc | #include <cstddef>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/value.h"
#include "internal/serialize.h"
#include "internal/status_macros.h"
#include "internal/time.h"
namespace cel {
namespace {
std::string DurationDebugString(absl::Duration value) {
return internal::DebugStringDuration(value);
}
}
std::string DurationValue::DebugString() const {
return DurationDebugString(NativeValue());
}
absl::Status DurationValue::SerializeTo(AnyToJsonConverter&,
absl::Cord& value) const {
return internal::SerializeDuration(NativeValue(), value);
}
absl::StatusOr<Json> DurationValue::ConvertToJson(AnyToJsonConverter&) const {
CEL_ASSIGN_OR_RETURN(auto json,
internal::EncodeDurationToJson(NativeValue()));
return JsonString(std::move(json));
}
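// Duration equality is only defined against another DurationValue; comparing
// against any other kind of value yields false rather than an error.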
absl::Status DurationValue::Equal(ValueManager&, const Value& other,
Value& result) const {
if (auto other_value = As<DurationValue>(other); other_value.has_value()) {
result = BoolValue{NativeValue() == other_value->NativeValue()};
return absl::OkStatus();
}
result = BoolValue{false};
return absl::OkStatus();
}
absl::StatusOr<Value> DurationValue::Equal(ValueManager& value_manager,
const Value& other) const {
Value result;
CEL_RETURN_IF_ERROR(Equal(value_manager, other, result));
return result;
}
} | #include <sstream>
#include "absl/strings/cord.h"
#include "absl/time/time.h"
#include "absl/types/optional.h"
#include "common/any.h"
#include "common/casting.h"
#include "common/json.h"
#include "common/native_type.h"
#include "common/value.h"
#include "common/value_testing.h"
#include "internal/testing.h"
namespace cel {
namespace {
using ::absl_testing::IsOkAndHolds;
using ::testing::An;
using ::testing::Ne;
using DurationValueTest = common_internal::ThreadCompatibleValueTest<>;
TEST_P(DurationValueTest, Kind) {
EXPECT_EQ(DurationValue().kind(), DurationValue::kKind);
EXPECT_EQ(Value(DurationValue(absl::Seconds(1))).kind(),
DurationValue::kKind);
}
TEST_P(DurationValueTest, DebugString) {
{
std::ostringstream out;
out << DurationValue(absl::Seconds(1));
EXPECT_EQ(out.str(), "1s");
}
{
std::ostringstream out;
out << Value(DurationValue(absl::Seconds(1)));
EXPECT_EQ(out.str(), "1s");
}
}
TEST_P(DurationValueTest, ConvertToJson) {
EXPECT_THAT(DurationValue().ConvertToJson(value_manager()),
IsOkAndHolds(Json(JsonString("0s"))));
}
TEST_P(DurationValueTest, NativeTypeId) {
EXPECT_EQ(NativeTypeId::Of(DurationValue(absl::Seconds(1))),
NativeTypeId::For<DurationValue>());
EXPECT_EQ(NativeTypeId::Of(Value(DurationValue(absl::Seconds(1)))),
NativeTypeId::For<DurationValue>());
}
TEST_P(DurationValueTest, InstanceOf) {
EXPECT_TRUE(InstanceOf<DurationValue>(DurationValue(absl::Seconds(1))));
EXPECT_TRUE(
InstanceOf<DurationValue>(Value(DurationValue(absl::Seconds(1)))));
}
TEST_P(DurationValueTest, Cast) {
EXPECT_THAT(Cast<DurationValue>(DurationValue(absl::Seconds(1))),
An<DurationValue>());
EXPECT_THAT(Cast<DurationValue>(Value(DurationValue(absl::Seconds(1)))),
An<DurationValue>());
}
TEST_P(DurationValueTest, As) {
EXPECT_THAT(As<DurationValue>(Value(DurationValue(absl::Seconds(1)))),
Ne(absl::nullopt));
}
TEST_P(DurationValueTest, Equality) {
EXPECT_NE(DurationValue(absl::ZeroDuration()), absl::Seconds(1));
EXPECT_NE(absl::Seconds(1), DurationValue(absl::ZeroDuration()));
EXPECT_NE(DurationValue(absl::ZeroDuration()),
DurationValue(absl::Seconds(1)));
}
INSTANTIATE_TEST_SUITE_P(
DurationValueTest, DurationValueTest,
::testing::Combine(::testing::Values(MemoryManagement::kPooling,
MemoryManagement::kReferenceCounting)),
DurationValueTest::ToString);
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/duration_value.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/common/values/duration_value_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
58149a92-130e-465a-897d-2bb9af5d8f4b | cpp | google/quiche | quic_endpoint | quiche/quic/test_tools/simulator/quic_endpoint.cc | quiche/quic/test_tools/simulator/quic_endpoint_test.cc | #include "quiche/quic/test_tools/simulator/quic_endpoint.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include "quiche/quic/core/crypto/crypto_handshake_message.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_data_writer.h"
#include "quiche/quic/platform/api/quic_test_output.h"
#include "quiche/quic/test_tools/quic_config_peer.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simulator/simulator.h"
namespace quic {
namespace simulator {
const QuicStreamId kDataStream = 3;
const QuicByteCount kWriteChunkSize = 128 * 1024;
const char kStreamDataContents = 'Q';
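// A self-contained QUIC endpoint for simulator tests. The constructor builds
// a real QuicConnection driven by the simulator's clock and alarm factory,
// installs tagging encrypters/decrypters at ENCRYPTION_FORWARD_SECURE, fakes
// a completed handshake and the peer's config, and disables MTU discovery so
// transfers behave deterministically.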
QuicEndpoint::QuicEndpoint(Simulator* simulator, std::string name,
std::string peer_name, Perspective perspective,
QuicConnectionId connection_id)
: QuicEndpointBase(simulator, name, peer_name),
bytes_to_transfer_(0),
bytes_transferred_(0),
wrong_data_received_(false),
notifier_(nullptr) {
connection_ = std::make_unique<QuicConnection>(
connection_id, GetAddressFromName(name), GetAddressFromName(peer_name),
simulator, simulator->GetAlarmFactory(), &writer_, /*owns_writer=*/false, perspective,
ParsedVersionOfIndex(CurrentSupportedVersions(), 0),
connection_id_generator_);
connection_->set_visitor(this);
connection_->SetEncrypter(ENCRYPTION_FORWARD_SECURE,
std::make_unique<quic::test::TaggingEncrypter>(
ENCRYPTION_FORWARD_SECURE));
connection_->SetEncrypter(ENCRYPTION_INITIAL, nullptr);
if (connection_->version().KnowsWhichDecrypterToUse()) {
connection_->InstallDecrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<quic::test::StrictTaggingDecrypter>(
ENCRYPTION_FORWARD_SECURE));
connection_->RemoveDecrypter(ENCRYPTION_INITIAL);
} else {
connection_->SetDecrypter(
ENCRYPTION_FORWARD_SECURE,
std::make_unique<quic::test::StrictTaggingDecrypter>(
ENCRYPTION_FORWARD_SECURE));
}
connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
connection_->OnHandshakeComplete();
if (perspective == Perspective::IS_SERVER) {
test::QuicConnectionPeer::SetNegotiatedVersion(connection_.get());
}
test::QuicConnectionPeer::SetAddressValidated(connection_.get());
connection_->SetDataProducer(&producer_);
connection_->SetSessionNotifier(this);
notifier_ = std::make_unique<test::SimpleSessionNotifier>(connection_.get());
std::string error;
CryptoHandshakeMessage peer_hello;
peer_hello.SetValue(kICSL,
static_cast<uint32_t>(kMaximumIdleTimeoutSecs - 1));
peer_hello.SetValue(kMIBS,
static_cast<uint32_t>(kDefaultMaxStreamsPerConnection));
QuicConfig config;
QuicErrorCode error_code = config.ProcessPeerHello(
peer_hello, perspective == Perspective::IS_CLIENT ? SERVER : CLIENT,
&error);
QUICHE_DCHECK_EQ(error_code, QUIC_NO_ERROR)
<< "Configuration failed: " << error;
if (connection_->version().UsesTls()) {
if (connection_->perspective() == Perspective::IS_CLIENT) {
test::QuicConfigPeer::SetReceivedOriginalConnectionId(
&config, connection_->connection_id());
test::QuicConfigPeer::SetReceivedInitialSourceConnectionId(
&config, connection_->connection_id());
} else {
test::QuicConfigPeer::SetReceivedInitialSourceConnectionId(
&config, connection_->client_connection_id());
}
}
connection_->SetFromConfig(config);
connection_->DisableMtuDiscovery();
}
QuicByteCount QuicEndpoint::bytes_received() const {
QuicByteCount total = 0;
for (auto& interval : offsets_received_) {
total += interval.max() - interval.min();
}
return total;
}
QuicByteCount QuicEndpoint::bytes_to_transfer() const {
if (notifier_ != nullptr) {
return notifier_->StreamBytesToSend();
}
return bytes_to_transfer_;
}
QuicByteCount QuicEndpoint::bytes_transferred() const {
if (notifier_ != nullptr) {
return notifier_->StreamBytesSent();
}
return bytes_transferred_;
}
void QuicEndpoint::AddBytesToTransfer(QuicByteCount bytes) {
if (notifier_ != nullptr) {
if (notifier_->HasBufferedStreamData()) {
Schedule(clock_->Now());
}
notifier_->WriteOrBufferData(kDataStream, bytes, NO_FIN);
return;
}
if (bytes_to_transfer_ > 0) {
Schedule(clock_->Now());
}
bytes_to_transfer_ += bytes;
WriteStreamData();
}
void QuicEndpoint::OnStreamFrame(const QuicStreamFrame& frame) {
QUICHE_DCHECK(frame.stream_id == kDataStream);
for (size_t i = 0; i < frame.data_length; i++) {
if (frame.data_buffer[i] != kStreamDataContents) {
wrong_data_received_ = true;
}
}
offsets_received_.Add(frame.offset, frame.offset + frame.data_length);
QUICHE_DCHECK_LE(offsets_received_.Size(), 1000u);
}
void QuicEndpoint::OnCryptoFrame(const QuicCryptoFrame& /*frame*/) {}
void QuicEndpoint::OnCanWrite() {
if (notifier_ != nullptr) {
notifier_->OnCanWrite();
return;
}
WriteStreamData();
}
bool QuicEndpoint::WillingAndAbleToWrite() const {
if (notifier_ != nullptr) {
return notifier_->WillingToWrite();
}
return bytes_to_transfer_ != 0;
}
bool QuicEndpoint::ShouldKeepConnectionAlive() const { return true; }
bool QuicEndpoint::AllowSelfAddressChange() const { return false; }
bool QuicEndpoint::OnFrameAcked(const QuicFrame& frame,
QuicTime::Delta ack_delay_time,
QuicTime receive_timestamp) {
if (notifier_ != nullptr) {
return notifier_->OnFrameAcked(frame, ack_delay_time, receive_timestamp);
}
return false;
}
void QuicEndpoint::OnFrameLost(const QuicFrame& frame) {
QUICHE_DCHECK(notifier_);
notifier_->OnFrameLost(frame);
}
bool QuicEndpoint::RetransmitFrames(const QuicFrames& frames,
TransmissionType type) {
QUICHE_DCHECK(notifier_);
return notifier_->RetransmitFrames(frames, type);
}
bool QuicEndpoint::IsFrameOutstanding(const QuicFrame& frame) const {
QUICHE_DCHECK(notifier_);
return notifier_->IsFrameOutstanding(frame);
}
bool QuicEndpoint::HasUnackedCryptoData() const { return false; }
bool QuicEndpoint::HasUnackedStreamData() const {
if (notifier_ != nullptr) {
return notifier_->HasUnackedStreamData();
}
return false;
}
HandshakeState QuicEndpoint::GetHandshakeState() const {
return HANDSHAKE_COMPLETE;
}
WriteStreamDataResult QuicEndpoint::DataProducer::WriteStreamData(
QuicStreamId /*id*/, QuicStreamOffset /*offset*/, QuicByteCount data_length,
QuicDataWriter* writer) {
writer->WriteRepeatedByte(kStreamDataContents, data_length);
return WRITE_SUCCESS;
}
bool QuicEndpoint::DataProducer::WriteCryptoData(EncryptionLevel /*level*/,
                                                 QuicStreamOffset /*offset*/,
                                                 QuicByteCount /*data_length*/,
                                                 QuicDataWriter* /*writer*/) {
QUIC_BUG(quic_bug_10157_1)
<< "QuicEndpoint::DataProducer::WriteCryptoData is unimplemented";
return false;
}
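// Drains bytes_to_transfer_ in kWriteChunkSize (128 KiB) chunks under a
// single packet flusher, stopping early if the connection consumes less than
// a full chunk (i.e. it became write blocked).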
void QuicEndpoint::WriteStreamData() {
QuicConnection::ScopedPacketFlusher flusher(connection_.get());
while (bytes_to_transfer_ > 0) {
const size_t transmission_size =
std::min(kWriteChunkSize, bytes_to_transfer_);
QuicConsumedData consumed_data = connection_->SendStreamData(
kDataStream, transmission_size, bytes_transferred_, NO_FIN);
QUICHE_DCHECK(consumed_data.bytes_consumed <= transmission_size);
bytes_transferred_ += consumed_data.bytes_consumed;
bytes_to_transfer_ -= consumed_data.bytes_consumed;
if (consumed_data.bytes_consumed != transmission_size) {
return;
}
}
}
}
} | #include "quiche/quic/test_tools/simulator/quic_endpoint.h"
#include <memory>
#include <utility>
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_test.h"
#include "quiche/quic/test_tools/quic_connection_peer.h"
#include "quiche/quic/test_tools/quic_test_utils.h"
#include "quiche/quic/test_tools/simulator/simulator.h"
#include "quiche/quic/test_tools/simulator/switch.h"
using ::testing::_;
using ::testing::NiceMock;
using ::testing::Return;
namespace quic {
namespace simulator {
const QuicBandwidth kDefaultBandwidth =
QuicBandwidth::FromKBitsPerSecond(10 * 1000);
const QuicTime::Delta kDefaultPropagationDelay =
QuicTime::Delta::FromMilliseconds(20);
const QuicByteCount kDefaultBdp = kDefaultBandwidth * kDefaultPropagationDelay;
class QuicEndpointTest : public quic::test::QuicTest {
public:
QuicEndpointTest()
: simulator_(), switch_(&simulator_, "Switch", /*port_count=*/8, /*queue_capacity=*/kDefaultBdp * 2) {}
protected:
Simulator simulator_;
Switch switch_;
std::unique_ptr<SymmetricLink> Link(Endpoint* a, Endpoint* b) {
return std::make_unique<SymmetricLink>(a, b, kDefaultBandwidth,
kDefaultPropagationDelay);
}
std::unique_ptr<SymmetricLink> CustomLink(Endpoint* a, Endpoint* b,
uint64_t extra_rtt_ms) {
return std::make_unique<SymmetricLink>(
a, b, kDefaultBandwidth,
kDefaultPropagationDelay +
QuicTime::Delta::FromMilliseconds(extra_rtt_ms));
}
};
TEST_F(QuicEndpointTest, OneWayTransmission) {
QuicEndpoint endpoint_a(&simulator_, "Endpoint A", "Endpoint B",
Perspective::IS_CLIENT, test::TestConnectionId(42));
QuicEndpoint endpoint_b(&simulator_, "Endpoint B", "Endpoint A",
Perspective::IS_SERVER, test::TestConnectionId(42));
auto link_a = Link(&endpoint_a, switch_.port(1));
auto link_b = Link(&endpoint_b, switch_.port(2));
endpoint_a.AddBytesToTransfer(600);
QuicTime end_time =
simulator_.GetClock()->Now() + QuicTime::Delta::FromMilliseconds(1000);
simulator_.RunUntil(
[this, end_time]() { return simulator_.GetClock()->Now() >= end_time; });
EXPECT_EQ(600u, endpoint_a.bytes_transferred());
ASSERT_EQ(600u, endpoint_b.bytes_received());
EXPECT_FALSE(endpoint_a.wrong_data_received());
EXPECT_FALSE(endpoint_b.wrong_data_received());
endpoint_a.AddBytesToTransfer(2 * 1024 * 1024);
end_time = simulator_.GetClock()->Now() + QuicTime::Delta::FromSeconds(5);
simulator_.RunUntil(
[this, end_time]() { return simulator_.GetClock()->Now() >= end_time; });
const QuicByteCount total_bytes_transferred = 600 + 2 * 1024 * 1024;
EXPECT_EQ(total_bytes_transferred, endpoint_a.bytes_transferred());
EXPECT_EQ(total_bytes_transferred, endpoint_b.bytes_received());
EXPECT_EQ(0u, endpoint_a.write_blocked_count());
EXPECT_FALSE(endpoint_a.wrong_data_received());
EXPECT_FALSE(endpoint_b.wrong_data_received());
}
TEST_F(QuicEndpointTest, WriteBlocked) {
QuicEndpoint endpoint_a(&simulator_, "Endpoint A", "Endpoint B",
Perspective::IS_CLIENT, test::TestConnectionId(42));
QuicEndpoint endpoint_b(&simulator_, "Endpoint B", "Endpoint A",
Perspective::IS_SERVER, test::TestConnectionId(42));
auto link_a = Link(&endpoint_a, switch_.port(1));
auto link_b = Link(&endpoint_b, switch_.port(2));
auto* sender = new NiceMock<test::MockSendAlgorithm>();
EXPECT_CALL(*sender, CanSend(_)).WillRepeatedly(Return(true));
EXPECT_CALL(*sender, PacingRate(_))
.WillRepeatedly(Return(10 * kDefaultBandwidth));
EXPECT_CALL(*sender, BandwidthEstimate())
.WillRepeatedly(Return(10 * kDefaultBandwidth));
EXPECT_CALL(*sender, GetCongestionWindow())
.WillRepeatedly(Return(kMaxOutgoingPacketSize *
GetQuicFlag(quic_max_congestion_window)));
test::QuicConnectionPeer::SetSendAlgorithm(endpoint_a.connection(), sender);
QuicByteCount bytes_to_transfer = 3 * 1024 * 1024;
endpoint_a.AddBytesToTransfer(bytes_to_transfer);
QuicTime end_time =
simulator_.GetClock()->Now() + QuicTime::Delta::FromSeconds(30);
simulator_.RunUntil([this, &endpoint_b, bytes_to_transfer, end_time]() {
return endpoint_b.bytes_received() == bytes_to_transfer ||
simulator_.GetClock()->Now() >= end_time;
});
EXPECT_EQ(bytes_to_transfer, endpoint_a.bytes_transferred());
EXPECT_EQ(bytes_to_transfer, endpoint_b.bytes_received());
EXPECT_GT(endpoint_a.write_blocked_count(), 0u);
EXPECT_FALSE(endpoint_a.wrong_data_received());
EXPECT_FALSE(endpoint_b.wrong_data_received());
}
TEST_F(QuicEndpointTest, TwoWayTransmission) {
QuicEndpoint endpoint_a(&simulator_, "Endpoint A", "Endpoint B",
Perspective::IS_CLIENT, test::TestConnectionId(42));
QuicEndpoint endpoint_b(&simulator_, "Endpoint B", "Endpoint A",
Perspective::IS_SERVER, test::TestConnectionId(42));
auto link_a = Link(&endpoint_a, switch_.port(1));
auto link_b = Link(&endpoint_b, switch_.port(2));
endpoint_a.RecordTrace();
endpoint_b.RecordTrace();
endpoint_a.AddBytesToTransfer(1024 * 1024);
endpoint_b.AddBytesToTransfer(1024 * 1024);
QuicTime end_time =
simulator_.GetClock()->Now() + QuicTime::Delta::FromSeconds(5);
simulator_.RunUntil(
[this, end_time]() { return simulator_.GetClock()->Now() >= end_time; });
EXPECT_EQ(1024u * 1024u, endpoint_a.bytes_transferred());
EXPECT_EQ(1024u * 1024u, endpoint_b.bytes_transferred());
EXPECT_EQ(1024u * 1024u, endpoint_a.bytes_received());
EXPECT_EQ(1024u * 1024u, endpoint_b.bytes_received());
EXPECT_FALSE(endpoint_a.wrong_data_received());
EXPECT_FALSE(endpoint_b.wrong_data_received());
}
TEST_F(QuicEndpointTest, Competition) {
auto endpoint_a = std::make_unique<QuicEndpoint>(
&simulator_, "Endpoint A", "Endpoint D (A)", Perspective::IS_CLIENT,
test::TestConnectionId(42));
auto endpoint_b = std::make_unique<QuicEndpoint>(
&simulator_, "Endpoint B", "Endpoint D (B)", Perspective::IS_CLIENT,
test::TestConnectionId(43));
auto endpoint_c = std::make_unique<QuicEndpoint>(
&simulator_, "Endpoint C", "Endpoint D (C)", Perspective::IS_CLIENT,
test::TestConnectionId(44));
auto endpoint_d_a = std::make_unique<QuicEndpoint>(
&simulator_, "Endpoint D (A)", "Endpoint A", Perspective::IS_SERVER,
test::TestConnectionId(42));
auto endpoint_d_b = std::make_unique<QuicEndpoint>(
&simulator_, "Endpoint D (B)", "Endpoint B", Perspective::IS_SERVER,
test::TestConnectionId(43));
auto endpoint_d_c = std::make_unique<QuicEndpoint>(
&simulator_, "Endpoint D (C)", "Endpoint C", Perspective::IS_SERVER,
test::TestConnectionId(44));
QuicEndpointMultiplexer endpoint_d(
"Endpoint D",
{endpoint_d_a.get(), endpoint_d_b.get(), endpoint_d_c.get()});
auto link_a = CustomLink(endpoint_a.get(), switch_.port(1), 0);
auto link_b = CustomLink(endpoint_b.get(), switch_.port(2), 1);
auto link_c = CustomLink(endpoint_c.get(), switch_.port(3), 2);
auto link_d = Link(&endpoint_d, switch_.port(4));
endpoint_a->AddBytesToTransfer(2 * 1024 * 1024);
endpoint_b->AddBytesToTransfer(2 * 1024 * 1024);
endpoint_c->AddBytesToTransfer(2 * 1024 * 1024);
QuicTime end_time =
simulator_.GetClock()->Now() + QuicTime::Delta::FromSeconds(12);
simulator_.RunUntil(
[this, end_time]() { return simulator_.GetClock()->Now() >= end_time; });
for (QuicEndpoint* endpoint :
{endpoint_a.get(), endpoint_b.get(), endpoint_c.get()}) {
EXPECT_EQ(2u * 1024u * 1024u, endpoint->bytes_transferred());
EXPECT_GE(endpoint->connection()->GetStats().packets_lost, 0u);
}
for (QuicEndpoint* endpoint :
{endpoint_d_a.get(), endpoint_d_b.get(), endpoint_d_c.get()}) {
EXPECT_EQ(2u * 1024u * 1024u, endpoint->bytes_received());
EXPECT_FALSE(endpoint->wrong_data_received());
}
}
}
} | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/simulator/quic_endpoint.cc | https://github.com/google/quiche/blob/6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6/quiche/quic/test_tools/simulator/quic_endpoint_test.cc | 6fe69b2cf77d5fc175a729bc7a6c322a6388b8b6 |
23a79605-f8d1-470f-b2fc-6fc4ba7ce262 | cpp | tensorflow/tensorflow | hlo_dfs_reachability | third_party/xla/xla/hlo/ir/hlo_dfs_reachability.cc | third_party/xla/xla/service/hlo_dfs_reachability_test.cc | #include "xla/hlo/ir/hlo_dfs_reachability.h"
#include <cstddef>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
bool HloDfsReachability::IsPresent(const HloInstruction* instruction) const {
return instruction_to_idx_.contains(instruction);
}
bool HloDfsReachability::IsReachable(const HloInstruction* from,
const HloInstruction* to) const {
if (from == to) {
return true;
}
  // Nothing can reach `to` if it has neither data operands nor control
  // predecessors.
  if (to->operand_count() == 0 && to->control_predecessors().empty()) {
    return false;
  }
  // Instructions are numbered in post order, so every predecessor of an
  // instruction has a smaller index. If `to` precedes `from`, `from` cannot
  // reach it; otherwise the visited set only needs to cover the index range
  // [target_node_idx, dfs_root_idx].
  size_t target_node_idx = instruction_to_idx_.at(from);
  size_t dfs_root_idx = instruction_to_idx_.at(to);
  if (dfs_root_idx < target_node_idx) {
    return false;
  }
  // DFS backwards from `to` through operand and control edges; the lambda
  // returns true only when it hits `from`.
  llvm::SmallVector<const HloInstruction*> stack{to};
  llvm::BitVector visited_idxs(1 + (dfs_root_idx - target_node_idx));
  visited_idxs.set(dfs_root_idx - target_node_idx);
  auto check_and_enqueue = [&](const HloInstruction* instr) {
if (instr == from) {
return true;
}
size_t instr_idx = instruction_to_idx_.at(instr);
if (instr_idx < target_node_idx) {
return false;
}
size_t visited_idx = instr_idx - target_node_idx;
if (visited_idxs.test(visited_idx)) {
return false;
}
visited_idxs.set(visited_idx);
stack.push_back(instr);
return false;
};
while (!stack.empty()) {
const HloInstruction* instr = stack.pop_back_val();
if (absl::c_any_of(instr->operands(), check_and_enqueue) ||
absl::c_any_of(instr->control_predecessors(), check_and_enqueue)) {
return true;
}
}
return false;
}
bool HloDfsReachability::IsConnected(const HloInstruction* a,
const HloInstruction* b) const {
return IsReachable(a, b) || IsReachable(b, a);
}
std::unique_ptr<HloDfsReachability> HloDfsReachability::Build(
const HloComputation* computation) {
auto res = std::make_unique<HloDfsReachability>();
  HloComputation::ChannelDependencies empty_channel_dependencies;
  // Number instructions in post order; IsReachable relies on operands and
  // control predecessors receiving smaller indices than their users.
  std::vector<HloInstruction*> instructions =
      computation->MakeInstructionPostOrder(empty_channel_dependencies);
res->instruction_to_idx_.reserve(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
res->instruction_to_idx_[instructions[i]] = i;
}
return res;
}
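// Illustrative usage sketch (not part of the original file); `computation`,
// `a`, and `b` are assumed to be a valid HloComputation* and two of its
// instructions:
//
//   std::unique_ptr<HloDfsReachability> reachability =
//       HloDfsReachability::Build(computation);
//   if (reachability->IsReachable(a, b)) {
//     // There is a data/control path from `a` to `b`.
//   }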
} | #include "xla/hlo/ir/hlo_dfs_reachability.h"
#include <cstddef>
#include <memory>
#include <string>
#include <string_view>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal_util.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class HloDfsReachabilityTest : public HloTestBase {};
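// Exercises reachability on the following DAG (a control edge add ~> exp is
// added below):
//   add    = constant1 + constant2
//   negate = -constant2
//   exp    = exp(negate)
//   mul    = add * exp      (root)
//   copy   = copy(exp)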
TEST_F(HloDfsReachabilityTest, NonTrivialReachability) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32, HloOpcode::kAdd, constant1, constant2));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kNegate, constant2));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kMultiply, add, exp));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kCopy, exp));
auto module = CreateNewVerifiedModule();
auto computation =
module->AddEntryComputation(builder.Build(mul));
TF_CHECK_OK(add->AddControlDependencyTo(exp));
auto reachability = HloDfsReachability::Build(computation);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_TRUE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_TRUE(reachability->IsReachable(constant1, copy));
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_TRUE(reachability->IsReachable(constant2, negate));
EXPECT_TRUE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_TRUE(reachability->IsReachable(constant2, copy));
EXPECT_FALSE(reachability->IsReachable(exp, constant1));
EXPECT_FALSE(reachability->IsReachable(exp, constant2));
EXPECT_FALSE(reachability->IsReachable(exp, add));
EXPECT_FALSE(reachability->IsReachable(exp, negate));
EXPECT_TRUE(reachability->IsReachable(exp, exp));
EXPECT_TRUE(reachability->IsReachable(exp, mul));
EXPECT_TRUE(reachability->IsReachable(exp, copy));
EXPECT_FALSE(reachability->IsReachable(mul, constant1));
EXPECT_FALSE(reachability->IsReachable(mul, constant2));
EXPECT_FALSE(reachability->IsReachable(mul, add));
EXPECT_FALSE(reachability->IsReachable(mul, negate));
EXPECT_FALSE(reachability->IsReachable(mul, exp));
EXPECT_TRUE(reachability->IsReachable(mul, mul));
EXPECT_FALSE(reachability->IsReachable(mul, copy));
EXPECT_TRUE(reachability->IsConnected(constant1, copy));
EXPECT_TRUE(reachability->IsConnected(copy, constant1));
EXPECT_FALSE(reachability->IsConnected(negate, add));
EXPECT_FALSE(reachability->IsConnected(add, negate));
}
TEST_F(HloDfsReachabilityTest, ChannelReachability) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
HloComputation::Builder builder("ChannelReachability");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto token0 = builder.AddInstruction(HloInstruction::CreateToken());
auto send =
builder.AddInstruction(HloInstruction::CreateSend(param, token0, 1));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
auto token1 = builder.AddInstruction(HloInstruction::CreateToken());
auto recv =
builder.AddInstruction(HloInstruction::CreateRecv(shape, token1, 1));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
auto module = CreateNewVerifiedModule();
module->mutable_config().set_use_spmd_partitioning(false);
module->mutable_config().set_static_device_assignment(DeviceAssignment(1, 2));
auto computation = module->AddEntryComputation(builder.Build(recv_done));
auto reachability = HloDfsReachability::Build(computation);
EXPECT_FALSE(reachability->IsReachable(param, recv_done));
EXPECT_FALSE(reachability->IsReachable(send, recv));
EXPECT_FALSE(reachability->IsReachable(send_done, recv));
}
class HloDfsReachabilityBenchmark {
public:
HloDfsReachabilityBenchmark(int size, std::string_view name) : name_(name) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(name);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
HloInstruction* prev = constant;
for (int i = 1; i < size; ++i) {
prev = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, prev));
}
HloModuleConfig hlo_config;
module_ = std::make_unique<HloModule>(name_, hlo_config);
computation_ =
module_->AddEntryComputation(builder.Build(prev));
}
std::unique_ptr<HloDfsReachability> Build() {
return HloDfsReachability::Build(computation_);
}
const HloComputation* computation() { return computation_; }
private:
std::unique_ptr<HloModule> module_;
HloComputation* computation_;
const std::string name_;
};
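// The benchmarks build a linear chain of `size` kExp instructions: Build
// measures construction cost amortized per node, and Check measures single
// reachability queries between instructions at varying distances.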
void BM_HloDfsReachabilityBuild(benchmark::State& state) {
int num_nodes = state.range(0);
HloDfsReachabilityBenchmark bm(num_nodes, state.name());
while (state.KeepRunningBatch(num_nodes)) {
benchmark::DoNotOptimize(bm.Build());
}
}
void BM_HloDfsReachabilityCheck(benchmark::State& state) {
size_t size = state.range(0);
HloDfsReachabilityBenchmark bm(size, state.name());
auto reachability = bm.Build();
auto instrs = bm.computation()->MakeInstructionPostOrder();
size_t i = 0;
for (auto s : state) {
size_t from = i % size;
size_t to = (++i + size / 2) % size;
reachability->IsReachable(instrs[from], instrs[to]);
}
}
#define BM_ARGS Arg(1)->Arg(64)->Arg(128)->Arg(256)->Range(512, 256 * 1024)
BENCHMARK(BM_HloDfsReachabilityBuild)->BM_ARGS;
BENCHMARK(BM_HloDfsReachabilityCheck)->BM_ARGS;
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/hlo/ir/hlo_dfs_reachability.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/hlo_dfs_reachability_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |
d3540fdf-f66a-4c00-8171-5490a9676f89 | cpp | google/cel-cpp | activation | eval/public/activation.cc | eval/public/activation_test.cc | #include "eval/public/activation.h"
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "eval/public/cel_function.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
absl::optional<CelValue> Activation::FindValue(absl::string_view name,
google::protobuf::Arena* arena) const {
auto entry = value_map_.find(name);
if (entry == value_map_.end()) {
return {};
}
return entry->second.RetrieveValue(arena);
}
absl::Status Activation::InsertFunction(std::unique_ptr<CelFunction> function) {
  // Reject a new overload whose signature shape collides with an existing
  // one, since call-time resolution could not disambiguate them.
  auto& overloads = function_map_[function->descriptor().name()];
for (const auto& overload : overloads) {
if (overload->descriptor().ShapeMatches(function->descriptor())) {
return absl::InvalidArgumentError(
"Function with same shape already defined in activation");
}
}
overloads.emplace_back(std::move(function));
return absl::OkStatus();
}
std::vector<const CelFunction*> Activation::FindFunctionOverloads(
absl::string_view name) const {
const auto map_entry = function_map_.find(name);
std::vector<const CelFunction*> overloads;
if (map_entry == function_map_.end()) {
return overloads;
}
overloads.resize(map_entry->second.size());
std::transform(map_entry->second.begin(), map_entry->second.end(),
overloads.begin(),
[](const auto& func) { return func.get(); });
return overloads;
}
bool Activation::RemoveFunctionEntries(
const CelFunctionDescriptor& descriptor) {
auto map_entry = function_map_.find(descriptor.name());
if (map_entry == function_map_.end()) {
return false;
}
std::vector<std::unique_ptr<CelFunction>>& overloads = map_entry->second;
bool funcs_removed = false;
auto func_iter = overloads.begin();
while (func_iter != overloads.end()) {
if (descriptor.ShapeMatches(func_iter->get()->descriptor())) {
func_iter = overloads.erase(func_iter);
funcs_removed = true;
} else {
++func_iter;
}
}
if (overloads.empty()) {
function_map_.erase(map_entry);
}
return funcs_removed;
}
void Activation::InsertValue(absl::string_view name, const CelValue& value) {
value_map_.try_emplace(name, ValueEntry(value));
}
void Activation::InsertValueProducer(
absl::string_view name, std::unique_ptr<CelValueProducer> value_producer) {
value_map_.try_emplace(name, ValueEntry(std::move(value_producer)));
}
bool Activation::RemoveValueEntry(absl::string_view name) {
  // map erase returns the number of removed entries (0 or 1 here).
  return value_map_.erase(name) != 0;
}
bool Activation::ClearValueEntry(absl::string_view name) {
auto entry = value_map_.find(name);
if (entry == value_map_.end()) {
return false;
}
return entry->second.ClearValue();
}
int Activation::ClearCachedValues() {
  int n = 0;
  // Only producer-backed entries cache values; directly inserted values are
  // left untouched.
  for (auto& entry : value_map_) {
    if (entry.second.HasProducer()) {
if (entry.second.ClearValue()) {
n++;
}
}
}
return n;
}
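// Illustrative usage sketch (not part of the original file); the variable
// name "request.id" is hypothetical:
//
//   google::protobuf::Arena arena;
//   Activation activation;
//   activation.InsertValue("request.id", CelValue::CreateInt64(42));
//   absl::optional<CelValue> value =
//       activation.FindValue("request.id", &arena);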
}
}
}
} | #include "eval/public/activation.h"
#include <memory>
#include <string>
#include <utility>
#include "eval/eval/attribute_trail.h"
#include "eval/eval/ident_step.h"
#include "eval/public/cel_attribute.h"
#include "eval/public/cel_function.h"
#include "extensions/protobuf/memory_manager.h"
#include "internal/status_macros.h"
#include "internal/testing.h"
#include "parser/parser.h"
namespace google {
namespace api {
namespace expr {
namespace runtime {
namespace {
using ::absl_testing::StatusIs;
using ::cel::extensions::ProtoMemoryManager;
using ::google::api::expr::v1alpha1::Expr;
using ::google::protobuf::Arena;
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using ::testing::Property;
using ::testing::Return;
class MockValueProducer : public CelValueProducer {
public:
MOCK_METHOD(CelValue, Produce, (Arena*), (override));
};
class ConstCelFunction : public CelFunction {
public:
explicit ConstCelFunction(absl::string_view name)
: CelFunction({std::string(name), false, {}}) {}
explicit ConstCelFunction(const CelFunctionDescriptor& desc)
: CelFunction(desc) {}
absl::Status Evaluate(absl::Span<const CelValue> args, CelValue* output,
google::protobuf::Arena* arena) const override {
*output = CelValue::CreateInt64(42);
return absl::OkStatus();
}
};
TEST(ActivationTest, CheckValueInsertFindAndRemove) {
Activation activation;
Arena arena;
activation.InsertValue("value42", CelValue::CreateInt64(42));
EXPECT_FALSE(activation.FindValue("value43", &arena));
EXPECT_TRUE(activation.FindValue("value42", &arena));
CelValue value = activation.FindValue("value42", &arena).value();
EXPECT_THAT(value.Int64OrDie(), Eq(42));
EXPECT_FALSE(activation.RemoveValueEntry("value43"));
EXPECT_TRUE(activation.RemoveValueEntry("value42"));
EXPECT_FALSE(activation.FindValue("value42", &arena));
}
TEST(ActivationTest, CheckValueProducerInsertFindAndRemove) {
const std::string kValue = "42";
auto producer = std::make_unique<MockValueProducer>();
google::protobuf::Arena arena;
ON_CALL(*producer, Produce(&arena))
.WillByDefault(Return(CelValue::CreateString(&kValue)));
EXPECT_CALL(*producer, Produce(&arena)).Times(1);
Activation activation;
activation.InsertValueProducer("value42", std::move(producer));
EXPECT_FALSE(activation.FindValue("value43", &arena));
for (int i = 0; i < 2; i++) {
auto opt_value = activation.FindValue("value42", &arena);
EXPECT_TRUE(opt_value.has_value()) << " for pass " << i;
CelValue value = opt_value.value();
EXPECT_THAT(value.StringOrDie().value(), Eq(kValue)) << " for pass " << i;
}
EXPECT_TRUE(activation.RemoveValueEntry("value42"));
EXPECT_FALSE(activation.FindValue("value42", &arena));
}
TEST(ActivationTest, CheckInsertFunction) {
Activation activation;
ASSERT_OK(activation.InsertFunction(
std::make_unique<ConstCelFunction>("ConstFunc")));
auto overloads = activation.FindFunctionOverloads("ConstFunc");
EXPECT_THAT(overloads,
ElementsAre(Property(
&CelFunction::descriptor,
Property(&CelFunctionDescriptor::name, Eq("ConstFunc")))));
EXPECT_THAT(activation.InsertFunction(
std::make_unique<ConstCelFunction>("ConstFunc")),
StatusIs(absl::StatusCode::kInvalidArgument,
HasSubstr("Function with same shape")));
EXPECT_THAT(activation.FindFunctionOverloads("ConstFunc0"), IsEmpty());
}
TEST(ActivationTest, CheckRemoveFunction) {
Activation activation;
ASSERT_OK(activation.InsertFunction(std::make_unique<ConstCelFunction>(
CelFunctionDescriptor{"ConstFunc", false, {CelValue::Type::kInt64}})));
EXPECT_OK(activation.InsertFunction(std::make_unique<ConstCelFunction>(
CelFunctionDescriptor{"ConstFunc", false, {CelValue::Type::kUint64}})));
auto overloads = activation.FindFunctionOverloads("ConstFunc");
EXPECT_THAT(
overloads,
ElementsAre(
Property(&CelFunction::descriptor,
Property(&CelFunctionDescriptor::name, Eq("ConstFunc"))),
Property(&CelFunction::descriptor,
Property(&CelFunctionDescriptor::name, Eq("ConstFunc")))));
EXPECT_TRUE(activation.RemoveFunctionEntries(
{"ConstFunc", false, {CelValue::Type::kAny}}));
EXPECT_THAT(activation.FindFunctionOverloads("ConstFunc"), IsEmpty());
}
TEST(ActivationTest, CheckValueProducerClear) {
const std::string kValue1 = "42";
const std::string kValue2 = "43";
auto producer1 = std::make_unique<MockValueProducer>();
auto producer2 = std::make_unique<MockValueProducer>();
google::protobuf::Arena arena;
ON_CALL(*producer1, Produce(&arena))
.WillByDefault(Return(CelValue::CreateString(&kValue1)));
ON_CALL(*producer2, Produce(&arena))
.WillByDefault(Return(CelValue::CreateString(&kValue2)));
EXPECT_CALL(*producer1, Produce(&arena)).Times(2);
EXPECT_CALL(*producer2, Produce(&arena)).Times(1);
Activation activation;
activation.InsertValueProducer("value42", std::move(producer1));
activation.InsertValueProducer("value43", std::move(producer2));
auto opt_value = activation.FindValue("value42", &arena);
EXPECT_TRUE(opt_value.has_value());
EXPECT_THAT(opt_value->StringOrDie().value(), Eq(kValue1));
EXPECT_TRUE(activation.ClearValueEntry("value42"));
EXPECT_FALSE(activation.ClearValueEntry("value43"));
auto opt_value2 = activation.FindValue("value43", &arena);
EXPECT_TRUE(opt_value2.has_value());
EXPECT_THAT(opt_value2->StringOrDie().value(), Eq(kValue2));
EXPECT_EQ(1, activation.ClearCachedValues());
EXPECT_FALSE(activation.ClearValueEntry("value42"));
EXPECT_FALSE(activation.ClearValueEntry("value43"));
auto opt_value3 = activation.FindValue("value42", &arena);
EXPECT_TRUE(opt_value3.has_value());
EXPECT_THAT(opt_value3->StringOrDie().value(), Eq(kValue1));
EXPECT_EQ(1, activation.ClearCachedValues());
}
TEST(ActivationTest, ErrorPathTest) {
Activation activation;
Expr expr;
auto* select_expr = expr.mutable_select_expr();
select_expr->set_field("ip");
Expr* ident_expr = select_expr->mutable_operand();
ident_expr->mutable_ident_expr()->set_name("destination");
const CelAttributePattern destination_ip_pattern(
"destination",
{CreateCelAttributeQualifierPattern(CelValue::CreateStringView("ip"))});
AttributeTrail trail("destination");
trail =
trail.Step(CreateCelAttributeQualifier(CelValue::CreateStringView("ip")));
ASSERT_EQ(destination_ip_pattern.IsMatch(trail.attribute()),
CelAttributePattern::MatchType::FULL);
EXPECT_TRUE(activation.missing_attribute_patterns().empty());
activation.set_missing_attribute_patterns({destination_ip_pattern});
EXPECT_EQ(
activation.missing_attribute_patterns()[0].IsMatch(trail.attribute()),
CelAttributePattern::MatchType::FULL);
}
}
}
}
}
} | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/activation.cc | https://github.com/google/cel-cpp/blob/4552db5798fb0853b131b783d8875794334fae7f/eval/public/activation_test.cc | 4552db5798fb0853b131b783d8875794334fae7f |
db986006-b216-4ed1-9076-c2e7bc628ce6 | cpp | tensorflow/tensorflow | bitwise_xor | tensorflow/lite/kernels/bitwise_xor.cc | tensorflow/lite/kernels/bitwise_xor_test.cc | #include "tensorflow/lite/core/c/c_api_types.h"
#include "tensorflow/lite/core/c/common.h"
#include "tensorflow/lite/kernels/internal/reference/binary_function.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace bitwise_xor {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
struct OpData {
bool requires_broadcast = false;
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
return data;
}
void Free(TfLiteContext* context, void* buffer) {
delete reinterpret_cast<OpData*>(buffer);
}
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
  output->type = input1->type;
  // Differing shapes must be broadcast-compatible; remember whether the
  // slower broadcasting kernels are needed.
  data->requires_broadcast = !HaveSameShapes(input1, input2);
TfLiteIntArray* output_size = nullptr;
if (data->requires_broadcast) {
TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast(
context, input1, input2, &output_size));
} else {
output_size = TfLiteIntArrayCopy(input1->dims);
}
return context->ResizeTensor(context, output, output_size);
}
template <typename T>
T BitwiseXor(T x, T y) {
return x ^ y;
}
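// Example: BitwiseXor<int8_t>(5, 3) == 6, since 0b0101 ^ 0b0011 == 0b0110.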
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
OpData* data = reinterpret_cast<OpData*>(node->user_data);
const TfLiteTensor* input1;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor1, &input1));
const TfLiteTensor* input2;
TF_LITE_ENSURE_OK(context,
GetInputSafe(context, node, kInputTensor2, &input2));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context,
GetOutputSafe(context, node, kOutputTensor, &output));
const TfLiteType type = output->type;
  switch (type) {
    // XOR operates on the raw bit pattern, so each signed case below also
    // serves the unsigned type of the same width.
    case kTfLiteUInt8:
case kTfLiteInt8: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int8_t, int8_t, int8_t>(
GetTensorShape(input1), GetTensorData<int8_t>(input1),
GetTensorShape(input2), GetTensorData<int8_t>(input2),
GetTensorShape(output), GetTensorData<int8_t>(output), BitwiseXor);
} else {
reference_ops::BinaryFunction<int8_t, int8_t, int8_t>(
GetTensorShape(input1), GetTensorData<int8_t>(input1),
GetTensorShape(input2), GetTensorData<int8_t>(input2),
GetTensorShape(output), GetTensorData<int8_t>(output), BitwiseXor);
}
break;
}
case kTfLiteUInt16:
case kTfLiteInt16: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int16_t, int16_t, int16_t>(
GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), BitwiseXor);
} else {
reference_ops::BinaryFunction<int16_t, int16_t, int16_t>(
GetTensorShape(input1), GetTensorData<int16_t>(input1),
GetTensorShape(input2), GetTensorData<int16_t>(input2),
GetTensorShape(output), GetTensorData<int16_t>(output), BitwiseXor);
}
break;
}
case kTfLiteUInt32:
case kTfLiteInt32: {
if (data->requires_broadcast) {
reference_ops::BroadcastBinaryFunction4DSlow<int32_t, int32_t, int32_t>(
GetTensorShape(input1), GetTensorData<int32_t>(input1),
GetTensorShape(input2), GetTensorData<int32_t>(input2),
GetTensorShape(output), GetTensorData<int32_t>(output), BitwiseXor);
} else {
reference_ops::BinaryFunction<int32_t, int32_t, int32_t>(
GetTensorShape(input1), GetTensorData<int32_t>(input1),
GetTensorShape(input2), GetTensorData<int32_t>(input2),
GetTensorShape(output), GetTensorData<int32_t>(output), BitwiseXor);
}
break;
}
default:
TF_LITE_KERNEL_LOG(context,
"BitwiseXor currently only supports "
"8-bit/16-bit/32-bit integer/unsigned integer, got %s",
TfLiteTypeGetName(type));
return kTfLiteError;
}
return kTfLiteOk;
}
}
TfLiteRegistration* Register_BITWISE_XOR() {
static TfLiteRegistration r = {bitwise_xor::Init, bitwise_xor::Free,
bitwise_xor::Prepare, bitwise_xor::Eval};
return &r;
}
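// Illustrative registration sketch (not part of the original file); assumes a
// tflite::MutableOpResolver named `resolver`:
//
//   resolver.AddBuiltin(BuiltinOperator_BITWISE_XOR,
//                       ops::builtin::Register_BITWISE_XOR());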
}
}
} | #include <cstdint>
#include <initializer_list>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
using ::testing::ElementsAreArray;
class BitwiseXorOpModel : public SingleOpModel {
public:
BitwiseXorOpModel(std::initializer_list<int> input1_shape,
std::initializer_list<int> input2_shape,
TensorType tensor_type) {
input1_ = AddInput(tensor_type);
input2_ = AddInput(tensor_type);
output_ = AddOutput(tensor_type);
SetBuiltinOp(BuiltinOperator_BITWISE_XOR, BuiltinOptions_BitwiseXorOptions,
CreateBitwiseXorOptions(builder_).Union());
BuildInterpreter({input1_shape, input2_shape});
}
int input1() const { return input1_; }
int input2() const { return input2_; }
template <typename T>
std::vector<T> GetOutput() {
return ExtractVector<T>(output_);
}
std::vector<int> GetOutputShape() { return GetTensorShape(output_); }
protected:
int input1_;
int input2_;
int output_;
};
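// Each test below routes two input tensors through a single BITWISE_XOR op
// and checks the element-wise XOR against an expected vector.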
TEST(BitwiseXorOpTest, SimpleTestInt8) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT8);
model.PopulateTensor<int8_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<int8_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int8_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestInt16) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT16);
model.PopulateTensor<int16_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<int16_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int16_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestInt32) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_INT32);
model.PopulateTensor<int32_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<int32_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestUInt8) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT8);
model.PopulateTensor<uint8_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<uint8_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint8_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestUInt16) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT16);
model.PopulateTensor<uint16_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<uint16_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint16_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, SimpleTestUInt32) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 4}, TensorType_UINT32);
model.PopulateTensor<uint32_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<uint32_t>(model.input2(), {5, 0, 7, 11});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({5, 5, 4, 5}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, BroadcastLhs) {
BitwiseXorOpModel model({1, 1, 1, 1}, {1, 1, 1, 4}, TensorType_INT32);
model.PopulateTensor<int32_t>(model.input1(), {5});
model.PopulateTensor<int32_t>(model.input2(), {0, -5, -3, 14});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<int32_t>(), ElementsAreArray({5, -2, -8, 11}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
TEST(BitwiseXorOpTest, BroadcastRhs) {
BitwiseXorOpModel model({1, 1, 1, 4}, {1, 1, 1, 1}, TensorType_UINT32);
model.PopulateTensor<uint32_t>(model.input1(), {0, 5, 3, 14});
model.PopulateTensor<uint32_t>(model.input2(), {5});
ASSERT_EQ(model.Invoke(), kTfLiteOk);
EXPECT_THAT(model.GetOutput<uint32_t>(), ElementsAreArray({5, 0, 6, 11}));
EXPECT_THAT(model.GetOutputShape(), ElementsAreArray({1, 1, 1, 4}));
}
}
} | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bitwise_xor.cc | https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/kernels/bitwise_xor_test.cc | 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea |