Code | Unit Test | __index_level_0__ |
---|---|---|
#ifndef XLA_SERVICE_GPU_GEMV_REWRITER_H_
#define XLA_SERVICE_GPU_GEMV_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
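// Rewrites a matrix-vector or vector-matrix multiplication (gemv) into a
// matrix-matrix multiplication (gemm) by bitcasting a degenerate size-1
// non-contracting dimension onto the vector operand and the dot result.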
class GemvRewriter : public HloModulePass {
public:
absl::string_view name() const override { return "gemv-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gemv_rewriter.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
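// Returns a descending (normalized) layout with one more dimension than
// `layout`; returns an error if `layout` is not already normalized.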
absl::StatusOr<Layout> GetLayoutWithNewMinorMostDimension(
const Layout& layout) {
if (!LayoutUtil::IsMonotonicWithDim0Major(layout)) {
return absl::InvalidArgumentError("Layout is not normalized.");
}
return LayoutUtil::MakeDescendingLayout(layout.minor_to_major_size() + 1);
}
class GemvRewriterVisitor : public DfsHloRewriteVisitor {
public:
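// Rewrites dots where exactly one operand is missing a non-contracting
// dimension: bitcast that operand to add a trailing size-1 dimension, emit a
// dot with the extended output shape, and bitcast back to the original shape.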
absl::Status HandleDot(HloInstruction* instr) override {
HloDotInstruction* dot = Cast<HloDotInstruction>(instr);
const DotDimensionNumbers& dim_numbers = dot->dot_dimension_numbers();
HloInstruction* lhs = dot->mutable_operand(0);
HloInstruction* rhs = dot->mutable_operand(1);
bool lhs_has_non_contracting_dim =
lhs->shape().rank() ==
dim_numbers.lhs_batch_dimensions_size() +
dim_numbers.lhs_contracting_dimensions_size() + 1;
bool rhs_has_non_contracting_dim =
rhs->shape().rank() ==
dim_numbers.rhs_batch_dimensions_size() +
dim_numbers.rhs_contracting_dimensions_size() + 1;
if (lhs_has_non_contracting_dim && rhs_has_non_contracting_dim) {
return absl::OkStatus();
}
if (!lhs_has_non_contracting_dim && !rhs_has_non_contracting_dim) {
return absl::OkStatus();
}
if (dot->shape().is_dynamic()) {
return absl::OkStatus();
}
changed_ = true;
HloComputation* computation = dot->parent();
HloInstruction* new_lhs = lhs;
if (!lhs_has_non_contracting_dim) {
const Shape& lhs_shape = lhs->shape();
absl::Span<const int64_t> lhs_dimensions = lhs_shape.dimensions();
std::vector<int64_t> new_lhs_dimensions(lhs_dimensions.begin(),
lhs_dimensions.end());
new_lhs_dimensions.push_back(1);
Shape new_lhs_shape(
lhs_shape.element_type(), new_lhs_dimensions,
absl::InlinedVector<bool, 4>(new_lhs_dimensions.size(), false),
{});
TF_ASSIGN_OR_RETURN(
*new_lhs_shape.mutable_layout(),
GetLayoutWithNewMinorMostDimension(lhs_shape.layout()));
new_lhs = computation->AddInstruction(
HloInstruction::CreateBitcast(new_lhs_shape, lhs));
}
HloInstruction* new_rhs = rhs;
if (!rhs_has_non_contracting_dim) {
const Shape& rhs_shape = rhs->shape();
absl::Span<const int64_t> rhs_dimensions = rhs_shape.dimensions();
std::vector<int64_t> new_rhs_dimensions(rhs_dimensions.begin(),
rhs_dimensions.end());
new_rhs_dimensions.push_back(1);
Shape new_rhs_shape(
rhs_shape.element_type(), new_rhs_dimensions,
absl::InlinedVector<bool, 4>(new_rhs_dimensions.size(), false),
{});
TF_ASSIGN_OR_RETURN(
*new_rhs_shape.mutable_layout(),
GetLayoutWithNewMinorMostDimension(rhs_shape.layout()));
new_rhs = computation->AddInstruction(
HloInstruction::CreateBitcast(new_rhs_shape, rhs));
}
std::vector<int64_t> new_out_dimensions;
new_out_dimensions.reserve(dot->shape().dimensions().size() + 1);
for (int64_t dim_size : dot->shape().dimensions()) {
new_out_dimensions.push_back(dim_size);
}
if (!lhs_has_non_contracting_dim) {
int non_contracting_dim_size = new_out_dimensions.back();
new_out_dimensions[new_out_dimensions.size() - 1] = 1;
new_out_dimensions.push_back(non_contracting_dim_size);
} else {
new_out_dimensions.push_back(1);
}
Shape new_out_shape(
dot->shape().element_type(), new_out_dimensions,
absl::InlinedVector<bool, 4>(new_out_dimensions.size(), false),
{});
TF_ASSIGN_OR_RETURN(
*new_out_shape.mutable_layout(),
GetLayoutWithNewMinorMostDimension(dot->shape().layout()));
HloInstruction* new_dot =
computation->AddInstruction(HloInstruction::CreateDot(
new_out_shape, new_lhs, new_rhs, dot->dot_dimension_numbers(),
dot->precision_config()));
HloInstruction* bitcast = computation->AddInstruction(
HloInstruction::CreateBitcast(dot->shape(), new_dot));
return computation->ReplaceInstruction(dot, bitcast);
}
bool changed() const { return changed_; }
private:
bool changed_ = false;
};
}
absl::StatusOr<bool> GemvRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
GemvRewriterVisitor gemv_rewriter;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(&gemv_rewriter));
}
return gemv_rewriter.changed();
}
}
} | #include "xla/service/gpu/gemv_rewriter.h"
#include <memory>
#include <optional>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class GemvRewriterTest : public HloTestBase {};
TEST_F(GemvRewriterTest, RewriteMatrixVectorMultiplicationToGemm) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[32,7] parameter(0)
p1 = f32[7] parameter(1)
ROOT d = f32[32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
TEST_F(GemvRewriterTest, RewriteVectorMatrixMultiplicationToGemm) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[7] parameter(0)
p1 = f32[7,32] parameter(1)
ROOT d = f32[32] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
TEST_F(GemvRewriterTest, RewriteMatrixVectorMultiplicationWithBatch) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[2,5,32,7] parameter(0)
p1 = f32[2,5,7] parameter(1)
ROOT d = f32[2,5,32] dot(p0, p1),
lhs_batch_dims={0,1}, rhs_batch_dims={0,1},
lhs_contracting_dims={3}, rhs_contracting_dims={2}
})";
const char* expected = R"()
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected);
}
TEST_F(GemvRewriterTest, DotNotRewriteVectorVectorMultiplication) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[7] parameter(0)
p1 = f32[7] parameter(1)
ROOT d = f32[] dot(p0, p1),
lhs_contracting_dims={0}, rhs_contracting_dims={0}
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), std::nullopt);
}
TEST_F(GemvRewriterTest, DotNotRewriteMatrixMatrixMultiplication) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[5,7] parameter(0)
p1 = f32[7,32] parameter(1)
ROOT d = f32[5,32] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
RunAndFilecheckHloRewrite(hlo, GemvRewriter(), std::nullopt);
}
TEST_F(GemvRewriterTest, DoNotRewriteDotsWithNonNormalizedLayout) {
const char* hlo = R"(
HloModule m
ENTRY e {
p0 = f32[5,32,7]{2,1,0} parameter(0)
p1 = f32[5,7]{0,1} parameter(1)
ROOT d = f32[5,32]{0,1} dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo));
GemvRewriter rewriter;
absl::StatusOr<bool> result = this->RunHloPass(&rewriter, module.get());
EXPECT_FALSE(result.ok());
EXPECT_EQ(result.status().message(), "Layout is not normalized.");
}
}
} | 2,087 |
#ifndef XLA_SERVICE_GPU_STREAM_ATTRIBUTE_ASYNC_WRAPPER_H_
#define XLA_SERVICE_GPU_STREAM_ATTRIBUTE_ASYNC_WRAPPER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla::gpu {
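// Wraps instructions whose GpuBackendConfig carries a non-default
// operation_queue_id into async-start/async-done pairs that run on the
// "parallel" execution thread.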
class StreamAttributeAsyncWrapper : public HloModulePass {
public:
inline static constexpr char kParallelExecutionThread[] = "parallel";
absl::string_view name() const override {
return "async-stream-attribute-wrapper";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/stream_attribute_async_wrapper.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
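// Wraps `instr` into an async-start/async-done pair on the parallel execution
// thread if it is assigned to a non-default operation queue, clearing
// force_earliest_schedule on the done instruction. Returns true if a rewrite
// happened.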
static absl::StatusOr<bool> AsynchronizeInstruction(HloInstruction* instr) {
auto instr_gpu_config = instr->backend_config<GpuBackendConfig>();
if (!instr_gpu_config.ok() || instr_gpu_config->operation_queue_id() ==
Thunk::kDefaultExecutionStreamId.value()) {
return false;
}
HloComputation* computation = instr->parent();
TF_ASSIGN_OR_RETURN(
HloInstruction * done,
computation->CreateAsyncInstructions(
instr, {}, StreamAttributeAsyncWrapper::kParallelExecutionThread,
true));
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
done->backend_config<GpuBackendConfig>());
gpu_config.set_force_earliest_schedule(false);
TF_RETURN_IF_ERROR(done->set_backend_config(gpu_config));
VLOG(5) << "Created async instruction: " << done->ToString();
return true;
}
}
absl::StatusOr<bool> StreamAttributeAsyncWrapper::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
2, "StreamAttributeAsyncWrapper::Run(), before:\n" + module->ToString());
bool changed = false;
for (const HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instr : comp->instructions()) {
TF_ASSIGN_OR_RETURN(bool result, AsynchronizeInstruction(instr));
changed |= result;
}
}
XLA_VLOG_LINES(
2, "StreamAttributeAsyncWrapper::Run(), after:\n" + module->ToString());
return changed;
}
} | #include "xla/service/gpu/stream_attribute_async_wrapper.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using StreamAttributeAsyncWrapperTest = HloTestBase;
TEST_F(StreamAttributeAsyncWrapperTest, NonDefaultOpIsWrapped) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
ENTRY entry {
p1_32 = f32[1] parameter(0)
p2_32 = f32[1] parameter(1)
add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[], "force_earliest_schedule":true}
ROOT exp_32 = f32[1] exponential(add_32), backend_config={"operation_queue_id":"0", "wait_on_operation_queues":[1]}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAsyncWrapper async_wrapper;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, async_wrapper.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* producer =
module->entry_computation()->root_instruction()->operand(0);
EXPECT_EQ(producer->opcode(), HloOpcode::kAsyncDone);
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig done_gpu_config,
producer->backend_config<GpuBackendConfig>());
EXPECT_EQ(done_gpu_config.force_earliest_schedule(), false);
const HloInstruction* producer_start = producer->operand(0);
EXPECT_EQ(producer_start->opcode(), HloOpcode::kAsyncStart);
const xla::HloAsyncInstruction* async =
Cast<HloAsyncInstruction>(producer_start);
EXPECT_EQ(async->async_wrapped_opcode(), HloOpcode::kAdd);
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
async->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
EXPECT_EQ(gpu_config.force_earliest_schedule(), true);
EXPECT_EQ(async->async_execution_thread(), "parallel");
}
}
} | 2,088 |
#ifndef XLA_SERVICE_GPU_FUSION_PROCESS_DUMP_H_
#define XLA_SERVICE_GPU_FUSION_PROCESS_DUMP_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
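// Replays a recorded GPU fusion process: holds the fusion process dump proto,
// the HLO module as it was before fusion, and the device description, and
// lets callers step through the recorded fusion steps one at a time.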
class FusionProcessDump {
public:
static absl::StatusOr<FusionProcessDump> LoadFromFile(
const std::string& path);
static absl::StatusOr<FusionProcessDump> LoadFromData(
const std::string& data, absl::string_view format);
static absl::StatusOr<FusionProcessDump> LoadFromProto(
const FusionProcessDumpProto& fusion_process_dump_proto);
const FusionProcessDumpProto& proto() { return fusion_process_dump_proto_; }
HloModule* module() { return hlo_module_.get(); }
const se::DeviceDescription& device_info() { return device_info_; }
int64_t current_step_idx() { return current_step_idx_; }
HloComputation* GetCurrentComputation();
HloInstruction* GetInstructionWithName(absl::string_view name);
HloInstruction* GetProducer();
absl::InlinedVector<HloInstruction*, 2> GetConsumers();
HloInstruction* GetLastFusion() { return last_fusion_; }
const FusionStep& CurrentStep();
bool HasNext();
void Advance();
private:
FusionProcessDump(FusionProcessDumpProto fusion_process_dump_proto,
std::unique_ptr<HloModule> hlo_module,
se::DeviceDescription device_info,
absl::flat_hash_map<std::string, HloComputation*>
instruction_name_to_computation_map)
: fusion_process_dump_proto_(std::move(fusion_process_dump_proto)),
hlo_module_(std::move(hlo_module)),
device_info_(std::move(device_info)),
instruction_name_to_computation_map_(
std::move(instruction_name_to_computation_map)) {}
FusionProcessDumpProto fusion_process_dump_proto_;
std::unique_ptr<HloModule> hlo_module_;
se::DeviceDescription device_info_;
absl::flat_hash_map<std::string, HloComputation*>
instruction_name_to_computation_map_;
int64_t current_step_idx_ = 0;
HloInstruction* last_fusion_ = nullptr;
};
}
}
#endif
#include "xla/service/gpu/fusion_process_dump.h"
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tools/hlo_module_loader.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
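// Wraps `consumer` into a new loop fusion named `fusion_name`, unless the
// consumer is already a fusion instruction.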
HloInstruction* AddFusionInstruction(HloInstruction* producer,
HloInstruction* consumer,
HloComputation* computation,
std::string_view fusion_name) {
if (consumer->opcode() == HloOpcode::kFusion) {
return consumer;
}
auto kind = HloInstruction::FusionKind::kLoop;
auto fusion_instruction = computation->AddInstruction(
HloInstruction::CreateFusion(consumer->shape(), kind, consumer),
fusion_name);
TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction));
return fusion_instruction;
}
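// Fuses `producer` into `consumer` (merging if the producer is itself a
// fusion) and removes the producer once it no longer has users.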
HloInstruction* Fuse(HloInstruction* producer, HloInstruction* consumer,
HloComputation* computation,
std::string_view fusion_name) {
HloInstruction* fusion_instruction =
AddFusionInstruction(producer, consumer, computation, fusion_name);
if (producer->opcode() == HloOpcode::kFusion) {
fusion_instruction->MergeFusionInstruction(producer);
} else {
fusion_instruction->FuseInstruction(producer);
}
if (producer->user_count() == 0) {
TF_CHECK_OK(computation->RemoveInstruction(producer));
}
return fusion_instruction;
}
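// Returns the producer name recorded in the step, regardless of which step
// variant is present.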
absl::string_view GetProducerName(const FusionStep& step) {
if (step.has_fusion()) {
return step.fusion().producer_name();
}
if (step.has_update_priority()) {
return step.update_priority().producer_name();
}
if (step.has_producer_ineligible()) {
return step.producer_ineligible().producer_name();
}
LOG(FATAL) << "Producer name not found in the current step.";
}
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromFile(
const std::string& path) {
std::string format = std::string(tsl::io::Extension(path));
std::string data;
TF_RETURN_IF_ERROR(tsl::ReadFileToString(tsl::Env::Default(), path, &data));
return FusionProcessDump::LoadFromData(data, format);
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromData(
const std::string& data, absl::string_view format) {
FusionProcessDumpProto fusion_process_dump_proto;
if (format == "txt" || format == "pbtxt") {
if (!tsl::protobuf::TextFormat::ParseFromString(
data, &fusion_process_dump_proto)) {
return InvalidArgument("Failed to parse input as HLO protobuf text");
}
} else if (format == "pb") {
if (!fusion_process_dump_proto.ParseFromString(data)) {
return InvalidArgument("Failed to parse input as HLO protobuf binary");
}
} else {
return InvalidArgument(
"Invalid format from file extension: '%s'. Expected: txt, pb, or pbtxt",
format);
}
return FusionProcessDump::LoadFromProto(fusion_process_dump_proto);
}
absl::StatusOr<FusionProcessDump> FusionProcessDump::LoadFromProto(
const FusionProcessDumpProto& fusion_process_dump_proto) {
TF_ASSIGN_OR_RETURN(
auto module,
LoadModuleFromData(fusion_process_dump_proto.hlo_module_before_fusion(),
"txt"));
se::DeviceDescription gpu_device_info(
fusion_process_dump_proto.gpu_device_info());
absl::flat_hash_map<std::string, HloComputation*>
instruction_name_to_computation_map;
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instr : computation->instructions()) {
instruction_name_to_computation_map[instr->name()] = computation;
}
}
return FusionProcessDump(std::move(fusion_process_dump_proto),
std::move(module), std::move(gpu_device_info),
std::move(instruction_name_to_computation_map));
}
HloComputation* FusionProcessDump::GetCurrentComputation() {
return instruction_name_to_computation_map_.at(
GetProducerName(CurrentStep()));
}
HloInstruction* FusionProcessDump::GetInstructionWithName(
absl::string_view name) {
return instruction_name_to_computation_map_[name]->GetInstructionWithName(
name);
}
HloInstruction* FusionProcessDump::GetProducer() {
return GetInstructionWithName(GetProducerName(CurrentStep()));
}
absl::InlinedVector<HloInstruction*, 2> FusionProcessDump::GetConsumers() {
auto& step = CurrentStep();
if (step.has_fusion()) {
return {GetInstructionWithName(step.fusion().consumer_name())};
}
if (step.has_update_priority()) {
absl::InlinedVector<HloInstruction*, 2> consumers;
for (const auto& consumer_name : step.update_priority().consumer_names()) {
consumers.push_back(GetInstructionWithName(consumer_name));
}
return consumers;
}
return {};
}
const FusionStep& FusionProcessDump::CurrentStep() {
CHECK(HasNext());
return fusion_process_dump_proto_.fusion_steps(current_step_idx_);
}
bool FusionProcessDump::HasNext() {
return current_step_idx_ < fusion_process_dump_proto_.fusion_steps_size();
}
void FusionProcessDump::Advance() {
auto step = CurrentStep();
if (step.has_fusion()) {
const auto& fusion_step = step.fusion();
auto* computation = GetCurrentComputation();
HloInstruction* producer =
computation->GetInstructionWithName(fusion_step.producer_name());
HloInstruction* consumer =
computation->GetInstructionWithName(fusion_step.consumer_name());
HloInstruction* fusion =
Fuse(producer, consumer, computation, fusion_step.fusion_name());
instruction_name_to_computation_map_[fusion->name()] = computation;
last_fusion_ = fusion;
}
++current_step_idx_;
}
}
} | #include "xla/service/gpu/fusion_process_dump.h"
#include <string>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusion_process_dump.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
using FusionProcessDumpTest = HloTestBase;
void AddFusion(FusionProcessDumpProto& dump_proto,
const std::string& fusion_name, const std::string& producer_name,
const std::string& consumer_name) {
auto step = dump_proto.add_fusion_steps();
auto fusion_step = step->mutable_fusion();
fusion_step->set_fusion_name(fusion_name);
fusion_step->set_producer_name(producer_name);
fusion_step->set_consumer_name(consumer_name);
}
TEST_F(FusionProcessDumpTest, MultipleFusionSteps) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
ENTRY main {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
add = f32[] add(p0, p1)
subtract = f32[] subtract(p0, p1)
abs = f32[] abs(subtract)
ROOT multiply = f32[] multiply(add, abs)
})"));
FusionProcessDumpProto dump_proto;
*dump_proto.mutable_gpu_device_info() =
TestGpuDeviceInfo::RTXA6000DeviceInfo().ToGpuProto();
dump_proto.set_hlo_module_before_fusion(
module->ToString(HloPrintOptions::ShortParsable()));
AddFusion(dump_proto, "fusion.1", "subtract", "abs");
AddFusion(dump_proto, "fusion.2", "fusion.1", "multiply");
AddFusion(dump_proto, "fusion.2", "add", "fusion.2");
TF_ASSERT_OK_AND_ASSIGN(auto fusion_process_dump,
FusionProcessDump::LoadFromProto(dump_proto));
fusion_process_dump.Advance();
fusion_process_dump.Advance();
fusion_process_dump.Advance();
EXPECT_FALSE(fusion_process_dump.HasNext());
auto root =
fusion_process_dump.module()->entry_computation()->root_instruction();
EXPECT_EQ(root->name(), "fusion.2");
ASSERT_THAT(root, GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Multiply(
m::Add(m::Parameter(), m::Parameter()),
m::Abs(m::Subtract(m::Parameter(), m::Parameter())))));
}
}
}
} | 2,089 |
#ifndef XLA_SERVICE_GPU_RENAME_FUSIONS_H_
#define XLA_SERVICE_GPU_RENAME_FUSIONS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
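// Renames fusion instructions to "<fusion kind>_<hero opcodes>_fusion" and
// their fused computations to "fused_<hero opcodes>", based on the
// non-trivial hero opcodes of the fusion roots. Custom fusions are left
// untouched.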
class RenameFusions : public HloModulePass {
absl::string_view name() const override { return "rename_fusions"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/rename_fusions.h"
#include <memory>
#include <string>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
namespace xla {
namespace gpu {
namespace {
constexpr absl::string_view FusionKindToString(
HloInstruction::FusionKind kind) {
switch (kind) {
case HloInstruction::FusionKind::kCustom:
return "custom";
case HloInstruction::FusionKind::kLoop:
return "loop";
case HloInstruction::FusionKind::kInput:
return "input";
case HloInstruction::FusionKind::kOutput:
return "output";
}
}
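// Joins the deduplicated, sorted hero opcodes of the fusion roots with
// underscores, replacing '-' with '_' so the result is a valid identifier.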
std::string MakeFusionHeroNames(const HloInstruction* instruction) {
std::unique_ptr<HloFusionAdaptor> fusion_adaptor =
HloFusionAdaptor::ForInstruction(instruction);
absl::btree_set<absl::string_view> heroes;
for (auto root : fusion_adaptor->GetRoots()) {
heroes.insert(HloOpcodeString(FindNonTrivialHero(root).opcode()));
}
return absl::StrReplaceAll(absl::StrJoin(heroes, "_"), {{"-", "_"}});
}
void RenameFusion(HloModule* module, HloInstruction* instruction) {
std::string hero_names = MakeFusionHeroNames(instruction);
module->SetAndUniquifyInstrName(
instruction, absl::StrCat(FusionKindToString(instruction->fusion_kind()),
"_", hero_names, "_fusion"));
module->SetAndUniquifyComputationName(
instruction->fused_instructions_computation(),
absl::StrCat("fused_", hero_names));
}
}
absl::StatusOr<bool> RenameFusions::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (HloComputation* computation : module->MakeNonfusionComputations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() != HloOpcode::kFusion ||
instruction->fusion_kind() == HloInstruction::FusionKind::kCustom) {
continue;
}
RenameFusion(module, instruction);
}
}
return true;
}
}
} | #include "xla/service/gpu/rename_fusions.h"
#include <utility>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
class RenameFusionsTest : public HloTestBase {
protected:
RenameFusions rename_fusions_;
};
TEST_F(RenameFusionsTest, FusionInstructionNames) {
absl::string_view kHlo = R"(
HloModule test_module
square {
p = f32[16384] parameter(0)
ROOT m = f32[16384] multiply(p, p)
}
exp {
p = f32[16384] parameter(0)
ROOT e = f32[16384] exponential(p)
}
log {
p = f32[16384] parameter(0)
ROOT l = f32[16384] log(p)
}
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
p0 = bf16[1024,8192] parameter(0)
p1 = f32[8192] parameter(1)
p2 = f32[16384] parameter(2)
convert = f32[1024,8192] convert(p0)
broadcast = f32[1024,8192] broadcast(p1), dimensions={1}
c0 = f32[] constant(0)
multiply = f32[1024,8192] multiply(broadcast, convert)
reduce = f32[1024] reduce(multiply, c0), dimensions={1}, to_apply=add
convert.1 = bf16[1024] convert(reduce)
s = f32[16384] fusion(p2), kind=kLoop, calls=square
e = f32[16384] fusion(s), kind=kLoop, calls=exp
l = f32[16384] fusion(s), kind=kInput, calls=log
ROOT result = (bf16[1024]{0}, f32[16384]{0}, f32[16384]{0}) tuple(convert.1, l, e)
})";
RunAndFilecheckHloRewrite(kHlo, std::move(rename_fusions_), R"(
CHECK: ENTRY %main
CHECK: %loop_multiply_fusion{{.*}} calls=%fused_multiply
CHECK: %input_log_fusion{{.*}} calls=%fused_log
CHECK: %loop_exponential_fusion{{.*}} calls=%fused_exponential
CHECK: ROOT %result
)");
}
}
} | 2,090 |
#ifndef XLA_SERVICE_GPU_GPU_CONV_REWRITER_H_
#define XLA_SERVICE_GPU_GPU_CONV_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
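// Rewrites plain HLO convolutions, as well as the backward-filter and
// backward-input patterns matched below, into convolution custom calls (see
// cublas_cudnn.h for the call targets) that the GPU backend can lower.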
class GpuConvRewriter : public HloModulePass {
public:
explicit GpuConvRewriter(const se::GpuComputeCapability& compute_capability)
: compute_capability_(compute_capability) {}
absl::string_view name() const override { return "gpu-conv-rewriter"; }
static bool ConvIsLowerable(HloInstruction* conv);
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::GpuComputeCapability compute_capability_;
};
}
}
#endif
#include "xla/service/gpu/gpu_conv_rewriter.h"
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <string_view>
#include <tuple>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
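// Rejects convolutions whose operand or result types cannot be lowered: only
// floating-point and integral types are accepted, and FP8 is restricted to
// f8e4m3fn/f8e5m2 on CUDA GPUs with Hopper or newer compute capability.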
absl::Status CheckTypes(HloInstruction* conv,
const se::GpuComputeCapability cc) {
auto valid_shape = [conv, &cc](const Shape& shape) -> absl::Status {
PrimitiveType type = shape.element_type();
if (!primitive_util::IsFloatingPointType(type) &&
!primitive_util::IsIntegralType(type)) {
return Unimplemented(
"Convolutions must have floating-point or integral operands/outputs, "
"but got convolution with type %s: %s",
primitive_util::LowercasePrimitiveTypeName(type), conv->ToString());
}
if (primitive_util::IsF8Type(type)) {
if (type != F8E4M3FN && type != F8E5M2) {
return Unimplemented(
"The only FP8 types supported in convolutions are f8e5m2 and "
"f8e4m3, "
"but got convolution with FP8 type %s: %s",
primitive_util::LowercasePrimitiveTypeName(type), conv->ToString());
}
if (!std::holds_alternative<se::CudaComputeCapability>(cc)) {
return Unimplemented(
"FP8 convolutions are only supported on CUDA GPUs, but got "
"FP8 convolution on ROCm GPU: %s",
conv->ToString());
} else if (!std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper()) {
return Unimplemented(
"FP8 convolutions are only supported on CUDA GPUs with compute "
"capability at least 9.0, but got "
"FP8 convolution on GPU with compute capability %s: %s",
std::get<se::CudaComputeCapability>(cc).ToString(),
conv->ToString());
}
}
return absl::OkStatus();
};
TF_RETURN_IF_ERROR(valid_shape(conv->shape()));
TF_RETURN_IF_ERROR(valid_shape(conv->operand(0)->shape()));
TF_RETURN_IF_ERROR(valid_shape(conv->operand(1)->shape()));
return absl::OkStatus();
}
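// A successful backward-convolution match: the window, dimension numbers and
// the operand to use when building the rewritten convolution.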
using ConvolutionMatch = std::optional<
std::tuple<Window, ConvolutionDimensionNumbers, HloInstruction*>>;
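// Returns true if this 2D convolution looks like a 1D convolution that was
// expanded to 2D, i.e. its filter is a reshape that merely inserts a single
// size-1 kernel spatial dimension.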
bool MaybeConv1dToConv2d(HloInstruction* conv) {
if (conv->window().dimensions().size() != 2) {
return false;
}
if (conv->operand(1)->opcode() != HloOpcode::kReshape) {
return false;
}
auto filter = conv->operand(1);
std::optional<ShapeUtil::ShapeEqualityDescriptor> reshape_degenerate =
filter->ReshapeMerelyInsertsOrDeletes1SizedDimensions();
if (reshape_degenerate.has_value() &&
reshape_degenerate->deleted_dimensions.empty() &&
reshape_degenerate->inserted_dimensions.size() == 1) {
const auto& dnums = conv->convolution_dimension_numbers();
for (auto dim : dnums.kernel_spatial_dimensions()) {
if (dim == reshape_degenerate->inserted_dimensions[0]) {
return true;
}
}
}
return false;
}
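// A forward convolution can be lowered to the GPU custom call only if it has
// at most three spatial dimensions, non-empty operands, and window reversal
// that is all-or-none for 2D convolutions and absent otherwise.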
bool CanImplementAsGpuForwardConv(HloInstruction* conv) {
const ConvolutionDimensionNumbers& dnums =
conv->convolution_dimension_numbers();
if (dnums.input_spatial_dimensions_size() > 3) {
return false;
}
if (ShapeUtil::IsZeroElementArray(conv->operand(0)->shape()) ||
ShapeUtil::IsZeroElementArray(conv->operand(1)->shape())) {
return false;
}
if (dnums.input_spatial_dimensions_size() == 2
? !window_util::AllOrNoneReversed(conv->window())
: window_util::HasWindowReversal(conv->window())) {
return false;
}
return true;
}
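// Tries to match `conv` as a convolution that computes the gradient with
// respect to the filter (backward-filter convolution). On success, returns
// the window, dimension numbers and LHS for the backward-filter custom call;
// otherwise returns std::nullopt.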
ConvolutionMatch MatchBackwardFilter(HloInstruction* conv) {
VLOG(2) << "Trying to match convolution backward filter.";
if (conv->feature_group_count() > 1) {
VLOG(1) << conv->ToString()
<< " is a forward convolution. All grouped backward filters are "
"mapped to batch grouped convolutions in tf2xla bridge. Hence "
"backward filter "
"convolutions cannot have feature groups greater than 1 at this "
"point. No need to fold to backward filter.";
return std::nullopt;
}
CHECK_EQ(HloOpcode::kConvolution, conv->opcode());
const ConvolutionDimensionNumbers& conv_dnums =
conv->convolution_dimension_numbers();
auto input_batch_dim = conv_dnums.input_batch_dimension();
auto input_feature_dim = conv_dnums.input_feature_dimension();
auto input_spatial_dims = conv_dnums.input_spatial_dimensions();
auto kernel_input_feature_dim = conv_dnums.kernel_input_feature_dimension();
auto kernel_output_feature_dim = conv_dnums.kernel_output_feature_dimension();
auto kernel_spatial_dims = conv_dnums.kernel_spatial_dimensions();
auto output_batch_dim = conv_dnums.output_batch_dimension();
auto output_feature_dim = conv_dnums.output_feature_dimension();
auto output_spatial_dims = conv_dnums.output_spatial_dimensions();
for (const WindowDimension& window_dim : conv->window().dimensions()) {
if (window_dim.stride() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have stride of 1.";
return std::nullopt;
}
if (window_dim.base_dilation() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have no base (LHS) dilation.";
return std::nullopt;
}
if (window_dim.padding_low() < 0) {
VLOG(1) << "Padding low should be non-negative.";
return std::nullopt;
}
if (window_dim.window_reversal()) {
VLOG(1) << "Window reversal field not supported";
return std::nullopt;
}
}
int small_kernel_dimension_num = 0;
for (int i = 0; i < kernel_spatial_dims.size(); ++i) {
if (conv->operand(1)->shape().dimensions(kernel_spatial_dims[i]) <=
conv->shape().dimensions(output_spatial_dims[i])) {
small_kernel_dimension_num += 1;
}
}
if ((kernel_spatial_dims.empty() || small_kernel_dimension_num > 1 ||
(!MaybeConv1dToConv2d(conv) && small_kernel_dimension_num == 1)) &&
!window_util::HasWindowDilation(conv->window())) {
VLOG(1) << conv->ToString()
<< " is a regular forward convolution. No need "
"to fold it to a backward filter convolution....";
return std::nullopt;
}
Window backward_conv_window;
for (int i = 0; i < input_spatial_dims.size(); ++i) {
WindowDimension* dim = backward_conv_window.add_dimensions();
int64_t filter_size = conv->shape().dimensions(output_spatial_dims[i]);
dim->set_size(filter_size);
dim->set_stride(conv->window().dimensions(i).window_dilation());
dim->set_padding_low(conv->window().dimensions(i).padding_low());
dim->set_base_dilation(1);
dim->set_window_dilation(1);
int64_t input_size =
conv->operand(0)->shape().dimensions(input_spatial_dims[i]);
int64_t output_size = conv->window().dimensions(i).size();
int64_t padded_input_size = filter_size + (output_size - 1) * dim->stride();
int64_t min_padding_high =
padded_input_size - input_size - dim->padding_low();
int64_t max_padding_high = min_padding_high + dim->stride() - 1;
CHECK_GE(dim->padding_low(), 0);
if (dim->padding_low() >= min_padding_high &&
dim->padding_low() <= max_padding_high) {
dim->set_padding_high(dim->padding_low());
} else {
if (dim->padding_low() < min_padding_high) {
dim->set_padding_high(min_padding_high);
} else {
dim->set_padding_high(max_padding_high);
}
}
if (dim->padding_high() < 0) {
LOG(WARNING)
<< "Fusing this pattern to backward filter convolution would cause "
"negative padding ("
<< dim->padding_high()
<< ") on right/bottom of the weight gradients, which is not "
"supported by GpuConvPaddingLegalization (b/32744257). "
"Falling back to "
"unfused convolution for instruction: "
<< conv->ToString();
return std::nullopt;
}
}
ConvolutionDimensionNumbers backward_conv_dnums;
backward_conv_dnums.set_input_batch_dimension(input_feature_dim);
backward_conv_dnums.set_input_feature_dimension(input_batch_dim);
for (int i = 0; i < input_spatial_dims.size(); ++i) {
backward_conv_dnums.add_input_spatial_dimensions(input_spatial_dims[i]);
}
backward_conv_dnums.set_output_batch_dimension(kernel_input_feature_dim);
backward_conv_dnums.set_output_feature_dimension(kernel_output_feature_dim);
for (int i = 0; i < kernel_spatial_dims.size(); ++i) {
backward_conv_dnums.add_output_spatial_dimensions(kernel_spatial_dims[i]);
}
backward_conv_dnums.set_kernel_input_feature_dimension(output_batch_dim);
backward_conv_dnums.set_kernel_output_feature_dimension(output_feature_dim);
for (int i = 0; i < output_spatial_dims.size(); ++i) {
backward_conv_dnums.add_kernel_spatial_dimensions(output_spatial_dims[i]);
}
HloInstruction* lhs = conv->mutable_operand(0);
return std::make_tuple(backward_conv_window, backward_conv_dnums, lhs);
}
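// Tries to match `conv` as a convolution that computes the gradient with
// respect to the input: a base-dilated convolution whose filter is reversed
// (or is a 1x1 / constant filter). On success, returns the new window,
// dimension numbers and the unreversed filter operand; otherwise returns
// std::nullopt.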
ConvolutionMatch MatchBackwardInput(HloInstruction* conv) {
VLOG(2) << "Trying to match convolution backward input.";
if (conv->feature_group_count() > 1) {
return std::nullopt;
}
CHECK_EQ(HloOpcode::kConvolution, conv->opcode());
HloInstruction* reverse_filter = conv->mutable_operand(1);
ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers();
auto kernel_out_feature_dim = dnums.kernel_output_feature_dimension();
auto kernel_out_features =
reverse_filter->shape().dimensions(kernel_out_feature_dim);
if (conv->feature_group_count() > 1 &&
kernel_out_features == conv->feature_group_count()) {
return std::nullopt;
}
bool is_reversed_filter =
reverse_filter->opcode() == HloOpcode::kReverse &&
absl::c_is_permutation(dnums.kernel_spatial_dimensions(),
reverse_filter->dimensions());
bool is_reversed_conv1d_filter =
MaybeConv1dToConv2d(conv) &&
reverse_filter->operand(0)->opcode() == HloOpcode::kReverse;
bool is_1x1_filter =
absl::c_all_of(conv->window().dimensions(),
[](const WindowDimension& d) { return d.size() == 1; });
if (!is_reversed_filter && !is_reversed_conv1d_filter &&
!(window_util::HasBaseDilation(conv->window()) &&
(reverse_filter->IsConstant() || is_1x1_filter))) {
VLOG(1) << "Can't match to backwards convolution. Either filter is not "
"kReverse, or it's not a base-dilated conv with a 1x1 or "
"constant filter.";
return std::nullopt;
}
for (const WindowDimension& window_dim : conv->window().dimensions()) {
if (window_dim.stride() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have stride of 1.";
return std::nullopt;
}
if (window_dim.window_dilation() != 1) {
VLOG(1) << "Forward convolution's window "
<< conv->window().ShortDebugString()
<< " should have no window dilation.";
return std::nullopt;
}
if (window_dim.window_reversal()) {
VLOG(1) << "Window reversal field not supported";
return std::nullopt;
}
}
const auto& input_spatial_dims = dnums.input_spatial_dimensions();
const auto& output_spatial_dims = dnums.output_spatial_dimensions();
CHECK_EQ(conv->window().dimensions().size(), input_spatial_dims.size());
CHECK_EQ(output_spatial_dims.size(), input_spatial_dims.size());
const Window& old_window = conv->window();
Window new_window = old_window;
for (size_t i = 0; i < input_spatial_dims.size(); ++i) {
auto dim = new_window.mutable_dimensions(i);
dim->set_stride(old_window.dimensions(i).base_dilation());
dim->set_base_dilation(1);
auto kernel_size = old_window.dimensions(i).size();
auto backward_padding_low =
kernel_size - 1 - old_window.dimensions(i).padding_low();
if (backward_padding_low < 0) {
LOG(WARNING)
<< "The low padding of the backward convolution would be negative ("
<< backward_padding_low
<< "), which isn't supported by GpuConvPaddingLegalization "
"for now (b/32744257).";
return std::nullopt;
}
dim->set_padding_low(backward_padding_low);
auto unpadded_input_size = conv->shape().dimensions(output_spatial_dims[i]);
auto output_size =
conv->operand(0)->shape().dimensions(input_spatial_dims[i]);
auto padded_input_size = kernel_size + dim->stride() * (output_size - 1);
auto total_pad_size = padded_input_size - unpadded_input_size;
auto min_padding_high = total_pad_size - backward_padding_low;
auto max_padding_high = min_padding_high + dim->stride() - 1;
if (backward_padding_low >= min_padding_high &&
backward_padding_low <= max_padding_high) {
dim->set_padding_high(backward_padding_low);
} else {
if (backward_padding_low < min_padding_high) {
dim->set_padding_high(min_padding_high);
} else {
dim->set_padding_high(max_padding_high);
}
}
if (dim->padding_high() < 0) {
LOG(WARNING) << "Fusing this pattern to backward convolution would cause "
"negative padding ("
<< dim->padding_high()
<< ") on right/bottom of the activations, which is not "
"supported by GpuConvPaddingLegalization (b/32744257). "
"Falling back to unfused convolution for instruction: "
<< conv->ToString();
return std::nullopt;
}
}
auto conv_dnums = conv->convolution_dimension_numbers();
dnums.set_kernel_input_feature_dimension(
conv_dnums.kernel_output_feature_dimension());
dnums.set_kernel_output_feature_dimension(
conv_dnums.kernel_input_feature_dimension());
for (int i = 0; i < input_spatial_dims.size(); ++i) {
dnums.set_input_spatial_dimensions(i,
conv_dnums.output_spatial_dimensions(i));
dnums.set_output_spatial_dimensions(i,
conv_dnums.input_spatial_dimensions(i));
}
dnums.set_input_feature_dimension(conv_dnums.output_feature_dimension());
dnums.set_input_batch_dimension(conv_dnums.output_batch_dimension());
dnums.set_output_feature_dimension(conv_dnums.input_feature_dimension());
dnums.set_output_batch_dimension(conv_dnums.input_batch_dimension());
if (reverse_filter->opcode() != HloOpcode::kReverse &&
reverse_filter->IsConstant()) {
HloComputation* c = conv->parent();
reverse_filter = c->AddInstruction(
HloInstruction::CreateReverse(reverse_filter->shape(), reverse_filter,
dnums.kernel_spatial_dimensions()));
reverse_filter = c->AddInstruction(
HloInstruction::CreateReverse(reverse_filter->shape(), reverse_filter,
dnums.kernel_spatial_dimensions()));
TF_CHECK_OK(conv->ReplaceOperandWith(1, reverse_filter));
}
HloInstruction* rhs = reverse_filter;
if (rhs->opcode() == HloOpcode::kReverse) {
rhs = rhs->mutable_operand(0);
} else if (is_reversed_conv1d_filter) {
auto src = rhs->mutable_operand(0)->mutable_operand(0);
rhs = conv->parent()->AddInstruction(
HloInstruction::CreateReshape(rhs->shape(), src));
}
if (conv->feature_group_count() == 1) {
return std::make_tuple(new_window, dnums, rhs);
}
int64_t input_feature_dimension = dnums.kernel_input_feature_dimension();
int64_t output_feature_dimension = dnums.kernel_output_feature_dimension();
if (std::abs(input_feature_dimension - output_feature_dimension) != 1) {
return std::nullopt;
}
int64_t input_features = rhs->shape().dimensions(input_feature_dimension);
int64_t output_features = rhs->shape().dimensions(output_feature_dimension);
std::vector<int64_t> reshape_dims = SpanToVector(rhs->shape().dimensions());
auto num_groups = conv->feature_group_count();
CHECK_EQ(input_features % num_groups, 0)
<< "Input feature count should be an exact multiple of feature group "
"count";
reshape_dims[input_feature_dimension] =
reshape_dims[input_feature_dimension] / num_groups;
reshape_dims.insert(reshape_dims.begin() + input_feature_dimension,
num_groups);
HloComputation* c = conv->parent();
rhs = c->AddInstruction(HloInstruction::CreateReshape(
ShapeUtil::MakeShape(rhs->shape().element_type(), reshape_dims), rhs));
std::vector<int64_t> transpose_dims(rhs->shape().dimensions_size());
std::iota(transpose_dims.begin(), transpose_dims.end(), 0);
transpose_dims.erase(transpose_dims.begin() + input_feature_dimension);
transpose_dims.insert(transpose_dims.begin() + output_feature_dimension,
input_feature_dimension);
std::vector<int64_t> transpose_reshape_dims =
SpanToVector(rhs->shape().dimensions());
transpose_reshape_dims.erase(transpose_reshape_dims.begin() +
input_feature_dimension);
transpose_reshape_dims.insert(
transpose_reshape_dims.begin() + output_feature_dimension, num_groups);
rhs = c->AddInstruction(HloInstruction::CreateTranspose(
ShapeUtil::MakeShape(rhs->shape().element_type(), transpose_reshape_dims),
rhs, transpose_dims));
Shape new_shape = rhs->shape();
new_shape.DeleteDimension(output_feature_dimension);
new_shape.set_dimensions(output_feature_dimension,
output_features * num_groups);
rhs = c->AddInstruction(HloInstruction::CreateReshape(new_shape, rhs));
return std::make_tuple(new_window, dnums, rhs);
}
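// Builds the convolution custom call to `call_target` with the given shape,
// operands, window, dimension numbers, feature group count, precision config
// and metadata.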
HloInstruction* CreateGpuConv(absl::string_view call_target, const Shape& shape,
HloInstruction* lhs, HloInstruction* rhs,
const Window& window,
const ConvolutionDimensionNumbers& dnums,
int64_t feature_group_count,
const PrecisionConfig& precision_config,
const OpMetadata& metadata) {
HloComputation* computation = lhs->parent(); | #include "xla/service/gpu/gpu_conv_rewriter.h"
#include <optional>
#include <string>
#include "absl/log/check.h"
#include "absl/strings/str_format.h"
#include "xla/array4d.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/shape_inference.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class GpuConvRewriterTest : public HloTestBase {
public:
GpuConvRewriterTest()
: HloTestBase(/*verifier_layout_sensitive=*/true,
/*allow_mixed_precision_in_hlo_verifier=*/false) {
for (int i = 0; i < 2; ++i) {
WindowDimension* window_dim = default_conv_window_.add_dimensions();
window_dim->set_size(1);
window_dim->set_stride(1);
window_dim->set_padding_low(0);
window_dim->set_padding_high(0);
window_dim->set_window_dilation(1);
window_dim->set_base_dilation(1);
}
tf_default_dnums_for_backward_filter_.set_input_batch_dimension(3);
tf_default_dnums_for_backward_filter_.set_input_feature_dimension(0);
tf_default_dnums_for_backward_filter_.add_input_spatial_dimensions(1);
tf_default_dnums_for_backward_filter_.add_input_spatial_dimensions(2);
tf_default_dnums_for_backward_filter_.set_kernel_input_feature_dimension(0);
tf_default_dnums_for_backward_filter_.set_kernel_output_feature_dimension(
3);
tf_default_dnums_for_backward_filter_.add_kernel_spatial_dimensions(1);
tf_default_dnums_for_backward_filter_.add_kernel_spatial_dimensions(2);
tf_default_dnums_for_backward_filter_.add_output_spatial_dimensions(0);
tf_default_dnums_for_backward_filter_.add_output_spatial_dimensions(1);
tf_default_dnums_for_backward_filter_.set_output_batch_dimension(2);
tf_default_dnums_for_backward_filter_.set_output_feature_dimension(3);
tf_default_dnums_for_backward_input_.set_input_batch_dimension(0);
tf_default_dnums_for_backward_input_.set_output_batch_dimension(0);
tf_default_dnums_for_backward_input_.set_input_feature_dimension(3);
tf_default_dnums_for_backward_input_.set_output_feature_dimension(3);
tf_default_dnums_for_backward_input_.add_input_spatial_dimensions(1);
tf_default_dnums_for_backward_input_.add_output_spatial_dimensions(1);
tf_default_dnums_for_backward_input_.add_input_spatial_dimensions(2);
tf_default_dnums_for_backward_input_.add_output_spatial_dimensions(2);
tf_default_dnums_for_backward_input_.set_kernel_input_feature_dimension(3);
tf_default_dnums_for_backward_input_.set_kernel_output_feature_dimension(2);
tf_default_dnums_for_backward_input_.add_kernel_spatial_dimensions(0);
tf_default_dnums_for_backward_input_.add_kernel_spatial_dimensions(1);
}
protected:
const se::GpuComputeCapability& GetComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
}
bool RunPass(HloModule* module) {
return GpuConvRewriter(GetComputeCapability()).Run(module).value();
}
Window default_conv_window_;
ConvolutionDimensionNumbers tf_default_dnums_for_backward_filter_;
ConvolutionDimensionNumbers tf_default_dnums_for_backward_input_;
};
TEST_F(GpuConvRewriterTest, BackwardFilterConvolve) {
HloComputation::Builder builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 1, 2, 1}), "gradients"));
Window conv_window = default_conv_window_;
conv_window.mutable_dimensions(1)->set_size(2);
conv_window.mutable_dimensions(1)->set_window_dilation(2);
auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeInference::InferConvolveShape(
activations->shape(), gradients->shape(), 1,
1, conv_window,
tf_default_dnums_for_backward_filter_,
std::nullopt)
.value(),
activations, gradients, 1,
1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
OpMetadata metadata;
metadata.set_op_name("foo");
conv->set_metadata(metadata);
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
ASSERT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
const auto& md_after_opt =
entry_computation->root_instruction()->operand(0)->metadata();
EXPECT_TRUE(protobuf_util::ProtobufEquals(md_after_opt, metadata))
<< md_after_opt.DebugString() << " vs " << metadata.DebugString();
}
TEST_F(GpuConvRewriterTest,
BackwardFilterConvolveEquivalentToForwardConvolution) {
HloComputation::Builder builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "gradients"));
Window conv_window = default_conv_window_;
conv_window.mutable_dimensions(1)->set_size(3);
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeInference::InferConvolveShape(
activations->shape(), gradients->shape(), 1,
1, conv_window,
tf_default_dnums_for_backward_filter_,
std::nullopt)
.value(),
activations, gradients, 1,
1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardFilterConvolveWithPaddedActivations) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "gradients"));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(35);
conv_window.mutable_dimensions(i)->set_padding_low(1);
conv_window.mutable_dimensions(i)->set_padding_high(1);
}
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {32, 3, 3, 32}), activations, gradients,
1, 1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardFilterConvolveWithPaddedGradients) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "gradients"));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(4);
conv_window.mutable_dimensions(i)->set_padding_high(-1);
conv_window.mutable_dimensions(i)->set_window_dilation(2);
}
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {320, 3, 3, 192}), activations, gradients,
1, 1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardFilterConvolveWithUnevenPadding) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* activations =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "activations"));
HloInstruction* gradients =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "gradients"));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(35);
conv_window.mutable_dimensions(i)->set_padding_high(1);
}
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {32, 2, 2, 32}), activations, gradients,
1, 1, conv_window,
tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0)));
}
TEST_F(GpuConvRewriterTest, BackwardInputConvolveEvenPadding) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {4, 5, 16, 16}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {5, 3, 7, 7}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {2, 3}));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(7);
conv_window.mutable_dimensions(i)->set_padding_low(3);
conv_window.mutable_dimensions(i)->set_padding_high(3);
}
ConvolutionDimensionNumbers conv_dnums;
conv_dnums.set_input_batch_dimension(0);
conv_dnums.set_output_batch_dimension(0);
conv_dnums.set_input_feature_dimension(1);
conv_dnums.set_output_feature_dimension(1);
conv_dnums.add_input_spatial_dimensions(2);
conv_dnums.add_output_spatial_dimensions(2);
conv_dnums.add_input_spatial_dimensions(3);
conv_dnums.add_output_spatial_dimensions(3);
conv_dnums.set_kernel_input_feature_dimension(0);
conv_dnums.set_kernel_output_feature_dimension(1);
conv_dnums.add_kernel_spatial_dimensions(2);
conv_dnums.add_kernel_spatial_dimensions(3);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {4, 3, 16, 16}), output,
reverse_kernel, 1,
1, conv_window, conv_dnums,
DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(),
ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1, conv_window,
conv_dnums, std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
ASSERT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
const HloInstruction* custom_call =
entry_computation->root_instruction()->operand(0);
for (int i = 0; i < 2; ++i) {
const WindowDimension& window_dim = custom_call->window().dimensions(i);
EXPECT_EQ(3, window_dim.padding_low());
EXPECT_EQ(3, window_dim.padding_high());
EXPECT_EQ(1, window_dim.stride());
EXPECT_EQ(1, window_dim.base_dilation());
}
}
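// With base (LHS) dilation, even a 1x1 filter is rewritten to a cuDNN
// backward-input custom call.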
TEST_F(GpuConvRewriterTest, BackwardInputConvolve1x1Filter) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 1, 1, 1}), "kernel"));
Window conv_window = default_conv_window_;
conv_window.mutable_dimensions(1)->set_base_dilation(2);
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeInference::InferConvolveShape(
output->shape(), kernel->shape(),
1,
1, conv_window,
tf_default_dnums_for_backward_input_,
std::nullopt)
.value(),
output, kernel, 1,
1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
}
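// Without base dilation, a 1x1-filter "backward input" convolution is
// equivalent to a forward convolution and is rewritten as one.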
TEST_F(GpuConvRewriterTest,
BackwardInputConvolve1x1FilterEquivalentToForwardConvolve) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 1, 1, 1}), "kernel"));
builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeInference::InferConvolveShape(
output->shape(), kernel->shape(), 1,
1, default_conv_window_,
tf_default_dnums_for_backward_input_,
std::nullopt)
.value(),
output, kernel, 1,
1, default_conv_window_,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
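// Uneven padding combined with base dilation on the gradients still matches
// the backward-input pattern; the rewritten window ends up with zero padding,
// stride 2, and unit base dilation.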
TEST_F(GpuConvRewriterTest, BackwardInputConvolveUnevenPaddingOnGradients) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {3, 3, 192, 320}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(3);
conv_window.mutable_dimensions(i)->set_padding_low(2);
conv_window.mutable_dimensions(i)->set_padding_high(3);
conv_window.mutable_dimensions(i)->set_base_dilation(2);
}
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), output, reverse_kernel,
1, 1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(), ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1,
conv_window, tf_default_dnums_for_backward_input_,
std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
ASSERT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
const HloInstruction* custom_call =
entry_computation->root_instruction()->operand(0);
for (int i = 0; i < 2; ++i) {
const WindowDimension& window_dim = custom_call->window().dimensions(i);
EXPECT_EQ(0, window_dim.padding_low());
EXPECT_EQ(0, window_dim.padding_high());
EXPECT_EQ(2, window_dim.stride());
EXPECT_EQ(1, window_dim.base_dilation());
}
}
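// When the low padding is too large to describe a valid backward-input
// convolution, the pass falls back to a forward custom call.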
TEST_F(GpuConvRewriterTest, BackwardInputConvolveLowPaddingTooLarge) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {3, 3, 192, 320}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
Window conv_window = default_conv_window_;
for (int i = 0; i < 2; ++i) {
conv_window.mutable_dimensions(i)->set_size(3);
conv_window.mutable_dimensions(i)->set_padding_low(3);
conv_window.mutable_dimensions(i)->set_padding_high(2);
conv_window.mutable_dimensions(i)->set_base_dilation(2);
}
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), output, reverse_kernel,
1, 1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(), ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1,
conv_window, tf_default_dnums_for_backward_input_,
std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
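// Uneven padding on the activations still matches the backward-input pattern;
// the backward window's column dimension gets padding_low=0 and
// padding_high=1.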
TEST_F(GpuConvRewriterTest, BackwardInputConvolveUnevenPaddingOnActivations) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 7, 1}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 3, 1, 1}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
Window conv_window = default_conv_window_;
WindowDimension* forward_conv_col_dim = conv_window.mutable_dimensions(1);
forward_conv_col_dim->set_size(3);
forward_conv_col_dim->set_padding_low(2);
forward_conv_col_dim->set_padding_high(1);
forward_conv_col_dim->set_base_dilation(2);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {1, 1, 14, 1}), output, reverse_kernel,
1, 1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(), ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1,
conv_window, tf_default_dnums_for_backward_input_,
std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
const HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
ASSERT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0)));
const WindowDimension& backward_conv_col_dim =
entry_computation->root_instruction()->operand(0)->window().dimensions(1);
EXPECT_EQ(0, backward_conv_col_dim.padding_low());
EXPECT_EQ(1, backward_conv_col_dim.padding_high());
}
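// A pattern whose backward-input interpretation would require negative high
// padding on the activations is left as a forward custom call.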
TEST_F(GpuConvRewriterTest,
BackwardInputConvolveNegativePaddingHighOnActivations) {
auto builder = HloComputation::Builder(TestName());
HloInstruction* output =
builder.AddInstruction(HloInstruction::CreateParameter(
0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output"));
HloInstruction* kernel =
builder.AddInstruction(HloInstruction::CreateParameter(
1, ShapeUtil::MakeShape(F32, {1, 2, 1, 1}), "kernel"));
HloInstruction* reverse_kernel = builder.AddInstruction(
HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1}));
Window conv_window = default_conv_window_;
WindowDimension* forward_conv_col_dim = conv_window.mutable_dimensions(1);
forward_conv_col_dim->set_size(2);
forward_conv_col_dim->set_padding_high(2);
HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve(
ShapeUtil::MakeShape(F32, {1, 1, 4, 1}), output, reverse_kernel,
1, 1, conv_window,
tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2)));
CHECK(ShapeUtil::Compatible(
conv->shape(), ShapeInference::InferConvolveShape(
output->shape(), reverse_kernel->shape(),
1, 1,
conv_window, tf_default_dnums_for_backward_input_,
std::nullopt)
.value()));
auto module = CreateNewVerifiedModule();
HloComputation* entry_computation =
module->AddEntryComputation(builder.Build());
EXPECT_TRUE(RunPass(module.get()));
EXPECT_THAT(entry_computation->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}
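// A constant filter is reversed explicitly: the convolution is rewritten to a
// cuDNN backward-input custom call taking reverse(constant) as its filter
// operand.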
TEST_F(GpuConvRewriterTest, BackwardInputConvolveConstantFilter) {
Array4D<float> constant_arr(4, 4, 2, 2);
constant_arr.FillIota(0);
std::string constant_str =
LiteralUtil::CreateR4FromArray4D(constant_arr).ToStringWithoutShape();
const std::string module_str = absl::StrFormat(R"(
HloModule test
ENTRY entry_computation {
param0 = f32[128,2,16,16]{3,2,1,0} parameter(0)
constant = f32[4,4,2,2]{3,2,1,0} constant(%s)
ROOT convolution = f32[128,2,32,32]{3,2,1,0} convolution(param0, constant),
window={size=4x4 pad=2_2x2_2 lhs_dilate=2x2},
dim_labels=bf01_01oi->bf01, feature_group_count=1
})",
constant_str);
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(RunPass(m.get()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardInputCallTarget},
m::Parameter(), m::Reverse(m::Constant())),
0)));
}
TEST_F(GpuConvRewriterTest, TestBackwardFilterPatternMatch) {
const std::string module_str = absl::StrFormat(R"(
HloModule Test
ENTRY Test {
input = f32[8,120,256,256] parameter(0)
filter = f32[8,120,256,256] parameter(1)
ROOT conv = f32[120,120,3,3] convolution(input, filter), window={size=256x256 pad=1_1x1_1}, dim_labels=fb01_io01->fb01
})");
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(RunPass(m.get()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget},
m::Parameter(0), m::Parameter(1)),
0)));
}
TEST_F(GpuConvRewriterTest, TestBackwardFilterPatternNoMatch) {
const std::string module_str = absl::StrFormat(R"(
HloModule Test
ENTRY Test {
input = f32[8,128,2,32] parameter(0)
filter = f32[3,3,128,128] parameter(1)
ROOT conv = f32[8,128,2,32] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
})");
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(RunPass(m.get()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvForwardCallTarget}, m::Parameter(0),
m::Parameter(1)),
0)));
}
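// A 1D convolution expressed through reshapes to 2D still matches the
// backward-filter pattern; the custom call consumes the reshapes directly.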
TEST_F(GpuConvRewriterTest, TestConv1dBackwardFilterPatternMatch) {
const std::string module_str = absl::StrFormat(R"(
HloModule Test
ENTRY Test {
input = f32[8,256,128] parameter(0)
filter = f32[8,254,128] parameter(1)
reshape.1 = f32[8,1,256,128] reshape(input)
reshape.2 = f32[8,1,254,128] reshape(filter)
ROOT conv = f32[1,3,128,128] convolution(reshape.1, reshape.2), window={size=1x254}, dim_labels=f01b_i01o->01bf
})");
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
EXPECT_TRUE(RunPass(m.get()));
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCudnnConvBackwardFilterCallTarget},
m::Reshape(), m::Reshape()),
0)));
}
TEST_F(GpuConvRewriterTest, Tes | 2,091 |
#ifndef XLA_SERVICE_GPU_HLO_FUSION_ANALYSIS_H_
#define XLA_SERVICE_GPU_HLO_FUSION_ANALYSIS_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class HloFusionAnalysis {
public:
enum class EmitterFusionKind {
kLoop,
kCustomFusion,
kTriton,
kReduction,
kTranspose,
kConcatenate,
kInputSlices,
kScatter,
kCuDnn,
};
struct InputOutputInfo {
int smallest_input_dtype_bits;
int smallest_output_dtype_bits;
};
static HloFusionAnalysis Create(FusionBackendConfig backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
const se::DeviceDescription* device_info);
static HloFusionAnalysis Create(const HloFusionInstruction* fusion,
const se::DeviceDescription* device_info);
const HloFusionAdaptor& fusion() const { return *fusion_; }
const absl::InlinedVector<HloInstructionAdaptor, 2>& fusion_roots() const {
return fusion_roots_;
}
HloInstructionAdaptor fusion_root(int64_t i) const {
return fusion_roots_[i];
}
int64_t fusion_root_count() const { return fusion_roots_.size(); }
const absl::InlinedVector<HloInstructionAdaptor, 2>& fusion_heroes() const {
return fusion_heroes_;
}
HloInstructionAdaptor fusion_hero(int64_t i) const {
return fusion_heroes_[i];
}
int64_t fusion_hero_count() const { return fusion_heroes_.size(); }
EmitterFusionKind GetEmitterFusionKind() const;
const HloInstruction* FindHeroReduction() const;
const se::DeviceDescription& device_info() const { return *device_info_; }
const FusionBackendConfig& fusion_backend_config() const {
return fusion_backend_config_;
}
const TransposeDescription& tiled_transpose() const {
CHECK(tiled_transpose_.has_value());
return *tiled_transpose_;
}
const InputOutputInfo& input_output_info() const {
return input_output_info_;
}
private:
HloFusionAnalysis(FusionBackendConfig fusion_backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_roots,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_heroes,
const se::DeviceDescription* device_info,
std::optional<TransposeDescription> tiled_transpose,
InputOutputInfo input_output_info);
bool HasConsistentTransposeHeros() const;
FusionBackendConfig fusion_backend_config_;
std::unique_ptr<HloFusionAdaptor> fusion_;
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_roots_;
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_heroes_;
const se::DeviceDescription* device_info_;
std::optional<TransposeDescription> tiled_transpose_;
InputOutputInfo input_output_info_;
};
HloFusionAnalysis AnalyzeProducerConsumerFusion(
const HloInstruction& producer, const HloInstruction& consumer,
const se::DeviceDescription& device_info);
HloFusionAnalysis AnalyzeFusion(const HloInstruction& consumer,
const se::DeviceDescription& device_info);
}  // namespace gpu
}  // namespace xla
#endif  // XLA_SERVICE_GPU_HLO_FUSION_ANALYSIS_H_
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include <algorithm>
#include <limits>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace {
bool IsInputFusibleNonStridedSlices(
const absl::Span<const HloInstructionAdaptor> fusion_roots) {
return absl::c_all_of(fusion_roots, [&](const HloInstructionAdaptor& root) {
return IsSliceWithUnitStrides(&root.instruction());
});
}
bool AllSliceInputsAreCompatible(
const absl::Span<const HloInstructionAdaptor> fusion_roots) {
const Shape& first_slice_operand_shape =
fusion_roots[0].GetOperand(0).shape();
return absl::c_all_of(fusion_roots, [&](const HloInstructionAdaptor& slice) {
return ShapeUtil::EqualIgnoringElementType(slice.GetOperand(0).shape(),
first_slice_operand_shape);
});
}
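// Returns the tiled-transpose description shared by all transpose heroes, or
// std::nullopt if there is no transpose hero, the heroes disagree, or some
// non-transpose root's shape is not bitcast-compatible with the transpose
// hero's input shape.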
std::optional<TransposeDescription> FindConsistentTransposeHero(
const absl::InlinedVector<HloInstructionAdaptor, 2>& hlo_roots,
const absl::InlinedVector<HloInstructionAdaptor, 2>& heroes) {
std::optional<TransposeDescription> tiled_transpose_hero;
std::vector<const HloInstruction*> non_transpose_roots;
for (auto [root, hero] : llvm::zip(hlo_roots, heroes)) {
if (auto tr = GetDescriptionForTiledTransposeEmitter(root.instruction(),
hero.instruction())) {
if (!tiled_transpose_hero) {
tiled_transpose_hero = tr;
} else if (!tiled_transpose_hero->IsEquivalent(*tr)) {
return std::nullopt;
}
} else {
non_transpose_roots.push_back(&root.instruction());
}
}
if (!tiled_transpose_hero) return std::nullopt;
for (auto* root : non_transpose_roots) {
if (!ShapeUtil::IsReshapeOrTransposeBitcast(
root->shape(), tiled_transpose_hero->input_shape(),
            /*ignore_element_type=*/true)) {
return std::nullopt;
}
}
return tiled_transpose_hero;
}
const Shape& GetShape(const HloInstructionAdaptor& adaptor) {
return adaptor.shape();
}
const Shape& GetShape(const HloInstruction* instruction) {
return instruction->shape();
}
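// Returns the smallest element bit width among the array-shaped arguments;
// PRED is counted as 8 bits.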
template <typename Container>
int SmallestBitWidth(const Container& args) {
int bits = std::numeric_limits<int>::max();
for (const auto& operand : args) {
const Shape& shape = GetShape(operand);
if (!shape.IsArray()) continue;
bits = std::min(bits, shape.element_type() == PRED
? 8
: primitive_util::BitWidth(shape.element_type()));
}
return bits;
}
}  // namespace
HloFusionAnalysis::HloFusionAnalysis(
FusionBackendConfig fusion_backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_roots,
absl::InlinedVector<HloInstructionAdaptor, 2> fusion_heroes,
const se::DeviceDescription* device_info,
std::optional<TransposeDescription> tiled_transpose,
HloFusionAnalysis::InputOutputInfo input_output_info)
: fusion_backend_config_(std::move(fusion_backend_config)),
fusion_(std::move(fusion)),
fusion_roots_(std::move(fusion_roots)),
fusion_heroes_(std::move(fusion_heroes)),
device_info_(device_info),
tiled_transpose_(tiled_transpose),
input_output_info_(std::move(input_output_info)) {}
HloFusionAnalysis HloFusionAnalysis::Create(
FusionBackendConfig backend_config,
std::unique_ptr<HloFusionAdaptor> fusion,
const se::DeviceDescription* device_info) {
absl::InlinedVector<HloInstructionAdaptor, 2> roots = fusion->GetRoots();
absl::InlinedVector<HloInstructionAdaptor, 2> heroes;
for (auto root : roots) {
heroes.push_back(FindNonTrivialHero(root));
}
InputOutputInfo input_output_info{
SmallestBitWidth(fusion->GetParameters()),
SmallestBitWidth(roots),
};
std::optional<TransposeDescription> tiled_transpose_hero =
FindConsistentTransposeHero(roots, heroes);
return HloFusionAnalysis(std::move(backend_config), std::move(fusion),
std::move(roots), std::move(heroes), device_info,
tiled_transpose_hero, std::move(input_output_info));
}
HloFusionAnalysis HloFusionAnalysis::Create(
const HloFusionInstruction* fusion,
const se::DeviceDescription* device_info) {
CHECK(device_info != nullptr);
FusionBackendConfig backend_config =
fusion->has_backend_config()
? fusion->backend_config<GpuBackendConfig>()->fusion_backend_config()
: FusionBackendConfig::default_instance();
return Create(std::move(backend_config),
HloFusionAdaptor::ForInstruction(fusion), device_info);
}
bool HloFusionAnalysis::HasConsistentTransposeHeros() const {
return tiled_transpose_.has_value();
}
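// Concatenate fusion is only used for a single, non-tuple-shaped root whose
// hero is a concatenate with at most four operands.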
static bool UseConcatenateFusion(
absl::Span<const HloInstructionAdaptor> roots,
absl::Span<const HloInstructionAdaptor> heroes) {
if (heroes.size() != 1) return false;
if (heroes.front().opcode() != HloOpcode::kConcatenate) return false;
if (roots.front().shape().IsTuple()) return false;
if (heroes.front().instruction().operand_count() > 4) return false;
return true;
}
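// Picks the emitter for this fusion. Kinds from the backend config (custom,
// Triton, cuDNN) take precedence; sub-byte element types force the
// input-slices or loop emitter; otherwise the choice is driven by the roots
// and heroes: reduction, transpose, input slices, scatter, concatenate, and
// finally the default loop emitter.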
HloFusionAnalysis::EmitterFusionKind HloFusionAnalysis::GetEmitterFusionKind()
const {
if (fusion_backend_config_.kind() == kCustomFusionKind) {
return EmitterFusionKind::kCustomFusion;
}
if (fusion_backend_config_.kind() == kTritonFusionKind ||
fusion_backend_config_.kind() == kTritonGemmFusionKind) {
return EmitterFusionKind::kTriton;
}
if (fusion_backend_config_.kind() == kCuDnnFusionKind) {
return EmitterFusionKind::kCuDnn;
}
if (input_output_info_.smallest_input_dtype_bits < 8 ||
input_output_info_.smallest_output_dtype_bits < 8) {
if (fusion_roots_.size() > 1 &&
IsInputFusibleNonStridedSlices(fusion_roots_) &&
AllSliceInputsAreCompatible(fusion_roots_)) {
return EmitterFusionKind::kInputSlices;
}
return EmitterFusionKind::kLoop;
}
std::optional<HloInstructionAdaptor> first_reduce_hero;
for (auto [root, hero] : llvm::zip(fusion_roots_, fusion_heroes_)) {
if (IsRealReductionHero(root.instruction(), hero.instruction())) {
first_reduce_hero = hero;
break;
}
}
if (first_reduce_hero.has_value()) {
bool valid_shapes = true;
Shape hero_operand_shape = first_reduce_hero->GetOperand(0).shape();
for (auto [root, hero] : llvm::zip(fusion_roots_, fusion_heroes_)) {
if (root == *first_reduce_hero) {
continue;
}
if (!IsRealReductionHero(root.instruction(), hero.instruction())) {
if (ShapeUtil::ElementsIn(root.shape()) !=
ShapeUtil::ElementsIn(hero_operand_shape)) {
valid_shapes = false;
break;
}
} else if (!AreReductionsMultiOutputFusionCompatible(
&hero.instruction(), &first_reduce_hero->instruction())) {
valid_shapes = false;
break;
}
}
if (valid_shapes) {
return EmitterFusionKind::kReduction;
}
}
if (HasConsistentTransposeHeros() && tiled_transpose_->permutation[2] != 2) {
return EmitterFusionKind::kTranspose;
}
if (fusion_roots_.size() > 1) {
if (IsInputFusibleNonStridedSlices(fusion_roots_) &&
AllSliceInputsAreCompatible(fusion_roots_)) {
return EmitterFusionKind::kInputSlices;
}
return EmitterFusionKind::kLoop;
}
if (fusion_roots_[0].opcode() == HloOpcode::kScatter) {
return EmitterFusionKind::kScatter;
}
if (UseConcatenateFusion(fusion_roots_, fusion_heroes_)) {
return EmitterFusionKind::kConcatenate;
}
return EmitterFusionKind::kLoop;
}
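// Returns the first real reduction hero; only meaningful for reduction
// fusions (returns nullptr for any other emitter kind).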
const HloInstruction* HloFusionAnalysis::FindHeroReduction() const {
if (GetEmitterFusionKind() != EmitterFusionKind::kReduction) {
return nullptr;
}
const auto& roots = fusion_roots();
CHECK(!roots.empty());
for (auto [root, hero] : llvm::zip(roots, fusion_heroes_)) {
if (IsRealReductionHero(root.instruction(), hero.instruction())) {
return &hero.instruction();
}
}
LOG(FATAL) << "Did not find a hero reduction";
}
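// Analyzes the fusion of `producer` into `consumer`, using the consumer's
// fusion backend config when present and the producer's otherwise.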
HloFusionAnalysis AnalyzeProducerConsumerFusion(
const HloInstruction& producer, const HloInstruction& consumer,
const se::DeviceDescription& device_info) {
return HloFusionAnalysis::Create(
consumer.has_backend_config()
? consumer.backend_config<GpuBackendConfig>()->fusion_backend_config()
: producer.backend_config<GpuBackendConfig>()
->fusion_backend_config(),
HloFusionAdaptor::ForProducerConsumer(&producer, &consumer),
&device_info);
}
HloFusionAnalysis AnalyzeFusion(const HloInstruction& consumer,
const se::DeviceDescription& device_info) {
return HloFusionAnalysis::Create(
consumer.backend_config<GpuBackendConfig>()->fusion_backend_config(),
HloFusionAdaptor::ForInstruction(&consumer), &device_info);
}
}  // namespace gpu
} | #include "xla/service/gpu/hlo_fusion_analysis.h"
#include <gtest/gtest.h>
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_description.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class HloFusionAnalysisTest : public HloTestBase {};
TEST_F(HloFusionAnalysisTest, DoesNotPeekOutsideBoundary) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %bitcast = s32[] bitcast(%reduce)
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kLoop);
auto analysis_fused =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis_fused.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionWithMultipleUsers) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
%negate = f32[] negate(%reduce)
%log = f32[] log(%reduce)
ROOT %tuple = (f32[], f32[]) tuple(%negate, %log)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kLoop, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction()),
&device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %negate = f32[] negate(%reduce)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = f32[] fusion(%p0, %p1), kind=kInput, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(root), &device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFused) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%fusion = f32[] fusion(%p0, %p1), kind=kInput, calls=fusion
ROOT %negate = f32[] negate(%fusion)
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFusedInConsumer) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[] parameter(0)
ROOT %negate = f32[] negate(%p0)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %fusion = f32[] fusion(%reduce), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFusedInBoth) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion.1 {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
ROOT %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add
}
fusion.2 {
%p0 = f32[] parameter(0)
ROOT %negate = f32[] negate(%p0)
}
ENTRY main {
%p0 = f32[1024] parameter(0)
%p1 = f32[] parameter(1)
%fusion.1 = f32[] fusion(%p0, %p1), kind=kInput, calls=fusion.1
ROOT %fusion.2 = f32[] fusion(%fusion.1), kind=kInput, calls=fusion.2
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ReduceMultiOutputFusionWithTransposeBitcast) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024, 512]{1,0} parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[1024]{0} reduce(%p0, %p1), dimensions={1}, to_apply=add
%bitcast = f32[512, 1024]{0,1} bitcast(%p0)
ROOT res = (f32[1024]{0}, f32[512, 1024]{0,1}) tuple(%reduce, %bitcast)
}
ENTRY main {
%p0 = f32[1024, 512]{1,0} parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[1024]{0}, f32[512, 1024]{0,1}) fusion(%p0, %p1), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, InvalidReduceMultiOutputFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[1024, 1024]{1,0} parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[1024]{0} reduce(%p0, %p1), dimensions={0}, to_apply=add
%reduce2 = f32[1024]{0} reduce(%p0, %p1), dimensions={1}, to_apply=add
ROOT res = (f32[1024]{0}, f32[1024]{0}) tuple(reduce, reduce2)
}
ENTRY main {
%p0 = f32[1024, 1024]{1,0} parameter(0)
%p1 = f32[] parameter(1)
ROOT %fusion = (f32[1024]{0}, f32[1024]{0}) fusion(%p0, %p1), kind=kInput, calls=fusion
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kLoop);
}
TEST_F(HloFusionAnalysisTest, InvalidDevice) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY main {
%p0 = f32[1024,128] parameter(0)
%p1 = f32[] parameter(1)
%reduce = f32[128] reduce(%p0, %p1), dimensions={0}, to_apply=add
ROOT %bitcast = s32[128] bitcast(%reduce)
})"));
stream_executor::GpuDeviceInfoProto device_info_proto;
stream_executor::DeviceDescription device_info(device_info_proto);
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
EXPECT_EQ(analysis_fused.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
}
TEST_F(HloFusionAnalysisTest, ConcatFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[128] parameter(0)
%p1 = f32[128] parameter(1)
%add = f32[128] add(p0, p0)
%concat = f32[256] concatenate(%add, %p1), dimensions={0}
ROOT %negate = f32[256] negate(%concat)
}
ENTRY main {
%p0 = f32[128] parameter(0)
%p1 = f32[128] parameter(1)
ROOT %fusion = f32[256] fusion(%p0, %p1), kind=kInput, calls=fused_computation
})"));
auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis = HloFusionAnalysis::Create(
FusionBackendConfig::default_instance(),
HloFusionAdaptor::ForInstruction(root), &device_info);
EXPECT_EQ(analysis.GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kConcatenate);
}
}  // namespace
} | 2,092 |
#ifndef XLA_SERVICE_GPU_SOFTMAX_REWRITER_TRITON_H_
#define XLA_SERVICE_GPU_SOFTMAX_REWRITER_TRITON_H_
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
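// Describes a matched diamond chain: `root` is the last instruction of the
// chain and `producer` is the instruction feeding the chain that stays outside
// the fusion (it becomes the fusion's first parameter).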
struct DiamondChainDescriptor {
HloInstruction* root = nullptr;
HloInstruction* producer = nullptr;
};
using DiamondMatchingDecision = std::variant<FusionDecision, HloInstruction*>;
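// Rewrites softmax-like diamonds (an elementwise-binary root fed by a
// reduction over the last dimension and a broadcast of that reduction) into
// single Triton fusions carrying a block-level fusion config.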
class SoftmaxRewriterTriton : public HloModulePass {
public:
explicit SoftmaxRewriterTriton(const se::DeviceDescription& device_info,
HloCostAnalysis::ShapeSizeFunction shape_size)
: device_info_(device_info), shape_size_(shape_size) {}
absl::string_view name() const override { return "triton-softmax-rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
absl::StatusOr<std::vector<DiamondChainDescriptor>>
FindAllFusibleDiamondChains(
HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads) const;
absl::Status FuseDiamondChain(const DiamondChainDescriptor& diamond_chain);
DiamondMatchingDecision MatchesTritonCompatibleClosedReductionDiamond(
HloInstruction* instr) const;
private:
const se::DeviceDescription& device_info_;
const HloCostAnalysis::ShapeSizeFunction shape_size_;
mlir::MLIRContext mlir_context_;
};
}  // namespace gpu
}  // namespace xla
#endif  // XLA_SERVICE_GPU_SOFTMAX_REWRITER_TRITON_H_
#include "xla/service/gpu/softmax_rewriter_triton.h"
#include <functional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/triton_support.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using hlo_query::IsBroadcastOfParameter;
using hlo_query::IsBroadcastOfScalarConstant;
bool HasDefaultLayout(const Shape& shape) {
return shape.has_layout() &&
LayoutUtil::IsMonotonicWithDim0Major(shape.layout());
}
bool TrivialEdge(HloInstruction** producer, HloInstruction* consumer,
HloOpcode opcode, const se::GpuComputeCapability& gpu_version);
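// A bitcast is a tiling no-op if its result is an effective scalar, or if both
// sides have the default layout and the bitcast either preserves the last
// dimension or is fed (through trivially fusible ops) by a reduce.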
bool BitcastIsTilingNoop(HloInstruction* bitcast,
const se::GpuComputeCapability& gpu_version) {
CHECK_EQ(bitcast->opcode(), HloOpcode::kBitcast);
if (ShapeUtil::IsEffectiveScalar(bitcast->shape())) {
return true;
}
auto last_dimension = [](const HloInstruction* instr) {
return instr->shape().dimensions().back();
};
HloInstruction* reduce = nullptr;
TrivialEdge(&reduce, bitcast->mutable_operand(0), HloOpcode::kReduce,
gpu_version);
return (HasDefaultLayout(bitcast->shape()) &&
HasDefaultLayout(bitcast->operand(0)->shape()) &&
(reduce != nullptr ||
last_dimension(bitcast->operand(0)) == last_dimension(bitcast)));
}
inline bool HasOneUse(const HloInstruction* instr) {
return instr->user_count() == 1;
}
bool IsBatchOrReductionDimBroadcast(const HloInstruction& hlo) {
CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast)
<< "Expected broadcast " << hlo.ToShortString();
CHECK_EQ(hlo.operand(0)->opcode(), HloOpcode::kParameter)
<< "Expected parameter " << hlo.operand(0)->ToShortString();
const HloBroadcastInstruction* broadcast =
Cast<HloBroadcastInstruction>(&hlo);
const HloParameterInstruction* parameter =
Cast<HloParameterInstruction>(hlo.operand(0));
if (parameter->shape().dimensions_size() + 1 !=
broadcast->shape().dimensions_size()) {
return false;
}
bool preserve_first_dim = broadcast->dimensions().front() == 0;
bool preserve_last_dim = broadcast->dimensions().back() ==
broadcast->shape().dimensions_size() - 1;
return !(preserve_first_dim && preserve_last_dim);
}
bool IsBroadcastOfAScalar(const HloInstruction& hlo) {
CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast)
<< "Expected broadcast " << hlo.ToShortString();
return ShapeUtil::IsScalar(hlo.operand(0)->shape());
}
bool IsSingleRowParameterBroadcast(const HloInstruction& hlo) {
CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast)
<< "Expected broadcast " << hlo.ToShortString();
CHECK_EQ(hlo.operand(0)->opcode(), HloOpcode::kParameter)
<< "Expected parameter " << hlo.operand(0)->ToShortString();
const HloBroadcastInstruction* broadcast =
Cast<HloBroadcastInstruction>(&hlo);
const HloParameterInstruction* parameter =
Cast<HloParameterInstruction>(hlo.operand(0));
if (parameter->shape().dimensions_size() != 1) {
return false;
}
return broadcast->dimensions()[0] == broadcast->shape().dimensions_size() - 1;
}
bool IsSupportedBroadcastOfParameter(const HloInstruction& hlo) {
return IsBroadcastOfParameter(hlo) &&
(IsBatchOrReductionDimBroadcast(hlo) || IsBroadcastOfAScalar(hlo) ||
IsSingleRowParameterBroadcast(hlo));
}
HloInstruction* ChooseOperandForFusionProcessing(HloInstruction* instr) {
CHECK_GT(instr->operand_count(), 0);
CHECK_LE(instr->operand_count(), 2);
if (instr->operand_count() > 1 &&
(IsBroadcastOfScalarConstant(*instr->operand(0)) ||
IsSupportedBroadcastOfParameter(*instr->operand(0)))) {
return instr->mutable_operand(1);
}
return instr->mutable_operand(0);
}
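// An instruction is trivially fusible if it has at most `num_allowed_users`
// users, a default layout, and is a tiling no-op bitcast, a Triton-supported
// unary elementwise op, or a Triton-supported binary elementwise op whose
// operands are identical or where exactly one operand is a broadcast of a
// scalar constant or a supported parameter broadcast.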
bool IsTriviallyFusible(HloInstruction* instr,
const se::GpuComputeCapability& gpu_version,
int num_allowed_users = 1) {
if (instr->user_count() > num_allowed_users ||
!HasDefaultLayout(instr->shape())) {
return false;
}
if (instr->opcode() == HloOpcode::kBitcast &&
BitcastIsTilingNoop(instr, gpu_version)) {
return true;
}
if (instr->IsElementwise() && instr->operand_count() == 1) {
return static_cast<bool>(
legacy_triton::IsTritonSupportedInstruction(*instr, gpu_version));
}
if (instr->IsElementwiseBinary()) {
const HloInstruction* operand_0 = instr->operand(0);
const HloInstruction* operand_1 = instr->operand(1);
if (operand_0 == operand_1) {
return static_cast<bool>(
legacy_triton::IsTritonSupportedInstruction(*instr, gpu_version));
}
if ((IsBroadcastOfScalarConstant(*operand_0) ||
IsSupportedBroadcastOfParameter(*operand_0)) ^
(IsBroadcastOfScalarConstant(*operand_1) ||
IsSupportedBroadcastOfParameter(*operand_1))) {
return static_cast<bool>(
legacy_triton::IsTritonSupportedInstruction(*instr, gpu_version));
}
}
return false;
}
bool TrivialEdge(HloInstruction** producer, HloInstruction* consumer,
HloOpcode opcode,
const se::GpuComputeCapability& gpu_version) {
while (consumer->opcode() != opcode) {
if (IsTriviallyFusible(consumer, gpu_version)) {
consumer = ChooseOperandForFusionProcessing(consumer);
} else {
return false;
}
}
*producer = consumer;
return true;
}
bool IsTriviallyConnectedProducerOf(
HloInstruction* producer, HloInstruction* consumer,
const se::GpuComputeCapability& gpu_version) {
if (producer == consumer) {
return true;
}
HloInstruction* found_producer = consumer;
while (
TrivialEdge(&found_producer, consumer, producer->opcode(), gpu_version)) {
if (found_producer == producer) {
return true;
}
if (!IsTriviallyFusible(found_producer, gpu_version)) {
return false;
}
consumer = found_producer->mutable_operand(0);
}
return false;
}
HloInstruction* FindFirstNonFusibleDiamondProducer(
HloInstruction* diamond_producer,
const se::GpuComputeCapability& gpu_version) {
if (IsTriviallyFusible(diamond_producer, gpu_version,
                         /*num_allowed_users=*/2)) {
diamond_producer = ChooseOperandForFusionProcessing(diamond_producer);
while (IsTriviallyFusible(diamond_producer, gpu_version)) {
diamond_producer = ChooseOperandForFusionProcessing(diamond_producer);
}
}
return diamond_producer;
}
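// Outlines the instructions between `producer` (exclusive) and `root`
// (inclusive) into a new "triton_softmax" fusion with backend kind
// kTritonFusionKind; the producer and any parameters encountered become
// parameters of the fusion computation.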
absl::StatusOr<HloFusionInstruction*> MakeFusionForDiamondChain(
const DiamondChainDescriptor& diamond_chain) {
auto [root, producer] = diamond_chain;
std::string suggested_name = "triton_softmax";
HloComputation::Builder builder(absl::StrCat(suggested_name, "_computation"));
absl::flat_hash_map<const HloInstruction*, HloInstruction*>
old_to_new_mapping;
int param = 0;
old_to_new_mapping[producer] =
builder.AddInstruction(HloInstruction::CreateParameter(
param, producer->shape(), absl::StrCat("parameter_", param)));
param++;
std::vector<HloInstruction*> parameters = {producer};
std::function<void(HloInstruction*)> create_computation =
[&](HloInstruction* instr) -> void {
if (old_to_new_mapping.contains(instr)) {
return;
}
std::vector<HloInstruction*> new_operands;
for (HloInstruction* operand : instr->mutable_operands()) {
create_computation(operand);
new_operands.push_back(old_to_new_mapping[operand]);
}
if (instr->opcode() == HloOpcode::kParameter) {
old_to_new_mapping[instr] =
builder.AddInstruction(HloInstruction::CreateParameter(
param, instr->shape(), absl::StrCat("parameter_", param)));
parameters.push_back(instr);
param++;
} else {
old_to_new_mapping[instr] = builder.AddInstruction(
instr->CloneWithNewOperands(instr->shape(), new_operands));
}
};
create_computation(root);
HloComputation* computation =
root->GetModule()->AddComputationAndUnifyNamesAndIds(builder.Build(),
                                                           /*is_entry=*/false);
HloInstruction* softmax_fusion =
root->parent()->AddInstruction(HloInstruction::CreateFusion(
root->shape(), HloInstruction::FusionKind::kCustom, parameters,
computation));
softmax_fusion->GetModule()->SetAndUniquifyInstrName(softmax_fusion,
"triton_softmax");
TF_ASSIGN_OR_RETURN(auto gpu_config,
softmax_fusion->backend_config<GpuBackendConfig>());
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind(std::string(kTritonFusionKind));
TF_RETURN_IF_ERROR(softmax_fusion->set_backend_config(gpu_config));
return xla::Cast<HloFusionInstruction>(softmax_fusion);
}
absl::Status FuseDiamondChainImpl(
const DiamondChainDescriptor& diamond_chain,
GpuPerformanceModelWithIndexingAnalysis& indexing_performance_model) {
TF_ASSIGN_OR_RETURN(HloFusionInstruction * softmax_fusion,
MakeFusionForDiamondChain(diamond_chain));
HloInstruction* root = diamond_chain.root;
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(softmax_fusion);
TF_ASSIGN_OR_RETURN(
TiledRunTimeDataOrError tiled_runtime_data_or,
indexing_performance_model.TryFindBestTilingForFusion(*fusion_adaptor));
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&tiled_runtime_data_or)) {
return absl::FailedPreconditionError(absl::StrCat(
"SymbolicTileAnalysis failed. ", fusion_decision->Explain()));
}
TiledRunTimeData tiled_runtime_data =
std::get<TiledRunTimeData>(std::move(tiled_runtime_data_or));
TF_ASSIGN_OR_RETURN(auto backend_config,
softmax_fusion->backend_config<GpuBackendConfig>());
*backend_config.mutable_fusion_backend_config()
->mutable_block_level_fusion_config() =
tiled_runtime_data.block_level_parameters.ToBlockLevelFusionConfig();
TF_RETURN_IF_ERROR(softmax_fusion->set_backend_config(backend_config));
if (root->IsRoot()) {
root->parent()->set_root_instruction(softmax_fusion);
TF_RETURN_IF_ERROR(
root->parent()->RemoveInstructionAndUnusedOperands(root));
} else {
TF_RETURN_IF_ERROR(
root->parent()->ReplaceInstruction(root, softmax_fusion));
}
VLOG(5) << softmax_fusion->ToString();
return absl::OkStatus();
}
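// Builds a temporary fusion for the diamond chain and checks whether
// SymbolicTileAnalysis can tile it; the temporary fusion and its computation
// are removed again before returning.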
absl::StatusOr<bool> CanSymbolicTileAnalysisTileDiamondChain(
const DiamondChainDescriptor& diamond_chain) {
TF_ASSIGN_OR_RETURN(HloFusionInstruction * softmax_fusion,
MakeFusionForDiamondChain(diamond_chain));
mlir::MLIRContext context;
SymbolicTileAnalysisOrError symbolic_tile_analysis_or_error =
SymbolicTileAnalysis::AnalyzeComputation(
*softmax_fusion->called_computation(), &context);
bool can_tile = std::holds_alternative<SymbolicTileAnalysis>(
symbolic_tile_analysis_or_error);
TF_RETURN_IF_ERROR(diamond_chain.root->GetModule()->RemoveEmbeddedComputation(
softmax_fusion->called_computation()));
TF_RETURN_IF_ERROR(
diamond_chain.root->parent()->RemoveInstruction(softmax_fusion));
return can_tile;
}
}  // namespace
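// Matches a closed reduction diamond: an elementwise-binary root whose second
// operand trivially reaches a broadcast of a reduction, and whose first
// operand is trivially connected to the reduction's producer. Returns the
// diamond's producer on success, or a FusionDecision explaining the failure.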
DiamondMatchingDecision
SoftmaxRewriterTriton::MatchesTritonCompatibleClosedReductionDiamond(
HloInstruction* instr) const {
if (!instr->IsElementwiseBinary()) {
return "Root is not elementwise binary.";
}
if (!legacy_triton::IsTritonSupportedInstruction(
*instr, device_info_.gpu_compute_capability())) {
return "Root is not supported for Triton instruction.";
}
HloInstruction* producer;
HloInstruction* broadcast;
HloInstruction* reduce;
if (!TrivialEdge(&broadcast, instr->mutable_operand(1), HloOpcode::kBroadcast,
device_info_.gpu_compute_capability())) {
return "Could not find a trivial connection from root to a broadcast.";
}
if (!TrivialEdge(&reduce, broadcast->mutable_operand(0), HloOpcode::kReduce,
device_info_.gpu_compute_capability())) {
return "Could not find a trivial connection from matched broadcast to a "
"reduction.";
}
if (!(HasDefaultLayout(broadcast->shape()) &&
HasDefaultLayout(reduce->shape()))) {
return "Broadcast or reduce have non-default layouts.";
}
if (CodegenDecision is_supported =
legacy_triton::IsTritonSupportedInstruction(
*reduce, device_info_.gpu_compute_capability());
!is_supported) {
VLOG(3) << is_supported.Explain();
return is_supported;
}
if (!HasOneUse(broadcast) || !HasOneUse(reduce)) {
return "More than one use of broadcast or reduce.";
}
producer = reduce->mutable_operand(0);
if (absl::c_linear_search(broadcast->dimensions(),
broadcast->shape().rank() - 1)) {
return "Broadcast is not along the reduction dimension.";
}
while (IsTriviallyFusible(producer, device_info_.gpu_compute_capability())) {
producer = ChooseOperandForFusionProcessing(producer);
}
if (!HasDefaultLayout(producer->shape())) {
return "Producer has non-default layout.";
}
if (!IsTriviallyConnectedProducerOf(producer, instr->mutable_operand(0),
device_info_.gpu_compute_capability())) {
return "Producer is not trivially connected.";
}
if (producer != instr->operand(0) && instr->operand(0)->user_count() != 1) {
return "Unsupported root-producer connection.";
}
VLOG(5) << "Matched Softmax diamond with: ";
VLOG(5) << "root: " << instr->ToString();
VLOG(5) << "producer: " << producer->ToString();
VLOG(5) << "broadcast: " << broadcast->ToString();
VLOG(5) << "reduce: " << reduce->ToString();
return producer;
}
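// Finds all reduction diamonds (with F16/F32/BF16 roots) that
// SymbolicTileAnalysis can tile, and merges adjacent diamonds sharing the same
// reduction dimension size into longer chains, each described by its root and
// producer.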
absl::StatusOr<std::vector<DiamondChainDescriptor>>
SoftmaxRewriterTriton::FindAllFusibleDiamondChains(
HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
std::vector<DiamondChainDescriptor> matched_diamonds;
for (HloComputation* comp :
module.MakeNonfusionComputations(execution_threads)) {
if (comp->IsCustomCallComputation()) {
continue;
}
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
PrimitiveType element_ty = instr->shape().element_type();
if (element_ty != F16 && element_ty != F32 && element_ty != BF16) {
continue;
}
auto producer = MatchesTritonCompatibleClosedReductionDiamond(instr);
if (std::holds_alternative<HloInstruction*>(producer)) {
DiamondChainDescriptor diamond_chain{
instr, std::get<HloInstruction*>(producer)};
TF_ASSIGN_OR_RETURN(
bool can_tile_diamond_chain,
CanSymbolicTileAnalysisTileDiamondChain(diamond_chain));
if (can_tile_diamond_chain) {
matched_diamonds.push_back(diamond_chain);
} else {
VLOG(5) << "Cannot tile the diamond pattern described by "
<< "instructions " << instr->ToString() << " and "
<< std::get<HloInstruction*>(producer)->ToString() << ".";
continue;
}
} else {
VLOG(5) << "Cannot match the diamond pattern for instruction "
<< instr->ToString()
<< ". Reason: " << std::get<FusionDecision>(producer).Explain();
}
}
}
if (matched_diamonds.empty()) {
return std::vector<DiamondChainDescriptor>();
}
auto reduction_dimension_size_from_diamond_root =
[](HloInstruction* diamond_root) {
HloInstruction* instr = diamond_root->mutable_operand(1);
while (instr->opcode() != HloOpcode::kReduce) {
instr = ChooseOperandForFusionProcessing(instr);
}
int operand_rank = instr->operand(0)->shape().rank();
CHECK_EQ(instr->dimensions().size(), 1);
CHECK_EQ(instr->dimensions(0), operand_rank - 1);
return instr->operand(0)->shape().dimensions(operand_rank - 1);
};
auto last_trivially_fusible_user = [&](HloInstruction* instr) {
while (HasOneUse(instr) && !instr->IsRoot() &&
IsTriviallyFusible(instr->users().front(),
device_info_.gpu_compute_capability())) {
instr = instr->users().front();
}
if (HasOneUse(instr) && !instr->IsRoot() &&
IsTriviallyFusible(
instr->users().front(), device_info_.gpu_compute_capability(),
instr->users().front()->user_count())) {
instr = instr->users().front();
}
return instr;
};
std::vector<DiamondChainDescriptor> diamond_chains;
diamond_chains.reserve(matched_diamonds.size());
HloInstruction* current_fusion_producer = FindFirstNonFusibleDiamondProducer(
matched_diamonds.front().producer, device_info_.gpu_compute_capability());
int current_reduce_dimension_size =
reduction_dimension_size_from_diamond_root(matched_diamonds.front().root);
for (int diamond_idx = 1; diamond_idx < matched_diamonds.size();
++diamond_idx) {
auto [diamond_root, diamond_producer] = matched_diamonds[diamond_idx];
HloInstruction* previous_diamond_root =
matched_diamonds[diamond_idx - 1].root;
HloInstruction* first_non_fusible_diamond_producer =
FindFirstNonFusibleDiamondProducer(
diamond_producer, device_info_.gpu_compute_capability());
int diamond_reduce_dimension_size =
reduction_dimension_size_from_diamond_root(diamond_root);
if (first_non_fusible_diamond_producer == previous_diamond_root &&
((first_non_fusible_diamond_producer != diamond_producer &&
HasOneUse(first_non_fusible_diamond_producer)) ||
(first_non_fusible_diamond_producer == diamond_producer &&
first_non_fusible_diamond_producer->user_count() == 2)) &&
diamond_reduce_dimension_size == current_reduce_dimension_size) {
continue;
}
diamond_chains.push_back(DiamondChainDescriptor{
last_trivially_fusible_user(previous_diamond_root),
current_fusion_producer,
});
current_fusion_producer = first_non_fusible_diamond_producer;
current_reduce_dimension_size = diamond_reduce_dimension_size;
}
diamond_chains.push_back(DiamondChainDescriptor{
last_trivially_fusible_user(matched_diamonds.back().root),
current_fusion_producer});
std::vector<DiamondChainDescriptor> filtered_diamond_chains;
for (const DiamondChainDescriptor& diamond_chain : diamond_chains) {
TF_ASSIGN_OR_RETURN(bool can_tile_diamond_chain,
CanSymbolicTileAnalysisTileDiamondChain(diamond_chain));
if (can_tile_diamond_chain) {
filtered_diamond_chains.push_back(diamond_chain);
}
}
return filtered_diamond_chains;
}
absl::Status SoftmaxRewriterTriton::FuseDiamondChain(
const DiamondChainDescriptor& diamond_chain) {
HloFusionAnalysisCache fusion_analysis_cache(device_info_);
GpuPerformanceModelWithIndexingAnalysis indexing_performance_model(
&device_info_, &fusion_analysis_cache, shape_size_, &mlir_context_);
return FuseDiamondChainImpl(diamond_chain, indexing_performance_model);
}
absl::StatusOr<bool> SoftmaxRewriterTriton::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto cuda_compute_capability = std::get_if<se::CudaComputeCapability>(
&device_info_.gpu_compute_capability());
if (!cuda_compute_capability) {
return absl::FailedPreconditionError(
"Triton support is only enabled for CUDA GPUs.");
} else if (!cuda_compute_capability->IsAtLeastAmpere()) {
return absl::FailedPreconditionError(
absl::StrCat("Triton support is only enabled for Ampere GPUs (compute ",
"capability 8.0) and up, but got compute capability ",
cuda_compute_capability->major, ".",
cuda_compute_capability->minor, "."));
}
TF_ASSIGN_OR_RETURN(std::vector<DiamondChainDescriptor> diamond_chains,
FindAllFusibleDiamondChains(*module, execution_threads));
if (diamond_chains.empty()) {
return false;
} | #include "xla/service/gpu/softmax_rewriter_triton.h"
#include <cstdint>
#include <memory>
#include <string>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/base/optimization.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using ::testing::HasSubstr;
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
bool HasBlockLevelFusionConfig(const HloInstruction* fusion) {
return fusion->opcode() == HloOpcode::kFusion &&
fusion->has_backend_config() &&
fusion->backend_config<GpuBackendConfig>().ok() &&
fusion->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.has_block_level_fusion_config();
}
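// Test helper that matches and fuses diamond chains directly, without going
// through SoftmaxRewriterTriton::Run and its compute-capability check.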
absl::StatusOr<bool> SoftmaxRewriterTritonMatchAndRewrite(
const se::DeviceDescription& device_info, HloModule* module) {
CHECK_NE(module, nullptr);
SoftmaxRewriterTriton softmax_rewriter_triton(device_info,
ShapeSizeBytesFunction());
TF_ASSIGN_OR_RETURN(std::vector<DiamondChainDescriptor> diamond_chains,
softmax_rewriter_triton.FindAllFusibleDiamondChains(
*module, {}));
for (auto diamond_chain = diamond_chains.rbegin();
diamond_chain != diamond_chains.rend(); ++diamond_chain) {
TF_RETURN_IF_ERROR(
softmax_rewriter_triton.FuseDiamondChain(*diamond_chain));
}
return !diamond_chains.empty();
}
class SoftmaxRewriterTritonTest
: public HloTestBase,
public ::testing::WithParamInterface<PrimitiveType> {
protected:
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
};
TEST_P(SoftmaxRewriterTritonTest, CanFuseExactSoftmax) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
exponential = $0[127,125]{1,0} exponential(subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
VLOG(2) << module->ToString();
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig)));
break;
case F16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Divide(m::Exp(), m::Broadcast())));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest, CanFuseFirstSoftmaxDiamond) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
VLOG(2) << module->ToString();
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_F(SoftmaxRewriterTritonTest, CanNotFuseExactSoftmaxF64) {
const std::string hlo_string = R"(
HloModule softmax
max_computation {
arg_0 = f64[] parameter(0)
arg_1 = f64[] parameter(1)
ROOT maximum = f64[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = f64[] parameter(0)
arg_1.1 = f64[] parameter(1)
ROOT add = f64[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = f64[127,125]{1,0} parameter(0)
constant_neg_inf = f64[] constant(-inf)
reduce = f64[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = f64[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = f64[127,125]{1,0} subtract(param_0, broadcast)
exponential = f64[127,125]{1,0} exponential(subtract)
constant_zero = f64[] constant(0)
second_reduce = f64[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = f64[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = f64[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_F(SoftmaxRewriterTritonTest, CanFuseExactSoftmaxBF16) {
const std::string hlo_string = R"(
HloModule softmax
max_computation {
arg_0 = bf16[] parameter(0)
arg_1 = bf16[] parameter(1)
ROOT maximum = bf16[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = bf16[] parameter(0)
arg_1.1 = bf16[] parameter(1)
ROOT add = bf16[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = bf16[127,125]{1,0} parameter(0)
constant_neg_inf = bf16[] constant(-inf)
reduce = bf16[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = bf16[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = bf16[127,125]{1,0} subtract(param_0, broadcast)
exponential = bf16[127,125]{1,0} exponential(subtract)
constant_zero = bf16[] constant(0)
second_reduce = bf16[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = bf16[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = bf16[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithWrongLayout) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{0,1} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithWrongReduceDimension) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[125]{0} reduce(param_0, constant_neg_inf), dimensions={0}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={1}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithWrongBroadcastDimension) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[125,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[125]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[125,125]{1,0} broadcast(reduce), dimensions={1}
ROOT subtract = $0[125,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithExtraBroadcastUsage) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
ROOT multiply = $0[127,125]{1,0} multiply(broadcast, subtract)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseSoftmaxWithIntermediateUnaryElementwise) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
abs = $0[127,125]{1,0} abs(subtract)
exponential = $0[127,125]{1,0} exponential(abs)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig)));
break;
case F16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Divide()));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseTwoDiamondsWithSecondDiamondProducerEqualToFirstDiamondRoot) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(subtract, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
ROOT divide = $0[127,125]{1,0} divide(subtract, second_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig)));
break;
case F16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Divide()));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseDiamondWithTrailingUnaryElementwiseAtTheRoot) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
ROOT abs = $0[127,125]{1,0} abs(subtract)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest, CanFuseDiamondWithUnaryElementwisePrefix) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
abs = $0[127,125]{1,0} abs(param_0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(abs, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseDiamondWithMultipleBroadcastDimensions) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[1,3,125,125]{3,2,1,0} parameter(0)
bitcast = $0[3,125,125]{2,1,0} bitcast($0[1,3,125,125]{3,2,1,0} param_0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[3,125]{1,0} reduce($0[3,125,125]{2,1,0} bitcast, $0[] constant_neg_inf), dimensions={2}, to_apply=max_computation
broadcast = $0[1,3,125,125]{3,2,1,0} broadcast($0[3,125]{1,0} reduce), dimensions={1,2}
ROOT subtract = $0[1,3,125,125]{3,2,1,0} subtract($0[1,3,125,125]{3,2,1,0} param_0, $0[1,3,125,125]{3,2,1,0} broadcast)
})";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithNonConstantReducerIdentity) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
identity = $0[] parameter(1)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, identity), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithTritonIncompatibleRoot) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
divide = $0[127,125]{1,0} divide(param_0, broadcast)
ROOT remainder = $0[127,125]{1,0} remainder(divide, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithTritonIncompatibleReducer) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
if_0 = pred[] is-finite(arg_0)
c = $0[] convert(if_0)
ROOT maximum = $0[] maximum(c, arg_1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanFuseSoftmaxDiamondWithLastDimensionBitcastAfterReduce) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[3,127,125]{2,1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[3,127]{1,0} reduce(param_0, constant_neg_inf), dimensions={2}, to_apply=max_computation
bitcasted_reduce = $0[381]{0} bitcast(reduce)
broadcast = $0[381,125]{1,0} broadcast(bitcasted_reduce), dimensions={0}
bitcasted_broadcast = $0[3,127,125]{2,1,0} bitcast(broadcast)
ROOT subtract = $0[3,127,125]{2,1,0} subtract(param_0, bitcasted_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(
m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)));
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseSoftmaxDiamondWithTransposeBitcast) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
ENTRY main {
param_0 = $0[1,127,125]{2,1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
bitcasted_param_0 = $0[127,1,125]{2,0,1} bitcast(param_0)
reduce = $0[127,1]{0,1} reduce(bitcasted_param_0, constant_neg_inf), dimensions={2}, to_apply=max_computation
broadcast = $0[127,1,125]{2,0,1} broadcast(reduce), dimensions={0,1}
bitcasted_broadcast = $0[1,127,125]{2,1,0} bitcast(broadcast)
ROOT subtract = $0[1,127,125]{2,1,0} subtract(param_0, bitcasted_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_FALSE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseTwoDiamondsWithDifferentReductionAxisSizeTogether) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,625]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,625]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,625]{1,0} subtract(param_0, broadcast)
bitcasted_subtract = $0[127,5,125] bitcast(subtract)
exponential = $0[127,5,125] exponential(bitcasted_subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127,5] reduce(exponential, constant_zero), dimensions={2}, to_apply=add_computation
second_broadcast = $0[127,5,125] broadcast(second_reduce), dimensions={0,1}
ROOT divide = $0[127,5,125] divide(exponential, second_broadcast)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Bitcast(m::Fusion(m::Parameter())
.WithPredicate(
HasBlockLevelFusionConfig)))
.WithPredicate(HasBlockLevelFusionConfig)));
break;
case F16:
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Divide(m::Exp(), m::Broadcast())));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseTwoDiamondsWithExtraUsageForFirstDiamondRoot) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
exponential = $0[127,125]{1,0} exponential(subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
ROOT tuple = ($0[127,125]{1,0}, $0[127,125]{1,0}) tuple(divide, subtract)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(
m::Fusion(m::Fusion()).WithPredicate(HasBlockLevelFusionConfig),
m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig))));
break;
case F16:
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::Divide(),
m::Fusion(m::Parameter())
.WithPredicate(HasBlockLevelFusionConfig))));
break;
default:
ABSL_UNREACHABLE();
}
}
TEST_P(SoftmaxRewriterTritonTest,
CanNotFuseTwoDiamondsWithExtraUsageForSecondDiamondProducer) {
PrimitiveType data_type = GetParam();
const std::string hlo_string_template = R"(
HloModule softmax
max_computation {
arg_0 = $0[] parameter(0)
arg_1 = $0[] parameter(1)
ROOT maximum = $0[] maximum(arg_0, arg_1)
}
add_computation {
arg_0.1 = $0[] parameter(0)
arg_1.1 = $0[] parameter(1)
ROOT add = $0[] add(arg_0.1, arg_1.1)
}
ENTRY main {
param_0 = $0[127,125]{1,0} parameter(0)
constant_neg_inf = $0[] constant(-inf)
reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0}
subtract = $0[127,125]{1,0} subtract(param_0, broadcast)
exponential = $0[127,125]{1,0} exponential(subtract)
constant_zero = $0[] constant(0)
second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation
second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0}
divide = $0[127,125]{1,0} divide(exponential, second_broadcast)
ROOT tuple = ($0[127,125]{1,0}, $0[127,125]{1,0}) tuple(divide, exponential)
}
)";
const std::string hlo_string =
absl::Substitute(hlo_string_template,
primitive_util::LowercasePrimitiveTypeName(data_type));
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
EXPECT_TRUE(
SoftmaxRewriterTritonMatchAndRewrite(device_info_, module.get()).value());
EXPECT_TRUE(verifier().Run(module.get()).status().ok());
switch (data_type) {
case F32:
case BF16:
      EXPECT_THAT(
#ifndef XLA_SERVICE_GPU_GPU_LATENCY_HIDING_SCHEDULER_H_
#define XLA_SERVICE_GPU_GPU_LATENCY_HIDING_SCHEDULER_H_
#include <cstdint>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
CanonicalAsyncOp GpuGetCanonicalAsyncOp(const HloInstruction& hlo);
int64_t GetSizeOfShape(const Shape& shape, int pointer_size);
enum class GpuResourceType {
kGpuAsyncStreamSend0 = 0,
kGpuAsyncStreamSend1 = 1,
kGpuAsyncStreamRecv0 = 2,
kGpuAsyncStreamRecv1 = 3,
kGpuAsyncStreamCollectives = 4,
kGpuAsyncStreamComputes = 5,
kNumTargetResources = 6,
};
class GpuAsyncTrackerBase : public AsyncTracker {
public:
explicit GpuAsyncTrackerBase(
const SchedulerConfig& config,
GetCanonicalAsyncOpFunc func = GpuGetCanonicalAsyncOp);
bool IsSupportedAsyncDone(const HloInstruction& hlo) const override;
bool IsSupportedAsyncStart(const HloInstruction& hlo) const override;
void PostProcessScheduleGraph(
HloScheduleGraph* schedule_graph,
const LatencyEstimator* latency_estimator) const override;
};
class GpuAsyncTracker : public GpuAsyncTrackerBase {
public:
explicit GpuAsyncTracker(const SchedulerConfig& config);
ResourcesVector GetResourcesFromInstruction(
const HloInstruction& instr) const override;
int64_t GetNumTargetDefinedResources() const override;
int64_t GetNumAvailableResources(int64_t resource_type) const override;
absl::string_view GetResourceName(int64_t resource_type) const override;
ResourceHazardType GetResourceHazardType(
int64_t resource_type) const override;
int64_t GetNumResourcesPerInstruction(
int64_t resource_type, const HloInstruction& instr) const override;
};
class GpuLatencyEstimator : public ApproximateLatencyEstimator {
public:
explicit GpuLatencyEstimator(
int64_t pointer_size,
GetCanonicalAsyncOpFunc func = GpuGetCanonicalAsyncOp);
TimeCost NodeCost(const HloInstruction* instr) const override;
TimeCost GetLatencyBetween(const HloGraphNode& from,
const HloGraphNode& to) const override;
private:
int64_t pointer_size_;
};
}
}
#endif
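// Usage sketch (illustrative, not part of this header): the tracker and
// estimator defined above are typically created from a SchedulerConfig and
// handed to the latency-hiding scheduler, which queries
// GetResourcesFromInstruction / GetLatencyBetween while ordering the module.
// The exact wiring below is an assumption of this sketch:
//
//   SchedulerConfig config = MakeGpuSchedulerConfig();  // hypothetical helper
//   GpuAsyncTracker tracker(config);
//   GpuLatencyEstimator estimator(/*pointer_size=*/8);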
#include "xla/service/gpu/gpu_latency_hiding_scheduler.h"
#include <cstdint>
#include <tuple>
#include <utility>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace gpu {
namespace {
static constexpr int64_t kCostlyAllReduceThreshold = 30 * 1024 * 1024;
static constexpr int64_t kCostlyAllReduceMultiplier = 4;
bool IsNopInstruction(const HloInstruction& hlo) {
HloOpcode op = hlo.opcode();
return op == HloOpcode::kGetTupleElement || op == HloOpcode::kBitcast ||
op == HloOpcode::kConstant || op == HloOpcode::kParameter ||
hlo.IsEffectiveBitcast();
}
bool IsAsyncComputeOp(const HloInstruction& hlo) {
return (hlo.opcode() == HloOpcode::kAsyncStart ||
hlo.opcode() == HloOpcode::kAsyncDone) &&
!hlo_query::IsCollectiveCommunicationOp(hlo.async_wrapped_opcode()) &&
hlo.async_execution_thread() != hlo.parent()->execution_thread();
}
int64_t GetPipelineStream(const HloInstruction& start) {
auto it = start.frontend_attributes().map().find(kSendRecvPipelineAttr);
if (it != start.frontend_attributes().map().end() && it->second == "1") {
return 1;
}
return 0;
}
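// Example (illustrative): a send/recv start whose frontend attributes carry
// kSendRecvPipelineAttr with value "1" is assigned pipeline stream 1; a
// missing attribute or any other value maps to stream 0. In HLO text this is
// assumed to look roughly like:
//
//   send = (f32[8], u32[], token[]) send(data, tok), channel_id=1,
//       frontend_attributes={_xla_send_recv_pipeline="1"}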
std::pair<GpuResourceType, ResourceUsageType> GetP2PResourceAndUsage(
const HloInstruction& instr, const CanonicalAsyncOp& op) {
ResourceUsageType usage = op.outer == HloOpcode::kAsyncStart
? ResourceUsageType::kResourceRelease
: ResourceUsageType::kResourceOccupy;
int64_t pipeline = GetPipelineStream(instr);
HloOpcode opcode = op.inner;
GpuResourceType resource;
if (pipeline == 0) {
resource = opcode == HloOpcode::kSend
? GpuResourceType::kGpuAsyncStreamSend0
: GpuResourceType::kGpuAsyncStreamRecv0;
} else {
resource = opcode == HloOpcode::kSend
? GpuResourceType::kGpuAsyncStreamSend1
: GpuResourceType::kGpuAsyncStreamRecv1;
}
return {resource, usage};
}
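// Example (illustrative): for a send-start on pipeline stream 1 this returns
// {kGpuAsyncStreamSend1, kResourceRelease}, and the matching send-done maps
// to {kGpuAsyncStreamSend1, kResourceOccupy}; recv pairs behave the same way
// on the recv streams.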
}
int64_t GetSizeOfShape(const Shape& shape, int pointer_size) {
int64_t size = ShapeUtil::ByteSizeOf(shape, pointer_size);
if (shape.IsTuple() || shape.is_static()) {
return size;
}
int64_t metadata_size = sizeof(int32_t) * shape.dimensions_size();
return size + metadata_size;
}
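// Worked example (illustrative): a static f32[1024] shape yields just
// ByteSizeOf = 4096 bytes. A dynamic f32[<=1024] shape additionally reserves
// one s32 metadata slot per dimension, so the result is
// 4096 + 1 * sizeof(int32_t) = 4100 bytes.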
CanonicalAsyncOp GpuGetCanonicalAsyncOp(const HloInstruction& hlo) {
switch (hlo.opcode()) {
case HloOpcode::kSend:
return {HloOpcode::kAsyncStart, HloOpcode::kSend};
case HloOpcode::kSendDone:
return {HloOpcode::kAsyncDone, HloOpcode::kSend};
case HloOpcode::kRecv:
return {HloOpcode::kAsyncStart, HloOpcode::kRecv};
case HloOpcode::kRecvDone:
return {HloOpcode::kAsyncDone, HloOpcode::kRecv};
default:
return DefaultGetCanonicalAsyncOp(hlo);
}
}
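// Example (illustrative): point-to-point ops are folded into canonical async
// pairs, e.g. for a send/send-done pair
//
//   GpuGetCanonicalAsyncOp(*send)      -> {kAsyncStart, kSend}
//   GpuGetCanonicalAsyncOp(*send_done) -> {kAsyncDone,  kSend}
//
// so the generic async-pair machinery treats them like other async
// collectives; all remaining opcodes defer to DefaultGetCanonicalAsyncOp.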
GpuAsyncTrackerBase::GpuAsyncTrackerBase(const SchedulerConfig& config,
GetCanonicalAsyncOpFunc func)
: AsyncTracker(config, func) {}
bool GpuAsyncTrackerBase::IsSupportedAsyncDone(
const HloInstruction& hlo) const {
  return (hlo_query::IsAsyncCollectiveDoneOp(&hlo,
                                             /*include_send_recv=*/true) &&
!IsSyncCollective(hlo.operand(0))) ||
IsAsyncComputeOp(hlo);
}
bool GpuAsyncTrackerBase::IsSupportedAsyncStart(
const HloInstruction& hlo) const {
  return (hlo_query::IsAsyncCollectiveStartOp(&hlo,
                                              /*include_send_recv=*/true) &&
!IsSyncCollective(&hlo)) ||
IsAsyncComputeOp(hlo);
}
void GpuAsyncTrackerBase::PostProcessScheduleGraph(
HloScheduleGraph* schedule_graph,
const LatencyEstimator* latency_estimator) const {
for (auto inst : schedule_graph->GetOriginalInstrList()) {
if (inst->opcode() == HloOpcode::kRecv) {
if (inst->frontend_attributes().map().count(kSendRecvPipelineAttr) > 0) {
HloGraphNode& node = schedule_graph->GetNode(inst);
node.SetForceEarly(true);
VLOG(5) << "Setting force early for instruction: " << inst->ToString();
}
}
if (inst->has_backend_config()) {
auto gpu_config = inst->backend_config<GpuBackendConfig>();
if (gpu_config.ok()) {
HloGraphNode& node = schedule_graph->GetNode(inst);
node.SetForceDelay(gpu_config->force_earliest_schedule());
VLOG(5) << "Setting force delay for instruction: " << inst->ToString();
}
}
}
}
GpuAsyncTracker::GpuAsyncTracker(const SchedulerConfig& config)
: GpuAsyncTrackerBase(config) {}
ResourcesVector GpuAsyncTracker::GetResourcesFromInstruction(
const HloInstruction& instr) const {
CanonicalAsyncOp op = GetCanonicalAsyncOp(instr);
if (op.outer == HloOpcode::kAsyncStart || op.outer == HloOpcode::kAsyncDone) {
ResourceUsageType usage;
GpuResourceType resource;
if (op.inner == HloOpcode::kSend || op.inner == HloOpcode::kRecv) {
std::tie(resource, usage) = GetP2PResourceAndUsage(instr, op);
} else {
usage = op.outer == HloOpcode::kAsyncStart
? ResourceUsageType::kResourceRelease
: ResourceUsageType::kResourceOccupy;
resource = hlo_query::IsCollectiveCommunicationOp(op.inner)
? GpuResourceType::kGpuAsyncStreamCollectives
: GpuResourceType::kGpuAsyncStreamComputes;
}
return {std::make_pair(
GetFirstTargetDefinedResource() + static_cast<int64_t>(resource),
usage)};
}
return GpuAsyncTrackerBase::GetResourcesFromInstruction(instr);
}
int64_t GpuAsyncTracker::GetNumTargetDefinedResources() const {
return static_cast<int64_t>(GpuResourceType::kNumTargetResources);
}
int64_t GpuAsyncTracker::GetNumAvailableResources(int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetNumAvailableResources(resource_type);
}
CHECK_LT(resource_type,
first_target_resource +
static_cast<int64_t>(GpuResourceType::kNumTargetResources));
if ((resource_type - first_target_resource) ==
static_cast<int64_t>(GpuResourceType::kGpuAsyncStreamComputes)) {
return 2;
}
return 1;
}
absl::string_view GpuAsyncTracker::GetResourceName(
int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetResourceName(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
switch (static_cast<GpuResourceType>(resource_type - first_target_resource)) {
case GpuResourceType::kGpuAsyncStreamSend0:
return "kGpuAsyncStreamSend0";
case GpuResourceType::kGpuAsyncStreamSend1:
return "kGpuAsyncStreamSend1";
case GpuResourceType::kGpuAsyncStreamRecv0:
return "kGpuAsyncStreamRecv0";
case GpuResourceType::kGpuAsyncStreamRecv1:
return "kGpuAsyncStreamRecv1";
case GpuResourceType::kGpuAsyncStreamCollectives:
return "kGpuAsyncStreamCollectives";
case GpuResourceType::kGpuAsyncStreamComputes:
return "kGpuAsyncStreamComputes";
default:
return "kUnsupportedResource";
}
}
ResourceHazardType GpuAsyncTracker::GetResourceHazardType(
int64_t resource_type) const {
const int64_t first_target_resource = GetFirstTargetDefinedResource();
if (resource_type < first_target_resource) {
return GpuAsyncTrackerBase::GetResourceHazardType(resource_type);
}
CHECK_LE(resource_type,
first_target_resource + GetNumTargetDefinedResources());
return ResourceHazardType::kUnshareable;
}
int64_t GpuAsyncTracker::GetNumResourcesPerInstruction(
int64_t resource_type, const HloInstruction& instr) const {
int64_t num_resources =
GpuAsyncTrackerBase::GetNumResourcesPerInstruction(resource_type, instr);
if (num_resources <= 0 || instr.opcode() != HloOpcode::kWhile) {
return num_resources;
}
int64_t first_p2p_resource =
GetFirstTargetDefinedResource() +
static_cast<int64_t>(GpuResourceType::kGpuAsyncStreamSend0);
if (resource_type < first_p2p_resource ||
resource_type > first_p2p_resource + 4) {
return num_resources;
}
auto find_instruction_for_pipeline = [&](HloOpcode opcode, int64_t pipeline) {
for (auto user1 : instr.users()) {
if (user1->opcode() == HloOpcode::kGetTupleElement) {
for (auto user2 : user1->users()) {
if (user2->opcode() == opcode) {
if (GetPipelineStream(*user2) == pipeline) {
return true;
}
}
}
}
}
return false;
};
bool found;
if (resource_type == first_p2p_resource) {
found = find_instruction_for_pipeline(HloOpcode::kSendDone, 0);
} else if (resource_type == first_p2p_resource + 1) {
found = find_instruction_for_pipeline(HloOpcode::kSendDone, 1);
} else if (resource_type == first_p2p_resource + 2) {
found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 0);
} else {
found = find_instruction_for_pipeline(HloOpcode::kRecvDone, 1);
}
return num_resources - (found ? 1 : 0);
}
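// Summary of the function above (illustrative): when a while loop occupies a
// P2P resource, the reported count is reduced by one if a get-tuple-element
// of the while is already consumed by a send-done/recv-done on the matching
// pipeline stream, i.e. when the surrounding computation contains a pattern
// roughly like the following (sketched assuming pipeline stream 0):
//
//   while = (...) while(...), body=..., condition=...
//   gte   = get-tuple-element(while), index=k
//   done  = send-done(gte), channel_id=c   // pipeline attribute absent or "0"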
GpuLatencyEstimator::GpuLatencyEstimator(int64_t pointer_size,
GetCanonicalAsyncOpFunc func)
: ApproximateLatencyEstimator(func), pointer_size_(pointer_size) {}
ApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::NodeCost(
const HloInstruction* instr) const {
if (IsNopInstruction(*instr)) {
return 0.0;
}
if (instr->opcode() == HloOpcode::kCustomCall) {
if (IsCublasGemm(*instr) || IsCustomCallToDnnConvolution(*instr)) {
return ApproximateLatencyEstimator::kMediumCost;
}
return ApproximateLatencyEstimator::kMediumCost;
}
return ApproximateLatencyEstimator::NodeCost(instr);
}
ApproximateLatencyEstimator::TimeCost GpuLatencyEstimator::GetLatencyBetween(
const HloGraphNode& from, const HloGraphNode& to) const {
if (IsAsyncPair(from, to)) {
if (from.GetInstr().opcode() == HloOpcode::kRecv) {
return ApproximateLatencyEstimator::kLowLatency;
} else if (from.GetInstr().opcode() == HloOpcode::kSend) {
return ApproximateLatencyEstimator::kHighLatency * 10;
}
bool enable_approx_collectives =
from.GetInstr()
.GetModule()
->config()
.debug_options()
.xla_gpu_enable_approx_costly_collectives();
bool is_all_reduce = from.GetInstr().opcode() == HloOpcode::kAllReduceStart;
bool collective_size_exceeds_threshold =
GetSizeOfShape(from.GetInstr().shape(), pointer_size_) >
kCostlyAllReduceThreshold;
if (enable_approx_collectives && is_all_reduce &&
collective_size_exceeds_threshold) {
return ApproximateLatencyEstimator::kHighLatency *
kCostlyAllReduceMultiplier;
}
return ApproximateLatencyEstimator::kHighLatency;
}
return ApproximateLatencyEstimator::kLowLatency;
}
}
}
namespace xla::gpu {
namespace {
}
}
#ifndef XLA_SERVICE_GPU_GPU_CONVERT_ASYNC_COLLECTIVES_TO_SYNC_H_
#define XLA_SERVICE_GPU_GPU_CONVERT_ASYNC_COLLECTIVES_TO_SYNC_H_
#include <utility>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/convert_async_collectives_to_sync.h"
namespace xla {
namespace gpu {
class GpuConvertAsyncCollectivesToSync : public ConvertAsyncCollectivesToSync {
public:
using ConvertAsyncCollectivesToSync::ConvertAsyncCollectivesToSync;
absl::string_view name() const override {
return "gpu-convert-async-collectives-to-sync";
}
absl::Status ConvertAsyncInstructionsToSync(
HloComputation* computation,
absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
const override;
};
}
}
#endif
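// Usage sketch (illustrative): the pass takes an optional predicate that
// classifies instructions allowed to sit between an async start and its done
// without blocking the sync conversion, e.g.
//
//   GpuConvertAsyncCollectivesToSync pass(
//       HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kGetTupleElement,
//                        HloOpcode::kParameter>);
//   TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));
//
// This mirrors how the unit tests further below drive the pass.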
#include "xla/service/gpu/gpu_convert_async_collectives_to_sync.h"
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
absl::Status GpuConvertAsyncCollectivesToSync::ConvertAsyncInstructionsToSync(
HloComputation* computation,
absl::Span<const std::pair<HloInstruction*, HloInstruction*>> async_pairs)
const {
absl::flat_hash_map<HloInstruction*, HloInstruction*> replaced_ops;
CollectiveBackendConfig sync_config;
sync_config.set_is_sync(true);
for (auto& [async_start, async_done] : async_pairs) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
async_start->backend_config<GpuBackendConfig>());
*gpu_config.mutable_collective_backend_config() = sync_config;
TF_RETURN_IF_ERROR(async_start->set_backend_config(gpu_config));
replaced_ops[async_start] = nullptr;
replaced_ops[async_done] = async_start;
}
HloModule* module = computation->parent();
const HloInstructionSequence& sequence =
module->schedule().sequence(computation);
std::vector<HloInstruction*> new_sequence;
new_sequence.reserve(sequence.size());
for (HloInstruction* instr : sequence.instructions()) {
auto it = replaced_ops.find(instr);
if (it == replaced_ops.end()) {
new_sequence.push_back(instr);
continue;
}
if (it->second == nullptr) {
continue;
}
new_sequence.push_back(it->second);
new_sequence.push_back(instr);
}
module->schedule().set_sequence(computation, new_sequence);
return absl::OkStatus();
}
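// Illustrative effect on the schedule (sketch): given an original sequence
//
//   ..., all-reduce-start, some-independent-op, all-reduce-done, ...
//
// the start is tagged is_sync=true in its CollectiveBackendConfig and the
// sequence is rewritten so the start sits immediately before its done:
//
//   ..., some-independent-op, all-reduce-start, all-reduce-done, ...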
}
}
#include "xla/service/gpu/gpu_convert_async_collectives_to_sync.h"
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::IsFalse;
using ::testing::IsTrue;
class GpuConvertAsyncCollectivesToSyncTest : public HloTestBase {
public:
absl::Status RunPass(HloModule *module, bool expect_change,
HloPredicate is_nop = {}) {
TF_ASSIGN_OR_RETURN(bool changed,
GpuConvertAsyncCollectivesToSync{is_nop}.Run(module));
EXPECT_EQ(changed, expect_change);
return absl::OkStatus();
}
bool IsSync(HloModule *module, std::string_view name) {
const HloInstruction *inst = FindInstruction(module, name);
if (inst == nullptr) {
return false;
}
auto backend_config = inst->backend_config<GpuBackendConfig>()
.value()
.collective_backend_config();
return backend_config.is_sync();
}
HloPredicate is_nop_simple_ =
HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kGetTupleElement,
HloOpcode::kParameter>;
};
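// Note: is_nop_simple_ treats bitcast, get-tuple-element and parameter as
// no-ops, so such an instruction between an async start and its done does not
// block the conversion to sync (SimpleAllReduceWithNop below), whereas any
// other intervening instruction does (SimpleAllReduceWithNonNop).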
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduce) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNop) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3, replica_groups={{0,1}, {2,3}}
id2 = f32[] bitcast(id)
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true, is_nop_simple_));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectiveBroadcast) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
collective_broadcast {
p0 = u32[8] parameter(0)
ROOT result = u32[8] collective-broadcast(p0), replica_groups={{0,1}, {2,3}}
}
ENTRY main {
data = u32[8] parameter(0)
cb-start = ((u32[8]{0}), u32[8]{0}) async-start(u32[8]{0} %data), calls=collective_broadcast
ROOT %ars = u32[8]{0} async-done(((u32[8]{0}), u32[8]{0}) %cb-start), calls=collective_broadcast
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "cb-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllReduceWithNonNop) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
id2 = u32[] add(id, id)
ROOT done = u32[] all-reduce-done(start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), false));
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllGather) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
a1 = u32[1, 2] parameter(0)
ags = (u32[1, 2], u32[2, 2]) all-gather-start(a1), dimensions={0}, channel_id=3
ROOT allgather = u32[2,2] all-gather-done(ags)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "ags"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleCollectivePermute) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
ENTRY test_computation {
p = u32[2] parameter(0)
start = (u32[2], u32[2], u32[], u32[]) collective-permute-start(p), source_target_pairs={{0,1}, {1,0}}
ROOT done = u32[2] collective-permute-done(start)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleReduceScatter) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
reduce_scatter {
p0 = u32[8] parameter(0)
ROOT result = u32[4] reduce-scatter(p0), replica_groups={{0,3}, {1,2}},
dimensions={0}, to_apply=add
}
ENTRY main {
data = u32[8] parameter(0)
rs-start = ((u32[8]{0}), u32[4]{0}) async-start(u32[8]{0} %data), calls=reduce_scatter
ROOT %ars = u32[4]{0} async-done(((u32[8]{0}), u32[4]{0}) %rs-start), calls=reduce_scatter
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "rs-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, SimpleAllToAll) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
all_to_all {
p0 = u32[2] parameter(0)
ROOT result = u32[2] all-to-all(p0), dimensions={0}, replica_groups={{0,1},{2,3}}
}
ENTRY test_computation {
a1 = u32[2] parameter(0)
a2a-start = ((u32[2]), u32[2]) async-start(u32[2] a1), calls=all_to_all
ROOT a2s = u32[2] async-done(a2a-start), calls=all_to_all
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "a2a-start"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, ControlDeps) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
done1 = u32[] all-reduce-done(start1)
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4, control-predecessors={done1}
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightStreaming) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done1 = u32[] all-reduce-done(start1)
done2 = u32[] all-reduce-done(start2)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNested) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsTrue());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
TEST_F(GpuConvertAsyncCollectivesToSyncTest, MultipleInFlightNestedPartial) {
const absl::string_view hlo_string = R"(
HloModule test, is_scheduled=true
apply_op {
x = u32[] parameter(0)
y = u32[] parameter(1)
ROOT apply_op = u32[] add(x, y)
}
ENTRY test_computation {
id = u32[] replica-id()
start1 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=3
start2 = u32[] all-reduce-start(id), to_apply=apply_op, channel_id=4
done2 = u32[] all-reduce-done(start2)
id2 = u32[] add(done2, done2)
done1 = u32[] all-reduce-done(start1)
ROOT x = u32[] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK(RunPass(module.get(), true));
EXPECT_THAT(IsSync(module.get(), "start1"), IsFalse());
EXPECT_THAT(IsSync(module.get(), "start2"), IsTrue());
}
}
}
}
#ifndef XLA_SERVICE_GPU_TOPK_SPLITTER_H_
#define XLA_SERVICE_GPU_TOPK_SPLITTER_H_
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class TopKSplitter : public HloModulePass {
public:
explicit TopKSplitter(size_t split_threshold = 1024 * 1024)
: split_threshold_(split_threshold) {}
absl::string_view name() const override { return "topk-splitter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const size_t split_threshold_;
};
}
}
#endif
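// Usage sketch (illustrative): the pass is normally added to a pass pipeline
// with an explicit threshold, e.g.
//
//   HloPassPipeline pipeline("topk-splitter-pipeline");
//   pipeline.AddPass<TopKSplitter>(/*split_threshold=*/1024 * 1024);
//
// Inputs below the threshold, rows whose size is not a multiple of the
// required alignment, and calls with k > sqrt(n) are left untouched (see the
// early returns in the visitor below).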
#include "xla/service/gpu/topk_splitter.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <string>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr size_t kRequiredAlignment = 1024;
constexpr size_t kMaximumBatchSize = 1024;
class TopkSplitterVisitor : public DfsHloRewriteVisitor {
public:
explicit TopkSplitterVisitor(size_t split_threshold)
: split_threshold_(split_threshold) {}
absl::Status HandleCustomCall(HloInstruction* inst) override {
HloCustomCallInstruction* topk = DynCast<HloCustomCallInstruction>(inst);
if (topk == nullptr || topk->custom_call_target() != "TopK") {
return absl::OkStatus();
}
HloComputation* comp = inst->parent();
Shape data_shape = topk->operand(0)->shape();
bool has_batch = data_shape.dimensions_size() == 2;
if (has_batch && data_shape.dimensions(0) != 1) {
return absl::OkStatus();
}
size_t n = data_shape.dimensions(has_batch ? 1 : 0);
int64_t k = topk->shape().tuple_shapes(0).dimensions(has_batch ? 1 : 0);
if (k > sqrt(n)) {
return absl::OkStatus();
}
if (n % kRequiredAlignment != 0) {
return absl::OkStatus();
}
if (n < split_threshold_) return absl::OkStatus();
int new_batch =
std::min(absl::bit_floor(n / split_threshold_), kMaximumBatchSize);
int new_n = n / new_batch;
Shape split_input_shape =
ShapeUtil::MakeShape(data_shape.element_type(), {new_batch, new_n});
TF_ASSIGN_OR_RETURN(
HloInstruction * reshaped,
MakeReshapeHlo(split_input_shape, topk->mutable_operand(0)));
Shape batch_topk_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(data_shape.element_type(), {new_batch, k}),
ShapeUtil::MakeShape(S32, {new_batch, k})});
HloInstruction* batch_topk =
comp->AddInstruction(HloInstruction::CreateCustomCall(
            batch_topk_shape, {reshaped}, topk->to_apply(), "TopK",
            /*opaque=*/""));
TF_ASSIGN_OR_RETURN(HloInstruction * indices,
MakeGetTupleElementHlo(batch_topk, 1));
TF_ASSIGN_OR_RETURN(HloInstruction * values,
MakeGetTupleElementHlo(batch_topk, 0));
Shape iota_shape = ShapeUtil::MakeShape(S32, {new_batch});
TF_ASSIGN_OR_RETURN(
HloInstruction * fix,
MakeBinaryHlo(
HloOpcode::kMultiply, MakeIotaHlo(comp, iota_shape, 0),
MakeBroadcastHlo(MakeR0ConstantHlo<int32_t>(comp, new_n),
{}, iota_shape)));
TF_ASSIGN_OR_RETURN(
indices, MakeBinaryHlo(HloOpcode::kAdd, indices,
MakeBroadcastHlo(fix, {0}, indices->shape())));
Shape linear_index_shape = ShapeUtil::MakeShape(S32, {k * new_batch});
Shape linear_shape = ShapeUtil::ChangeElementType(
linear_index_shape, data_shape.element_type());
Shape linear_sort_shape =
ShapeUtil::MakeTupleShape({linear_shape, linear_index_shape});
HloInstruction* aggregated_sort =
comp->AddInstruction(HloInstruction::CreateSort(
linear_sort_shape, 0,
{*MakeReshapeHlo(linear_shape, values),
*MakeReshapeHlo(linear_index_shape, indices)},
            topk->to_apply(), /*is_stable=*/true));
auto slice_tuple = [&](HloInstruction* sort, const size_t index) {
return *MakeReshapeHlo(
topk->shape().tuple_shapes(index),
*MakeSliceHlo(*MakeGetTupleElementHlo(sort, index), {0}, {k}, {1}));
};
return ReplaceInstruction(topk,
comp->AddInstruction(HloInstruction::CreateTuple({
slice_tuple(aggregated_sort, 0),
slice_tuple(aggregated_sort, 1),
})));
}
private:
size_t split_threshold_;
};
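// Worked example (illustrative) of the split arithmetic in HandleCustomCall
// above, using the defaults (split_threshold = 2^20, kMaximumBatchSize =
// 1024): for n = 2^30 input elements,
//   new_batch = min(bit_floor(2^30 / 2^20), 1024) = 1024,
//   new_n     = 2^30 / 1024 = 2^20,
// so one TopK over 2^30 elements becomes a batched TopK over a [1024, 2^20]
// reshape, followed by a single stable sort of the 1024 * k candidates and a
// final slice of the top k.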
}
absl::StatusOr<bool> TopKSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return TopkSplitterVisitor(split_threshold_)
.RunOnModule(module, execution_threads);
}
}
}
#include "xla/service/gpu/topk_splitter.h"
#include <stdint.h>
#include <cstddef>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/topk_rewriter.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace m = ::xla::match;
namespace xla {
namespace gpu {
namespace {
using ::tsl::testing::IsOkAndHolds;
using TopkSplitterTest = HloTestBase;
constexpr absl::string_view kComparator = R"(
%compare {
%p.1.lhs.40628 = s32[] parameter(2)
%p.1.rhs.40629 = s32[] parameter(3)
%constant.40630 = pred[] constant(true)
%broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={}
%p.0.lhs.40626 = f32[] parameter(0)
%p.0.rhs.40627 = f32[] parameter(1)
%compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER
ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631)
})";
TEST_F(TopkSplitterTest, SplitsTopK) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,1073741824] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(), module.get()), IsOkAndHolds(true));
auto first_topk = m::CustomCall(m::Reshape(m::Parameter(0)));
auto slice_result = [&](auto input, size_t i) {
return m::Reshape(m::Slice(m::GetTupleElement(input, i)));
};
auto index_correction =
m::Broadcast(m::Multiply(m::Iota(), m::Broadcast(m::Constant())));
auto sorted = m::Sort(
m::Reshape(m::GetTupleElement(first_topk, 0)),
m::Reshape(m::Add(m::GetTupleElement(first_topk, 1), index_correction)));
EXPECT_TRUE(
Match(module->entry_computation()->root_instruction(),
m::Tuple(slice_result(sorted, 0), slice_result(sorted, 1))));
}
TEST_F(TopkSplitterTest, SplitsTopKNoBatchDimension) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1073741824] parameter(0)
ROOT %cc.2 = (f32[5], s32[5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(), module.get()), IsOkAndHolds(true));
auto first_topk = m::CustomCall(m::Reshape(m::Parameter(0)));
auto slice_result = [&](auto input, size_t i) {
return m::Reshape(m::Slice(m::GetTupleElement(input, i)));
};
auto index_correction =
m::Broadcast(m::Multiply(m::Iota(), m::Broadcast(m::Constant())));
auto sorted = m::Sort(
m::Reshape(m::GetTupleElement(first_topk, 0)),
m::Reshape(m::Add(m::GetTupleElement(first_topk, 1), index_correction)));
EXPECT_TRUE(
Match(module->entry_computation()->root_instruction(),
m::Tuple(slice_result(sorted, 0), slice_result(sorted, 1))));
}
TEST_F(TopkSplitterTest, SplitFailsUnderThreshold) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524288] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(
RunHloPass(TopKSplitter(1048576), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, SplitFailsUnaligned) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524289] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(1024), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, SplitFailsLargeK) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,524288] parameter(0)
ROOT %cc.2 = (f32[1,1024], s32[1,1024]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(RunHloPass(TopKSplitter(1024), module.get()),
IsOkAndHolds(false));
}
TEST_F(TopkSplitterTest, Equivalent) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%arg.1 = f32[1,16384] parameter(0)
ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(TopkDecomposer().Run(module.get()), IsOkAndHolds(true));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopKSplitter(1024).Run(module), IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
EXPECT_TRUE(HloDCE().Run(module).status().ok());
};
EXPECT_TRUE(RunAndCompare(std::move(module), std::nullopt, round_trip));
}
TEST_F(TopkSplitterTest, StableSorts) {
const std::string hlo_string = absl::Substitute(R"(
HloModule module
$0
ENTRY cluster {
%constant.1 = f32[] constant(42)
%broadcast.2= f32[1,16384] broadcast(f32[] %constant.1), dimensions={}
ROOT %cc.3 = (f32[1,5], s32[1,5]) custom-call(%broadcast.2), custom_call_target= "TopK", to_apply=%compare
})",
kComparator);
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
EXPECT_THAT(TopkDecomposer().Run(module.get()), IsOkAndHolds(true));
auto round_trip = [](HloModule* module) {
EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) {
return true;
}).Run(module),
IsOkAndHolds(true));
EXPECT_THAT(TopKSplitter(1024).Run(module), IsOkAndHolds(true));
EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true));
EXPECT_TRUE(HloDCE().Run(module).status().ok());
};
EXPECT_TRUE(RunAndCompare(std::move(module), std::nullopt, round_trip));
}
}
}
}
#ifndef XLA_SERVICE_GPU_CUDNN_FUSED_MHA_REWRITER_H_
#define XLA_SERVICE_GPU_CUDNN_FUSED_MHA_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/dnn.h"
namespace xla {
namespace gpu {
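// Rewrites attention subgraphs (BMM1 -> [scale/bias] -> softmax [-> dropout]
// -> BMM2, and the corresponding backward graphs) into cuDNN fused
// multi-headed attention custom calls when the compute capability and cuDNN
// version support them.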
class CudnnFusedMHARewriter : public HloModulePass {
public:
explicit CudnnFusedMHARewriter(se::CudaComputeCapability cc,
se::StreamExecutor* stream_executor)
: compute_capability_(cc), stream_executor_(stream_executor) {}
explicit CudnnFusedMHARewriter(se::CudaComputeCapability cc,
se::dnn::VersionInfo cudnn_version)
: compute_capability_(cc), cudnn_version_(cudnn_version) {}
absl::string_view name() const override {
return "cudnn-fused-multi-headed-attention-rewriter";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::CudaComputeCapability compute_capability_;
se::StreamExecutor* stream_executor_ = nullptr;
const se::dnn::VersionInfo cudnn_version_;
};
}
}
#endif
#include "xla/service/gpu/cudnn_fused_mha_rewriter.h"
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <optional>
#include <queue>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
namespace xla {
namespace gpu {
namespace {
namespace m = match;
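// Bookkeeping for a matched forward attention pattern: the two batched
// matmuls, the optional scale/bias, the softmax input, and dropout info.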
struct MatchFwdResult {
HloInstruction* matched_bmm_1 = nullptr;
HloInstruction* matched_bmm_2 = nullptr;
HloInstruction* matched_bias = nullptr;
HloInstruction* matched_scale = nullptr;
HloInstruction* matched_softmax_input = nullptr;
HloInstruction* matched_reduce_sum = nullptr;
double matched_dropout_rate = 0.0;
bool need_canonicalization = false;
bool is_training = false;
bool is_causal_mask = false;
bool has_match = false;
std::string matched_custom_call_name;
};
struct MatchBwdResult {
HloInstruction* matched_bmm_1_grad_1 = nullptr;
HloInstruction* matched_bmm_1_grad_2 = nullptr;
HloInstruction* matched_bmm_2_grad_1 = nullptr;
HloInstruction* matched_bmm_2_grad_2 = nullptr;
HloInstruction* matched_dbias = nullptr;
bool bmm_1_grad_1_need_canonicalization = false;
bool bmm_1_grad_2_need_canonicalization = false;
bool bmm_2_grad_1_need_canonicalization = false;
bool bmm_2_grad_2_need_canonicalization = false;
bool has_match = false;
std::string matched_custom_call_name;
};
template <typename Pattern>
auto OptionalReshape(Pattern pattern) {
auto shared = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(m::Reshape(shared), shared);
}
template <typename Pattern>
auto OptionalConvert(Pattern pattern) {
auto shared = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(m::Convert(shared), shared);
}
template <typename Pattern>
auto OptionalBitcast(Pattern pattern) {
auto shared = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(m::Bitcast(shared), shared);
}
template <typename Pattern>
auto OptionalBroadcast(Pattern pattern) {
auto shared = m::SharedSubpattern(pattern);
return m::AnyOf<HloInstruction>(m::Broadcast(shared), shared);
}
bool IsBatchedMatmul(const HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kDot) return false;
if (Cast<HloDotInstruction>(instr)->sparse_operands()) return false;
const DotDimensionNumbers& dot_dims = instr->dot_dimension_numbers();
bool is_batch_dot = !dot_dims.lhs_batch_dimensions().empty() ||
!dot_dims.rhs_batch_dimensions().empty();
return is_batch_dot;
}
bool IsSharingOperandWithFwdMha(HloInstruction* gemm) {
for (int64_t i = 0; i < gemm->operands().size(); i++) {
std::queue<HloInstruction*> visit_list;
visit_list.push(gemm->mutable_operand(i));
while (!visit_list.empty()) {
HloInstruction* current_instr = visit_list.front();
for (auto user : current_instr->users()) {
switch (user->opcode()) {
case HloOpcode::kBitcast:
case HloOpcode::kReshape:
case HloOpcode::kTranspose: {
visit_list.push(user);
break;
}
case HloOpcode::kCustomCall: {
if (IsFwdCustomCallTofMHA(*user)) {
return true;
}
} break;
default:
break;
}
}
visit_list.pop();
}
}
return false;
}
bool IsFirstFwdMatmul(HloInstruction* gemm) {
return IsBatchedMatmul(gemm) && !IsFwdCustomCallTofMHA(*gemm->operand(0)) &&
!IsFwdCustomCallTofMHA(*gemm->operand(1)) &&
!IsSharingOperandWithFwdMha(gemm);
}
bool IsScalar(const HloInstruction* instr) {
return ShapeUtil::IsEffectiveScalar(instr->shape());
}
bool IsReduceMax(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kReduce &&
instr->to_apply()->root_instruction()->opcode() == HloOpcode::kMaximum;
}
bool IsReduceSum(const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kReduce &&
instr->to_apply()->root_instruction()->opcode() == HloOpcode::kAdd;
}
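// Matches an unfused, numerically stable softmax: subtract the broadcast row
// max, exponentiate, then divide by the broadcast row sum. Optionally captures
// the softmax input and the reduce-sum instructions.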
auto GetUnfusedReduceMaxSumSoftmaxPattern(
HloInstruction** softmax_input = nullptr,
HloInstruction** softmax_reduce_sum = nullptr,
HloInstruction** softmax_reduce_sum_bcast = nullptr) {
auto unfused_softmax_max_subpattern = m::SharedSubpattern(
m::Subtract(
m::Op(),
m::Broadcast(OptionalConvert(
m::Op()
.WithPredicate(IsReduceMax)
.WithOneUse()
.WithOperand(0, OptionalBitcast(OptionalConvert(
m::Op(softmax_input).WithNumUser(2)))))))
.WithOneUse());
auto unfused_softmax_sum_subpattern = m::SharedSubpattern(m::Divide(
OptionalBitcast(m::Exp(unfused_softmax_max_subpattern)),
m::Broadcast(
softmax_reduce_sum_bcast,
OptionalConvert(
m::Op(softmax_reduce_sum)
.WithOperand(0, OptionalBitcast(OptionalConvert(
m::Exp(unfused_softmax_max_subpattern))))
.WithPredicate(IsReduceSum)
.WithAtMostNumUser(2)))
.WithAtMostNumUser(2)));
return unfused_softmax_sum_subpattern;
}
std::optional<double> GetConstantValue(const HloInstruction* inst) {
if (!IsScalar(inst)) {
return std::nullopt;
}
switch (inst->shape().element_type()) {
case F16:
return static_cast<float>(inst->literal().GetFirstElement<half>());
case BF16:
return static_cast<float>(inst->literal().GetFirstElement<bfloat16>());
case F32:
return inst->literal().GetFirstElement<float>();
case F64:
return inst->literal().GetFirstElement<double>();
default:
return std::nullopt;
}
}
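// The matched dropout constant is the scale 1 / (1 - rate), so the rate is
// recovered as 1 - 1 / scale.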
double GetDropoutRateFromHlo(HloInstruction* dropout) {
std::optional<double> dropout_rate_inv;
dropout_rate_inv = GetConstantValue(dropout);
if (!dropout_rate_inv.has_value()) {
return 0.0;
}
return (1.0 - (1.0 / *dropout_rate_inv));
}
bool IsComputeCapabilityAndCudnnSupported(
stream_executor::CudaComputeCapability cc,
stream_executor::dnn::VersionInfo cudnn_version,
stream_executor::dnn::VersionInfo supported_cudnn_version) {
if (cc.IsAtLeastAmpere() && cc.minor == 0 &&
cudnn_version >= supported_cudnn_version) {
return true;
}
VLOG(2) << absl::StrFormat(
"CudnnFusedMHARewriter did not run. Unsupported compute "
"capability(%s; major should be >= 8, minor should be 0) or cudnn version"
"(%s; should be >= %s)",
cc.ToString(), cudnn_version.ToString(),
supported_cudnn_version.ToString());
return false;
}
bool IsSupportedPrimitiveType(const HloInstruction* bmm) {
PrimitiveType dtype = bmm->shape().element_type();
return dtype == BF16 || dtype == F16;
}
std::vector<int64_t> GetDimensionVector(absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> dim_nums) {
std::vector<int64_t> vec(dim_nums.size());
for (int i = 0; i < dim_nums.size(); i++) {
vec[i] = dimensions.at(dim_nums.at(i));
}
return vec;
}
struct QKVLayout {
int64_t batch;
int64_t num_heads;
int64_t seqlen_q;
int64_t seqlen_kv;
int64_t hidden_dim;
};
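// Extracts (batch, num_heads, seqlen_q, seqlen_kv, hidden_dim) from the two
// batched matmuls; returns nullopt if the dimensions of BMM1 and BMM2 are
// inconsistent with each other.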
absl::StatusOr<std::optional<QKVLayout>> GetQKVLayout(
HloInstruction* bmm_1, HloInstruction* bmm_2, bool need_canonicalization) {
const DotDimensionNumbers& bmm1_dnums = bmm_1->dot_dimension_numbers();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> bmm1_s_q_dims,
GetNonContractingDims(bmm_1->operand(0)->shape(),
bmm1_dnums.lhs_batch_dimensions(),
bmm1_dnums.lhs_contracting_dimensions()));
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> bmm1_s_kv_dims,
GetNonContractingDims(bmm_1->operand(1)->shape(),
bmm1_dnums.rhs_batch_dimensions(),
bmm1_dnums.rhs_contracting_dimensions()));
std::vector<int64_t> bmm1_bh =
GetDimensionVector(bmm_1->operand(0)->shape().dimensions(),
bmm1_dnums.lhs_batch_dimensions());
std::vector<int64_t> bmm1_s_q = GetDimensionVector(
bmm_1->operand(0)->shape().dimensions(), bmm1_s_q_dims);
std::vector<int64_t> bmm1_s_kv = GetDimensionVector(
bmm_1->operand(1)->shape().dimensions(), bmm1_s_kv_dims);
std::vector<int64_t> bmm1_d =
GetDimensionVector(bmm_1->operand(0)->shape().dimensions(),
bmm1_dnums.lhs_contracting_dimensions());
TF_RET_CHECK(bmm1_bh.size() == 2);
TF_RET_CHECK(bmm1_s_q.size() == 1);
TF_RET_CHECK(bmm1_s_kv.size() == 1);
TF_RET_CHECK(bmm1_d.size() == 1);
const DotDimensionNumbers& bmm2_dnums = bmm_2->dot_dimension_numbers();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> bmm2_lhs_non_contracting_dims,
GetNonContractingDims(bmm_2->operand(0)->shape(),
bmm2_dnums.lhs_batch_dimensions(),
bmm2_dnums.lhs_contracting_dimensions()));
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> bmm2_rhs_non_contracting_dims,
GetNonContractingDims(bmm_2->operand(1)->shape(),
bmm2_dnums.rhs_batch_dimensions(),
bmm2_dnums.rhs_contracting_dimensions()));
std::vector<int64_t> bmm2_bh =
GetDimensionVector(bmm_2->operand(0)->shape().dimensions(),
bmm2_dnums.lhs_batch_dimensions());
std::vector<int64_t> bmm2_s_kv =
GetDimensionVector(bmm_2->operand(0)->shape().dimensions(),
bmm2_dnums.lhs_contracting_dimensions());
std::vector<int64_t> bmm2_s_q =
need_canonicalization
? GetDimensionVector(bmm_2->operand(1)->shape().dimensions(),
bmm2_rhs_non_contracting_dims)
: GetDimensionVector(bmm_2->operand(0)->shape().dimensions(),
bmm2_lhs_non_contracting_dims);
std::vector<int64_t> bmm2_d =
need_canonicalization
? GetDimensionVector(bmm_2->operand(0)->shape().dimensions(),
bmm2_lhs_non_contracting_dims)
: GetDimensionVector(bmm_2->operand(1)->shape().dimensions(),
bmm2_rhs_non_contracting_dims);
TF_RET_CHECK(bmm2_bh.size() == 2);
TF_RET_CHECK(bmm2_s_q.size() == 1);
TF_RET_CHECK(bmm2_s_kv.size() == 1);
TF_RET_CHECK(bmm2_d.size() == 1);
if (bmm1_bh[0] != bmm2_bh[0] || bmm1_bh[1] != bmm2_bh[1] ||
bmm1_s_q[0] != bmm2_s_q[0] || bmm1_s_kv[0] != bmm2_s_kv[0] ||
bmm1_d[0] != bmm2_d[0]) {
return std::nullopt;
}
QKVLayout qkv_layout;
qkv_layout.batch = bmm1_bh[0];
qkv_layout.num_heads = bmm1_bh[1];
qkv_layout.seqlen_q = bmm1_s_q[0];
qkv_layout.seqlen_kv = bmm1_s_kv[0];
qkv_layout.hidden_dim = bmm1_d[0];
return qkv_layout;
}
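// Checks the sequence-length and head-dimension constraints that the flash
// attention path requires for the given compute capability and cuDNN version.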
absl::StatusOr<bool> IsFlashAttention(
QKVLayout qkv_layout, bool is_training,
stream_executor::CudaComputeCapability cc,
stream_executor::dnn::VersionInfo cudnn_version) {
int64_t s_q = qkv_layout.seqlen_q;
int64_t s_kv = qkv_layout.seqlen_kv;
int64_t hidden_dim = qkv_layout.hidden_dim;
bool is_seqlen_supported = (!is_training || (s_q % 2 == 0 && s_kv % 2 == 0));
bool is_hidden_dim_supported = hidden_dim <= 128 && hidden_dim % 8 == 0;
bool is_flash_attention = is_seqlen_supported && is_hidden_dim_supported;
if (!is_flash_attention) return false;
if ((is_training && (s_q < 64 || s_kv < 64)) &&
!IsComputeCapabilityAndCudnnSupported(
cc, cudnn_version, stream_executor::dnn::VersionInfo(9, 0, 0))) {
VLOG(2) << "Flash attention training with seq < 64 not supported cuDNN < "
"9.0.0.";
return false;
}
if ((hidden_dim != 64 && hidden_dim != 128) &&
!IsComputeCapabilityAndCudnnSupported(
cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 6))) {
VLOG(2) << "Flash attention head dim != 64 or 128 not supported with cuDNN "
"< 8.9.6.";
return false;
}
if ((is_training && s_kv % 64 != 0) &&
!IsComputeCapabilityAndCudnnSupported(
cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 5))) {
VLOG(2) << "Flash attention training with seq kv % 64 != 0 not supported "
"with cuDNN < 8.9.5.";
return false;
}
if (!IsComputeCapabilityAndCudnnSupported(
cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 4))) {
VLOG(2) << "Require cuDNN 8.9.4 to run flash attention.";
return false;
}
return is_flash_attention;
}
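// Recognizes a causal mask built from iota/compare/select, including the
// rematerialized form and the form where the mask is threaded through a
// while-loop parameter.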
bool IsCausalMaskPattern(HloInstruction* mask) {
auto causal_mask =
m::Select(m::Compare(m::Iota(), m::Iota()), m::Broadcast(m::Constant()),
m::Broadcast(m::Constant()));
auto causal_mask_pattern_fwd_remat =
m::Broadcast(OptionalBitcast(causal_mask));
auto causal_mask_pattern_bwd = m::Broadcast(m::Convert(OptionalBitcast(
m::Minimum(m::Op(), m::Broadcast(OptionalBitcast(causal_mask))))));
HloInstruction* param = nullptr;
HloInstruction* gte = nullptr;
auto causal_mask_pattern_fwd = m::Broadcast(
      OptionalBitcast(m::GetTupleElement(&gte, m::Parameter(&param))));
auto causal_mask_pattern = m::AnyOf<HloInstruction>(
causal_mask_pattern_fwd_remat, causal_mask_pattern_fwd,
causal_mask_pattern_bwd);
if (Match(mask, causal_mask_pattern)) {
if (param != nullptr && param->parent()->IsWhileBodyComputation()) {
auto while_instr = param->parent()->WhileCallInstruction();
auto mask_index = gte->tuple_index();
auto actual_mask =
while_instr->mutable_operand(0)->mutable_operand(mask_index);
auto causal_mask_pattern_fwd =
OptionalBitcast(m::Convert(m::MinimumAnyOrder(
m::Op(),
OptionalBitcast(m::MinimumAnyOrder(
m::Op(), m::Broadcast(OptionalBitcast(causal_mask)))))));
return Match(actual_mask, causal_mask_pattern_fwd);
}
return true;
}
return false;
}
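// Matches softmax (optionally followed by dropout) feeding the given operand
// of BMM2, and records whether the graph looks like a training graph.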
MatchFwdResult MatchSoftmaxDropoutBmm(MatchFwdResult previous_result,
int64_t bmm2_operand_position,
HloInstruction* instr) {
MatchFwdResult match_result = previous_result;
HloInstruction* softmax_reduce_sum;
HloInstruction* softmax_reduce_sum_bcast;
HloInstruction* bmm_2;
HloInstruction* softmax_input;
HloInstruction* dropout = nullptr;
auto dropout_softmax_pattern_form_1 = m::Select(
m::Op(),
OptionalConvert(m::MultiplyAnyOrder(
OptionalBitcast(OptionalReshape(
OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern(
&softmax_input, &softmax_reduce_sum,
&softmax_reduce_sum_bcast)))),
m::Broadcast(
OptionalConvert(m::Constant(&dropout).WithPredicate(IsScalar))))),
m::Op());
auto dropout_softmax_pattern_form_2 =
OptionalBitcast(OptionalBitcast(OptionalConvert(m::MultiplyAnyOrder(
OptionalReshape(OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern(
&softmax_input, &softmax_reduce_sum, &softmax_reduce_sum_bcast))),
m::Broadcast(
OptionalConvert(OptionalBitcast(OptionalReshape(m::Select(
m::Op(),
m::Broadcast(m::Constant(&dropout).WithPredicate(IsScalar)),
m::Op())))))))));
auto dropout_softmax_pattern_form_3 = m::MultiplyAnyOrder(
m::MultiplyAnyOrder(
OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern(
&softmax_input, &softmax_reduce_sum, &softmax_reduce_sum_bcast)),
m::Op()),
m::Broadcast(m::Constant(&dropout).WithPredicate(IsScalar)));
auto softmax_dropout_bmm2_pattern =
m::Op(&bmm_2)
.WithPredicate(IsBatchedMatmul)
.WithOperand(bmm2_operand_position,
m::AnyOf<HloInstruction>(
OptionalBitcast(OptionalConvert(
GetUnfusedReduceMaxSumSoftmaxPattern(
&softmax_input, &softmax_reduce_sum,
&softmax_reduce_sum_bcast))),
dropout_softmax_pattern_form_1,
dropout_softmax_pattern_form_2,
dropout_softmax_pattern_form_3));
if (!Match(instr, softmax_dropout_bmm2_pattern) ||
!IsSupportedPrimitiveType(bmm_2)) {
match_result.has_match = false;
return match_result;
}
if (softmax_reduce_sum->users()[0]->opcode() == HloOpcode::kConvert) {
softmax_reduce_sum = softmax_reduce_sum->users()[0];
}
match_result.is_training = softmax_reduce_sum->user_count() == 2 &&
softmax_reduce_sum_bcast->user_count() == 2;
match_result.matched_bmm_2 = bmm_2;
if (dropout) {
match_result.matched_dropout_rate = GetDropoutRateFromHlo(dropout);
}
match_result.matched_softmax_input = softmax_input;
match_result.matched_reduce_sum = softmax_reduce_sum;
match_result.has_match = true;
return match_result;
}
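// Given the softmax input, matches BMM1 with an optional scale and optional
// bias and selects the corresponding cuDNN custom-call target.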
MatchFwdResult MatchBmm1UnfusedBiasSoftmaxBmm2(MatchFwdResult previous_result,
HloInstruction* softmax_input,
bool has_dropout) {
MatchFwdResult match_result = previous_result;
HloInstruction* bmm_1;
HloInstruction* bias = nullptr;
HloInstruction* scale = nullptr;
auto first_bmm_pattern =
m::SharedSubpattern(m::Op(&bmm_1).WithPredicate(IsBatchedMatmul));
auto unfused_scaled_bmm_subpattern = m::MultiplyAnyOrder(
OptionalConvert(first_bmm_pattern.WithOneUse()),
OptionalConvert(
m::Broadcast(m::Constant(&scale).WithPredicate(IsScalar))));
if (Match(softmax_input,
OptionalConvert(OptionalBitcast(m::AnyOf<HloInstruction>(
first_bmm_pattern, unfused_scaled_bmm_subpattern))))) {
match_result.matched_bmm_1 = bmm_1;
match_result.matched_scale = scale;
match_result.matched_custom_call_name =
has_dropout ? kCudnnfMHASoftmaxDropoutCallTarget
: kCudnnfMHASoftmaxCallTarget;
match_result.has_match = true;
} else if (Match(softmax_input,
OptionalBitcast(m::AddAnyOrder(
OptionalConvert(OptionalBitcast(m::AnyOf<HloInstruction>(
unfused_scaled_bmm_subpattern.WithOneUse(),
first_bmm_pattern.WithOneUse()))),
m::Op(&bias))))) {
match_result.matched_bmm_1 = bmm_1;
match_result.matched_scale = scale;
match_result.matched_custom_call_name =
has_dropout ? kCudnnfMHAScaleBiasSoftmaxDropoutCallTarget
: kCudnnfMHAScaleBiasSoftmaxCallTarget;
match_result.is_causal_mask |= IsCausalMaskPattern(bias);
if (!match_result.is_causal_mask &&
bias->opcode() == HloOpcode::kBroadcast) {
auto dims = Cast<HloBroadcastInstruction>(bias)->dimensions();
if (dims == std::vector<int64_t>{2, 3} ||
dims == std::vector<int64_t>{0, 2, 3} ||
dims == std::vector<int64_t>{1, 2, 3}) {
HloInstruction* bias_bc = bias->mutable_operand(0);
std::vector<int64_t> bitcast_dims(bias->shape().rank(), 1);
for (int dim : dims) {
bitcast_dims[dim] = bias->shape().dimensions()[dim];
}
bias = bias_bc->AddInstruction(HloInstruction::CreateBitcast(
ShapeUtil::MakeShape(bias->shape().element_type(), bitcast_dims),
bias_bc));
}
}
match_result.matched_bias = bias;
match_result.has_match = true;
} else {
match_result.has_match = false;
}
return match_result;
}
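// Tries to match the forward pattern with the softmax feeding either operand
// of BMM2; operand position 1 means BMM2 needs canonicalization afterwards.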
MatchFwdResult MatchFwdMHAPatternsForCanonicalization(HloInstruction* instr) {
MatchFwdResult match_result;
for (auto bmm2_operand_pos : {0, 1}) {
if (bmm2_operand_pos == 1) {
match_result.need_canonicalization = true;
}
bool has_dropout = false;
match_result =
MatchSoftmaxDropoutBmm(match_result, bmm2_operand_pos, instr);
if (!match_result.has_match) {
continue;
}
has_dropout = match_result.matched_dropout_rate > 0.0;
match_result = MatchBmm1UnfusedBiasSoftmaxBmm2(
match_result, match_result.matched_softmax_input, has_dropout);
if (match_result.has_match) {
return match_result;
}
}
match_result.need_canonicalization = false;
return match_result;
}
bool IsBmm2GradGemm2(HloInstruction* instr) {
return (instr->user_count() == 1) || (instr->user_count() == 2);
}
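// bmm_1_grad_1 is the other batched matmul that consumes BMM1's Q operand.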
MatchBwdResult MatchBmm1GradGemm1(MatchBwdResult previous_result,
HloInstruction* bmm_1) {
MatchBwdResult match_result = previous_result;
match_result.has_match = false;
const HloInstruction* q_tensor = bmm_1->operand(0);
for (int64_t i = 0; i < q_tensor->user_count(); i++) {
HloInstruction* q_tensor_user_i = q_tensor->users()[i];
if (IsBatchedMatmul(q_tensor_user_i) && q_tensor_user_i != bmm_1) {
match_result.matched_bmm_1_grad_1 = q_tensor_user_i;
if (match_result.matched_bmm_1_grad_1->operand_index(q_tensor) != 1) {
match_result.bmm_1_grad_1_need_canonicalization = true;
}
match_result.has_match = true;
}
}
return match_result;
}
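// bmm_1_grad_1 and bmm_1_grad_2 share the d(S) operand; find the other dot
// that consumes it.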
MatchBwdResult MatchBmm1GradGemm2(MatchBwdResult previous_result,
HloInstruction* fwd_fmha_call) {
HloInstruction* bmm_1_grad_2 = nullptr;
MatchBwdResult match_result = previous_result;
match_result.has_match = false;
int64_t d_s_index = match_result.bmm_1_grad_1_need_canonicalization ? 1 : 0;
HloInstruction* d_s_user_0 = match_result.matched_bmm_1_grad_1;
HloInstruction* d_s = d_s_user_0->mutable_operand(d_s_index);
if (d_s->opcode() == HloOpcode::kBitcast && d_s->user_count() == 1) {
d_s = d_s->mutable_operand(0);
}
auto bmm_1_grad_2_it = std::find_if(
d_s->users().begin(), d_s->users().end(), [&](HloInstruction* instr) {
return instr != match_result.matched_bmm_1_grad_1 &&
instr->opcode() == HloOpcode::kDot;
});
if (bmm_1_grad_2_it != d_s->users().end()) {
bmm_1_grad_2 = *bmm_1_grad_2_it;
} else {
return match_result;
}
match_result.matched_bmm_1_grad_2 = bmm_1_grad_2;
if (match_result.matched_bmm_1_grad_2->operand_index(d_s) != 0) {
match_result.bmm_1_grad_2_need_canonicalization = true;
}
match_result.has_match = true;
return match_result;
}
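// bmm_2_grad_1 is the batched matmul that consumes the softmax activation
// exposed as the forward custom call's second output.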
MatchBwdResult MatchBmm2GradGemm1(HloInstruction* fwd_fmha_call) {
HloInstruction* bmm_2_grad_1 = nullptr;
MatchBwdResult matched_result;
int64_t activation_out_gte_index = 1;
if (fwd_fmha_call->user_count() < 2 ||
fwd_fmha_call->users()[activation_out_gte_index]->opcode() !=
HloOpcode::kGetTupleElement ||
fwd_fmha_call->users()[activation_out_gte_index]->user_count() > 1 ||
!IsBatchedMatmul(
fwd_fmha_call->users()[activation_out_gte_index]->users()[0])) {
matched_result.has_match = false;
return matched_result;
}
bmm_2_grad_1 = fwd_fmha_call->users()[activation_out_gte_index]->users()[0];
matched_result.matched_bmm_2_grad_1 = bmm_2_grad_1;
if (bmm_2_grad_1->operand_index(
fwd_fmha_call->users()[activation_out_gte_index]) != 0) {
    matched_result.bmm_2_grad_1_need_canonicalization = true;
  }
  matched_result.has_match = true;
  return matched_result;
}
#include "xla/service/gpu/cudnn_fused_mha_rewriter.h"
#include <cstddef>
#include <memory>
#include <optional>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/computation_layout.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/cudnn_fused_mha_transpose_fusion.h"
#include "xla/service/hlo_cse.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_verifier.h"
#include "xla/service/layout_normalization.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/reshape_decomposer.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#include "third_party/gpus/cudnn/cudnn.h"
#endif
namespace xla {
namespace gpu {
namespace {
namespace m = xla::match;
class CudnnFusedMhaRewriterTestHloTest : public HloTestBase {
public:
se::CudaComputeCapability GetCudaComputeCapability() {
return se::CudaComputeCapability(8, 0);
}
se::CudaComputeCapability GetRealCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
se::dnn::VersionInfo GetCudnnVersion() {
return se::dnn::VersionInfo(8, 9, 4);
}
CudnnFusedMhaRewriterTestHloTest()
: HloTestBase(false,
false,
{}) {
#if !defined(GOOGLE_CUDA) || CUDA_VERSION < 12000
skip_reason_ = "cuDNN fused MHA requires CUDA 12 or later.";
return;
#endif
}
protected:
size_t CountFusedAttentionCall(HloModule* module, bool is_backward = false) {
return absl::c_count_if(module->entry_computation()->instructions(),
[&](const HloInstruction* instr) {
if (is_backward) {
return IsBwdCustomCallTofMHA(*instr);
} else {
return IsFwdCustomCallTofMHA(*instr);
}
});
}
DebugOptions GetDebugOptionsForTest() override {
auto debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cudnn_fmha(true);
debug_options.set_xla_gpu_fused_attention_use_cudnn_rng(true);
return debug_options;
}
HloModuleConfig GetModuleConfig() {
DebugOptions debug_options = GetDebugOptionsForTest();
HloModuleConfig config_with_fmha;
config_with_fmha.set_debug_options(debug_options);
return config_with_fmha;
}
std::optional<absl::string_view> skip_reason_;
};
class CudnnFusedMhaRewriterPipelineTest
: public CudnnFusedMhaRewriterTestHloTest {
public:
CudnnFusedMhaRewriterPipelineTest() {
if (skip_reason_) return;
#if !defined(GOOGLE_CUDA) || CUDNN_VERSION < 8800
skip_reason_ = "Pipeline test requires cuDNN 8.8.0 or later.";
return;
#endif
stream_executor::CudaComputeCapability cc = GetRealCudaComputeCapability();
if (!cc.IsAtLeastAmpere() || cc.minor != 0) {
skip_reason_ =
"Pipeline test requires Nvidia AMPERE+ GPUs with minor "
"compute capability == 0.";
return;
}
}
};
constexpr absl::string_view
hlo_BF16Bmm1SoftmaxBmm2Pattern_k_hidden_not_most_minor = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}}
region_0.7 {
Arg_0.8 = bf16[] parameter(0)
Arg_1.9 = bf16[] parameter(1)
ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9)
}
region_1.19 {
Arg_0.20 = f32[] parameter(0)
Arg_1.21 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.20, Arg_1.21)
}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,64]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
constant = bf16[] constant(-inf)
reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7
broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2}
subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3)
exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1)
convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1)
constant.1 = f32[] constant(0)
reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19
convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23)
broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2}
divide = bf16[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.4)
ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
})";
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1SoftmaxBmm2Pattern_bmm1_rhs_contracting_dim_not_most_minor) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(
hlo_BF16Bmm1SoftmaxBmm2Pattern_k_hidden_not_most_minor));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get()));
EXPECT_TRUE(result);
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0)
.WithShape(BF16, {16, 16, 256, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config();
EXPECT_EQ(config.bmm1_dot_dimension_numbers().rhs_contracting_dimensions()[0],
2);
}
constexpr absl::string_view
hlo_BF16Bmm1SoftmaxBmm2Pattern_q_hidden_not_most_minor = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}}
region_0.7 {
Arg_0.8 = bf16[] parameter(0)
Arg_1.9 = bf16[] parameter(1)
ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9)
}
region_1.19 {
Arg_0.20 = f32[] parameter(0)
Arg_1.21 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.20, Arg_1.21)
}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,64]{2,3,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
constant = bf16[] constant(-inf)
reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7
broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2}
subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3)
exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1)
convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1)
constant.1 = f32[] constant(0)
reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19
convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23)
broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2}
divide = bf16[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.4)
ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
})";
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1SoftmaxBmm2Pattern_bmm1_lhs_contracting_dim_not_most_minor) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(
hlo_BF16Bmm1SoftmaxBmm2Pattern_q_hidden_not_most_minor));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get()));
EXPECT_TRUE(result);
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0)
.WithShape(BF16, {16, 16, 256, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config();
EXPECT_EQ(config.bmm1_dot_dimension_numbers().lhs_contracting_dimensions()[0],
2);
EXPECT_EQ(config.bmm1_dot_dimension_numbers().rhs_contracting_dimensions()[0],
2);
}
constexpr absl::string_view
hlo_BF16Bmm1SoftmaxBmm2Pattern_v_hidden_dim_not_most_minor = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}}
region_0.7 {
Arg_0.8 = bf16[] parameter(0)
Arg_1.9 = bf16[] parameter(1)
ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9)
}
region_1.19 {
Arg_0.20 = f32[] parameter(0)
Arg_1.21 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.20, Arg_1.21)
}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{2,3,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,64]{2,3,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
constant = bf16[] constant(-inf)
reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7
broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2}
subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3)
exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1)
convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1)
constant.1 = f32[] constant(0)
reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19
convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23)
broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2}
divide = bf16[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.4)
ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
})";
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1SoftmaxBmm2Pattern_bmm2_non_contracting_dim_not_most_minor) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(
hlo_BF16Bmm1SoftmaxBmm2Pattern_v_hidden_dim_not_most_minor));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get()));
EXPECT_TRUE(result);
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0)
.WithShape(BF16, {16, 16, 256, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config();
EXPECT_EQ(config.bmm2_dot_dimension_numbers().lhs_contracting_dimensions()[0],
3);
EXPECT_EQ(config.bmm2_dot_dimension_numbers().rhs_contracting_dimensions()[0],
3);
}
TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1CombinedMaskBiasSoftmaxBmm2) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule jit__unnamed_wrapped_function_,
entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[1,16,256,256]{3,2,1,0},pred[16,1,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}}
region_0.32.clone {
Arg_0.0 = f32[] parameter(0)
Arg_1.0 = f32[] parameter(1)
ROOT maximum.1 = f32[] maximum(Arg_0.0, Arg_1.0)
}
region_1.44 {
Arg_0.45 = f32[] parameter(0)
Arg_1.46 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.45, Arg_1.46)
}
ENTRY main.61 {
Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated}
transpose.5 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_2.3), dimensions={0,2,3,1}
Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated}
transpose.6 = bf16[16,16,256,64]{3,2,1,0} transpose(Arg_0.1), dimensions={0,2,1,3}
Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated}
transpose.7 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_1.2), dimensions={0,2,3,1}
Arg_4.5 = pred[16,1,256,256]{3,2,1,0} parameter(4), sharding={replicated}
bitcast.35 = pred[16,256,256]{2,1,0} bitcast(Arg_4.5)
convert.49 = s32[16,256,256]{2,1,0} convert(bitcast.35)
constant.5 = s32[] constant(0)
broadcast.10 = s32[16,256,256]{2,1,0} broadcast(constant.5), dimensions={}
compare = pred[16,256,256]{2,1,0} compare(convert.49, broadcast.10), direction=GT
constant.7 = bf16[] constant(0)
broadcast.12 = bf16[16,256,256]{2,1,0} broadcast(constant.7), dimensions={}
constant.9 = bf16[] constant(-9.999e+09)
broadcast.13 = bf16[16,256,256]{2,1,0} broadcast(constant.9), dimensions={}
select = bf16[16,256,256]{2,1,0} select(compare, broadcast.12, broadcast.13)
convert.51 = f32[16,256,256]{2,1,0} convert(select)
broadcast.14 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.51), dimensions={0,2,3}
Arg_3.4 = bf16[1,16,256,256]{3,2,1,0} parameter(3), sharding={replicated}
bitcast.52 = bf16[16,256,256]{2,1,0} bitcast(Arg_3.4)
convert.52 = f32[16,256,256]{2,1,0} convert(bitcast.52)
broadcast.15 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.52), dimensions={1,2,3}
add.1 = f32[16,16,256,256]{3,2,1,0} add(broadcast.14, broadcast.15)
dot.2 = bf16[16,16,256,256]{3,2,1,0} dot(transpose.6, transpose.7), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
convert.55 = f32[16,16,256,256]{3,2,1,0} convert(dot.2)
add.18 = f32[16,16,256,256]{3,2,1,0} add(convert.55, add.1)
constant.11 = f32[] constant(-inf)
reduce.36 = f32[16,16,256]{2,1,0} reduce(add.18, constant.11), dimensions={3}, to_apply=region_0.32.clone
broadcast.17 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.36), dimensions={0,1,2}
subtract.1 = f32[16,16,256,256]{3,2,1,0} subtract(add.18, broadcast.17)
exponential.1 = f32[16,16,256,256]{3,2,1,0} exponential(subtract.1)
constant.14 = f32[] constant(0)
reduce.48 = f32[16,16,256]{2,1,0} reduce(exponential.1, constant.14), dimensions={3}, to_apply=region_1.44
broadcast.18 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.48), dimensions={0,1,2}
divide = f32[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.18)
convert.68 = bf16[16,16,256,256]{3,2,1,0} convert(divide)
dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.5, convert.68), lhs_contracting_dims={3}, rhs_contracting_dims={3}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
ROOT transpose.8 = bf16[16,256,16,64]{3,2,1,0} transpose(dot.1), dimensions={0,3,1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::Transpose(
m::Transpose(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}),
0)))
.WithShape(BF16, {16, 256, 16, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
EXPECT_EQ(fmha->operands().size(), 4);
}
TEST_F(CudnnFusedMhaRewriterTestHloTest, F16Bmm1UnfusedSoftmaxBmm2) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,40,64]{3,2,1,0},f16[2,6,64,40]{3,2,1,0},f16[2,6,40,64]{3,2,1,0})->f16[2,6,40,64]{3,2,1,0}}
region_0.7 {
Arg_0.8 = f16[] parameter(0)
Arg_1.9 = f16[] parameter(1)
ROOT maximum = f16[] maximum(Arg_0.8, Arg_1.9)
}
region_1.19 {
Arg_0.20 = f32[] parameter(0)
Arg_1.21 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.20, Arg_1.21)
}
ENTRY main.31 {
Arg_0.1 = f16[2,6,40,64]{3,2,1,0} parameter(0), sharding={replicated}
Arg_1.2 = f16[2,6,64,40]{3,2,1,0} parameter(1), sharding={replicated}
dot = f16[2,6,40,40]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
constant = f16[] constant(-inf)
reduce.11 = f16[2,6,40]{2,1,0} reduce(dot, constant), dimensions={3}, to_apply=region_0.7
broadcast.3 = f16[2,6,40,40]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2}
subtract.1 = f16[2,6,40,40]{3,2,1,0} subtract(dot, broadcast.3)
exponential.1 = f16[2,6,40,40]{3,2,1,0} exponential(subtract.1)
convert.1 = f32[2,6,40,40]{3,2,1,0} convert(exponential.1)
constant.1 = f32[] constant(0)
reduce.23 = f32[2,6,40]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19
convert.2 = f16[2,6,40]{2,1,0} convert(reduce.23)
broadcast.4 = f16[2,6,40,40]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2}
divide = f16[2,6,40,40]{3,2,1,0} divide(exponential.1, broadcast.4)
Arg_2.3 = f16[2,6,40,64]{3,2,1,0} parameter(2), sharding={replicated}
ROOT dot.1 = f16[2,6,40,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0)
.WithShape(F16, {2, 6, 40, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config();
EXPECT_FLOAT_EQ(config.fmha_scale(), 1.0);
EXPECT_FLOAT_EQ(config.dropout_rate(), 0.0);
EXPECT_EQ(fmha->operands().size(), 3);
}
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1ConvertedMaskAddedAfterFirstGemmSoftmaxBmm2) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},pred[16,1,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}}
region_0.27.clone {
Arg_0.0 = f32[] parameter(0)
Arg_1.0 = f32[] parameter(1)
ROOT maximum.1 = f32[] maximum(Arg_0.0, Arg_1.0)
}
region_1.39 {
Arg_0.40 = f32[] parameter(0)
Arg_1.41 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0.40, Arg_1.41)
}
ENTRY main.56 {
Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated}
transpose.5 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_2.3), dimensions={0,2,3,1}
Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated}
transpose.6 = bf16[16,16,256,64]{3,2,1,0} transpose(Arg_0.1), dimensions={0,2,1,3}
Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated}
transpose.7 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_1.2), dimensions={0,2,3,1}
dot = bf16[16,16,256,256]{3,2,1,0} dot(transpose.6, transpose.7), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
convert.47 = f32[16,16,256,256]{3,2,1,0} convert(dot)
Arg_3.4 = pred[16,1,256,256]{3,2,1,0} parameter(3), sharding={replicated}
bitcast.37 = pred[16,256,256]{2,1,0} bitcast(Arg_3.4)
convert.42 = s32[16,256,256]{2,1,0} convert(bitcast.37)
constant.6 = s32[] constant(0)
broadcast.9 = s32[16,256,256]{2,1,0} broadcast(constant.6), dimensions={}
compare = pred[16,256,256]{2,1,0} compare(convert.42, broadcast.9), direction=GT
constant.8 = bf16[] constant(0)
broadcast.11 = bf16[16,256,256]{2,1,0} broadcast(constant.8), dimensions={}
constant.10 = bf16[] constant(-9.999e+09)
broadcast.12 = bf16[16,256,256]{2,1,0} broadcast(constant.10), dimensions={}
select = bf16[16,256,256]{2,1,0} select(compare, broadcast.11, broadcast.12)
convert.48 = f32[16,256,256]{2,1,0} convert(select)
broadcast.14 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.48), dimensions={0,2,3}
add.2 = f32[16,16,256,256]{3,2,1,0} add(convert.47, broadcast.14)
constant.13 = f32[] constant(-inf)
reduce.31 = f32[16,16,256]{2,1,0} reduce(add.2, constant.13), dimensions={3}, to_apply=region_0.27.clone
broadcast.16 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.31), dimensions={0,1,2}
subtract.1 = f32[16,16,256,256]{3,2,1,0} subtract(add.2, broadcast.16)
exponential.1 = f32[16,16,256,256]{3,2,1,0} exponential(subtract.1)
constant.14 = f32[] constant(0)
reduce.43 = f32[16,16,256]{2,1,0} reduce(exponential.1, constant.14), dimensions={3}, to_apply=region_1.39
broadcast.17 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.43), dimensions={0,1,2}
divide = f32[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.17)
convert.63 = bf16[16,16,256,256]{3,2,1,0} convert(divide)
dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.5, convert.63), lhs_contracting_dims={3}, rhs_contracting_dims={3}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1}
ROOT transpose.8 = bf16[16,256,16,64]{3,2,1,0} transpose(dot.1), dimensions={0,3,1,2}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(
m->entry_computation()->root_instruction(),
GmockMatch(
m::Transpose(
m::Transpose(m::GetTupleElement(
m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}),
0)))
.WithShape(BF16, {16, 256, 16, 64})));
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
fmha->backend_config<GpuBackendConfig>());
EXPECT_EQ(fmha->operands().size(), 4);
}
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1Bmm2Pattern_bmm1_contracting_dim_not_equal_64) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,32]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,32]{3,2,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(dot.0, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Dot(&fmha, m::Dot(m::Parameter(0), m::Parameter(1)),
m::Parameter(2))
.WithShape(BF16, {16, 16, 256, 64})));
}
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1Bmm2Pattern_bmm2_rhs_non_contracting_dim_not_equal_64) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0})->bf16[16,16,256,32]{3,2,1,0}}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,32]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,64]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,64]{3,2,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
ROOT dot.1 = bf16[16,16,256,32]{3,2,1,0} dot(dot.0, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Dot(&fmha, m::Op(), m::Parameter(2))
.WithShape(BF16, {16, 16, 256, 32})));
}
TEST_F(CudnnFusedMhaRewriterTestHloTest,
BF16Bmm1Bmm2PatternUncanonicalized_bmm1_contracting_dim_not_equal_64) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,64,256]{3,2,1,0}}
ENTRY main.6 {
Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2)
Arg_0.1 = bf16[16,16,256,32]{3,2,1,0} parameter(0)
Arg_1.2 = bf16[16,16,256,32]{3,2,1,0} parameter(1)
dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
ROOT dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(Arg_2.3, dot.0), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(),
GetCudnnVersion()};
TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status());
const HloInstruction* fmha;
SCOPED_TRACE(m->ToString());
EXPECT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::Dot(&fmha, m::Parameter(2), m::Op())
.WithShape(BF16, {16, 16, 64, 256})));
}
TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1BiasSoftmaxDropoutBmm2) {
if (skip_reason_) GTEST_SKIP() << *skip_reason_;
const char* module_str = R"(
HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[1,16,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}}
region_0.34 {
Arg_0.35 = bf16[] parameter(0)
Arg_1.36 = bf16[] parameter(1)
ROOT maximum.37 = bf16[] maximum(Arg_0.35, Arg_1.36)
}
region_1.46 {
Arg_0.47 = f32[] parameter(0)
Arg_1.48 = f32[] parameter(1)
ROOT add.49 = f32[] add(Arg_0.47, Arg_1.48)
}
ENTRY main.82 {
Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated}
copy = bf16[16,256,16,64]{1,3,2,0} copy(Arg_2.3), sharding={replicated}
transpose.2 = bf16[16,16,64,256]{3,2,1,0} transpose(copy), dimensions={0,2,3,1}
Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated}
copy.1 = bf16[16,256,16,64]{3,1,2,0} copy(Arg_0.1), sharding={replicated}
transpose = bf16[16,16,256,64]{3,2,1,0} transpose(copy.1), dimensions={0,2,1,3}
Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated}
copy.2 = bf16[16,256,16,64]{1,3,2,0} copy(Arg_1.2), sharding={replicated}
transpose.1 = bf16[16,16,64,256]{3,2,1,0} transpose(copy.2), dimensions={0,2,3,1}
dot = bf16[16,16,256,256]{3,2,1,0} dot(transpose, transpose.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}
Arg_3.4 = bf16[1,16,256,256]{3,2,1,0} parameter(3), sharding={replicated}
reshape.31 = bf16[16,256,256]{2,1,0} reshape(Arg_3.4)
broadcast.32 = bf16[16,16,256,256]{3,2,1,0} broadcast(reshape.31), dimensions={1,2,3}
add.33 = bf16[16,16,256,256]{3,2,1,0} add(dot, broadcast.32)
constant.21 = bf16[] constant(-inf)
reduce.38 = bf16[16,16,256]{2,1,0} reduce(add.33, constant.21), dimensions={3}, to_apply=region_0.34
broadcast.42 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.38), dimensions={0,1,2}
subtract.43 = bf16[16,16,256,256]{3,2,1,0} subtract(add.33, broadcast.42)
exponential.44 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.43)
convert.45 = f32[16,16,256,256]{3,2,1,0} convert(exponential.44)
constant.9 = f32[] constant(0)
reduce.50 = f32[16,16,256]{2,1,0} reduce(convert.45, constant.9), dimensions={3}, to_apply=region_1.46
convert.1 = bf16[16,16,256]{2,1,0} convert(reduce.50)
broadcast.55 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.1), dimensions={0,1,2}
divide.56 = bf16[16,16,256,256]{3,2,1,0} divide(exponential.44, broadcast.55)
  constant.18 = u32[
#ifndef XLA_SERVICE_GPU_ALIAS_PASSTHROUGH_PARAMS_H_
#define XLA_SERVICE_GPU_ALIAS_PASSTHROUGH_PARAMS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
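// Aliases entry parameters that are returned unchanged as elements of the
// root tuple with the corresponding outputs, so the output can reuse the
// parameter's buffer.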
class AliasPassthroughParams : public HloModulePass {
public:
AliasPassthroughParams() = default;
~AliasPassthroughParams() override = default;
absl::string_view name() const override { return "alias_passthrough_params"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/alias_passthrough_params.h"
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> AliasPassthroughParams::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
const HloInstruction* root = module->entry_computation()->root_instruction();
if (module->entry_computation()->num_parameters() == 0 ||
root->opcode() != HloOpcode::kTuple) {
return false;
}
bool changed = false;
absl::flat_hash_set<int64_t> used_params;
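  // Alias each pass-through parameter with its root tuple position, at most
  // once per parameter and only if no alias has already been configured.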
for (int64_t i = 0; i < root->operand_count(); ++i) {
if (root->operand(i)->opcode() == HloOpcode::kParameter &&
used_params.count(root->operand(i)->parameter_number()) == 0) {
VLOG(2) << "Parameter " << root->operand(i)->parameter_number()
<< " with shape " << root->operand(i)->shape().ToString()
<< " in module " << module->name()
<< " is passed-through to root tuple element " << i << ": "
<< root->shape().ToString();
if (module->input_output_alias_config().OutputHasAlias({i}) ||
module->input_output_alias_config().ParameterHasAlias(
root->operand(i)->parameter_number(), {})) {
VLOG(2) << "Skip setting the above pass-through alias as an alias may"
<< " have been set up for alising resource update.";
continue;
}
TF_RETURN_IF_ERROR(module->input_output_alias_config().SetUpAlias(
{i},
root->operand(i)->parameter_number(),
{}));
used_params.insert(root->operand(i)->parameter_number());
changed = true;
}
}
return changed;
}
}
} | #include "xla/service/gpu/alias_passthrough_params.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
class AliasPassthroughParamsTest : public HloTestBase {};
TEST_F(AliasPassthroughParamsTest, AliasPassThroughParams) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
sum = f16[2048,1024] add(p0, p1)
ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1)
})")
.value();
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_config = module->input_output_alias_config();
EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
EXPECT_FALSE(alias_config.OutputHasAlias({1}));
EXPECT_EQ(1, alias_config.GetAliasedParameter({2})->parameter_number);
}
TEST_F(AliasPassthroughParamsTest, DoNotAliasPassThroughParamsMoreThanOnce) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p0)
})")
.value();
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_config = module->input_output_alias_config();
EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number);
EXPECT_FALSE(alias_config.OutputHasAlias({1}));
}
TEST_F(AliasPassthroughParamsTest, PresetAliases) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[2048,1024] parameter(0)
p1 = f16[2048,1024] parameter(1)
sum = f16[2048,1024] add(p0, p1)
ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1)
})")
.value();
auto& preset_alias = module->input_output_alias_config();
TF_EXPECT_OK(preset_alias.SetUpAlias({1},
0,
{}));
EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value());
const auto& alias_result = module->input_output_alias_config();
EXPECT_EQ(1, alias_result.GetAliasedParameter({2})->parameter_number);
EXPECT_FALSE(alias_result.OutputHasAlias({0}));
}
}
} | 2,098 |
#ifndef XLA_SERVICE_GPU_DOT_OPERAND_CONVERTER_H_
#define XLA_SERVICE_GPU_DOT_OPERAND_CONVERTER_H_
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/op_expander_pass.h"
#include "xla/util.h"
namespace xla::gpu {
class DotOperandConverter : public OpExpanderPass {
public:
explicit DotOperandConverter(HloPredicate extra_filter = nullptr)
: OpExpanderPass(std::move(extra_filter)) {}
absl::string_view name() const override { return "operand_converter"; }
protected:
bool InstructionMatchesPattern(HloInstruction* instruction) override;
absl::StatusOr<HloInstruction*> ExpandInstruction(
HloInstruction* instruction) override;
};
}
#endif
#include "xla/service/gpu/dot_operand_converter.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
namespace xla::gpu {
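// A dot matches when its two operands have different element types, except
// when both operands are FP8 types, which are left untouched.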
bool DotOperandConverter::InstructionMatchesPattern(
HloInstruction* instruction) {
if (instruction->opcode() != HloOpcode::kDot) {
return false;
}
HloInstruction* lhs = instruction->mutable_operand(0);
HloInstruction* rhs = instruction->mutable_operand(1);
PrimitiveType lhs_type = lhs->shape().element_type();
PrimitiveType rhs_type = rhs->shape().element_type();
if (lhs_type == rhs_type) {
return false;
}
absl::flat_hash_set<PrimitiveType> non_converting = {F8E4M3FN, F8E5M2};
if (non_converting.contains(lhs_type) && non_converting.contains(rhs_type)) {
return false;
}
PrimitiveType desired_type =
ShapeUtil::HigherPrecisionElementType(lhs->shape(), rhs->shape());
return desired_type == lhs_type || desired_type == rhs_type;
}
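// Inserts a convert so the lower-precision operand is upcast to the
// higher-precision operand's element type; the dot is updated in place.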
absl::StatusOr<HloInstruction*> DotOperandConverter::ExpandInstruction(
HloInstruction* instruction) {
HloInstruction* lhs = instruction->mutable_operand(0);
HloInstruction* rhs = instruction->mutable_operand(1);
PrimitiveType desired_type =
ShapeUtil::HigherPrecisionElementType(lhs->shape(), rhs->shape());
int operand_index = desired_type == lhs->shape().element_type() ? 1 : 0;
HloInstruction* inst_to_replace =
desired_type == lhs->shape().element_type() ? rhs : lhs;
auto upcast_shape = inst_to_replace->shape();
upcast_shape.set_element_type(desired_type);
auto* convert_inst = instruction->AddInstruction(
HloInstruction::CreateConvert(upcast_shape, inst_to_replace));
TF_RETURN_IF_ERROR(instruction->ReplaceOperandWithDifferentShape(
operand_index, convert_inst));
return nullptr;
}
} | #include "xla/service/gpu/dot_operand_converter.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/primitive_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace op = ::xla::testing::opcode_matchers;
class DotOperandConverterTest : public HloTestBase {
public:
void TestConvert(bool left_less_precise, PrimitiveType lhs_type,
PrimitiveType rhs_type, PrimitiveType result_type) {
absl::string_view module_tmpl = R"(
HloModule module
ENTRY main {
p0 = $0[2,3]{1,0} parameter(0)
p1 = $1[3,2]{1,0} parameter(1)
ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
auto module_string = absl::Substitute(
module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type),
primitive_util::LowercasePrimitiveTypeName(rhs_type),
primitive_util::LowercasePrimitiveTypeName(result_type));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted,
DotOperandConverter().Run(module.get()));
EXPECT_TRUE(upcasted);
if (left_less_precise) {
auto original_lhs = op::Parameter(0);
auto upcasted_lhs =
AllOf(op::Convert(original_lhs),
op::Shape(absl::Substitute(
"$0[2,3]{1,0}",
primitive_util::LowercasePrimitiveTypeName(rhs_type))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(upcasted_lhs, op::Parameter(1)),
op::Shape(absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type)))));
} else {
auto original_rhs = op::Parameter(1);
auto upcasted_rhs =
AllOf(op::Convert(original_rhs),
op::Shape(absl::Substitute(
"$0[3,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(lhs_type))));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
AllOf(op::Dot(op::Parameter(0), upcasted_rhs),
op::Shape(absl::Substitute(
"$0[2,2]{1,0}",
primitive_util::LowercasePrimitiveTypeName(result_type)))));
}
}
};
TEST_F(DotOperandConverterTest, ConvertsLeftAndRight) {
TestConvert(true, S8, BF16, F32);
TestConvert(false, BF16, S8, F32);
}
TEST_F(DotOperandConverterTest, NoConvertHappensWithSameTypes) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = s8[3,2]{1,0} parameter(1)
ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted,
DotOperandConverter().Run(module.get()));
EXPECT_FALSE(upcasted);
}
TEST_F(DotOperandConverterTest, NoConvertFromF8toF8) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = f8e4m3fn[2,3]{1,0} parameter(0)
p1 = f8e5m2[3,2]{1,0} parameter(1)
ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool upcasted,
DotOperandConverter().Run(module.get()));
EXPECT_FALSE(upcasted);
}
TEST_F(DotOperandConverterTest, CompilerOptimizesUsingDotOperandConverter) {
absl::string_view module_string = R"(
HloModule module
ENTRY main {
p0 = s8[2,3]{1,0} parameter(0)
p1 = bf16[3,2]{1,0} parameter(1)
ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
GetOptimizedModule(module_string));
}
}
} | 2,099 |
#ifndef XLA_SERVICE_GPU_ALL_REDUCE_BLUECONNECT_H_
#define XLA_SERVICE_GPU_ALL_REDUCE_BLUECONNECT_H_
#include <cstddef>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class AllReduceBlueConnect : public HloModulePass {
public:
explicit AllReduceBlueConnect(size_t num_devices_per_host)
: num_devices_per_host_(num_devices_per_host) {}
absl::string_view name() const override { return "all-reduce-blueconnect"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
size_t num_devices_per_host_;
};
}
#endif
#include "xla/service/gpu/all_reduce_blueconnect.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
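// Returns the instruction itself for array-shaped results, otherwise one
// get-tuple-element instruction per tuple element.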
std::vector<HloInstruction*> GetOutputs(HloInstruction& instruction) {
if (!instruction.shape().IsTuple()) {
return {&instruction};
}
std::vector<HloInstruction*> outputs;
outputs.reserve(instruction.shape().tuple_shapes_size());
HloComputation& computation = *instruction.parent();
for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) {
outputs.push_back(computation.AddInstruction(
HloInstruction::CreateGetTupleElement(&instruction, i)));
}
return outputs;
}
struct DecomposedReplicaGroups {
std::vector<ReplicaGroup> scatter_gather_groups;
std::vector<ReplicaGroup> new_all_reduce_groups;
};
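// Splits one replica group into intra-host scatter/gather groups and
// cross-host all-reduce groups (the BlueConnect decomposition). Returns
// std::nullopt when the group cannot be decomposed evenly.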
absl::StatusOr<std::optional<DecomposedReplicaGroups>> TryDecomposeReplicaGroup(
const ReplicaGroup& replica_group,
const DeviceAssignment& device_assignment, size_t num_devices_per_host) {
int group_size = replica_group.replica_ids_size();
TF_RET_CHECK(group_size > 0);
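// Group the replica ids by the host that owns their assigned device.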
absl::btree_map<int, std::vector<int64_t>> replica_ids_by_host;
for (int64_t replica_id : replica_group.replica_ids()) {
int device_id = device_assignment(replica_id, 0);
TF_RET_CHECK(device_id >= 0);
int host_id = device_id / num_devices_per_host;
replica_ids_by_host[host_id].push_back(replica_id);
}
size_t num_local_devices = replica_ids_by_host.begin()->second.size();
bool same_num_devices_on_each_host =
absl::c_all_of(replica_ids_by_host, [&](const auto& entry) {
return entry.second.size() == num_local_devices;
});
if (!same_num_devices_on_each_host) {
return {std::nullopt};
}
std::vector<int64_t> sorted_replica_group;
sorted_replica_group.reserve(group_size);
for (const auto& entry : replica_ids_by_host) {
absl::c_copy(entry.second, std::back_inserter(sorted_replica_group));
}
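// Scatter/gather groups are contiguous runs of the host-sorted replica list;
// all-reduce groups take the replica at the same offset within each run.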
size_t scatter_group_size = std::max(num_local_devices, size_t(2));
size_t num_scatter_groups = group_size / scatter_group_size;
if ((group_size % scatter_group_size != 0) || (num_scatter_groups < 2)) {
return {std::nullopt};
}
std::vector<ReplicaGroup> scatter_gather_groups(num_scatter_groups);
std::vector<ReplicaGroup> new_all_reduce_groups(scatter_group_size);
for (size_t i = 0; i < group_size; ++i) {
int64_t replica_id = sorted_replica_group[i];
scatter_gather_groups[i / scatter_group_size].add_replica_ids(replica_id);
new_all_reduce_groups[i % scatter_group_size].add_replica_ids(replica_id);
}
return {DecomposedReplicaGroups{std::move(scatter_gather_groups),
std::move(new_all_reduce_groups)}};
}
absl::StatusOr<std::optional<DecomposedReplicaGroups>>
TryDecomposeReplicaGroups(const HloAllReduceInstruction& all_reduce,
size_t num_devices_per_host) {
const DeviceAssignment& device_assignment =
all_reduce.GetModule()->config().static_device_assignment();
absl::Span<const ReplicaGroup> replica_groups = all_reduce.replica_groups();
ReplicaGroup all_replicas;
if (replica_groups.empty()) {
for (int i = 0; i < device_assignment.replica_count(); ++i) {
all_replicas.add_replica_ids(i);
}
replica_groups = absl::MakeSpan(&all_replicas, 1);
}
std::vector<ReplicaGroup> scatter_gather_groups;
std::vector<ReplicaGroup> new_all_reduce_groups;
for (const ReplicaGroup& replica_group : replica_groups) {
TF_ASSIGN_OR_RETURN(
std::optional<DecomposedReplicaGroups> decomposed_groups,
TryDecomposeReplicaGroup(replica_group, device_assignment,
num_devices_per_host));
if (!decomposed_groups) return {std::nullopt};
int scatter_group_size =
decomposed_groups->scatter_gather_groups[0].replica_ids_size();
if (scatter_gather_groups.empty()) {
for (const HloInstruction* operand : all_reduce.operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
if (num_elements % scatter_group_size != 0) {
return {std::nullopt};
}
}
scatter_gather_groups.reserve(
replica_groups.size() *
decomposed_groups->scatter_gather_groups.size());
new_all_reduce_groups.reserve(
replica_groups.size() *
decomposed_groups->new_all_reduce_groups.size());
} else if (scatter_group_size !=
scatter_gather_groups[0].replica_ids_size()) {
return {std::nullopt};
}
absl::c_move(decomposed_groups->scatter_gather_groups,
std::back_inserter(scatter_gather_groups));
absl::c_move(decomposed_groups->new_all_reduce_groups,
std::back_inserter(new_all_reduce_groups));
}
return {DecomposedReplicaGroups{std::move(scatter_gather_groups),
std::move(new_all_reduce_groups)}};
}
absl::StatusOr<bool> TryDecomposeAllReduce(HloAllReduceInstruction* all_reduce,
size_t num_devices_per_host) {
TF_RET_CHECK(all_reduce);
TF_RET_CHECK(!all_reduce->has_sharding());
HloComputation& computation = *all_reduce->parent();
PrimitiveType element_type = all_reduce->operand(0)->shape().element_type();
TF_ASSIGN_OR_RETURN(
std::optional<DecomposedReplicaGroups> decomposed_groups,
TryDecomposeReplicaGroups(*all_reduce, num_devices_per_host));
if (!decomposed_groups) return false;
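// Lower the all-reduce to reduce-scatter + all-reduce + all-gather over
// flattened 1D operands; the inner all-reduce is decomposed recursively.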
std::vector<HloInstruction*> flat_operands;
flat_operands.reserve(all_reduce->operand_count());
std::vector<Shape> flat_shapes;
flat_shapes.reserve(all_reduce->operand_count());
std::vector<Shape> scattered_shapes;
scattered_shapes.reserve(all_reduce->operand_count());
int scatter_group_size =
decomposed_groups->scatter_gather_groups[0].replica_ids_size();
for (HloInstruction* operand : all_reduce->operands()) {
TF_RET_CHECK(operand->shape().IsArray());
int64_t num_elements = ShapeUtil::ElementsIn(operand->shape());
Shape flat_shape = ShapeUtil::MakeShape(element_type, {num_elements});
flat_operands.push_back(computation.AddInstruction(
HloInstruction::CreateBitcast(flat_shape, operand)));
flat_shapes.push_back(std::move(flat_shape));
scattered_shapes.push_back(ShapeUtil::MakeShape(
element_type, {num_elements / scatter_group_size}));
}
Shape reduce_scatter_shape = ShapeUtil::MakeMaybeTupleShape(scattered_shapes);
HloInstruction* reduce_scatter =
computation.AddInstruction(HloInstruction::CreateReduceScatter(
reduce_scatter_shape, flat_operands, all_reduce->to_apply(),
CollectiveDeviceList(decomposed_groups->scatter_gather_groups),
false, all_reduce->channel_id(),
all_reduce->use_global_device_ids(),
0));
HloInstruction* new_all_reduce =
computation.AddInstruction(HloInstruction::CreateAllReduce(
reduce_scatter_shape, GetOutputs(*reduce_scatter),
all_reduce->to_apply(),
CollectiveDeviceList(decomposed_groups->new_all_reduce_groups),
false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
HloInstruction* all_gather =
computation.AddInstruction(HloInstruction::CreateAllGather(
ShapeUtil::MakeMaybeTupleShape(flat_shapes),
GetOutputs(*new_all_reduce),
0,
CollectiveDeviceList(decomposed_groups->scatter_gather_groups),
false, all_reduce->channel_id(),
all_reduce->use_global_device_ids()));
std::vector<HloInstruction*> outputs = GetOutputs(*all_gather);
for (int64_t i = 0; i < outputs.size(); ++i) {
outputs[i] = computation.AddInstruction(HloInstruction::CreateBitcast(
all_reduce->operand(i)->shape(), outputs[i]));
}
HloInstruction* replacement = MaybeMakeTuple(outputs);
TF_RETURN_IF_ERROR(
all_reduce->CopyAllControlDepsTo(reduce_scatter, replacement));
TF_RETURN_IF_ERROR(all_reduce->DropAllControlDeps());
TF_RETURN_IF_ERROR(computation.ReplaceInstruction(all_reduce, replacement));
TF_RETURN_IF_ERROR(
TryDecomposeAllReduce(Cast<HloAllReduceInstruction>(new_all_reduce),
num_devices_per_host)
.status());
return true;
}
}
absl::StatusOr<bool> AllReduceBlueConnect::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
VLOG(1) << "Running AllReduceBlueConnect";
if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) {
VLOG(1)
<< "Skip AllReduceBlueConnect because the module contains all-reduce "
"with constrained layouts";
return false;
}
if (!module->config().has_static_device_assignment()) {
VLOG(1)
<< "Skip AllReduceBlueConnect because the module doesn't have static "
"device assignment";
return false;
}
std::vector<HloAllReduceInstruction*> all_reduces;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kAllReduce) {
all_reduces.push_back(Cast<HloAllReduceInstruction>(instruction));
}
}
}
bool changed = false;
for (HloAllReduceInstruction* all_reduce : all_reduces) {
TF_ASSIGN_OR_RETURN(
bool all_reduce_changed,
TryDecomposeAllReduce(all_reduce, num_devices_per_host_));
changed |= all_reduce_changed;
}
return changed;
}
} | #include "xla/service/gpu/all_reduce_blueconnect.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/computation_placer.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using ::tsl::testing::IsOkAndHolds;
namespace m = ::xla::match;
using AllReduceBlueConnectTest = HloTestBase;
void SetModuleConfig(HloModule& module, size_t replica_count) {
DeviceAssignment device_assignment(replica_count, 1);
device_assignment.FillIota(0);
auto& module_config = module.mutable_config();
module_config.set_replica_count(replica_count);
module_config.set_static_device_assignment(device_assignment);
}
TEST_F(AllReduceBlueConnectTest, OneStage) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
auto bitcast = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter =
m::ReduceScatter(bitcast).WithShape(F32, {4}).WithReplicaGroups(
scatter_gather_groups);
auto all_reduce = m::AllReduce(reduce_scatter)
.WithShape(F32, {4})
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather = m::AllGather(all_reduce)
.WithShape(F32, {16})
.WithReplicaGroups(scatter_gather_groups);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Bitcast(all_gather).WithShape(F32, {4, 4})));
}
TEST_F(AllReduceBlueConnectTest, TwoStage) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 16);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> outer_scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}, {12, 13, 14, 15}};
std::vector<std::vector<int64_t>> inner_scatter_gather_groups = {
{0, 4}, {8, 12}, {1, 5}, {9, 13}, {2, 6}, {10, 14}, {3, 7}, {11, 15}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 8}, {4, 12}, {1, 9}, {5, 13}, {2, 10}, {6, 14}, {3, 11}, {7, 15}};
auto bitcast0 = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter0 =
m::ReduceScatter(bitcast0).WithShape(F32, {4}).WithReplicaGroups(
outer_scatter_gather_groups);
auto bitcast1 = m::Bitcast(reduce_scatter0).WithShape(F32, {4});
auto reduce_scatter1 =
m::ReduceScatter(bitcast1).WithShape(F32, {2}).WithReplicaGroups(
inner_scatter_gather_groups);
auto all_reduce = m::AllReduce(reduce_scatter1)
.WithShape(F32, {2})
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather0 = m::AllGather(all_reduce)
.WithShape(F32, {4})
.WithReplicaGroups(inner_scatter_gather_groups);
auto bitcast2 = m::Bitcast(all_gather0).WithShape(F32, {4});
auto all_gather1 =
m::AllGather(bitcast2).WithShape(F32, {16}).WithReplicaGroups(
outer_scatter_gather_groups);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Bitcast(all_gather1).WithShape(F32, {4, 4})));
}
TEST_F(AllReduceBlueConnectTest, TwoOperands) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[4,4,2] parameter(1)
ROOT crs = (f32[4,4], f32[4,4,2]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
auto bitcast0 = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto bitcast1 = m::Bitcast(m::Parameter(1)).WithShape(F32, {32});
Shape expected0 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {4}), ShapeUtil::MakeShape(F32, {8})});
Shape expected1 = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {16}), ShapeUtil::MakeShape(F32, {32})});
auto reduce_scatter = m::ReduceScatter(bitcast0, bitcast1)
.WithShapeEqualTo(&expected0)
.WithReplicaGroups(scatter_gather_groups);
auto all_reduce = m::AllReduce(m::GetTupleElement(reduce_scatter, 0),
m::GetTupleElement(reduce_scatter, 1))
.WithShapeEqualTo(&expected0)
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather = m::AllGather(m::GetTupleElement(all_reduce, 0),
m::GetTupleElement(all_reduce, 1))
.WithShapeEqualTo(&expected1)
.WithReplicaGroups(scatter_gather_groups);
auto bitcast2 =
m::Bitcast(m::GetTupleElement(all_gather, 0)).WithShape(F32, {4, 4});
auto bitcast3 =
m::Bitcast(m::GetTupleElement(all_gather, 1)).WithShape(F32, {4, 4, 2});
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(bitcast2, bitcast3)));
}
TEST_F(AllReduceBlueConnectTest, DifferentNumLocalDevicesWithinReplicaGroup) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0),
replica_groups={{0,1,2,7},{3,4,5,6}}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, DifferentNumLocalDevicesAcrossReplicaGroups) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
ROOT crs = f32[4,4] all-reduce(p0),
replica_groups={{0,1,4,5},{2,3,6,7},{8,9,10,11},{12,13,14,15}}, to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 16);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, OperandIndivisible) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[9] parameter(1)
ROOT crs = (f32[4,4], f32[9]) all-reduce(p0, p1), to_apply=add
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false));
}
TEST_F(AllReduceBlueConnectTest, ControlDeps) {
constexpr absl::string_view hlo_string = R"(
HloModule module
%add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY %comp {
p0 = f32[4,4] parameter(0)
p1 = f32[4,4] parameter(1)
add = f32[4,4] add(p0, p1)
crs = f32[4,4] all-reduce(p0), to_apply=add, control-predecessors={add}
ROOT add1 = f32[4,4] add(crs, add), control-predecessors={crs}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
SetModuleConfig(*module, 8);
const HloInstruction* ar =
module->entry_computation()->root_instruction()->operand(0);
auto expected_preds = ar->control_predecessors();
auto expected_succs = ar->control_successors();
AllReduceBlueConnect pass(4);
EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true));
std::vector<std::vector<int64_t>> scatter_gather_groups = {
{0, 1, 2, 3}, {4, 5, 6, 7}};
std::vector<std::vector<int64_t>> new_all_reduce_groups = {
{0, 4}, {1, 5}, {2, 6}, {3, 7}};
const HloInstruction *matched_rs, *matched_bitcast;
auto bitcast = m::Bitcast(m::Parameter(0)).WithShape(F32, {16});
auto reduce_scatter = m::ReduceScatter(&matched_rs, bitcast)
.WithShape(F32, {4})
.WithReplicaGroups(scatter_gather_groups);
auto all_reduce = m::AllReduce(reduce_scatter)
.WithShape(F32, {4})
.WithReplicaGroups(new_all_reduce_groups);
auto all_gather = m::AllGather(all_reduce)
.WithShape(F32, {16})
.WithReplicaGroups(scatter_gather_groups);
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_THAT(root, GmockMatch(m::Add()));
EXPECT_THAT(
root->operand(0),
GmockMatch(
m::Bitcast(&matched_bitcast, all_gather).WithShape(F32, {4, 4})));
EXPECT_THAT(matched_rs, GmockMatch(m::Op().WithControlDeps(
absl::MakeSpan(expected_preds), {})));
EXPECT_THAT(matched_bitcast, GmockMatch(m::Op().WithControlDeps(
{}, absl::MakeSpan(expected_succs))));
}
}
} | 2,100 |
#ifndef XLA_SERVICE_GPU_MOVE_COPY_TO_USERS_H_
#define XLA_SERVICE_GPU_MOVE_COPY_TO_USERS_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
class MoveCopyToUsers : public HloModulePass {
public:
absl::string_view name() const override { return "move_copy_to_users"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/move_copy_to_users.h"
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/hlo_creation_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
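// Rewrites op(copy(x)) as copy(op(x)) for the ops handled below, sinking
// layout-changing copies towards the users of their results.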
class MoveCopyToUsersVisitor : public DfsHloRewriteVisitor {
absl::Status HandlePad(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
HloInstruction* c = hlo->mutable_operand(1);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_pad,
MakePadHlo(copied, c, hlo->padding_config(), &hlo->metadata()));
*earlier_pad->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_pad, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleSlice(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_slice,
MakeSliceHlo(copied, hlo->slice_starts(), hlo->slice_limits(),
hlo->slice_strides(), &hlo->metadata()));
*earlier_slice->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_slice, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleDynamicSlice(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_slice,
MakeDynamicSliceHlo(
copied,
absl::Span<HloInstruction* const>(hlo->operands()).subspan(1),
hlo->dynamic_slice_sizes(), &hlo->metadata()));
*earlier_slice->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy = MakeCopyHlo(earlier_slice, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleReduceWindow(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_reduce_window,
MakeReduceWindowHlo(copied, hlo->mutable_operand(1), hlo->window(),
hlo->called_computations()[0], &hlo->metadata()));
*earlier_reduce_window->mutable_shape()->mutable_layout() =
copied->shape().layout();
HloInstruction* later_copy =
MakeCopyHlo(earlier_reduce_window, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleReduce(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy && !hlo->shape().IsTuple()) {
HloInstruction* new_reduce = hlo->AddInstruction(
hlo->CloneWithNewOperands(hlo->shape(), {operand->mutable_operand(0),
hlo->mutable_operand(1)}));
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, new_reduce));
}
return absl::OkStatus();
}
absl::Status HandleBitcastConvert(HloInstruction* hlo) override {
return absl::OkStatus();
}
absl::Status HandleElementwiseUnary(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (hlo->opcode() == HloOpcode::kReducePrecision) {
return absl::OkStatus();
}
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_elementwise,
MakeUnaryHlo(hlo->opcode(), copied, &hlo->metadata()));
HloInstruction* later_copy =
MakeCopyHlo(earlier_elementwise, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleReverse(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * earlier_reverse,
MakeReverseHlo(copied, hlo->dimensions(), &hlo->metadata()));
HloInstruction* later_copy = MakeCopyHlo(earlier_reverse, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleConvert(HloInstruction* hlo) override {
HloInstruction* operand = hlo->mutable_operand(0);
if (operand->opcode() == HloOpcode::kCopy) {
HloInstruction* copied = operand->mutable_operand(0);
HloInstruction* earlier_convert = MakeConvertToHlo(
copied, hlo->shape().element_type(), &hlo->metadata());
HloInstruction* later_copy = MakeCopyHlo(earlier_convert, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
return absl::OkStatus();
}
absl::Status HandleElementwiseBinary(HloInstruction* hlo) override {
HloInstruction* a = hlo->mutable_operand(0);
HloInstruction* b = hlo->mutable_operand(1);
if (a->opcode() == HloOpcode::kCopy && b->opcode() == HloOpcode::kCopy) {
HloInstruction* copied_a = a->mutable_operand(0);
HloInstruction* copied_b = b->mutable_operand(0);
if (copied_a->shape() == copied_b->shape()) {
HloInstruction* earlier_elementwise;
if (hlo->opcode() == HloOpcode::kCompare) {
TF_ASSIGN_OR_RETURN(
earlier_elementwise,
MakeCompareHlo(hlo->comparison_direction(), copied_a, copied_b,
&hlo->metadata()));
} else {
TF_ASSIGN_OR_RETURN(earlier_elementwise,
MakeBinaryHlo(hlo->opcode(), copied_a, copied_b,
&hlo->metadata()));
}
HloInstruction* later_copy =
MakeCopyHlo(earlier_elementwise, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy));
}
}
return absl::OkStatus();
}
absl::Status HandleConcatenate(HloInstruction* hlo) override {
const HloInstruction* first = hlo->operand(0);
if (first->opcode() != HloOpcode::kCopy) {
return absl::OkStatus();
}
const HloInstruction* inner_op = first->operand(0);
const Layout& inner_op_layout = inner_op->shape().layout();
std::vector<HloInstruction*> new_operands;
new_operands.reserve(hlo->operand_count());
for (HloInstruction* op : hlo->mutable_operands()) {
if (op->opcode() != HloOpcode::kCopy ||
op->operand(0)->shape().layout() != inner_op_layout) {
VLOG(3) << "Mismatch between " << op->ToString()
<< " and expected op layout " << inner_op_layout.ToString();
return absl::OkStatus();
}
new_operands.push_back(op->mutable_operand(0));
}
TF_ASSIGN_OR_RETURN(
HloInstruction * new_concat,
MakeConcatHlo(new_operands, hlo->concatenate_dimension()));
*new_concat->mutable_shape()->mutable_layout() = inner_op_layout;
HloInstruction* new_copy = MakeCopyHlo(new_concat, hlo->shape());
TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, new_copy));
return absl::OkStatus();
}
};
}
absl::StatusOr<bool> MoveCopyToUsers::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return MoveCopyToUsersVisitor{}.RunOnModule(module, execution_threads);
}
} | #include "xla/service/gpu/move_copy_to_users.h"
#include <optional>
#include "absl/strings/string_view.h"
#include "xla/service/layout_assignment.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class MoveCopyToUsersTest : public HloTestBase {
public:
MoveCopyToUsersTest()
: HloTestBase(true,
true,
LayoutAssignment::InstructionCanChangeLayout) {}
void CheckMoveCopyToUsers(absl::string_view hlo,
std::optional<absl::string_view> expected) {
RunAndFilecheckHloRewrite(hlo, MoveCopyToUsers{}, expected);
}
};
TEST_F(MoveCopyToUsersTest, Pad) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = s8[1,17,9,9]{3,1,2,0} parameter(0)
copy = s8[1,17,9,9]{1,3,2,0} copy(input)
constant = s8[] constant(0)
ROOT pad = s8[1,32,9,9]{1,3,2,0} pad(copy, constant), padding=0_0x0_15x0_0x0_0
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Unary) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT pad = f32[1,17,9,9]{1,3,2,0} sqrt(copy)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Reverse) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT pad = f32[1,17,9,9]{1,3,2,0} reverse(copy), dimensions={1,2}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Convert) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT converted = f16[1,17,9,9]{1,3,2,0} convert(copy)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Slice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
ROOT slice = f32[1,4,6,6]{1,3,2,0} slice(copy), slice={[0:1],[0:4],[0:6],[0:6]}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, DynamicSlice) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
p0 = s32[] parameter(1)
p1 = s32[] parameter(2)
p2 = s32[] parameter(3)
p3 = s32[] parameter(4)
ROOT ds = f32[1,4,6,6]{1,3,2,0} dynamic-slice(copy, p0, p1, p2, p3), dynamic_slice_sizes={1,4,6,6}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, ReduceWindow) {
const char* hlo = R"(
HloModule R2Window
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY R2Window {
operand = f32[256,384]{1,0} parameter(0)
c = f32[256,384]{0,1} copy(operand)
constant = f32[] constant(1)
ROOT reduce-window = f32[256,384]{0,1} reduce-window(c, constant), window={size=2x3 pad=0_1x1_1}, to_apply=mul
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Reduce) {
const char* hlo = R"(
HloModule R2
mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
ENTRY R2 {
operand = f32[256,384,10]{2,1,0} parameter(0)
c = f32[256,384,10]{0,1,2} copy(operand)
constant = f32[] constant(1)
ROOT reduce = f32[384,10]{0,1} reduce(c, constant), dimensions={0}, to_apply=mul
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, Binary) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[1,17,9,9]{1,3,2,0} add(copy, copy2)
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, BinaryDifferentLayoutNoChange) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,0,1} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[1,17,9,9]{1,3,2,0} add(copy, copy2)
}
)";
CheckMoveCopyToUsers(hlo, std::nullopt);
}
TEST_F(MoveCopyToUsersTest, Concat) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,1,0} parameter(0)
input2 = f32[5,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[5,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[6,17,9,9]{1,3,2,0} concatenate(copy, copy2), dimensions={0}
}
)";
CheckMoveCopyToUsers(hlo, R"(
)");
}
TEST_F(MoveCopyToUsersTest, ConcatDifferentLayoutNoChange) {
const char* hlo = R"(
HloModule module
ENTRY main {
input = f32[1,17,9,9]{3,2,0,1} parameter(0)
input2 = f32[1,17,9,9]{3,2,1,0} parameter(1)
copy = f32[1,17,9,9]{1,3,2,0} copy(input)
copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2)
ROOT add = f32[2,17,9,9]{1,3,2,0} concatenate(copy, copy2), dimensions={0}
}
)";
CheckMoveCopyToUsers(hlo, std::nullopt);
}
}
} | 2,101 |
#ifndef XLA_SERVICE_GPU_MATMUL_UTILS_H_
#define XLA_SERVICE_GPU_MATMUL_UTILS_H_
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/shape.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_blas_lt.h"
#include "xla/xla_data.pb.h"
#if TENSORFLOW_USE_ROCM
#include "rocm/rocm_config.h"
#endif
namespace xla {
namespace gpu {
absl::StatusOr<std::vector<int64_t>> GetNonContractingDims(
const Shape& shape, absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> contracting_dims);
const tsl::protobuf::RepeatedField<int64_t>& BatchDimensionsForOperand(
const HloInstruction& dot, int operand_number);
absl::StatusOr<int64_t> ContractingDimensionIndex(const HloInstruction& dot,
int operand_number);
absl::StatusOr<int64_t> NonContractingDimensionIndex(const HloInstruction& dot,
int operand_number);
absl::StatusOr<Shape> GetBatchRowColumnShape(
const Shape& shape, absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> row_dims, absl::Span<const int64_t> col_dims);
absl::StatusOr<bool> CanFoldTransposeOperandIntoDot(const HloInstruction& dot,
int64_t operand_idx);
absl::StatusOr<bool> IsMatrixMultiplicationTooSmallForRewriting(
const HloInstruction& dot, int64_t threshold);
bool IsDotSupportedByClassicalEmitters(const HloInstruction& dot);
struct MatrixLayout : public se::gpu::MatrixLayout {
static absl::StatusOr<MatrixLayout> For(const Shape& shape);
static absl::StatusOr<MatrixLayout> For(const Shape& shape,
absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> row_dims,
absl::Span<const int64_t> col_dims);
static absl::StatusOr<MatrixLayout> For(const Shape& shape,
size_t lhs_num_batch_dims,
size_t lhs_num_row_dims,
size_t rhs_num_batch_dims,
size_t rhs_num_col_dims);
};
struct GemmConfig : public se::gpu::GemmConfig {
static constexpr int64_t kHopperWorkspace = 32 * 1024 * 1024;
static constexpr int64_t kDefaultWorkspace = 4 * 1024 * 1024;
static absl::StatusOr<GemmConfig> For(const HloInstruction* gemm);
static absl::StatusOr<GemmConfig> For(
const Shape& lhs_shape, absl::Span<const int64_t> lhs_batch_dims,
absl::Span<const int64_t> lhs_contracting_dims, const Shape& rhs_shape,
absl::Span<const int64_t> rhs_batch_dims,
absl::Span<const int64_t> rhs_contracting_dims, const Shape& output_shape,
double alpha_real, double alpha_imag, double beta,
PrecisionConfig::Algorithm precision_algorithm,
std::optional<int64_t> algorithm, int64_t compute_precision, bool grad_x,
bool grad_y);
static absl::StatusOr<GemmConfig> For(
const Shape& lhs_shape, absl::Span<const int64_t> lhs_batch_dims,
absl::Span<const int64_t> lhs_contracting_dims, const Shape& rhs_shape,
absl::Span<const int64_t> rhs_batch_dims,
absl::Span<const int64_t> rhs_contracting_dims, const Shape& c_shape,
const Shape* bias_shape_ptr, const Shape& output_shape, double alpha_real,
double alpha_imag, double beta,
PrecisionConfig::Algorithm precision_algorithm,
std::optional<int64_t> algorithm, int64_t compute_precision, bool grad_x,
bool grad_y);
struct DescriptorsTuple {
se::gpu::MatrixDescriptor lhs;
se::gpu::MatrixDescriptor rhs;
se::gpu::OutputMatrixDescriptor output;
bool operands_swapped;
};
absl::StatusOr<DescriptorsTuple> GetMatrixDescriptors(
se::DeviceMemoryBase lhs_buf, se::DeviceMemoryBase rhs_buf,
se::DeviceMemoryBase out_buf) const;
};
absl::Status RunGemm(
const GemmConfig& config, se::DeviceMemoryBase lhs_buffer,
se::DeviceMemoryBase rhs_buffer, se::DeviceMemoryBase output_buffer,
se::DeviceMemoryBase workspace_buffer, bool deterministic_ops,
se::Stream* stream,
std::optional<se::blas::AlgorithmType> algorithm = std::nullopt,
se::blas::ProfileResult* profile_result = nullptr);
namespace gpublas_lt {
absl::StatusOr<bool> EpilogueAddsVectorBias(
GemmBackendConfig_Epilogue epilogue);
absl::StatusOr<bool> EpilogueHasAuxiliaryOutput(
GemmBackendConfig_Epilogue epilogue);
absl::StatusOr<se::gpu::BlasLt::Epilogue> AsBlasLtEpilogue(
GemmBackendConfig_Epilogue epilogue);
}
struct TritonGemmConfig {
constexpr TritonGemmConfig() = default;
constexpr TritonGemmConfig(int block_m, int block_n, int block_k, int split_k,
int num_stages, int num_warps, int num_ctas = 1)
: block_m(block_m),
block_n(block_n),
block_k(block_k),
split_k(split_k),
num_stages(num_stages),
num_warps(num_warps),
num_ctas(num_ctas) {}
int block_m = 0;
int block_n = 0;
int block_k = 0;
int split_k = 0;
int num_stages = 0;
int num_warps = 0;
int num_ctas = 0;
private:
auto ToTuple() const {
return std::make_tuple(block_m, block_n, block_k, split_k, num_stages,
num_warps, num_ctas);
}
public:
static absl::StatusOr<TritonGemmConfig> FromProto(
const AutotuneResult::TritonGemmKey& proto);
AutotuneResult::TritonGemmKey ToProto() const;
std::string ToString() const;
bool operator==(const TritonGemmConfig& other) const {
return ToTuple() == other.ToTuple();
}
bool operator<(const TritonGemmConfig& other) const {
return ToTuple() < other.ToTuple();
}
template <typename H>
friend H AbslHashValue(H h, const TritonGemmConfig& config) {
return H::combine(std::move(h), config.ToTuple());
}
};
}
}
#endif
#include "xla/service/gpu/matmul_utils.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_blas_lt.h"
#include "xla/stream_executor/numeric_options.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
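// Returns the dimensions of `shape` that are neither batch nor contracting
// dimensions of the dot.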
absl::StatusOr<std::vector<int64_t>> GetNonContractingDims(
const Shape& shape, absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> contracting_dims) {
std::vector<int64_t> non_contracting_dims;
for (int64_t dim = 0; dim < shape.rank(); ++dim) {
bool is_batch = absl::c_count(batch_dims, dim) != 0;
bool is_contracting = absl::c_count(contracting_dims, dim) != 0;
TF_RET_CHECK(!(is_batch && is_contracting));
if (!(is_batch || is_contracting)) non_contracting_dims.push_back(dim);
}
TF_RET_CHECK(batch_dims.size() + contracting_dims.size() +
non_contracting_dims.size() ==
shape.rank());
return non_contracting_dims;
}
const tsl::protobuf::RepeatedField<int64_t>& BatchDimensionsForOperand(
const HloInstruction& dot, const int operand_number) {
const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();
if (operand_number == 0) {
return dimension_numbers.lhs_batch_dimensions();
}
return dimension_numbers.rhs_batch_dimensions();
}
absl::StatusOr<int64_t> ContractingDimensionIndex(const HloInstruction& dot,
const int operand_number) {
const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers();
if (operand_number == 0) {
TF_RET_CHECK(dimension_numbers.lhs_contracting_dimensions().size() == 1);
return dimension_numbers.lhs_contracting_dimensions(0);
}
TF_RET_CHECK(dimension_numbers.rhs_contracting_dimensions().size() == 1);
return dimension_numbers.rhs_contracting_dimensions(0);
}
absl::StatusOr<int64_t> NonContractingDimensionIndex(const HloInstruction& dot,
const int operand_number) {
TF_ASSIGN_OR_RETURN(int64_t contracting_dim,
ContractingDimensionIndex(dot, operand_number));
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> non_contracting_dims,
GetNonContractingDims(dot.operand(operand_number)->shape(),
BatchDimensionsForOperand(dot, operand_number),
{contracting_dim}));
TF_RET_CHECK(non_contracting_dims.size() == 1);
return non_contracting_dims.front();
}
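// Collapses `shape` into a rank-3 (batch, rows, columns) shape; each
// dimension group must be physically contiguous in `shape`'s layout.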
absl::StatusOr<Shape> GetBatchRowColumnShape(
const Shape& shape, absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> row_dims, absl::Span<const int64_t> col_dims) {
TF_RET_CHECK(shape.has_layout());
std::vector<int64_t> minor_to_major;
for (size_t i = 0; i < shape.rank();) {
auto check_physically_sequential =
[&](absl::Span<const int64_t> dims) -> absl::Status {
for (auto it = dims.rbegin(); it != dims.rend(); ++it) {
if (*it != shape.layout().minor_to_major()[i++])
return InvalidArgument("dims not physically_sequential");
}
return absl::OkStatus();
};
int64_t dim = shape.layout().minor_to_major()[i];
if (!row_dims.empty() && dim == row_dims.back()) {
minor_to_major.push_back(1);
TF_RETURN_IF_ERROR(check_physically_sequential(row_dims));
} else if (!col_dims.empty() && dim == col_dims.back()) {
minor_to_major.push_back(2);
TF_RETURN_IF_ERROR(check_physically_sequential(col_dims));
} else if (!batch_dims.empty() && (dim == batch_dims.back())) {
minor_to_major.push_back(0);
TF_RETURN_IF_ERROR(check_physically_sequential(batch_dims));
} else {
return InvalidArgument("dims not physically sequential");
}
}
if (col_dims.empty()) minor_to_major.push_back(2);
if (row_dims.empty()) minor_to_major.push_back(1);
if (batch_dims.empty()) minor_to_major.push_back(0);
auto dim_size = [&](absl::Span<const int64_t> dims) {
return absl::c_accumulate(dims, 1, [&](int64_t size, int64_t dim) {
return size * shape.dimensions(dim);
});
};
return ShapeUtil::MakeShapeWithDenseLayout(
shape.element_type(),
{dim_size(batch_dims), dim_size(row_dims), dim_size(col_dims)},
minor_to_major);
}
absl::StatusOr<MatrixLayout> MatrixLayout::For(const Shape& shape) {
TF_RET_CHECK(shape.rank() == 3);
TF_RET_CHECK(shape.has_layout());
int64_t batch_size = shape.dimensions(0);
int64_t num_rows = shape.dimensions(1);
int64_t num_cols = shape.dimensions(2);
Order order{Order::kRowMajor};
int64_t leading_dim_stride = num_cols;
int64_t batch_stride = num_rows * num_cols;
absl::Span<const int64_t> minor_to_major = shape.layout().minor_to_major();
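// Each case encodes minor_to_major as one octal digit per dimension, e.g.
// 012 means minor_to_major = {2, 1, 0} (row-major with batch outermost).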
switch (64 * minor_to_major[2] + 8 * minor_to_major[1] + minor_to_major[0]) {
case 012:
break;
case 021:
order = Order::kColumnMajor;
leading_dim_stride = num_rows;
break;
case 0102:
leading_dim_stride = batch_size * num_cols;
batch_stride = num_cols;
break;
case 0201:
order = Order::kColumnMajor;
leading_dim_stride = batch_size * num_rows;
batch_stride = num_rows;
break;
default:
return Unimplemented("batch in most minor dimension");
}
if (batch_size == 1) {
batch_stride = 0;
}
return MatrixLayout{se::gpu::MatrixLayout{shape.element_type(), num_rows,
num_cols, order, batch_size,
leading_dim_stride, batch_stride}};
}
absl::StatusOr<MatrixLayout> MatrixLayout::For(
const Shape& shape, absl::Span<const int64_t> batch_dims,
absl::Span<const int64_t> row_dims, absl::Span<const int64_t> col_dims) {
TF_ASSIGN_OR_RETURN(
Shape batch_row_col_shape,
GetBatchRowColumnShape(shape, batch_dims, row_dims, col_dims));
return MatrixLayout::For(batch_row_col_shape);
}
absl::StatusOr<MatrixLayout> MatrixLayout::For(
const Shape& shape, size_t lhs_num_batch_dims, size_t lhs_num_row_dims,
size_t rhs_num_batch_dims, size_t rhs_num_col_dims) {
size_t num_batch_dims = std::max(lhs_num_batch_dims, rhs_num_batch_dims);
TF_RET_CHECK(shape.rank() ==
num_batch_dims + lhs_num_row_dims + rhs_num_col_dims);
std::vector<int64_t> dims(shape.rank());
absl::c_iota(dims, 0);
auto batch_dims = absl::Span<const int64_t>(dims).first(num_batch_dims);
auto row_dims =
absl::Span<const int64_t>(dims).subspan(num_batch_dims, lhs_num_row_dims);
auto col_dims = absl::Span<const int64_t>(dims).last(rhs_num_col_dims);
return MatrixLayout::For(shape, batch_dims, row_dims, col_dims);
}
namespace {
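// Returns the argsort of `dims`, i.e. the indices of `dims` ordered by
// increasing dimension value.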
std::vector<int64_t> NormalizedRelativeOrder(absl::Span<const int64_t> dims) {
std::vector<int64_t> indices(dims.size());
absl::c_iota(indices, 0);
absl::c_sort(indices,
[&](int64_t a, int64_t b) { return dims[a] < dims[b]; });
return indices;
}
}
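// A transpose feeding a dot can be folded if it preserves the relative order
// of the non-contracting dimensions and the transposed operand still admits
// a valid (batch, row, column) matrix layout.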
absl::StatusOr<bool> CanFoldTransposeOperandIntoDot(const HloInstruction& dot,
int64_t operand_idx) {
if (Cast<HloDotInstruction>(&dot)->sparse_operands()) {
return false;
}
TF_RET_CHECK(dot.opcode() == HloOpcode::kDot);
TF_RET_CHECK(dot.operand_count() > operand_idx);
const HloInstruction& transpose = *dot.operand(operand_idx);
TF_RET_CHECK(transpose.opcode() == HloOpcode::kTranspose);
const DotDimensionNumbers& dot_dims = dot.dot_dimension_numbers();
auto transposed = [&](const auto& dims) {
std::vector<int64_t> transposed_dims;
transposed_dims.reserve(dims.size());
for (int64_t dim : dims) {
transposed_dims.push_back(transpose.dimensions(dim));
}
return transposed_dims;
};
auto batch_dims = (operand_idx == 0) ? dot_dims.lhs_batch_dimensions()
: dot_dims.rhs_batch_dimensions();
auto contracting_dims = (operand_idx == 0)
? dot_dims.lhs_contracting_dimensions()
: dot_dims.rhs_contracting_dimensions();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> non_contracting_dims,
GetNonContractingDims(transpose.shape(), batch_dims, contracting_dims));
auto transposed_non_contracting_dims = transposed(non_contracting_dims);
if (NormalizedRelativeOrder(non_contracting_dims) !=
NormalizedRelativeOrder(transposed_non_contracting_dims)) {
return false;
}
return MatrixLayout::For(transpose.operand(0)->shape(),
transposed(batch_dims), transposed(contracting_dims),
transposed_non_contracting_dims)
.ok();
}
absl::StatusOr<GemmConfig> GemmConfig::For(
const Shape& lhs_shape, absl::Span<const int64_t> lhs_batch_dims,
absl::Span<const int64_t> lhs_contracting_dims, const Shape& rhs_shape,
absl::Span<const int64_t> rhs_batch_dims,
absl::Span<const int64_t> rhs_contracting_dims, const Shape& output_shape,
double alpha_real, double alpha_imag, double beta,
PrecisionConfig::Algorithm precision_algorithm,
std::optional<int64_t> algorithm, int64_t compute_precision, bool grad_x,
bool grad_y) {
return GemmConfig::For(lhs_shape, lhs_batch_dims, lhs_contracting_dims,
rhs_shape, rhs_batch_dims, rhs_contracting_dims,
output_shape, nullptr,
output_shape, alpha_real, alpha_imag, beta,
precision_algorithm, algorithm, compute_precision,
grad_x, grad_y);
}
absl::StatusOr<GemmConfig> GemmConfig::For(
const Shape& lhs_shape, absl::Span<const int64_t> lhs_batch_dims,
absl::Span<const int64_t> lhs_contracting_dims, const Shape& rhs_shape,
absl::Span<const int64_t> rhs_batch_dims,
absl::Span<const int64_t> rhs_contracting_dims, const Shape& c_shape,
const Shape* bias_shape_ptr, const Shape& output_shape, double alpha_real,
double alpha_imag, double beta,
PrecisionConfig::Algorithm precision_algorithm,
std::optional<int64_t> algorithm, int64_t compute_precision, bool grad_x,
bool grad_y) {
absl::Span<const int64_t> lhs_col_dims = lhs_contracting_dims;
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> lhs_row_dims,
GetNonContractingDims(lhs_shape, lhs_batch_dims, lhs_col_dims));
TF_ASSIGN_OR_RETURN(
MatrixLayout lhs_layout,
MatrixLayout::For(lhs_shape, lhs_batch_dims, lhs_row_dims, lhs_col_dims));
absl::Span<const int64_t> rhs_row_dims = rhs_contracting_dims;
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> rhs_col_dims,
GetNonContractingDims(rhs_shape, rhs_batch_dims, rhs_row_dims));
TF_ASSIGN_OR_RETURN(
MatrixLayout rhs_layout,
MatrixLayout::For(rhs_shape, rhs_batch_dims, rhs_row_dims, rhs_col_dims));
int64_t num_batch_dims =
std::max(lhs_batch_dims.size(), rhs_batch_dims.size());
TF_RET_CHECK(output_shape.rank() ==
num_batch_dims + lhs_row_dims.size() + rhs_col_dims.size());
std::vector<int64_t> output_dims(output_shape.rank());
absl::c_iota(output_dims, 0);
auto output_batch_dims =
absl::Span<const int64_t>(output_dims).first(num_batch_dims);
auto output_row_dims = absl::Span<const int64_t>(output_dims)
.subspan(num_batch_dims, lhs_row_dims.size());
auto output_col_dims =
absl::Span<const int64_t>(output_dims).last(rhs_col_dims.size());
TF_ASSIGN_OR_RETURN(MatrixLayout output_layout,
MatrixLayout::For(output_shape, output_batch_dims,
output_row_dims, output_col_dims));
Shape c_matrix_shape = c_shape;
if (primitive_util::IsF8Type(lhs_shape.element_type()) &&
primitive_util::IsF8Type(output_shape.element_type()) && beta == 0.0) {
#if GOOGLE_CUDA
c_matrix_shape.set_element_type(
bias_shape_ptr != nullptr ? bias_shape_ptr->element_type() : BF16);
#endif
}
TF_ASSIGN_OR_RETURN(MatrixLayout c_layout,
MatrixLayout::For(c_matrix_shape, output_batch_dims,
output_row_dims, output_col_dims));
if (lhs_shape.element_type() != F8E4M3FN &&
lhs_shape.element_type() != F8E5M2) {
TF_RET_CHECK(lhs_layout.num_cols == rhs_layout.num_rows);
TF_RET_CHECK(output_layout.num_rows == lhs_layout.num_rows);
TF_RET_CHECK(output_layout.num_cols == rhs_layout.num_cols);
}
TF_RET_CHECK(c_layout.num_rows == output_layout.num_rows);
TF_RET_CHECK(c_layout.num_cols == output_layout.num_cols);
TF_RET_CHECK((lhs_layout.batch_size == output_layout.batch_size) ||
(lhs_layout.batch_size == 1));
TF_RET_CHECK((rhs_layout.batch_size == output_layout.batch_size) ||
(rhs_layout.batch_size == 1));
switch (output_shape.element_type()) {
case F8E4M3FN:
case F8E5M2:
case F8E4M3FNUZ:
case F8E5M2FNUZ:
case F16:
case BF16:
case F32:
case F64:
TF_RET_CHECK(alpha_imag == 0);
break;
case C64:
case C128:
break;
case S32:
TF_RET_CHECK(alpha_imag == 0);
if (lhs_layout.dtype != PrimitiveType::S8 ||
rhs_layout.dtype != PrimitiveType::S8) {
return Internal(
"For int32 gemm output only int8 input is supported, got input: "
"%s, %s",
primitive_util::LowercasePrimitiveTypeName(lhs_layout.dtype),
primitive_util::LowercasePrimitiveTypeName(rhs_layout.dtype));
}
break;
default:
return Internal("Unexpected GEMM datatype: %s",
primitive_util::LowercasePrimitiveTypeName(
output_shape.element_type()));
}
return GemmConfig{lhs_layout,
rhs_layout,
c_layout,
output_layout,
{alpha_real, alpha_imag},
beta,
compute_precision,
precision_algorithm,
algorithm,
grad_x,
grad_y};
}
namespace {
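// TF32 may be used when the algorithm is unset and the operand precision is
// at most HIGH, or when the explicitly chosen algorithm accepts TF32 inputs.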
bool IsTf32Allowed(PrecisionConfig::Algorithm algorithm,
int64_t compute_precision) {
if (algorithm == PrecisionConfig::ALG_UNSET) {
return compute_precision <= 1;
}
return algorithm_util::HasTf32InputType(algorithm);
}
}
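// Extracts a GemmConfig from a GEMM instruction's GemmBackendConfig: selected
// algorithm, alpha/beta, dot dimension numbers, the matrix bias (operand 2
// when beta != 0) and, if the epilogue requests one, the vector bias operand.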
absl::StatusOr<GemmConfig> GemmConfig::For(
const HloInstruction* gemm) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
gemm->backend_config<GpuBackendConfig>());
const GemmBackendConfig& config = gpu_config.gemm_backend_config();
std::optional<int64_t> algorithm;
if (config.algorithm_case() != GemmBackendConfig::ALGORITHM_NOT_SET) {
algorithm = config.selected_algorithm();
} else {
algorithm = se::blas::kDefaultAlgorithm;
}
const Shape& lhs_shape = gemm->operand(0)->shape();
const Shape& rhs_shape = gemm->operand(1)->shape();
const DotDimensionNumbers& dot_dims = config.dot_dimension_numbers();
const Shape& output_shape =
gemm->shape().IsTuple() ? gemm->shape().tuple_shapes(0) : gemm->shape();
bool has_matrix_bias = config.beta() != 0.;
Shape c_shape = has_matrix_bias ? gemm->operand(2)->shape() : output_shape;
std::optional<Shape> vector_bias_shape;
TF_ASSIGN_OR_RETURN(
bool has_vector_bias,
xla::gpu::gpublas_lt::EpilogueAddsVectorBias(config.epilogue()));
if (has_vector_bias) {
int vector_bias_index = has_matrix_bias ? 3 : 2;
if (primitive_util::IsF8Type(lhs_shape.element_type())) {
vector_bias_index += 4;
}
vector_bias_shape = gemm->operand(vector_bias_index)->shape();
}
auto attributes = gemm->frontend_attributes().map();
bool grad_x = (attributes["grad_x"] == "true");
bool grad_y = (attributes["grad_y"] == "true");
int64_t precision = se::blas::kDefaultComputePrecision;
for (auto operand_precision : config.precision_config().operand_precision()) {
precision = std::max(precision, static_cast<int64_t>(operand_precision));
}
const PrecisionConfig::Algorithm precision_algorithm =
config.precision_config().algorithm();
return GemmConfig::For(
lhs_shape, dot_dims.lhs_batch_dimensions(),
dot_dims.lhs_contracting_dimensions(), rhs_shape,
dot_dims.rhs_batch_dimensions(), dot_dims.rhs_contracting_dimensions(),
c_shape,
vector_bias_shape ? &vector_bias_shape.value() : nullptr, output_shape,
config.alpha_real(), config.alpha_imag(), config.beta(),
precision_algorithm, algorithm, precision, grad_x, grad_y);
}
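// Converts the stored layouts plus device buffers into BLAS matrix
// descriptors. If making the output column major requires it, the LHS and RHS
// buffers are swapped and the swap is reported in the returned
// DescriptorsTuple.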
absl::StatusOr<GemmConfig::DescriptorsTuple> GemmConfig::GetMatrixDescriptors(
se::DeviceMemoryBase lhs_buf, se::DeviceMemoryBase rhs_buf,
se::DeviceMemoryBase out_buf) const {
auto create_matrix_desc = [](const se::gpu::MatrixLayout& layout,
se::DeviceMemoryBase data)
-> absl::StatusOr<se::gpu::MatrixDescriptor> {
TF_ASSIGN_OR_RETURN(se::blas::DataType type,
se::gpu::AsBlasDataType(layout.dtype));
return se::gpu::MatrixDescriptor{
data, layout.leading_dim_stride, layout.batch_stride, type,
(layout.order == se::gpu::MatrixLayout::Order::kColumnMajor
? se::blas::Transpose::kNoTranspose
: se::blas::Transpose::kTranspose)};
};
se::gpu::MatrixLayout lhs = lhs_layout, rhs = rhs_layout, out = output_layout;
bool must_swap_operands = MakeOutputColumnMajor(lhs, rhs, out);
if (must_swap_operands) {
std::swap(lhs_buf, rhs_buf);
}
TF_ASSIGN_OR_RETURN(se::gpu::OutputMatrixDescriptor out_desc,
create_matrix_desc(out, out_buf));
out_desc.batch_size = out.batch_size;
out_desc.m = out.num_rows;
out_desc.n = out.num_cols;
out_desc.k = lhs.num_cols;
TF_ASSIGN_OR_RETURN(out_desc.compute_type,
se::gpu::GetBlasComputationType(
PrecisionConfig::ALG_UNSET, lhs.dtype, out.dtype,
se::blas::kDefaultComputePrecision));
TF_ASSIGN_OR_RETURN(se::gpu::MatrixDescriptor lhs_desc,
create_matrix_desc(lhs, lhs_buf));
TF_ASSIGN_OR_RETURN(se::gpu::MatrixDescriptor rhs_desc,
create_matrix_desc(rhs, rhs_buf));
return DescriptorsTuple{lhs_desc, rhs_desc, out_desc, must_swap_operands};
}
namespace {
template <typename Scale, typename Input, typename Output>
absl::Status DoGemmWithAlgorithm(const se::gpu::MatrixDescriptor& lhs,
const se::gpu::MatrixDescriptor& rhs,
const se::gpu::OutputMatrixDescriptor& output,
se::DeviceMemoryBase workspace, Scale alpha,
Scale beta, se::Stream* stream,
PrecisionConfig::Algorithm precision_algorithm,
se::blas::AlgorithmType algorithm,
se::blas::ComputePrecision compute_precision,
const se::NumericOptions& numeric_options,
se::blas::ProfileResult* profile_result,
se::blas:: | #include "xla/service/gpu/matmul_utils.h"
#include <cstdint>
#include <memory>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::tsl::testing::IsOkAndHolds;
TEST(GetNonContractingDimsTest, Valid) {
Shape shape = ParseShape("f32[1,2,3,4,5,6]").value();
  EXPECT_THAT(GetNonContractingDims(shape, {4}, {1, 5}),
              IsOkAndHolds(ElementsAre(0, 2, 3)));
}
using CanFoldTransposeOperandIntoDotTest = HloTestBase;
TEST_F(CanFoldTransposeOperandIntoDotTest, ArgTransposeFoldGemm) {
const char* hlo_text = R"(
HloModule ArgTransposeFoldGemm
ENTRY AddDotsFunc {
x = f32[3,2] parameter(0)
y = f32[3,4] parameter(1)
x_transposed = f32[2,3] transpose(x), dimensions={1, 0}
ROOT dot_a = f32[2,4] dot(x_transposed, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}
TEST_F(CanFoldTransposeOperandIntoDotTest, BatchedArgRowColTransposeFoldGemm) {
const char* hlo_text = R"(
HloModule BatchedArgRowColTransposeFoldGemm
ENTRY AddDotsFunc {
x = f32[5,3,2] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={0, 2, 1}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}
TEST_F(CanFoldTransposeOperandIntoDotTest, BatchRowTransposeFoldGemm) {
const char* hlo_text = R"(
HloModule BatchRowTransposeFoldCheck
ENTRY AddDotsFunc {
x = f32[2,5,3] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={1, 0, 2}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(true));
}
TEST_F(CanFoldTransposeOperandIntoDotTest,
BatchFromMinorDimTransposeDoesntFold) {
const char* hlo_text = R"(
HloModule BatchFromMinorDimTransposeDoesntFold
ENTRY AddDotsFunc {
x = f32[3,2,5] parameter(0)
y = f32[5,3,4] parameter(1)
x_transposed = f32[5,2,3] transpose(x), dimensions={2, 1, 0}
ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 0), IsOkAndHolds(false));
}
TEST_F(CanFoldTransposeOperandIntoDotTest,
TransposedNonContractingDimsDontFold) {
const char* hlo_text = R"(
HloModule TransposedNonContractingDimsDontFold
ENTRY AddDotsFunc {
x = f32[5,3,4]{2,1,0} parameter(1)
y = f32[5,2,6,3]{3,1,2,0} parameter(0)
y_transposed = f32[5,6,2,3]{3,2,1,0} transpose(y), dimensions={0, 2, 1, 3}
ROOT dot_a = f32[5,4,6,2]{3,2,1,0} dot(x, y_transposed), lhs_contracting_dims={1}, rhs_contracting_dims={3}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(CanFoldTransposeOperandIntoDot(*dot, 1), IsOkAndHolds(false));
}
struct GetBatchRowColumnShapeTestParams {
absl::string_view shape;
std::vector<int64_t> batch_dims;
std::vector<int64_t> row_dims;
std::vector<int64_t> col_dims;
absl::string_view expected_shape;
};
using GetBatchRowColumnShapeTest =
::testing::TestWithParam<GetBatchRowColumnShapeTestParams>;
TEST_P(GetBatchRowColumnShapeTest, ValidShape) {
const GetBatchRowColumnShapeTestParams& params = GetParam();
Shape shape = ParseShape(params.shape).value();
EXPECT_THAT(GetBatchRowColumnShape(shape, params.batch_dims, params.row_dims,
params.col_dims),
IsOkAndHolds(ParseShape(params.expected_shape).value()));
}
INSTANTIATE_TEST_SUITE_P(
GetBatchRowColumnShapeTests, GetBatchRowColumnShapeTest,
::testing::ValuesIn<GetBatchRowColumnShapeTestParams>({
{"f32[3,4]{1,0}", {}, {0}, {1},
"f32[1,3,4]{2,1,0}"},
{"f32[3,4]{0,1}", {}, {0}, {1}, "f32[1,3,4]{1,2,0}"},
{"f32[3,4]{1,0}", {}, {1}, {0}, "f32[1,4,3]{1,2,0}"},
{"f32[3,4,5]{2,1,0}", {0}, {1}, {2}, "f32[3,4,5]{2,1,0}"},
{"f32[3,4,5]{2,1,0}", {2}, {1}, {0}, "f32[5,4,3]{0,1,2}"},
{"f32[3,4,5,6,7,8]{5,2,4,1,3,0}",
{0, 3},
{1, 4},
{2, 5},
"f32[18,28,40]{2,1,0}"},
}));
TEST(GetBatchRowColumnShapeTest, BatchRowsColsInterleaved) {
Shape shape = ParseShape("f32[3,4,5,6,7,8]{5,4,3,2,1,0}").value();
  auto result = GetBatchRowColumnShape(shape, {0, 3}, {1, 4}, {2, 5});
EXPECT_FALSE(result.ok());
}
TEST(GetBatchRowColumnShapeTest, WrongPhysicalOrder) {
Shape shape = ParseShape("f32[3,4,5,6]{3,2,0,1}").value();
  auto result = GetBatchRowColumnShape(shape, {0, 1}, {2}, {3});
EXPECT_FALSE(result.ok());
}
using Order = MatrixLayout::Order;
struct GetMatrixLayoutTestParams {
absl::string_view shape;
int64_t batch_size;
int64_t num_rows;
int64_t num_cols;
Order order;
int64_t leading_dim_stride;
int64_t batch_stride;
};
using GetMatrixLayoutTest = ::testing::TestWithParam<GetMatrixLayoutTestParams>;
TEST_P(GetMatrixLayoutTest, ValidShape) {
const GetMatrixLayoutTestParams& params = GetParam();
Shape shape = ParseShape(params.shape).value();
MatrixLayout result = MatrixLayout::For(shape).value();
EXPECT_EQ(result.batch_size, params.batch_size);
EXPECT_EQ(result.num_rows, params.num_rows);
EXPECT_EQ(result.num_cols, params.num_cols);
EXPECT_EQ(result.order, params.order);
EXPECT_EQ(result.leading_dim_stride, params.leading_dim_stride);
EXPECT_EQ(result.batch_stride, params.batch_stride);
}
INSTANTIATE_TEST_SUITE_P(
GetMatrixLayoutTests, GetMatrixLayoutTest,
::testing::ValuesIn<GetMatrixLayoutTestParams>({
{"f32[3,4,5]{2,1,0}", 3, 4, 5,
Order::kRowMajor, 5,
20},
{"f32[3,4,5]{1,2,0}", 3, 4, 5, Order::kColumnMajor, 4, 20},
{"f32[3,4,5]{2,0,1}", 3, 4, 5, Order::kRowMajor, 15, 5},
{"f32[3,4,5]{1,0,2}", 3, 4, 5, Order::kColumnMajor, 12, 4},
}));
TEST(GetMatrixLayoutTest, BatchInMostMinorPhysicalDimension) {
Shape shape = ParseShape("f32[3,4,5]{0,2,1}").value();
EXPECT_FALSE(MatrixLayout::For(shape).ok());
}
using GetMatrixSizeRewriteThresholdTest = HloTestBase;
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulTooSmallForRewrite) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[100,30,3] parameter(0)
y = f32[100,3,3] parameter(1)
ROOT dot = f32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
IsOkAndHolds(true));
}
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulSupportedByClassicalEmitters) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[100,30,3] parameter(0)
y = f32[100,3,3] parameter(1)
ROOT dot = f32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_TRUE(IsDotSupportedByClassicalEmitters(*dot));
}
TEST_F(GetMatrixSizeRewriteThresholdTest,
MatMulUnsupportedByClassicalEmitters) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = s8[100,30,3] parameter(0)
y = s8[100,3,3] parameter(1)
ROOT dot = s32[100,30,3] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_FALSE(IsDotSupportedByClassicalEmitters(*dot));
}
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulLeftLargeEnoughForRewrite) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[50,2] parameter(0)
y = f32[2,2] parameter(1)
ROOT dot = f32[50,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
IsOkAndHolds(false));
}
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulRightLargeEnoughForRewrite) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[2,2] parameter(0)
y = f32[2,50] parameter(1)
ROOT dot = f32[2,50] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
IsOkAndHolds(false));
}
TEST_F(GetMatrixSizeRewriteThresholdTest, MatMulTogetherLargeEnoughForRewrite) {
const char* hlo_text = R"(
HloModule DotFuncModule
ENTRY DotFunc {
x = f32[4,16] parameter(0)
y = f32[16,4] parameter(1)
ROOT dot = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_text));
auto dot = module->entry_computation()->root_instruction();
EXPECT_THAT(IsMatrixMultiplicationTooSmallForRewriting(*dot, 100),
IsOkAndHolds(false));
}
}
}
} | 2,102 |
#ifndef XLA_SERVICE_GPU_GPU_SPMD_PIPELINE_H_
#define XLA_SERVICE_GPU_GPU_SPMD_PIPELINE_H_
#include <optional>
#include "absl/functional/function_ref.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_pass_pipeline.h"
namespace xla {
namespace gpu {
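// Appends the SPMD partitioning passes to `spmd_pipeline`: a fixed-point
// simplification sub-pipeline, the optional auto-sharding callback, sharding
// propagation, the stateful-RNG-aware SPMD partitioner and collective-permute
// motion.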
void AddSPMDPasses(
const HloModule* hlo_module,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
const se::GpuComputeCapability& compute_capability,
HloPassPipeline& spmd_pipeline,
std::optional<const absl::FunctionRef<void(HloPassPipeline&)>>
auto_sharding_func = std::nullopt);
}
}
#endif
#include "xla/service/gpu/gpu_spmd_pipeline.h"
#include <cstdint>
#include <optional>
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/conditional_simplifier.h"
#include "xla/service/gather_expander.h"
#include "xla/service/gpu/gpu_algebraic_simplifier.h"
#include "xla/service/hlo_constant_folding.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_fix.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/reshape_mover.h"
#include "xla/service/scatter_expander.h"
#include "xla/service/sharding_propagation.h"
#include "xla/service/sort_simplifier.h"
#include "xla/service/spmd/collective_permute_motion.h"
#include "xla/service/spmd/stateful_rng_spmd_partitioner.h"
#include "xla/service/tuple_simplifier.h"
#include "xla/service/while_loop_constant_sinking.h"
#include "xla/service/while_loop_simplifier.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
void AddSPMDPasses(
const HloModule* hlo_module,
const AlgebraicSimplifierOptions& layout_insensitive_algsimp_opts,
const se::GpuComputeCapability& compute_capability,
HloPassPipeline& spmd_pipeline,
std::optional<const absl::FunctionRef<void(HloPassPipeline&)>>
auto_sharding_func) {
const int64_t num_partitions = hlo_module->config().num_partitions();
CHECK_GE(num_partitions, 1);
HloPassPipeline& spmd_simplify =
spmd_pipeline.AddPass<HloPassFix<HloPassPipeline>>("spmd-simplify");
spmd_simplify.AddPass<GpuAlgebraicSimplifier>(layout_insensitive_algsimp_opts,
compute_capability);
spmd_simplify.AddPass<SortSimplifier>();
spmd_simplify.AddPass<TupleSimplifier>();
spmd_simplify.AddPass<ScatterExpander>(
ScatterExpander::kEliminateSimpleScatters);
spmd_simplify.AddPass<GatherExpander>(
GatherExpander::kEliminateSimpleGathers);
spmd_simplify.AddPass<WhileLoopConstantSinking>();
spmd_simplify.AddPass<WhileLoopSimplifier>();
ReshapeMoverOptions reshape_mover_options;
reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true;
spmd_simplify.AddPass<ReshapeMover>(reshape_mover_options);
spmd_simplify.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(
layout_insensitive_algsimp_opts, compute_capability);
spmd_simplify.AddPass<HloConstantFolding>();
spmd_simplify.AddPass<ConditionalSimplifier>();
spmd_pipeline.AddPass<HloConstantSplitter>();
spmd_simplify.AddPass<HloDCE>();
if (auto_sharding_func.has_value()) {
(*auto_sharding_func)(spmd_pipeline);
}
spmd_pipeline.AddPass<ShardingPropagation>(
true, false,
hlo_module->config().allow_spmd_sharding_propagation_to_output());
spmd_pipeline.AddPass<spmd::StatefulRngSpmdPartitioner>(
num_partitions, hlo_module->config().replica_count(),
hlo_module->config()
.debug_options()
.xla_gpu_threshold_for_windowed_einsum_mib(),
hlo_module->config()
.debug_options()
.xla_gpu_multi_streamed_windowed_einsum(),
true,
true);
spmd_pipeline.AddPass<CollectivePermuteMotion>();
}
}
} | #include "xla/service/gpu/gpu_spmd_pipeline.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/client/executable_build_options.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/algebraic_simplifier.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class GpuSpmdPartitioningTest : public HloTestBase,
public ::testing::WithParamInterface<bool> {
public:
absl::StatusOr<std::unique_ptr<HloModule>> PartitionComputation(
const char* hlo_module, int64_t num_devices) {
HloModuleConfig config = GetModuleConfigForTest(
1, num_devices);
config.set_num_partitions(num_devices);
TF_ASSIGN_OR_RETURN(auto module,
ParseAndReturnVerifiedModule(hlo_module, config));
EXPECT_FALSE(config.debug_options().xla_use_shardonnay())
<< "Shardonnay not supported yet";
HloPassPipeline spmd_pipeline("spmd-partitioner");
se::CudaComputeCapability ampere(8, 0);
AlgebraicSimplifierOptions alg_simplifier_options;
AddSPMDPasses(module.get(), alg_simplifier_options, ampere, spmd_pipeline,
std::nullopt);
TF_RETURN_IF_ERROR(spmd_pipeline.Run(module.get()).status());
XLA_VLOG_LINES(10, module->ToString());
return module;
}
protected:
bool UseShardonnay() const { return GetParam(); }
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_use_shardonnay(UseShardonnay());
return debug_options;
}
};
TEST_P(GpuSpmdPartitioningTest, DotWithEntryComputationLayout) {
if (UseShardonnay()) {
GTEST_SKIP() << "Shardonnay not supported yet";
}
const char* const kHloModule = R"(
HloModule module,
entry_computation_layout={(f32[8,16]{0,1}, f32[16,24]{1,0})
->f32[8,24]{1,0}}
ENTRY main {
%p0 = f32[8,16] parameter(0), sharding={devices=[1,8]<=[8]}
%p1 = f32[16,24] parameter(1), sharding={devices=[8,1]<=[8]}
ROOT %dot = f32[8,24] dot(%p0, %p1), lhs_contracting_dims={1},
rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
PartitionComputation(kHloModule, 8));
EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(0),
ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 2}, {0, 1}));
EXPECT_EQ(module->config().entry_computation_layout().parameter_shape(1),
ShapeUtil::MakeShapeWithDenseLayout(F32, {2, 24}, {1, 0}));
EXPECT_EQ(module->config().entry_computation_layout().result_shape(),
ShapeUtil::MakeShapeWithDenseLayout(F32, {8, 24}, {1, 0}));
}
std::string TestParamToString(
const ::testing::TestParamInfo<bool>& param_info) {
return param_info.param ? "Shardonnay" : "GSPMD";
}
INSTANTIATE_TEST_SUITE_P(All, GpuSpmdPartitioningTest,
::testing::Values(true, false), TestParamToString);
}
}
} | 2,103 |
#ifndef XLA_SERVICE_GPU_CUSTOM_KERNEL_FUSION_REWRITER_H_
#define XLA_SERVICE_GPU_CUSTOM_KERNEL_FUSION_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla::gpu {
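// Rewrites matched HLO patterns into kCustom fusions that are later lowered
// to custom kernel implementations. A minimal usage sketch, assuming a device
// description and a pattern registry are already available:
//
//   CustomKernelFusionRewriter rewriter(&device, &patterns);
//   TF_ASSIGN_OR_RETURN(bool changed, rewriter.Run(module, {}));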
class CustomKernelFusionRewriter : public HloModulePass {
public:
explicit CustomKernelFusionRewriter(
const se::DeviceDescription* device,
const CustomKernelFusionPatternRegistry* patterns =
CustomKernelFusionPatternRegistry::Default());
absl::string_view name() const override {
return "custom-kernel-fusion-rewriter";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const se::DeviceDescription* device_;
const CustomKernelFusionPatternRegistry* patterns_;
};
}
#endif
#include "xla/service/gpu/custom_kernel_fusion_rewriter.h"
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
CustomKernelFusionRewriter::CustomKernelFusionRewriter(
const se::DeviceDescription* device,
const CustomKernelFusionPatternRegistry* patterns)
: device_(device), patterns_(patterns) {}
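// Returns the matched instructions that have users outside of the pattern and
// therefore need an explicit replacement after fusion. Returns std::nullopt
// if an intermediate result escapes the pattern without a registered
// replacement, in which case the match is skipped.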
static std::optional<absl::flat_hash_set<HloInstruction*>>
GetPatternReplacements(const CustomKernelFusionPattern::Match& match) {
absl::flat_hash_set<HloInstruction*> requires_replacement;
absl::flat_hash_set<HloInstruction*> instructions_set(
match.instructions().begin(), match.instructions().end());
for (HloInstruction* instr : match.instructions()) {
for (HloInstruction* user : instr->users()) {
if (instr == match.root() || instructions_set.contains(user)) continue;
if (match.HasReplacement(instr)) {
requires_replacement.insert(instr);
continue;
}
VLOG(3) << "Custom kernel fusion intermediate result " << instr->name()
<< " has users outside of a matched pattern: " << user->name();
return std::nullopt;
}
}
return requires_replacement;
}
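// Collects the operands of the matched instructions that are defined outside
// of the pattern; they become the parameters of the fusion body, in first-use
// order and without duplicates.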
static absl::InlinedVector<HloInstruction*, 4> GetPatternCaptures(
const CustomKernelFusionPattern::Match& match) {
absl::InlinedVector<HloInstruction*, 4> captures;
absl::flat_hash_set<HloInstruction*> instructions_set(
match.instructions().begin(), match.instructions().end());
for (HloInstruction* instr : match.instructions()) {
for (HloInstruction* operand : instr->operands()) {
if (!instructions_set.contains(operand) &&
absl::c_find(captures, operand) == captures.end()) {
captures.emplace_back(operand);
}
}
}
return captures;
}
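// Clones the matched instructions into a new fusion computation, mapping each
// capture to a parameter. If the pattern needs scratch memory, a workspace
// custom-call is appended and the root becomes a (result, workspace) tuple.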
static absl::StatusOr<HloComputation*> CreateFusionBody(
HloModule* module, const CustomKernelFusionPattern::Match& match,
absl::Span<HloInstruction* const> captures) {
HloComputation::Builder builder(match.config().name());
absl::flat_hash_map<const HloInstruction*, HloInstruction*> instr_mapping;
auto mapped_operands = [&](HloInstruction* instr) {
absl::InlinedVector<HloInstruction*, 4> operands;
for (HloInstruction* operand : instr->operands()) {
operands.push_back(instr_mapping.at(operand));
}
return operands;
};
for (const HloInstruction* capture : captures) {
int64_t index = instr_mapping.size();
instr_mapping[capture] =
builder.AddInstruction(HloInstruction::CreateParameter(
index, capture->shape(), absl::StrCat("p", index)));
}
for (HloInstruction* instr : match.instructions()) {
instr_mapping[instr] = builder.AddInstruction(
instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr)));
}
HloInstruction* root = builder.last_added_instruction();
if (match.workspace_size_bytes() > 0) {
auto workspace_shape =
ShapeUtil::MakeShape(PrimitiveType::U8, {match.workspace_size_bytes()});
HloInstruction* workspace =
builder.AddInstruction(HloInstruction::CreateCustomCall(
workspace_shape, {}, CustomKernelFusionPattern::kWorkspace, "",
CustomCallApiVersion::API_VERSION_TYPED_FFI));
builder.AddInstruction(HloInstruction::CreateTuple({root, workspace}));
}
return module->AddComputationAndUnifyNamesAndIds(builder.Build(), false);
}
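// Creates the kCustom fusion calling `body`, attaches the __custom_fusion
// backend config, and, when a workspace is present, returns a
// get-tuple-element of the real result rather than the tuple-shaped fusion.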
static absl::StatusOr<HloInstruction*> CreateFusionInstruction(
HloModule* module, const CustomKernelFusionPattern::Match& match,
absl::Span<HloInstruction* const> captures, HloComputation* body) {
HloInstruction* root = match.root();
HloComputation* parent = root->parent();
HloInstruction* fusion = parent->AddInstruction(HloInstruction::CreateFusion(
body->root_instruction()->shape(), HloInstruction::FusionKind::kCustom,
captures, body));
module->SetAndUniquifyInstrName(fusion, match.config().name());
GpuBackendConfig gpu_config;
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind("__custom_fusion");
*backend_config.mutable_custom_fusion_config() = match.config();
TF_RETURN_IF_ERROR(fusion->set_backend_config(std::move(gpu_config)));
if (match.workspace_size_bytes() == 0) return fusion;
return parent->AddInstruction(
HloInstruction::CreateGetTupleElement(fusion, 0));
}
absl::StatusOr<bool> CustomKernelFusionRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<CustomKernelFusionPattern::Match> matches;
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instr : computation->instructions()) {
auto matched = patterns_->Match(*device_, instr);
matches.insert(matches.end(), matched.begin(), matched.end());
}
}
if (matches.empty()) return false;
for (const CustomKernelFusionPattern::Match& match : matches) {
VLOG(2) << "Matched custom kernel fusion " << match.config().name()
<< "; root instruction: " << match.instructions().back()->name();
    auto replacements = GetPatternReplacements(match);
    if (!replacements.has_value()) continue;
auto captures = GetPatternCaptures(match);
TF_ASSIGN_OR_RETURN(HloComputation * fusion_body,
CreateFusionBody(module, match, captures));
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion,
CreateFusionInstruction(module, match, captures, fusion_body));
VLOG(2) << "Added a fusion instruction: " << fusion->name()
<< " for custom kernel fusion " << match.config().name()
<< " (instruction count = " << match.instructions().size() << ")";
    for (HloInstruction* instr : *replacements) {
VLOG(2) << "Replace matched instruction: " << instr->name()
<< " with a pattern replacement";
TF_ASSIGN_OR_RETURN(
HloInstruction * replacement,
match.BuildReplacement(instr, Cast<HloFusionInstruction>(fusion)));
TF_RETURN_IF_ERROR(
instr->ReplaceAllUsesWith(replacement, match.config().name()));
VLOG(2) << "Replaced instruction: " << instr->name()
<< " with: " << replacement->name();
}
VLOG(2) << "Replace custom kernel fusion root instruction "
<< match.root()->name() << "with " << fusion->name();
HloComputation* parent = match.root()->parent();
TF_RETURN_IF_ERROR(parent->ReplaceInstruction(match.root(), fusion));
}
return true;
}
} | #include "xla/service/gpu/custom_kernel_fusion_rewriter.h"
#include <cstdint>
#include <optional>
#include <utility>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
struct SimpleGemmPattern : public CustomKernelFusionPattern {
explicit SimpleGemmPattern(int64_t workspace = 0) : workspace(workspace) {}
std::optional<Match> TryMatch(const se::DeviceDescription& device,
HloInstruction* instr) const override {
if (auto* dot = DynCast<HloDotInstruction>(instr)) {
CustomFusionConfig config;
config.set_name("simple_gemm");
return Match{config, {instr}, workspace};
}
return std::nullopt;
}
int64_t workspace;
};
class CustomKernelFusionRewriterTest : public HloTestBase {};
TEST_F(CustomKernelFusionRewriterTest, SimpleGemm) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: f16[15,19], p1: f16[19,17]) -> f16[15,17] {
%p0 = f16[15,19]{1,0} parameter(0)
%p1 = f16[19,17]{1,0} parameter(1)
ROOT %r = f16[15,17]{1,0} dot(%p0, %p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
const char* expected = R"(
; CHECK: %simple_gemm {{.*}} {
; CHECK: [[P0:%[^ ]+]] = f16[15,19]{1,0} parameter(0)
; CHECK: [[P1:%[^ ]+]] = f16[19,17]{1,0} parameter(1)
; CHECK: ROOT [[DOT:%[^ ]+]] = f16[15,17]{1,0} dot([[P0]], [[P1]]),
; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0}
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[15,17]{1,0} fusion
; CHECK: kind=kCustom, calls=%simple_gemm,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"simple_gemm"}
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<SimpleGemmPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CustomKernelFusionRewriterTest, SimpleGemmWithWorkspace) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: f16[15,19], p1: f16[19,17]) -> f16[15,17] {
%p0 = f16[15,19]{1,0} parameter(0)
%p1 = f16[19,17]{1,0} parameter(1)
ROOT %r = f16[15,17]{1,0} dot(%p0, %p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
const char* expected = R"(
; CHECK: %simple_gemm {{.*}} {
; CHECK: [[P0:%[^ ]+]] = f16[15,19]{1,0} parameter(0)
; CHECK: [[P1:%[^ ]+]] = f16[19,17]{1,0} parameter(1)
; CHECK: [[DOT:%[^ ]+]] = f16[15,17]{1,0} dot([[P0]], [[P1]]),
; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0}
; CHECK: [[WORKSPACE:%[^ ]+]] = u8[1024]{0} custom-call(),
; CHECK: custom_call_target="__custom_kernel_fusion$workspace"
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[15,17]{1,0}, u8[1024]{0})
; CHECK: tuple([[DOT]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = (f16[15,17]{1,0}, u8[1024]{0}) fusion
; CHECK: kind=kCustom, calls=%simple_gemm,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"simple_gemm"}
; CHECK: }
; CHECK: ROOT {{.*}} get-tuple-element([[FUSION]]), index=0
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<SimpleGemmPattern>(1024);
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
} | 2,104 |
#ifndef XLA_SERVICE_GPU_COPY_FUSION_H_
#define XLA_SERVICE_GPU_COPY_FUSION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
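// Sinks copies of a fusion's outputs into the fusion itself: the copied
// values become additional tuple outputs of the fusion and the external
// copies are replaced by get-tuple-elements, so no separate copy kernels are
// launched.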
class CopyFusion : public HloModulePass {
public:
CopyFusion() = default;
absl::string_view name() const override { return "copy_fusion"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> DoCopyFusion(HloComputation* computation);
};
}
}
#endif
#include "xla/service/gpu/copy_fusion.h"
#include <cstdint>
#include <queue>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
namespace gpu {
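// Returns true if every instruction reachable from the fusion parameters is
// either elementwise on the traversed operand (copies excluded) or a bitcast
// or tuple; only then is it safe to duplicate the fused work for extra copy
// outputs.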
bool OnlyElementwiseOpsReachableFromParams(HloComputation* fused_computation) {
std::queue<const HloInstruction*> q;
absl::flat_hash_set<const HloInstruction*> visited;
for (auto param : fused_computation->parameter_instructions()) {
q.push(param);
visited.insert(param);
}
while (!q.empty()) {
const HloInstruction* hlo = q.front();
q.pop();
for (auto user : hlo->users()) {
if ((!user->IsElementwiseOnOperand(user->operand_index(hlo)) ||
user->opcode() == HloOpcode::kCopy) &&
user->opcode() != HloOpcode::kBitcast &&
user->opcode() != HloOpcode::kTuple) {
return false;
}
if (visited.insert(user).second) {
q.push(user);
}
}
}
return true;
}
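// For each fusion in `computation`, tries to absorb eligible copies of its
// result(s): the fusion root is rewritten into a tuple that also produces the
// copied values, and each external copy is replaced by a get-tuple-element.
// Fusions rooted in reductions, scatters, slice multi-output tuples or
// in-place dynamic-update-slices are skipped.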
absl::StatusOr<bool> CopyFusion::DoCopyFusion(HloComputation* computation) {
bool changed = false;
std::vector<HloInstruction*> defs_before_uses =
computation->MakeInstructionPostOrder();
for (HloInstruction* hlo : defs_before_uses) {
if (hlo->opcode() != HloOpcode::kFusion) {
continue;
}
std::vector<HloInstruction*> copies;
std::vector<HloInstruction*> other_users;
HloComputation* fused_computation = hlo->fused_instructions_computation();
if (!OnlyElementwiseOpsReachableFromParams(fused_computation)) {
continue;
}
HloInstruction* root = fused_computation->root_instruction();
if (IsReductionFromOrToContiguousDimensions(*root) ||
root->opcode() == HloOpcode::kScatter ||
(hlo->IsMultiOutputFusion() &&
absl::c_all_of(root->operands(), [](const HloInstruction* slice) {
return slice->opcode() == HloOpcode::kSlice;
}))) {
continue;
}
for (auto user : hlo->users()) {
HloInstruction* copy_user = user;
if (copy_user->opcode() == HloOpcode::kGetTupleElement &&
copy_user->user_count() == 1) {
if (IsReductionFromOrToContiguousDimensions(
*(root->operand(copy_user->tuple_index())))) {
other_users.push_back(user);
continue;
}
copy_user = copy_user->users()[0];
}
if (copy_user->opcode() == HloOpcode::kBitcast &&
copy_user->user_count() == 1) {
copy_user = copy_user->users()[0];
}
if (copy_user->opcode() == HloOpcode::kCopy &&
copy_user->shape() == copy_user->operand(0)->shape() &&
!copy_user->shape().IsTuple() &&
!copy_user->HasControlDependencies()) {
copies.push_back(copy_user);
} else {
other_users.push_back(user);
}
}
if (copies.empty()) {
continue;
}
auto fusion_adaptor = HloFusionAdaptor::ForComputation(fused_computation);
auto dynamic_update_slices =
GetOutputDefiningDynamicUpdateSlices(fusion_adaptor->GetRoots());
if (!dynamic_update_slices.empty() &&
(root->opcode() != HloOpcode::kTuple ||
dynamic_update_slices.size() == root->shape().tuple_shapes_size())) {
continue;
}
changed = true;
HloInstruction::InstructionVector tuple_elements;
int64_t num_outputs =
hlo->IsMultiOutputFusion() ? root->operand_count() : int64_t{1};
tuple_elements.reserve(copies.size() + num_outputs);
if (hlo->IsMultiOutputFusion()) {
for (HloInstruction* operand : root->operands()) {
tuple_elements.push_back(operand);
}
} else {
tuple_elements.push_back(root);
}
for (auto copy : copies) {
HloInstruction* user = copy;
std::vector<HloInstruction*> operand_chain;
operand_chain.push_back(user);
while (user->operand(0) != hlo) {
user = user->mutable_operand(0);
operand_chain.push_back(user);
}
HloInstruction* clone_operand = root;
if (hlo->IsMultiOutputFusion()) {
clone_operand = root->mutable_operand(user->tuple_index());
CHECK_EQ(operand_chain.back()->opcode(), HloOpcode::kGetTupleElement);
operand_chain.pop_back();
}
for (int64_t i = operand_chain.size() - 1; i >= 0; --i) {
HloInstruction* user = operand_chain[i];
clone_operand = fused_computation->AddInstruction(
user->CloneWithNewOperands(user->shape(), {clone_operand}));
}
tuple_elements.push_back(clone_operand);
}
HloInstruction* new_root = fused_computation->AddInstruction(
HloInstruction::CreateTuple(tuple_elements));
fused_computation->set_root_instruction(new_root,
true);
*hlo->mutable_shape() = new_root->shape();
if (root->opcode() == HloOpcode::kTuple) {
TF_RETURN_IF_ERROR(fused_computation->RemoveInstruction(root));
} else {
auto get_tuple_element_root = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(hlo, 0));
TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWithDifferentShape(
other_users, get_tuple_element_root));
}
for (int64_t i = 0; i < copies.size(); ++i) {
auto get_tuple_element = computation->AddInstruction(
HloInstruction::CreateGetTupleElement(hlo, num_outputs + i));
TF_RETURN_IF_ERROR(
computation->ReplaceInstruction(copies[i], get_tuple_element));
}
}
return changed;
}
absl::StatusOr<bool> CopyFusion::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return DoCopyFusion(module->entry_computation());
}
}
} | #include "xla/service/gpu/copy_fusion.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace m = ::xla::match;
class CopyFusionTest : public HloTestBase {
public:
CopyFusion cf_;
};
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add_computation {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0)
}
scalar_mul_computation {
scalar_lhs.1 = f32[] parameter(0)
scalar_rhs.1 = f32[] parameter(1)
ROOT mul.1 = f32[] multiply(scalar_lhs.1, scalar_rhs.1)
})";
TEST_F(CopyFusionTest, CopyFusionTransposeOfBroadcastedConstantTwoCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
two = f32[] constant(2.0)
broadcast = f32[16,32]{1,0} broadcast(two), dimensions={}
s.1 = f32[16,32]{1,0} sqrt(broadcast)
ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
fusion = f32[32,16]{1,0} fusion(), kind=kInput, calls=fused_computation
copy.1 = f32[32,16]{1,0} copy(fusion)
copy.2 = f32[32,16]{1,0} copy(fusion)
ROOT t = (f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.2, copy.1)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Transpose(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionTransposeTwoCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
param_0.1 = f32[16,32]{1,0} parameter(0)
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY main {
p = f32[16,32]{1,0} parameter(0)
fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation
copy.1 = f32[32,16]{1,0} copy(fusion)
copy.2 = f32[32,16]{1,0} copy(fusion)
ROOT t = (f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.2, copy.1)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionNegateAndTwoCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
copy.2 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
const.1 = f32[] parameter(0)
ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation
}
ENTRY entry {
p0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
fusion = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation
copy.1 = f32[512]{0} copy(fusion)
copy.2 = f32[512]{0} copy(fusion)
ROOT root = (f32[512]{0}, f32[512]{0}) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionShouldRunWithUncopiedReduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
two = f32[] constant(2.0)
broadcast = f32[128,512,28,28]{3,2,1,0} broadcast(two)
mul = f32[128,512,28,28]{3,2,1,0} multiply(broadcast, broadcast)
const = f32[] constant(0.0)
reduce = f32[512]{0} reduce(mul, const), dimensions={0,2,3}, to_apply=scalar_add_computation
ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[512]{0}) tuple(mul, reduce)
}
ENTRY entry {
fusion = (f32[128,512,28,28]{3,2,1,0}, f32[512]) fusion(), kind=kInput, calls=fused_computation
gte = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=0
gte.2 = f32[512]{0} get-tuple-element(fusion), index=1
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512]{0}) tuple(copy.1, gte.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Multiply(), m::Reduce(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionShouldNotFuseForSliceMultioutputFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)
slice1 = f32[128,100,28,28]{3,2,1,0} slice(mul), slice={[0:128],[0:100],[0:28],[0:28]}
slice2 = f32[128,200,28,28]{3,2,1,0} slice(mul), slice={[0:128],[50:250],[0:28],[0:28]}
ROOT tuple = (f32[128,100,28,28]{3,2,1,0}, f32[128,200,28,28]{3,2,1,0}) tuple(slice1, slice2)
}
ENTRY entry {
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[128,100,28,28]{3,2,1,0}, f32[128,200,28,28]{3,2,1,0}) fusion(p1), kind=kInput, calls=fused_computation
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithScatter) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p0 = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} negate(p0)
ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(input_tensor, scatter_indices, updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, to_apply=scalar_add_computation
}
ENTRY entry {
param.0 = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0)
param.1 = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1)
param.2 = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2)
fusion = f32[50,49,48,47,46]{4,3,2,1,0} fusion(param.0, param.1, param.2), kind=kInput, calls=fused_computation
ROOT copy = f32[50,49,48,47,46]{4,3,2,1,0} copy(fusion)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionShouldNotRunOutsideEntryComputation) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation.549 {
param_0.8511 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} parameter(0)
bitcast.52601 = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} bitcast(param_0.8511)
slice = bf16[15,1,2,48,128,1]{5,4,3,2,1,0} slice(bitcast.52601), slice={[0:15:1], [0:1:1], [0:2:1], [0:48:1], [0:128:1], [0:1:1]}
bitcast = bf16[15,1,2,48,128]{4,3,2,1,0} bitcast(slice)
ROOT broadcast = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} broadcast(bitcast), dimensions={0,1,2,3,4}
}
condition {
constant_6915 = s32[] constant(15)
param.218 = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) parameter(0)
get-tuple-element.3714 = s32[] get-tuple-element(param.218), index=1
ROOT compare.1738 = pred[] compare(get-tuple-element.3714, constant_6915), direction=LT
}
body {
tuple_param = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) parameter(0)
param_0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} get-tuple-element(tuple_param), index=0
param_1 = s32[] get-tuple-element(tuple_param), index=1
fusion.549 = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} fusion(param_0), kind=kLoop, calls=fused_computation.549
bitcast = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} bitcast(fusion.549)
copy = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} copy(bitcast)
constant_one = s32[] constant(1)
add = s32[] add(param_1, constant_one), control-predecessors={fusion.549}
ROOT tuple = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) tuple(copy, add)
}
ENTRY main {
param_0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} parameter(0)
zero = s32[] constant(0)
copy.0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} copy(param_0)
copy.1 = s32[] copy(zero)
tuple = tuple(copy.0, copy.1)
ROOT while = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) while(tuple), condition=condition, body=body, backend_config="{\"known_trip_count\":{\"n\":\"15\"}}"
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithDynamicUpdateSliceInplace) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p.0 = f16[50,96,1024]{2,1,0} parameter(0)
p.1 = f16[1,96,1024]{2,1,0} parameter(1)
c.0 = s32[3]{0} constant({0, 0, 0})
ROOT %dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0)
}
ENTRY entry {
p0 = f16[50,96,1024]{2,1,0} parameter(0)
p1 = f16[1,96,1024]{2,1,0} parameter(1)
fusion = f16[50,96,1024]{2,1,0} fusion(p0, p1), kind=kInput, calls=fused_computation
copy.1 = f16[50,96,1024]{2,1,0} copy(fusion)
copy.2 = f16[50,96,1024]{2,1,0} copy(fusion)
ROOT root = (f16[50,96,1024]{2,1,0}, f16[50,96,1024]{2,1,0}) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionWithDynamicUpdateSliceNotInplace) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
one = f32[] constant(1.0)
zero = f32[] constant(0.0)
p.0 = f16[50,96,1024]{2,1,0} broadcast(one), dimensions={}
p.1 = f16[1,96,1024]{2,1,0} broadcast(zero), dimensions={}
c.0 = s32[3]{0} constant({0, 0, 0})
dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0)
neg = f16[50,96,1024]{2,1,0} negate(dynamic-update-slice)
ROOT tuple = (f16[50,96,1024]{2,1,0}, f16[50,96,1024]{2,1,0}) tuple(dynamic-update-slice, neg)
}
ENTRY entry {
fusion = (f16[50,96,1024]{2,1,0}, f16[50,96,1024]{2,1,0}) fusion(), kind=kInput, calls=fused_computation
gte.0 = f16[50,96,1024]{2,1,0} get-tuple-element(fusion), index=0
gte.1 = f16[50,96,1024]{2,1,0} get-tuple-element(fusion), index=1
bitcast = f16[1,50,96,1024]{3,2,1,0} bitcast(gte.0)
copy = f16[1,50,96,1024]{3,2,1,0} copy(bitcast)
ROOT root = (f16[1,50,96,1024]{3,2,1,0}, f16[50,96,1024]{2,1,0}) tuple(copy, gte.1)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::DynamicUpdateSlice(), m::Negate(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionTransposeAndThreeCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
two = f32[] constant(2.0)
param_0.1 = f32[16,32]{1,0} broadcast(two), dimensions={}
s.1 = f32[16,32]{1,0} sqrt(param_0.1)
ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0}
}
ENTRY entry {
fusion = f32[32,16]{1,0} fusion(), kind=kInput, calls=fused_computation
copy.1 = f32[32,16]{1,0} copy(fusion)
copy.2 = f32[32,16]{1,0} copy(fusion)
copy.3 = f32[32,16]{1,0} copy(fusion)
ROOT root = (f32[32,16]{1,0}, f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.1, copy.2, copy.3)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement(), m::GetTupleElement())));
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Transpose(), m::Copy(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionRunWithOnlyOneCopy) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation
ROOT copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::GetTupleElement(m::Fusion(&fusion))));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionNegateAndTwoCopiesAndTransposeCopy) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
transpose = f32[128,512,28,28]{2,3,0,1} copy(fusion)
bitcast = f32[512,128,28,28]{3,2,1,0} bitcast(transpose)
copy.2 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, bitcast, copy.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::Bitcast(), m::GetTupleElement())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionRunWithOnlyOneNonTransposeCopy) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion)
transpose.1 = f32[128,512,28,28]{2,3,0,1} copy(fusion)
bitcast.1 = f32[512,128,28,28]{3,2,1,0} bitcast(transpose.1)
transpose.2 = f32[128,512,28,28]{2,3,0,1} copy(fusion)
bitcast.2 = f32[512,128,28,28]{3,2,1,0} bitcast(transpose.2)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}) tuple(copy.1, bitcast.1, bitcast.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::Bitcast(), m::Bitcast())));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionSkipTupleCopies) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul)
neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul)
ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation
copy.1 = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) copy(fusion)
copy.2 = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) copy(fusion)
ROOT root = ((f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}),(f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0})) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_FALSE(cf_.Run(module.get()).value());
}
TEST_F(CopyFusionTest, CopyFusionTupleAndGetTuple) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul)
neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul)
ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation
gte.1 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=0
gte.2 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=1
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte.1)
copy.2 = f32[128,512,28,28]{3,2,1,0} copy(gte.2)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, copy.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)),
m::GetTupleElement())));
EXPECT_THAT(
fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Negate(), m::Copy(), m::Copy())));
}
TEST_F(CopyFusionTest, CopyFusionWithFusionReturningTupleAndOtherUser) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation {
p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1)
neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul)
neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul)
ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation
gte.1 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=0
gte.2 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=1
copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte.1)
copy.2 = f32[128,512,28,28]{3,2,1,0} copy(gte.2)
transpose = f32[128,512,28,28]{2,3,0,1} copy(gte.1)
bitcast = f32[512,128,28,28]{3,2,1,0} bitcast(transpose)
ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, bitcast, copy.2)
})"))
.value();
ASSERT_TRUE(cf_.Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root = module->entry_computation()->root_instruction();
const HloInstruction* fusion = nullptr;
ASSERT_THAT(root,
GmockMatch(m::Tuple(m::Copy(), m::Bitcast(),
m::GetTupleElement(m::Fusion(&fusion)))));
EXPECT_THAT(fusion->fused_expression_root(),
GmockMatch(m::Tuple(m::Negate(), m::Negate(), m::Copy())));
}
}
}
#ifndef XLA_SERVICE_GPU_CONV_LAYOUT_NORMALIZATION_H_
#define XLA_SERVICE_GPU_CONV_LAYOUT_NORMALIZATION_H_
#include <optional>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
namespace xla {
namespace gpu {
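// Normalizes the layouts of GPU library custom calls (currently cuDNN
// convolutions). Returns a replacement instruction producing the original
// shape, or std::nullopt if no rewrite was needed.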
absl::StatusOr<std::optional<HloInstruction*>> NormalizeLayoutForGpuCustomCalls(
HloCustomCallInstruction*);
}
}
#endif
#include "xla/service/gpu/conv_layout_normalization.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
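// Rewrites a cuDNN convolution custom call so that all operands and results
// use the default descending layout, remapping the convolution dimension
// numbers and bitcasting back to the original shapes. Returns std::nullopt if
// the call is already in normalized form.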
absl::StatusOr<std::optional<HloInstruction*>> UpdateLayoutForCudnnConvolution(
HloCustomCallInstruction* hlo) {
HloInstruction* lhs = hlo->mutable_operand(0);
HloInstruction* rhs = hlo->mutable_operand(1);
const ConvolutionDimensionNumbers& dim_numbers =
hlo->convolution_dimension_numbers();
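  // Maps logical dimension `dim` of `unnormalized_shape` to the logical
  // dimension occupying the same physical position once the shape is rewritten
  // with a descending layout.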
auto transpose_dim = [&](int64_t dim, const Shape& unnormalized_shape) {
return unnormalized_shape.rank() -
FindIndex(unnormalized_shape.layout().minor_to_major(), dim) - 1;
};
auto transpose_dims = [&](tsl::protobuf::RepeatedField<int64_t>& dims,
const Shape& unnormalized_shape) {
for (auto& dim : dims) {
dim = transpose_dim(dim, unnormalized_shape);
}
};
const Shape& conv_output_shape =
hlo->shape().IsTuple() ? hlo->shape().tuple_shapes(0) : hlo->shape();
Shape input_shape, filter_shape, output_shape;
TF_ASSIGN_OR_RETURN(
gpu::CudnnConvKind conv_kind,
gpu::GetCudnnConvKind(Cast<HloCustomCallInstruction>(hlo)));
switch (conv_kind) {
case gpu::CudnnConvKind::kForward:
case gpu::CudnnConvKind::kForwardActivation:
case gpu::CudnnConvKind::kForwardGraph: {
input_shape = lhs->shape();
filter_shape = rhs->shape();
output_shape = conv_output_shape;
break;
}
case gpu::CudnnConvKind::kBackwardInput: {
filter_shape = rhs->shape();
output_shape = lhs->shape();
input_shape = conv_output_shape;
break;
}
case gpu::CudnnConvKind::kBackwardFilter: {
input_shape = lhs->shape();
output_shape = rhs->shape();
filter_shape = conv_output_shape;
break;
}
}
ConvolutionDimensionNumbers new_dim_numbers = dim_numbers;
new_dim_numbers.set_input_batch_dimension(
transpose_dim(dim_numbers.input_batch_dimension(), input_shape));
new_dim_numbers.set_input_feature_dimension(
transpose_dim(dim_numbers.input_feature_dimension(), input_shape));
transpose_dims(*new_dim_numbers.mutable_input_spatial_dimensions(),
input_shape);
new_dim_numbers.set_kernel_input_feature_dimension(transpose_dim(
dim_numbers.kernel_input_feature_dimension(), filter_shape));
new_dim_numbers.set_kernel_output_feature_dimension(transpose_dim(
dim_numbers.kernel_output_feature_dimension(), filter_shape));
transpose_dims(*new_dim_numbers.mutable_kernel_spatial_dimensions(),
filter_shape);
new_dim_numbers.set_output_batch_dimension(
transpose_dim(dim_numbers.output_batch_dimension(), output_shape));
new_dim_numbers.set_output_feature_dimension(
transpose_dim(dim_numbers.output_feature_dimension(), output_shape));
transpose_dims(*new_dim_numbers.mutable_output_spatial_dimensions(),
output_shape);
Shape normalized_shape;
if (hlo->shape().IsTuple()) {
TF_RET_CHECK(hlo->shape().tuple_shapes().back().rank() == 1)
<< "The last element in the tuple returned by a convolution Custom "
"Call is expected to be an "
"allocator of rank one";
std::vector<Shape> new_tuple_shape;
for (const Shape& tuple_shape : hlo->shape().tuple_shapes()) {
new_tuple_shape.emplace_back(
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
tuple_shape));
}
normalized_shape = ShapeUtil::MakeTupleShape(new_tuple_shape);
} else {
normalized_shape =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
hlo->shape());
}
std::vector<HloInstruction*> normalized_operands;
bool performed_normalization = false;
for (int idx = 0; idx < hlo->operand_count(); idx++) {
HloInstruction* op = hlo->mutable_operand(idx);
const Shape& s = op->shape();
Shape s_reordered =
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(s);
HloInstruction* normalized_op = op->mutable_operand(0);
HloInstruction* new_op;
if (normalized_op->shape() == s_reordered) {
new_op = normalized_op;
} else {
new_op = MakeBitcastHlo(op, s_reordered);
performed_normalization = true;
}
normalized_operands.push_back(new_op);
}
if (!performed_normalization &&
ShapeUtil::Equal(normalized_shape, hlo->shape()) &&
ConvolutionDimensionNumbersToString(new_dim_numbers) ==
ConvolutionDimensionNumbersToString(dim_numbers)) {
return std::nullopt;
}
HloInstruction* normalized_conv = hlo->parent()->AddInstruction(
HloInstruction::CreateCustomCall(normalized_shape, normalized_operands,
hlo->custom_call_target()),
&hlo->metadata());
normalized_conv->set_window(hlo->window());
normalized_conv->set_convolution_dimension_numbers(new_dim_numbers);
normalized_conv->set_feature_group_count(hlo->feature_group_count());
normalized_conv->set_raw_backend_config_string(
hlo->raw_backend_config_string());
*normalized_conv->mutable_precision_config() = hlo->precision_config();
normalized_conv->parent()->parent()->SetAndUniquifyInstrName(normalized_conv,
hlo->name());
HloInstruction* bc_to_orig;
if (normalized_conv->shape().IsTuple()) {
std::vector<HloInstruction*> tuple_elements(
normalized_conv->shape().tuple_shapes_size());
for (int i = 0; i < normalized_conv->shape().tuple_shapes_size(); ++i) {
TF_ASSIGN_OR_RETURN(HloInstruction * normalized_out,
MakeGetTupleElementHlo(normalized_conv, i));
tuple_elements[i] =
MakeBitcastHlo(normalized_out, hlo->shape().tuple_shapes(i));
}
bc_to_orig = MaybeMakeTuple(tuple_elements);
} else {
bc_to_orig = MakeBitcastHlo(normalized_conv, hlo->shape());
}
return bc_to_orig;
}
}
absl::StatusOr<std::optional<HloInstruction*>> NormalizeLayoutForGpuCustomCalls(
HloCustomCallInstruction* hlo) {
if (IsCustomCallToDnnConvolution(*hlo)) {
TF_ASSIGN_OR_RETURN(std::optional<HloInstruction*> bc_to_orig,
UpdateLayoutForCudnnConvolution(hlo));
return bc_to_orig;
}
return std::nullopt;
}
}
}
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_macros.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ConvolutionLayoutNormalizationTest = HloTestBase;
TEST_F(ConvolutionLayoutNormalizationTest, BackwardInput) {
const char* hlo = R"(
HloModule TestModule
%TestComputation1 (param_0: f32[1,20,257], param_1: f32[31,257,136]) -> (f32[1,23,136], u8[0]) {
%param_0 = f32[1,20,257]{2,1,0} parameter(0)
%copy.3 = f32[1,20,257]{1,2,0} copy(f32[1,20,257]{2,1,0} %param_0)
%param_1 = f32[31,257,136]{2,1,0} parameter(1)
%copy.4 = f32[31,257,136]{0,2,1} copy(f32[31,257,136]{2,1,0} %param_1)
%custom-call.1 = (f32[1,23,136]{1,2,0}, u8[0]{0}) custom-call(f32[1,20,257]{1,2,0} %copy.3, f32[31,257,136]{0,2,1} %copy.4), window={size=31 stride=2 pad=23_23}, dim_labels=b0f_0oi->b0f, custom_call_target="__cudnn$convBackwardInput", backend_config={"cudnn_conv_backend_config":{conv_result_scale:1}}
%get-tuple-element.2 = f32[1,23,136]{1,2,0} get-tuple-element((f32[1,23,136]{1,2,0}, u8[0]{0}) %custom-call.1), index=0
%copy.5 = f32[1,23,136]{2,1,0} copy(f32[1,23,136]{1,2,0} %get-tuple-element.2)
%get-tuple-element.3 = u8[0]{0} get-tuple-element((f32[1,23,136]{1,2,0}, u8[0]{0}) %custom-call.1), index=1
ROOT %tuple.1 = (f32[1,23,136]{2,1,0}, u8[0]{0}) tuple(f32[1,23,136]{2,1,0} %copy.5, u8[0]{0} %get-tuple-element.3)
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(ConvolutionLayoutNormalizationTest, Forward) {
const char* hlo = R"(
HloModule TestModule
ENTRY %TestComputation {
%param_0 = f32[2,128,1,378]{3,2,1,0} parameter(0)
%param_1 = f32[1,5,128,128]{1,0,2,3} parameter(1)
ROOT %custom-call.1 = (f32[2,128,1,378]{3,2,1,0}, u8[0]{0}) custom-call(%param_0, %param_1), window={size=1x5 pad=0_0x2_2}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward", backend_config={"cudnn_conv_backend_config":{conv_result_scale:1}}
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
TEST_F(ConvolutionLayoutNormalizationTest, DISABLED_ON_GPU_ROCM(FusedConv3D)) {
const char* hlo = R"(
HloModule TestModule
ENTRY TestComputation {
%p0 = f32[8,4,5,5,1] parameter(0)
%p1 = f32[3,3,3,1,32] parameter(1)
%conv = f32[8,4,5,5,32] convolution(p0, p1), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f
%bias = f32[32] parameter(2)
%broadcasted_bias = f32[8,4,5,5,32] broadcast(%bias), dimensions={4}
%add = f32[8,4,5,5,32] add(%conv, %broadcasted_bias)
%zero = f32[] constant(0)
%zeros = f32[8,4,5,5,32] broadcast(%zero), dimensions={}
ROOT relu = f32[8,4,5,5,32] maximum(%zeros, %add)
}
)";
MatchOptimizedHlo(hlo, R"(
)");
}
}
}
}
#ifndef XLA_SERVICE_GPU_GPU_SCHEDULE_POSTPROCESSING_H_
#define XLA_SERVICE_GPU_GPU_SCHEDULE_POSTPROCESSING_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
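// Post-scheduling pass that amends the backend config of asynchronous
// collective operations: if no custom call can execute between an async
// collective-start and its matching done op, the start is marked with
// no_parallel_custom_call=true.
//
// Minimal usage sketch (mirrors the unit tests paired with this pass):
//   GpuSchedulePostprocessing pass;
//   TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module.get()));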
class GpuSchedulePostprocessing : public HloModulePass {
public:
absl::string_view name() const override {
return "gpu-schedule-postprocessing";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/gpu_schedule_postprocessing.h"
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using CustomCallInComputation =
absl::flat_hash_map<const HloComputation*, bool>;
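// Returns true if `hlo` is a custom call or calls a computation that is known,
// via `custom_call_in_computation`, to contain one.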
bool MayInvokeCustomCall(
const HloInstruction* hlo,
const CustomCallInComputation& custom_call_in_computation) {
if (hlo->opcode() == HloOpcode::kCustomCall) {
return true;
}
return absl::c_any_of(
hlo->called_computations(), [&](const HloComputation* callee) {
return custom_call_in_computation.find(callee)->second;
});
}
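// Returns true for asynchronous collective-start ops whose backend config does
// not mark them as synchronous.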
absl::StatusOr<bool> IsRelevantAsynchronousStart(const HloInstruction* hlo) {
if (!hlo_query::IsAsyncCollectiveStartOp(hlo,
false)) {
return false;
}
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
hlo->backend_config<GpuBackendConfig>());
const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
return !collective_backend_config.is_sync();
}
absl::StatusOr<bool> IsRelevantAsynchronousDone(const HloInstruction* hlo) {
return hlo_query::IsAsyncCollectiveDoneOp(hlo,
false);
}
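// Walks the scheduled instruction sequence of `computation`; async starts whose
// matching done is reached without an intervening instruction that may invoke
// a custom call get no_parallel_custom_call=true. Also records whether the
// computation contains a custom call in `custom_call_in_computation`.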
absl::StatusOr<bool> ProcessComputation(
const HloSchedule& schedule, HloComputation* computation,
CustomCallInComputation& custom_call_in_computation) {
bool changed = false;
bool has_custom_call = false;
absl::flat_hash_set<HloInstruction*> async_starts;
const HloInstructionSequence& sequence = schedule.sequence(computation);
const std::vector<HloInstruction*>& all_instructions =
sequence.instructions();
for (HloInstruction* hlo : all_instructions) {
if (MayInvokeCustomCall(hlo, custom_call_in_computation)) {
async_starts.clear();
has_custom_call = true;
continue;
}
TF_ASSIGN_OR_RETURN(bool is_async_start, IsRelevantAsynchronousStart(hlo));
if (is_async_start) {
async_starts.insert(hlo);
continue;
}
TF_ASSIGN_OR_RETURN(bool is_async_done, IsRelevantAsynchronousDone(hlo));
if (is_async_done) {
HloInstruction* async_start = hlo->mutable_operand(0);
if (async_starts.contains(async_start)) {
changed = true;
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
async_start->backend_config<GpuBackendConfig>());
CollectiveBackendConfig& collective_backend_config =
*gpu_config.mutable_collective_backend_config();
collective_backend_config.set_no_parallel_custom_call(true);
TF_RETURN_IF_ERROR(async_start->set_backend_config(gpu_config));
async_starts.erase(async_start);
}
}
}
custom_call_in_computation[computation] = has_custom_call;
return changed;
}
}
absl::StatusOr<bool> GpuSchedulePostprocessing::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
if (!module->has_schedule()) return false;
HloSchedule& schedule = module->schedule();
bool changed = false;
CustomCallInComputation custom_call_in_computation;
std::vector<HloComputation*> all_computations =
module->MakeComputationPostOrder(execution_threads);
for (auto iter = all_computations.begin(); iter != all_computations.end();
++iter) {
HloComputation* computation = *iter;
if (computation->IsFusionComputation()) {
custom_call_in_computation[computation] = false;
continue;
}
TF_ASSIGN_OR_RETURN(
bool result,
ProcessComputation(schedule, computation, custom_call_in_computation));
changed |= result;
}
return changed;
}
}
}
#include "xla/service/gpu/gpu_schedule_postprocessing.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using GpuSchedulePostprocessingTest = HloTestBase;
TEST_F(GpuSchedulePostprocessingTest, SynchronousOpsNotChanged) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
pf32 = f32[1] parameter(0)
all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":true,"no_parallel_custom_call":false}}
ROOT all-gather-done = f32[2] all-gather-done(all-gather-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
GpuSchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(GpuSchedulePostprocessingTest, P2POpsNotChanged) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY main {
f0 = f32[] constant(0.0)
init = f32[1, 1024, 1024] broadcast(f0), dimensions={}
after-all = token[] after-all()
recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=2,
frontend_attributes={
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}}"
}
recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=2
ROOT recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
GpuSchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
}
TEST_F(GpuSchedulePostprocessingTest, AsynchronousOpsChanged) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
pf32 = f32[1] parameter(0)
pf32.2 = f32[1] custom-call(pf32), custom_call_target="my_custom_call"
all-gather-start = (f32[1], f32[2]) all-gather-start(pf32.2), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}}
ROOT all-gather-done = f32[2] all-gather-done(all-gather-start)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
GpuSchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_TRUE(changed);
HloInstruction* start = FindInstruction(module.get(), "all-gather-start");
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
start->backend_config<GpuBackendConfig>());
const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
EXPECT_TRUE(collective_backend_config.no_parallel_custom_call());
}
TEST_F(GpuSchedulePostprocessingTest, AsynchronousOpsWithParallelCustomcall) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
ENTRY entry {
pf32 = f32[1] parameter(0)
all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}}
pf32.2 = f32[1] custom-call(pf32), custom_call_target="my_custom_call"
all-gather-done = f32[2] all-gather-done(all-gather-start)
ROOT out = (f32[1], f32[2]) tuple(f32[1] pf32.2, f32[2] all-gather-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
GpuSchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
HloInstruction* start = FindInstruction(module.get(), "all-gather-start");
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
start->backend_config<GpuBackendConfig>());
const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
EXPECT_FALSE(collective_backend_config.no_parallel_custom_call());
}
TEST_F(GpuSchedulePostprocessingTest,
AsynchronousOpsWithParallelNestedCustomcall) {
constexpr absl::string_view kHloString = R"(
HloModule module, is_scheduled=true
foo {
v = f32[1] parameter(0)
ROOT ret = f32[1] custom-call(v), custom_call_target="my_custom_call"
}
ENTRY entry {
pf32 = f32[1] parameter(0)
all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}}
pf32.2 = f32[1] call(f32[1] pf32), to_apply=foo
all-gather-done = f32[2] all-gather-done(all-gather-start)
ROOT out = (f32[1], f32[2]) tuple(f32[1] pf32.2, f32[2] all-gather-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule((kHloString)));
GpuSchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
EXPECT_FALSE(changed);
HloInstruction* start = FindInstruction(module.get(), "all-gather-start");
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
start->backend_config<GpuBackendConfig>());
const CollectiveBackendConfig& collective_backend_config =
gpu_config.collective_backend_config();
EXPECT_FALSE(collective_backend_config.no_parallel_custom_call());
}
}
}
}
#ifndef XLA_SERVICE_GPU_FUSION_WRAPPER_H_
#define XLA_SERVICE_GPU_FUSION_WRAPPER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
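// Wraps eligible unfused instructions of the entry computation (recursing into
// while/conditional bodies) into single-instruction fusions named
// "wrapped_<opcode>".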
class FusionWrapper : public HloModulePass {
public:
absl::string_view name() const override { return "fusion-wrapper"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/fusion_wrapper.h"
#include <functional>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "tsl/platform/errors.h"
namespace xla {
namespace gpu {
absl::StatusOr<bool> FusionWrapper::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
auto instructions = module->entry_computation()->MakeInstructionPostOrder();
bool changed = false;
std::function<absl::Status(HloInstruction*)> handle_instruction;
handle_instruction = [&](HloInstruction* instruction) -> absl::Status {
switch (instruction->opcode()) {
case HloOpcode::kConditional:
case HloOpcode::kWhile:
for (auto* computation : instruction->called_computations()) {
for (auto* inner_instruction :
computation->MakeInstructionPostOrder()) {
TF_RETURN_IF_ERROR(handle_instruction(inner_instruction));
}
}
break;
case HloOpcode::kAbs:
case HloOpcode::kAdd:
case HloOpcode::kAnd:
case HloOpcode::kAtan2:
case HloOpcode::kBitcastConvert:
case HloOpcode::kBroadcast:
case HloOpcode::kCeil:
case HloOpcode::kCbrt:
case HloOpcode::kClamp:
case HloOpcode::kClz:
case HloOpcode::kCompare:
case HloOpcode::kComplex:
case HloOpcode::kConcatenate:
case HloOpcode::kConvert:
case HloOpcode::kCopy:
case HloOpcode::kCos:
case HloOpcode::kDivide:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kErf:
case HloOpcode::kExp:
case HloOpcode::kExpm1:
case HloOpcode::kFloor:
case HloOpcode::kGather:
case HloOpcode::kImag:
case HloOpcode::kIota:
case HloOpcode::kIsFinite:
case HloOpcode::kLog:
case HloOpcode::kLog1p:
case HloOpcode::kMap:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kMultiply:
case HloOpcode::kNegate:
case HloOpcode::kNot:
case HloOpcode::kOr:
case HloOpcode::kPad:
case HloOpcode::kPopulationCount:
case HloOpcode::kPower:
case HloOpcode::kReal:
case HloOpcode::kReshape:
case HloOpcode::kReduce:
case HloOpcode::kReducePrecision:
case HloOpcode::kReduceWindow:
case HloOpcode::kRemainder:
case HloOpcode::kReverse:
case HloOpcode::kRoundNearestAfz:
case HloOpcode::kRoundNearestEven:
case HloOpcode::kRsqrt:
case HloOpcode::kScatter:
case HloOpcode::kSelect:
case HloOpcode::kShiftLeft:
case HloOpcode::kShiftRightLogical:
case HloOpcode::kShiftRightArithmetic:
case HloOpcode::kSign:
case HloOpcode::kSin:
case HloOpcode::kSlice:
case HloOpcode::kSqrt:
case HloOpcode::kSubtract:
case HloOpcode::kStochasticConvert:
case HloOpcode::kTan:
case HloOpcode::kTanh:
case HloOpcode::kTranspose:
case HloOpcode::kXor: {
auto* computation = instruction->parent();
auto* fusion_instruction =
computation->AddInstruction(HloInstruction::CreateFusion(
instruction->shape(),
ChooseFusionKind(*instruction, *instruction), instruction));
const absl::string_view wrapped_opcode =
HloOpcodeString(instruction->opcode());
module->SetAndUniquifyInstrName(
fusion_instruction, absl::StrCat("wrapped_", wrapped_opcode));
module->SetAndUniquifyComputationName(
fusion_instruction->fused_instructions_computation(),
absl::StrCat("wrapped_", wrapped_opcode, "_computation"));
if (module->has_schedule()) {
module->schedule().replace_instruction(computation, instruction,
fusion_instruction);
}
TF_RETURN_IF_ERROR(
fusion_instruction->CopyAllControlDepsFrom(instruction));
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(fusion_instruction));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction));
changed = true;
break;
}
default:
break;
}
return absl::OkStatus();
};
for (auto* instruction : instructions) {
TF_RETURN_IF_ERROR(handle_instruction(instruction));
}
return changed;
}
}
}
#include "xla/service/gpu/fusion_wrapper.h"
#include <optional>
#include <gtest/gtest.h>
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
class FusionWrapperTest : public HloTestBase {};
TEST_F(FusionWrapperTest, SimpleOp) {
RunAndFilecheckHloRewrite(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[30,41] parameter(0)
p1 = f16[30,41] parameter(1)
ROOT result = f16[60, 41] concatenate(p0, p1), dimensions={0}
})",
FusionWrapper(), R"(
)");
}
TEST_F(FusionWrapperTest, Scatter) {
RunAndFilecheckHloRewrite(R"(
HloModule ScatterIntoScalar
update_s32 {
lhs = s32[] parameter(0)
ROOT rhs = s32[] parameter(1)
}
ENTRY main {
parameter.1 = s32[] parameter(0)
parameter.2 = s32[0]{0} parameter(1)
parameter.3 = s32[] parameter(2)
ROOT scatter_ScatterIntoScalar = s32[] scatter(parameter.1, parameter.2, parameter.3),
update_window_dims={},
inserted_window_dims={},
scatter_dims_to_operand_dims={},
index_vector_dim=0,
to_apply=update_s32
})",
FusionWrapper(), R"(
)");
}
TEST_F(FusionWrapperTest, ControlDependency) {
RunAndFilecheckHloRewrite(R"(
HloModule TestModule
fusion {
ROOT param = f32[] parameter(0)
}
ENTRY main {
param = f32[] parameter(0)
fusion = f32[] fusion(param), kind=kLoop, calls=fusion
constant_one = f32[] constant(1)
ROOT add = f32[] add(param, constant_one), control-predecessors={fusion}
})",
FusionWrapper(), R"(
)");
}
TEST_F(FusionWrapperTest, While) {
RunAndFilecheckHloRewrite(R"(
HloModule While
%body {
%parameter.5 = (f32[5]{0}) parameter(0)
%constant_8 = f32[] constant(0)
%broadcast.9 = f32[5]{0} broadcast(f32[] %constant_8), dimensions={}
ROOT %tuple.2 = (f32[5]{0}) tuple(f32[5]{0} %broadcast.9)
}
%cond {
%parameter.12 = (f32[5]{0}) parameter(0)
ROOT %constant_1 = pred[] constant(false)
}
ENTRY %main (parameter.1: f32[5]) -> (f32[5]) {
%parameter.1 = f32[5]{0} parameter(0)
%copy.3 = f32[5]{0} copy(f32[5]{0} %parameter.1)
%tuple = (f32[5]{0}) tuple(f32[5]{0} %copy.3)
ROOT %while.19 = (f32[5]{0}) while((f32[5]{0}) %tuple), condition=%cond, body=%body
})",
FusionWrapper(), R"(
)");
}
TEST_F(FusionWrapperTest, WhileInFusion) {
RunAndFilecheckHloRewrite(R"(
HloModule While
%body {
%parameter.5 = (f32[5]{0}) parameter(0)
%constant_8 = f32[] constant(0)
%broadcast.9 = f32[5]{0} broadcast(f32[] %constant_8), dimensions={}
ROOT %tuple.2 = (f32[5]{0}) tuple(f32[5]{0} %broadcast.9)
}
%cond {
%parameter.12 = (f32[5]{0}) parameter(0)
ROOT %constant_1 = pred[] constant(false)
}
%fusion {
%parameter.1 = f32[5]{0} parameter(0)
%copy.3 = f32[5]{0} copy(f32[5]{0} %parameter.1)
%tuple = (f32[5]{0}) tuple(f32[5]{0} %copy.3)
ROOT %while.19 = (f32[5]{0}) while((f32[5]{0}) %tuple), condition=%cond, body=%body
}
ENTRY %main (parameter.1: f32[5]) -> (f32[5]) {
%parameter.1 = f32[5]{0} parameter(0)
ROOT %fusion = (f32[5]{0}) fusion(f32[5]{0} %parameter.1), kind=kLoop, calls=%fusion
})",
FusionWrapper(),
std::nullopt);
}
}
}
}
#ifndef XLA_SERVICE_GPU_STREAM_ATTRIBUTE_ANNOTATOR_H_
#define XLA_SERVICE_GPU_STREAM_ATTRIBUTE_ANNOTATOR_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla::gpu {
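// Propagates stream (operation_queue_id) annotations through the module:
//  - a fusion whose root runs on a non-default stream inherits that stream,
//  - copy-start ops are given their own stream,
//  - async dynamic-slice/dynamic-update-slice ops are wrapped into fusions and
//    placed on a new stream,
//  - users of a value produced on a non-default stream are made to wait on it.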
class StreamAttributeAnnotator : public HloModulePass {
public:
absl::string_view name() const override {
return "stream-attribute-annotator";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
#endif
#include "xla/service/gpu/stream_attribute_annotator.h"
#include <cstdint>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
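// Returns true if the computation's root (which must not be a tuple) is
// assigned a non-default stream and every other instruction uses either the
// default stream or the root's stream.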
bool IsOnlyRootNonDefaultStream(HloComputation* computation) {
HloInstruction* root = computation->root_instruction();
auto root_gpu_config = root->backend_config<GpuBackendConfig>();
if (!root_gpu_config.ok() || root->opcode() == HloOpcode::kTuple) {
return false;
}
int64_t root_stream_id = root_gpu_config->operation_queue_id();
VLOG(2) << "Found fusion computation's root stream id to be "
<< root_stream_id;
if (root_stream_id == Thunk::kDefaultExecutionStreamId.value()) {
return false;
}
for (HloInstruction* instr : computation->MakeInstructionPostOrder()) {
if (instr == root) {
continue;
}
int64_t instr_stream_id =
instr->backend_config<GpuBackendConfig>()->operation_queue_id();
if (instr_stream_id != Thunk::kDefaultExecutionStreamId.value() &&
instr_stream_id != root_stream_id) {
return false;
}
}
return true;
}
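// Copies the stream assignment of a fusion computation's root onto the fusion
// instruction itself, provided the fusion is still on the default stream.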
absl::StatusOr<bool> AnnotateStreamAttributesForInstruction(
HloInstruction* instr, GpuBackendConfig& instr_gpu_config) {
if (instr->called_computations().size() != 1) {
return false;
}
HloComputation* called_comp = instr->called_computations()[0];
int64_t stream_id = instr_gpu_config.operation_queue_id();
if (!IsOnlyRootNonDefaultStream(called_comp) ||
stream_id != Thunk::kDefaultExecutionStreamId.value()) {
return false;
}
auto comp_root_gpu_config =
called_comp->root_instruction()->backend_config<GpuBackendConfig>();
instr_gpu_config.set_operation_queue_id(
comp_root_gpu_config->operation_queue_id());
*instr_gpu_config.mutable_wait_on_operation_queues() =
comp_root_gpu_config->wait_on_operation_queues();
TF_RETURN_IF_ERROR(instr->set_backend_config(instr_gpu_config));
return true;
}
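// Assigns `channel_id` as the operation queue of a copy-start that is still on
// the default stream.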
absl::StatusOr<bool> AnnotateStreamAttributesForCopyStart(
HloInstruction* instr, int64_t channel_id,
GpuBackendConfig& instr_gpu_config) {
if (instr_gpu_config.operation_queue_id() !=
Thunk::kDefaultExecutionStreamId.value()) {
return false;
}
instr_gpu_config.set_operation_queue_id(channel_id);
TF_RETURN_IF_ERROR(instr->set_backend_config(instr_gpu_config));
VLOG(3) << "Add copy-start's backend config: " << channel_id;
return true;
}
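// Wraps `instruction` into a single-instruction fusion (mirroring
// FusionWrapper) and assigns the fusion to stream `channel_id`.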
absl::StatusOr<bool> WrapIntoFusionAndAnnotateStreamAttributes(
HloInstruction* instruction, int64_t channel_id,
GpuBackendConfig& instr_gpu_config) {
auto* computation = instruction->parent();
auto* module = computation->parent();
auto* fusion_instruction =
computation->AddInstruction(HloInstruction::CreateFusion(
instruction->shape(), ChooseFusionKind(*instruction, *instruction),
instruction));
const absl::string_view wrapped_opcode =
HloOpcodeString(instruction->opcode());
module->SetAndUniquifyInstrName(fusion_instruction,
absl::StrCat("wrapped_", wrapped_opcode));
module->SetAndUniquifyComputationName(
fusion_instruction->fused_instructions_computation(),
absl::StrCat("wrapped_", wrapped_opcode, "_computation"));
if (module->has_schedule()) {
module->schedule().replace_instruction(computation, instruction,
fusion_instruction);
}
TF_RETURN_IF_ERROR(fusion_instruction->CopyAllControlDepsFrom(instruction));
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps());
TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(fusion_instruction));
TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction));
instr_gpu_config.set_operation_queue_id(channel_id);
TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(instr_gpu_config));
VLOG(3) << "Add async stream " << channel_id << " and wrapped instruction "
<< instruction->ToString();
VLOG(3) << " Fusion wrapper: " << fusion_instruction->ToString();
return true;
}
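// Makes every user (looking through get-tuple-element) of an instruction that
// runs on a non-default stream wait on that stream.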
absl::StatusOr<bool> AnnotateStreamAttributesForUsers(
HloInstruction* instr, GpuBackendConfig& instr_gpu_config) {
bool changed = false;
int64_t stream_id = instr_gpu_config.operation_queue_id();
if (stream_id == Thunk::kDefaultExecutionStreamId.value()) {
return changed;
}
std::vector<HloInstruction*> all_consumers;
for (auto user : instr->users()) {
if (user->opcode() == HloOpcode::kGetTupleElement) {
user = user->users()[0];
}
all_consumers.push_back(user);
}
for (auto user : all_consumers) {
TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
user->backend_config<GpuBackendConfig>());
auto it = absl::c_find(gpu_config.wait_on_operation_queues(), stream_id);
if (it == gpu_config.wait_on_operation_queues().end() &&
gpu_config.operation_queue_id() != stream_id) {
gpu_config.mutable_wait_on_operation_queues()->Add(stream_id);
TF_RETURN_IF_ERROR(user->set_backend_config(gpu_config));
changed = true;
}
}
return changed;
}
}
absl::StatusOr<bool> StreamAttributeAnnotator::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(
5, "StreamAttributeAnnotator::Run(), before:\n" + module->ToString());
bool changed = false;
int64_t channel_id = hlo_query::NextChannelId(*module);
for (const HloComputation* comp :
module->MakeComputationPostOrder(execution_threads)) {
for (HloInstruction* instr : comp->MakeInstructionPostOrder()) {
auto instr_gpu_config = instr->backend_config<GpuBackendConfig>();
if (!instr_gpu_config.ok()) {
continue;
}
if (instr->opcode() == HloOpcode::kFusion) {
TF_ASSIGN_OR_RETURN(bool comp_result,
AnnotateStreamAttributesForInstruction(
instr, instr_gpu_config.value()));
changed |= comp_result;
} else if (instr->opcode() == HloOpcode::kCopyStart) {
TF_ASSIGN_OR_RETURN(bool comp_result,
AnnotateStreamAttributesForCopyStart(
instr, channel_id, instr_gpu_config.value()));
changed |= comp_result;
continue;
} else if (comp->IsAsyncComputation() &&
(instr->opcode() == HloOpcode::kDynamicSlice ||
instr->opcode() == HloOpcode::kDynamicUpdateSlice)) {
TF_ASSIGN_OR_RETURN(bool comp_result,
WrapIntoFusionAndAnnotateStreamAttributes(
instr, channel_id, instr_gpu_config.value()));
changed |= comp_result;
continue;
}
TF_ASSIGN_OR_RETURN(
bool user_result,
AnnotateStreamAttributesForUsers(instr, instr_gpu_config.value()));
changed |= user_result;
}
}
XLA_VLOG_LINES(
5, "StreamAttributeAnnotator::Run(), after:\n" + module->ToString());
return changed;
}
}
#include "xla/service/gpu/stream_attribute_annotator.h"
#include <cstdint>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
using StreamAttributeAnnotatorTest = HloTestBase;
TEST_F(StreamAttributeAnnotatorTest, AllUsersAreAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
ENTRY entry {
p1_32 = f32[1] parameter(0)
p2_32 = f32[1] parameter(1)
add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[]}
exp_32 = f32[1] exponential(add_32)
neg32 = f32[1] negate(add_32)
ROOT add_out_32 = f32[1] add(neg32, exp_32)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* add = FindInstruction(module.get(), "add_32");
for (auto user : add->users()) {
EXPECT_TRUE(user->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
user->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.wait_on_operation_queues()[0], 1);
}
}
TEST_F(StreamAttributeAnnotatorTest, MultipleStreamsAreCombined) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
ENTRY entry {
p1_32 = f32[1] parameter(0)
p2_32 = f32[1] parameter(1)
add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[]}
exp_32 = f32[1] exponential(p2_32), backend_config={"operation_queue_id":"2", "wait_on_operation_queues":[]}
ROOT add_out_32 = f32[1] add(add_32, exp_32)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_TRUE(root->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
root->backend_config<GpuBackendConfig>());
std::vector<int64_t> expected_stream_ids = {1, 2};
for (auto id : expected_stream_ids) {
auto it = absl::c_find(gpu_config.wait_on_operation_queues(), id);
EXPECT_NE(it, gpu_config.wait_on_operation_queues().end());
}
}
TEST_F(StreamAttributeAnnotatorTest, GTEUserIsAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsync
ENTRY entry {
p1_32 = f32[16,32] parameter(0)
p2_32 = f32[32,16] parameter(1)
custom-call.3 = (f32[16,16], s8[1028]{0}) custom-call(p1_32, p2_32), custom_call_target="__cublas$gemm", backend_config={"operation_queue_id":"1","wait_on_operation_queues":[],"gemm_backend_config":{"alpha_real":1,"alpha_imag":0,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":["1"],"rhs_contracting_dimensions":["0"],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT","grad_x":false,"grad_y":false}}
get-tuple-element.24 = f32[16,16] get-tuple-element(custom-call.3), index=0
exp_32 = f32[16,16] exponential(get-tuple-element.24)
ROOT neg32 = f32[16,16] negate(exp_32)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* exp = FindInstruction(module.get(), "exp_32");
EXPECT_TRUE(exp->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
exp->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.wait_on_operation_queues()[0], 1);
}
TEST_F(StreamAttributeAnnotatorTest, FusionIsAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithFusion
fused_computation.1 {
fusion_p0_32 = f32[16,16] parameter(0)
fusion_p2_32 = f32[16,16] parameter(1)
ROOT add = f32[16,16] add(fusion_p0_32, fusion_p2_32), backend_config={"operation_queue_id":"1","wait_on_operation_queues":[]}
}
ENTRY entry {
p1_32 = f32[16,16] parameter(0)
p2_32 = f32[16,16] parameter(1)
ROOT fusion.1 = f32[16,16] fusion(p1_32, p2_32), kind=kLoop, calls=fused_computation.1
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* fusion = FindInstruction(module.get(), "fusion.1");
EXPECT_TRUE(fusion->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
fusion->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
}
TEST_F(StreamAttributeAnnotatorTest, CopyStartIsAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule offloading
ENTRY %main (param_0: f32[1024], param_1: f32[1024]) -> f32[1024] {
%param_1 = f32[1024]{0} parameter(1)
%param_0 = f32[1024]{0} parameter(0)
%res_3 = f32[1024]{0} add(f32[1024]{0} %param_0, f32[1024]{0} %param_1)
%copy-start = (f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) copy-start(f32[1024]{0} %res_3)
%res_4 = f32[1024]{0} tanh(f32[1024]{0} %res_3)
%copy-start.2 = (f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) copy-start(f32[1024]{0} %res_4)
%res_5 = f32[1024]{0} tanh(f32[1024]{0} %res_4)
%copy-done = f32[1024]{0:S(5)} copy-done((f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) %copy-start)
%res_6 = f32[1024]{0} tanh(f32[1024]{0} %res_5)
%copy-done.2 = f32[1024]{0:S(5)} copy-done((f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) %copy-start.2)
%copy-start.3 = (f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) copy-start(f32[1024]{0:S(5)} %copy-done.2)
%res_7 = f32[1024]{0} add(f32[1024]{0} %res_6, f32[1024]{0} %res_6)
%copy-start.1 = (f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) copy-start(f32[1024]{0:S(5)} %copy-done)
%res_8 = f32[1024]{0} add(f32[1024]{0} %res_7, f32[1024]{0} %res_5)
%copy-done.3 = f32[1024]{0} copy-done((f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) %copy-start.3)
%res_9 = f32[1024]{0} add(f32[1024]{0} %res_8, f32[1024]{0} %copy-done.3)
%copy-done.1 = f32[1024]{0} copy-done((f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) %copy-start.1)
%res_10 = f32[1024]{0} add(f32[1024]{0} %res_9, f32[1024]{0} %copy-done.1)
ROOT %res_11 = f32[1024]{0} tanh(f32[1024]{0} %res_10)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
StreamAttributeAnnotator attr_annotator;
bool changed;
TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get()));
EXPECT_TRUE(changed);
for (std::string i : {"", ".1", ".2", ".3"}) {
const HloInstruction* cp_start =
FindInstruction(module.get(), "copy-start" + i);
EXPECT_TRUE(cp_start->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
cp_start->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
}
}
TEST_F(StreamAttributeAnnotatorTest, DynamicUpdateSliceWrappedAndAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsyncDynamicUpdateSlice
ENTRY entry (param_0: f32[256,128,128], param_1: f32[1,128,128]) -> f32[256,128,128] {
param_0 = f32[256,128,128]{2,1,0:S(5)} parameter(0)
param_1 = f32[1,128,128]{2,1,0} parameter(1)
izero = s32[] constant(0)
dynamic-update-slice-start.2 = ((f32[256,128,128]{2,1,0:S(5)}, f32[1,128,128]{2,1,0}, s32[], s32[], s32[]), f32[256,128,128]{2,1,0:S(5)}, u32[])
dynamic-update-slice-start(param_0, param_1, izero, izero, izero)
ROOT dynamic-update-slice-done.2 = f32[256,128,128]{2,1,0:S(5)}
dynamic-update-slice-done(dynamic-update-slice-start.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
StreamAttributeAnnotator().Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* dus =
FindInstruction(module.get(), HloOpcode::kDynamicUpdateSlice);
const HloComputation* computation = dus->parent();
EXPECT_TRUE(computation->IsFusionComputation());
const HloInstruction* fusion = computation->FusionInstruction();
EXPECT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(fusion->parent()->IsAsyncComputation());
EXPECT_TRUE(fusion->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
fusion->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
}
TEST_F(StreamAttributeAnnotatorTest, DynamicSliceWrappedAndAnnotated) {
constexpr absl::string_view kHloString = R"(
HloModule ModuleWithAsyncDynamicSlice
ENTRY entry (param_0: f32[256,128,128]) -> f32[1,128,128] {
param_0 = f32[256,128,128]{2,1,0:S(5)} parameter(0)
izero = s32[] constant(0)
dynamic-slice-start.2 = ((f32[256,128,128]{2,1,0:S(5)}, s32[], s32[], s32[]), f32[1,128,128]{2,1,0}, u32[])
dynamic-slice-start(param_0, izero, izero, izero), dynamic_slice_sizes={1,128,128}
ROOT dynamic-slice-done.2 = f32[1,128,128]{2,1,0}
dynamic-slice-done(dynamic-slice-start.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed,
StreamAttributeAnnotator().Run(module.get()));
EXPECT_TRUE(changed);
const HloInstruction* ds =
FindInstruction(module.get(), HloOpcode::kDynamicSlice);
const HloComputation* computation = ds->parent();
EXPECT_TRUE(computation->IsFusionComputation());
const HloInstruction* fusion = computation->FusionInstruction();
EXPECT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(fusion->parent()->IsAsyncComputation());
EXPECT_TRUE(fusion->has_backend_config());
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
fusion->backend_config<GpuBackendConfig>());
EXPECT_EQ(gpu_config.operation_queue_id(), 1);
}
}
}
#ifndef XLA_SERVICE_GPU_DYNAMIC_SLICE_FUSION_REWRITER_H_
#define XLA_SERVICE_GPU_DYNAMIC_SLICE_FUSION_REWRITER_H_
#include <string>
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
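// Pattern-matches (dynamic-)slices feeding library custom calls (e.g. legacy
// cuBLAS matmuls) and dynamic-update-slices consuming their results, and wraps
// the custom call together with the matched slice paths into an
// "address_computation" custom fusion.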
class DynamicSliceFusionRewriter : public HloModulePass {
public:
absl::string_view name() const override {
return "address-computation-fusion-rewriter";
}
explicit DynamicSliceFusionRewriter(std::string platform_name)
: platform_name_(std::move(platform_name)) {}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
std::string platform_name_;
};
}
}
#endif
#include "xla/service/gpu/dynamic_slice_fusion_rewriter.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/ffi/api/c_api.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/gpu_constants.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using DefUseDataflowPath = absl::InlinedVector<HloInstruction*, 2>;
using DefUseDataflowPaths = absl::InlinedVector<DefUseDataflowPath, 4>;
using UseDefDataflowPath = absl::InlinedVector<HloInstruction*, 4>;
using UseDefDataflowPaths = absl::InlinedVector<HloInstruction*, 8>;
using DataflowPathView = absl::Span<HloInstruction* const>;
using DataflowPathsView = absl::Span<DataflowPathView>;
using InstructionSet = absl::flat_hash_set<HloInstruction*>;
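// Bitcasts, tuples and get-tuple-elements do not access data and may be looked
// through while searching for slices.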
bool IsNoOp(const HloInstruction* hlo) {
return HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kTuple,
HloOpcode::kGetTupleElement>(hlo);
}
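// Returns true if `hlo` is a custom call with a handler registered for
// `platform_name` (either a legacy custom-call target or a typed FFI handler)
// and whose result contains no token.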
bool IsCustomCall(const HloInstruction* hlo, absl::string_view platform_name) {
auto* custom_call = DynCast<HloCustomCallInstruction>(hlo);
if (custom_call == nullptr) return false;
if (custom_call->shape().IsTuple() &&
absl::c_any_of(
custom_call->shape().tuple_shapes(),
[&](const Shape& sub_shape) { return sub_shape.IsToken(); }))
return false;
const std::string call_target_name = custom_call->custom_call_target();
bool is_ffi_custom_call =
custom_call->api_version() == CustomCallApiVersion::API_VERSION_TYPED_FFI;
void* call_target = CustomCallTargetRegistry::Global()->Lookup(
call_target_name, std::string(platform_name));
absl::StatusOr<ffi::HandlerRegistration> handler_registration =
ffi::FindHandler(call_target_name, platform_name);
bool found_custom_call = !is_ffi_custom_call && call_target != nullptr;
bool found_ffi_handler = is_ffi_custom_call && handler_registration.ok();
return found_custom_call || found_ffi_handler;
}
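// Best-effort check that slicing `src_shape` down to `dst_shape` yields a
// contiguous region whose start offset is a multiple of
// kXlaAllocatedBufferAlignBytes. `slice` may be null (e.g. for
// dynamic-update-slice), in which case only cases that are provably aligned
// regardless of the start index pass.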
bool IsAlignedSlice(const Shape& src_shape, const Shape& dst_shape,
const HloSliceInstruction* slice) {
if (!IsContiguousSlice(src_shape, dst_shape)) return false;
auto strides = ShapeUtil::ByteStrides(dst_shape);
if (!strides.has_value()) return false;
for (auto dim : dst_shape.layout().minor_to_major()) {
if ((strides.value()[dim] % kXlaAllocatedBufferAlignBytes) == 0)
return true;
if (dst_shape.dimensions(dim) < src_shape.dimensions(dim)) {
return (slice != nullptr &&
((strides.value()[dim] * slice->slice_starts(dim)) %
kXlaAllocatedBufferAlignBytes ==
0));
}
}
return true;
}
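// Collects, for each non-aliased operand of `instr`, the use-def chain
// (through no-ops) that ends in an aligned slice or dynamic-slice. `instr`
// itself is appended as the last element of the returned path.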
UseDefDataflowPaths GetSlicedOperandPaths(const HloInstruction* instr) {
UseDefDataflowPaths sliced_operand_paths;
InstructionSet processed_instrs;
const auto& aliasing_pairs =
Cast<HloCustomCallInstruction>(instr)->output_to_operand_aliasing();
absl::flat_hash_set<int64_t> aliased_operands;
for (const auto& pair : aliasing_pairs) {
aliased_operands.insert(pair.second.first);
}
for (const auto* operand : instr->operands()) {
if (aliased_operands.contains(instr->operand_index(operand))) continue;
UseDefDataflowPath maybe_sliced_operand_path;
bool slice_found = false;
auto maybe_slice_instr =
HloFindIf({operand}, [&](const HloInstruction* cur) {
if (processed_instrs.contains(cur)) return true;
maybe_sliced_operand_path.push_back(const_cast<HloInstruction*>(cur));
if (IsOpcodeAnyOf<HloOpcode::kDynamicSlice, HloOpcode::kSlice>(cur)) {
if (IsAlignedSlice(cur->operand(0)->shape(), cur->shape(),
DynCast<HloSliceInstruction>(cur))) {
slice_found = true;
return slice_found;
}
}
return !IsNoOp(cur);
});
if (maybe_slice_instr == std::nullopt) continue;
if (slice_found || processed_instrs.contains(maybe_slice_instr.value())) {
sliced_operand_paths.insert(sliced_operand_paths.end(),
maybe_sliced_operand_path.rbegin(),
maybe_sliced_operand_path.rend());
processed_instrs.insert(maybe_sliced_operand_path.begin(),
maybe_sliced_operand_path.end());
}
}
sliced_operand_paths.push_back(const_cast<HloInstruction*>(instr));
return sliced_operand_paths;
}
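// Collects def-use chains (through no-ops) starting from the users of `instr`
// that end in an aligned dynamic-update-slice.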
DefUseDataflowPaths GetSlicedUserPaths(const HloInstruction* instr) {
DefUseDataflowPaths sliced_user_paths;
InstructionSet processed_instrs;
auto traverse_hlo_and_collect = [&](HloInstruction* start) {
DefUseDataflowPath maybe_sliced_user_path;
bool dus_found = false;
auto maybe_dus_instr = HloFindIf(
{start},
[&](const HloInstruction* cur) {
if (processed_instrs.contains(cur)) return true;
maybe_sliced_user_path.push_back(const_cast<HloInstruction*>(cur));
if (const auto slice_instr =
DynCast<HloDynamicUpdateSliceInstruction>(cur)) {
if (IsAlignedSlice(slice_instr->shape(),
slice_instr->update()->shape(), nullptr)) {
dus_found = true;
return true;
}
}
return cur->user_count() > 1 || !IsNoOp(cur);
},
false);
if (maybe_dus_instr == std::nullopt) return;
if (dus_found || processed_instrs.contains(maybe_dus_instr.value())) {
processed_instrs.insert(maybe_sliced_user_path.begin(),
maybe_sliced_user_path.end());
sliced_user_paths.push_back(std::move(maybe_sliced_user_path));
}
};
if (instr->shape().IsTuple()) {
for (auto* user : instr->users()) {
if (DynCast<HloGetTupleElementInstruction>(user)) {
traverse_hlo_and_collect(user);
}
}
} else {
if (instr->user_count() == 1) {
traverse_hlo_and_collect(instr->users().front());
}
}
return sliced_user_paths;
}
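// Returns the values defined outside of `matches` that the new fusion body has
// to capture as parameters.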
absl::InlinedVector<HloInstruction*, 4> GetPatternCaptures(
DataflowPathView matches) {
absl::InlinedVector<HloInstruction*, 4> captures;
InstructionSet matched_instrs(matches.begin(), matches.end());
for (HloInstruction* instr : matches) {
for (HloInstruction* operand : instr->operands()) {
if (!matched_instrs.contains(operand) &&
absl::c_find(captures, operand) == captures.end()) {
captures.emplace_back(operand);
}
}
}
return captures;
}
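// Builds the root tuple of the fusion body, substituting the results of the
// sliced user paths for the corresponding tuple elements of `hero`.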
absl::Status CreateRootTuple(
HloInstruction* hero, HloComputation::Builder& builder,
DataflowPathsView sliced_user_paths,
absl::flat_hash_map<const HloInstruction*, HloInstruction*>&
instr_mapping) {
unsigned tuple_size = hero->shape().tuple_shapes_size();
std::vector<HloInstruction*> sliced_elems(tuple_size, nullptr);
for (auto& sliced_user_path : sliced_user_paths) {
auto gte = Cast<HloGetTupleElementInstruction>(sliced_user_path.front());
sliced_elems[gte->tuple_index()] = sliced_user_path.back();
}
std::vector<HloInstruction*> elements;
for (size_t i = 0; i < tuple_size; ++i) {
if (sliced_elems[i] != nullptr) {
elements.push_back(instr_mapping[sliced_elems[i]]);
continue;
}
auto* gte = builder.AddInstruction(
HloInstruction::CreateGetTupleElement(instr_mapping[hero], i));
if (hero->shape().tuple_shapes(i).IsTuple()) {
instr_mapping[gte] = gte;
TF_RETURN_IF_ERROR(CreateRootTuple(gte, builder, {}, instr_mapping));
elements.push_back(builder.last_added_instruction());
} else {
elements.push_back(gte);
}
}
if (elements.size() > 1)
builder.AddInstruction(HloInstruction::CreateTuple(elements));
return absl::OkStatus();
}
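// Clones the matched instructions into a new "address-computation"
// computation; captured values become parameters.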
absl::StatusOr<HloComputation*> CreateFusionBody(
HloModule* module, DataflowPathView sliced_operand_paths,
DataflowPathsView sliced_user_paths, DataflowPathView captures) {
HloComputation::Builder builder("address-computation");
absl::flat_hash_map<const HloInstruction*, HloInstruction*> instr_mapping;
auto mapped_operands = [&](HloInstruction* instr) {
absl::InlinedVector<HloInstruction*, 4> operands;
for (HloInstruction* operand : instr->operands()) {
operands.push_back(instr_mapping.at(operand));
}
return operands;
};
for (const HloInstruction* capture : captures) {
int64_t index = instr_mapping.size();
instr_mapping[capture] =
builder.AddInstruction(HloInstruction::CreateParameter(
index, capture->shape(), absl::StrCat("p", index)));
}
HloInstruction* hero;
for (HloInstruction* instr : sliced_operand_paths) {
instr_mapping[instr] = builder.AddInstruction(
instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr)));
hero = instr;
}
for (auto& sliced_user_path : sliced_user_paths) {
for (HloInstruction* instr : sliced_user_path) {
instr_mapping[instr] = builder.AddInstruction(
instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr)));
}
}
if (hero->shape().IsTuple() && hero->shape().tuple_shapes_size() > 0) {
TF_RETURN_IF_ERROR(
CreateRootTuple(hero, builder, sliced_user_paths, instr_mapping));
}
return module->AddComputationAndUnifyNamesAndIds(builder.Build(), false);
}
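// Wraps `body` in a custom fusion named "address_computation" (or
// "dynamic_address_computation" when dynamic slices are involved) and
// attaches the matching backend config.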
absl::StatusOr<HloInstruction*> CreateFusionInstruction(
HloModule* module, HloInstruction* orig, DataflowPathView captures,
HloComputation* body, bool dynamic) {
HloComputation* parent = orig->parent();
HloInstruction* fusion = parent->AddInstruction(HloInstruction::CreateFusion(
body->root_instruction()->shape(), HloInstruction::FusionKind::kCustom,
captures, body));
module->SetAndUniquifyInstrName(fusion, "address_computation");
GpuBackendConfig gpu_config;
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind("__custom_fusion");
CustomFusionConfig config;
config.set_name(dynamic ? "dynamic_address_computation"
: "address_computation");
*backend_config.mutable_custom_fusion_config() = config;
TF_RETURN_IF_ERROR(fusion->set_backend_config(std::move(gpu_config)));
return fusion;
}
}
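// For every cuBLAS matmul or platform custom call, collects the slice chains
// feeding its operands and the dynamic-update-slice chains consuming its
// results, then outlines each match into an address-computation custom fusion
// and rewires the surrounding computation.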
absl::StatusOr<bool> DynamicSliceFusionRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
absl::flat_hash_map<HloInstruction*,
std::pair<UseDefDataflowPaths, DefUseDataflowPaths>>
matches;
for (HloComputation* computation : module->computations()) {
if (computation->IsFusionComputation()) continue;
for (HloInstruction* instr : computation->instructions()) {
if (IsLegacyCublasMatmul(*instr) ||
(IsCustomCall(instr, platform_name_))) {
UseDefDataflowPaths sliced_operand_paths = GetSlicedOperandPaths(instr);
bool has_sliced_operand_paths = sliced_operand_paths.size() > 1;
DefUseDataflowPaths sliced_user_paths = GetSlicedUserPaths(instr);
bool has_sliced_user_paths = absl::c_any_of(
sliced_user_paths,
[&](auto& sliced_user_path) { return !sliced_user_path.empty(); });
if (absl::c_any_of(sliced_user_paths, [&](auto& sliced_user_path) {
return DynCast<HloDynamicUpdateSliceInstruction>(
sliced_user_path.back()) == nullptr;
})) {
return absl::InternalError(
"Expect sliced user path to end with a DUS.");
}
if (has_sliced_operand_paths || has_sliced_user_paths) {
matches[instr] = std::make_pair(std::move(sliced_operand_paths),
std::move(sliced_user_paths));
}
}
}
}
if (matches.empty()) return false;
for (auto& [hero, paths] : matches) {
auto& [sliced_operand_paths, sliced_user_paths] = paths;
std::vector<HloInstruction*> matched_instrs;
absl::c_copy(sliced_operand_paths, std::back_inserter(matched_instrs));
std::vector<DataflowPathView> sliced_user_paths_view;
for (auto& sliced_user_path : sliced_user_paths) {
absl::c_copy(sliced_user_path, std::back_inserter(matched_instrs));
DataflowPathView sliced_user_path_view{&sliced_user_path.front(),
sliced_user_path.size()};
sliced_user_paths_view.push_back(std::move(sliced_user_path_view));
}
auto captures = GetPatternCaptures(matched_instrs);
TF_ASSIGN_OR_RETURN(
HloComputation * fusion_body,
CreateFusionBody(module, sliced_operand_paths,
DataflowPathsView(sliced_user_paths_view), captures));
bool has_dynamic_slices = absl::c_any_of(matched_instrs, [&](auto* instr) {
return DynCast<HloDynamicIndexInstruction>(instr) != nullptr;
});
TF_ASSIGN_OR_RETURN(
HloInstruction * fusion,
CreateFusionInstruction(module, hero, captures, fusion_body,
has_dynamic_slices));
HloComputation* parent = hero->parent();
if (fusion->shape().IsTuple()) {
TF_RETURN_IF_ERROR(parent->ReplaceInstructionWithDifferentShape(
const_cast<HloInstruction*>(hero), fusion));
for (auto& sliced_user_path : sliced_user_paths) {
auto old_gte =
Cast<HloGetTupleElementInstruction>(sliced_user_path.front());
HloInstruction* gte =
parent->AddInstruction(HloInstruction::CreateGetTupleElement(
fusion, old_gte->tuple_index()));
TF_RETURN_IF_ERROR(
parent->ReplaceInstruction(sliced_user_path.back(), gte));
}
} else {
auto* instr_to_be_replaced = const_cast<HloInstruction*>(hero);
if (sliced_user_paths.empty()) {
if (hero->shape().IsTuple()) {
if (hero->user_count() != 1 ||
!DynCast<HloGetTupleElementInstruction>(hero->users().front())) {
return absl::InternalError(
"Expect a single get-tuple-element user of the original "
"tuple-shaped hero op when address computation fusion does "
"not return a tuple");
}
instr_to_be_replaced = hero->users().front();
}
} else {
instr_to_be_replaced = sliced_user_paths.front().back();
}
TF_RETURN_IF_ERROR(
parent->ReplaceInstruction(instr_to_be_replaced, fusion));
}
}
return true;
}
}
} | #include "xla/service/gpu/dynamic_slice_fusion_rewriter.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <optional>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "xla/client/lib/constants.h"
#include "xla/client/xla_builder.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/buffer_value.h"
#include "xla/service/custom_call_target_registry.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/stream.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#define PLATFORM "GPU"
namespace xla::gpu {
class DynamicSliceFusionRewriterTest : public HloTestBase {};
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemm) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion
; CHECK: kind=kCustom, calls=%address-computation,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation"}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWithWorkspace) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0
; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0})
; CHECK: tuple([[DOT]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion
; CHECK: kind=kCustom, calls=%address-computation,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation"}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWorkspaceIgnored) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(%custom-call.1), index=0
}
)";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0
; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1
; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0})
; CHECK: tuple([[DOT]], [[WORKSPACE]])
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion
; CHECK: kind=kCustom, calls=%address-computation,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation"}
; CHECK: }
; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[FUSION]]), index=0
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotRoot) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1)
}
)";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion
; CHECK: kind=kCustom, calls=%address-computation,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation"}
; CHECK: }
; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandHasMultipleUsers) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[4,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[2:3], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %bitcast.41)
}
)";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[2:3], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[P0]], [[P1]])
; CHECK-DAG: kind=kCustom, calls=%address-computation,
; CHECK-DAG: backend_config={
; CHECK-DAG: "kind":"__custom_fusion",
; CHECK-DAG: "custom_fusion_config":{"name":"address_computation"}
; CHECK-DAG: }
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[B0]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandsHaveMultipleUsers) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.0 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %address-computation{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: %address-computation{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmSlicingNotParameter) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[4,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.12 = f16[2,8,8]{2,1,0} slice(%p0), slice={[0:2], [0:8], [0:8]}
%slice.13 = f16[1,8,8]{2,1,0} slice(%slice.12), slice={[1:2], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
%custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1)
}
)";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[2,8,8]{2,1,0} slice([[P0]]), slice={[0:2], [0:8], [0:8]}
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[S0]], [[P1]])
; CHECK: kind=kCustom, calls=%address-computation,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation"}
; CHECK: }
; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]])
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotContiguousSlice) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,4,6]{2,1,0} slice(%p0), slice={[1:2], [0:4], [0:6]}
%bitcast.41 = f16[4,6]{1,0} bitcast(%slice.13)
%slice.14 = f16[1,6,4]{2,1,0} slice(%p1), slice={[1:2], [0:6], [0:4]}
%bitcast.42 = f16[6,4]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = f16[4,4]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
std::nullopt);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNonNoOpInSliceChain) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]}
%slice.14 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]}
%add.0 = f16[1,8,8]{2,1,0} add(%slice.13, %slice.14)
%bitcast.41 = f16[8,8]{1,0} bitcast(%add.0)
%slice.15 = f16[1,8,8]{2,1,0} slice(%p1), slice={[0:1], [0:8], [0:8]}
%slice.16 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%add.1 = f16[1,8,8]{2,1,0} add(%slice.15, %slice.16)
%bitcast.42 = f16[8,8]{1,0} bitcast(%add.1)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
std::nullopt);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmDuplicateOperand) {
const char* hlo = R"(
HloModule test
ENTRY %main {
%p0 = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0)
%get-tuple-element.240 = f32[100,100]{1,0} get-tuple-element(%p0), index=0
%get-tuple-element.241 = f32[100,100]{1,0} get-tuple-element(%p0), index=1
%concatenate.10 = f32[200,100]{1,0} concatenate(%get-tuple-element.240, %get-tuple-element.241), dimensions={0}
%custom-call.16 = (f32[200,100]{1,0}, s8[120000]{0}) custom-call(%concatenate.10, %get-tuple-element.240),
custom_call_target="__cublas$gemm",
backend_config={
"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["HIGHEST","HIGHEST"]},
"epilogue":"DEFAULT",
"lhs_stride":"20000",
"rhs_stride":"10000",
"grad_x":false,
"grad_y":false
}
}
%get-tuple-element.97 = f32[200,100]{1,0} get-tuple-element(%custom-call.16), index=0
%slice.26 = f32[100,100]{1,0} slice(%get-tuple-element.97), slice={[0:100], [0:100]}
ROOT %custom-call.17 = (f32[100,100]{1,0}, s8[80000]{0}) custom-call(%slice.26, %slice.26),
custom_call_target="__cublas$gemm",
backend_config={
"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["HIGHEST","HIGHEST"]},
"epilogue":"DEFAULT",
"lhs_stride":"10000",
"rhs_stride":"10000",
"grad_x":false,
"grad_y":false
}
}
})";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK: [[P0:%[^ ]+]] = f32[200,100]{1,0} parameter(0)
; CHECK: [[S0:%[^ ]+]] = f32[100,100]{1,0} slice([[P0]]), slice={[0:100], [0:100]}
; CHECK-NOT: slice
; CHECK: [[CC:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) custom-call([[S0]], [[S0]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) fusion
; CHECK: kind=kCustom, calls=%address-computation,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation"}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(1)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%p1 = f16[2,8,8]{2,1,0} parameter(0)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[0:1], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]])
; CHECK: kind=kCustom, calls=%address-computation,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation"}
; CHECK: }
; CHECK: }
)";
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter(PLATFORM),
expected);
}
TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder2) {
const char* hlo = R"(
HloModule test
ENTRY %main.9 {
%p0 = f16[2,8,8]{2,1,0} parameter(0)
%slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]}
%bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13)
%p1 = f16[2,8,8]{2,1,0} parameter(1)
%slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]}
%bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14)
ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{
"alpha_real":1,
"beta":0,
"dot_dimension_numbers":{
"lhs_contracting_dimensions":["1"],
"rhs_contracting_dimensions":["0"],
"lhs_batch_dimensions":[],
"rhs_batch_dimensions":[]
},
"alpha_imag":0,
"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},
"epilogue":"DEFAULT",
"lhs_stride":"64",
"rhs_stride":"64",
"grad_x":false,
"grad_y":false
}}
}
)";
const char* expected = R"(
; CHECK: %address-computation {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]}
; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]])
; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[0:1], [0:8], [0:8]}
; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]])
; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]),
; CHECK: custom_call_target="__cublas$gemm"
; CHECK: }
; CHECK: ENTRY %main{{.*}} {
; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1)
; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0)
; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]])
; CHECK: kind=kCustom, calls=%address-computation,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"address_computation"}
; CHECK: }
; CHECK: } | 2,110 |
#ifndef XLA_SERVICE_GPU_DOT_SPARSITY_REWRITER_H_
#define XLA_SERVICE_GPU_DOT_SPARSITY_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class DotSparsityRewriter : public HloModulePass {
public:
absl::string_view name() const override { return "dot_sparsity_rewriter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/dot_sparsity_rewriter.h"
#include <utility>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_creation_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
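// Rewrites dots whose sparse operand is the RHS (sparsity index 1) into an
// equivalent dot with the operands swapped, so the sparse operand becomes the
// LHS, followed by a transpose that restores the original output dimension
// order.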
class SparseDotRewriterImpl : public DfsHloRewriteVisitor {
public:
absl::Status HandleDot(HloInstruction* instr) override {
HloDotInstruction* dot = Cast<HloDotInstruction>(instr);
if (dot->sparse_operands() != 1 || dot->sparsity().front().index() != 1) {
return absl::OkStatus();
}
HloInstruction* lhs = dot->mutable_operand(0);
HloInstruction* rhs = dot->mutable_operand(1);
HloInstruction* meta = dot->mutable_operand(2);
DotDimensionNumbers dnums = dot->dot_dimension_numbers();
std::swap(*dnums.mutable_lhs_batch_dimensions(),
*dnums.mutable_rhs_batch_dimensions());
std::swap(*dnums.mutable_lhs_contracting_dimensions(),
*dnums.mutable_rhs_contracting_dimensions());
PrecisionConfig precision_config = dot->precision_config();
std::swap(precision_config.mutable_operand_precision()->at(0),
precision_config.mutable_operand_precision()->at(1));
SparsityDescriptor sparsity = dot->sparsity().front();
sparsity.set_index(0);
TF_ASSIGN_OR_RETURN(
HloInstruction * new_dot,
MakeDotHlo(rhs, lhs, dnums, precision_config,
dot->shape().element_type(), {std::move(sparsity)}, {meta}));
dot->SetupDerivedInstruction(new_dot);
int batch_dims = dnums.lhs_batch_dimensions().size();
int new_lhs_noncontracting = rhs->shape().rank() - batch_dims -
dnums.lhs_contracting_dimensions().size();
int new_rhs_noncontracting = lhs->shape().rank() - batch_dims -
dnums.rhs_contracting_dimensions().size();
int rank = dot->shape().rank();
DimensionVector dimensions(rank);
for (int i = 0; i < batch_dims; ++i) {
dimensions[i] = i;
}
for (int i = 0; i < new_lhs_noncontracting; ++i) {
dimensions[i + batch_dims] = i + batch_dims + new_rhs_noncontracting;
}
for (int i = 0; i < new_rhs_noncontracting; ++i) {
dimensions[i + batch_dims + new_lhs_noncontracting] = i + batch_dims;
}
TF_ASSIGN_OR_RETURN(HloInstruction * transpose,
MakeTransposeHlo(new_dot, dimensions));
transpose->set_metadata(dot->metadata());
*transpose->mutable_shape()->mutable_layout() = dot->shape().layout();
return ReplaceInstruction(dot, transpose);
}
};
}
absl::StatusOr<bool> DotSparsityRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
return SparseDotRewriterImpl().RunOnModule(module, execution_threads);
}
}
} | #include "xla/service/gpu/dot_sparsity_rewriter.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
class DotSparsityRewriterTest : public HloTestBase {
public:
DotSparsityRewriterTest() : HloTestBase(/*verifier_layout_sensitive=*/true) {}
};
TEST_F(DotSparsityRewriterTest, SparseDotRhsToLhs) {
const char* module_string = R"(
HloModule m
ENTRY e {
lhs = f16[4,2,16,8,64] parameter(0)
rhs = f16[2,4,8,32,128] parameter(1)
meta = u16[2,4,8,4,128] parameter(2)
ROOT dot = f16[4,2,16,128] dot(lhs, rhs, meta),
lhs_contracting_dims={3,4}, rhs_contracting_dims={2,3},
lhs_batch_dims={0,1}, rhs_batch_dims={1,0}, sparsity=R.3@2:4
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_string));
TF_ASSERT_OK_AND_ASSIGN(bool modified,
DotSparsityRewriter().Run(module.get()));
EXPECT_TRUE(modified);
const HloTransposeInstruction* transpose = DynCast<HloTransposeInstruction>(
module->entry_computation()->root_instruction());
ASSERT_TRUE(transpose != nullptr);
EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 1, 3, 2));
const HloDotInstruction* dot =
DynCast<HloDotInstruction>(transpose->operand(0));
ASSERT_TRUE(dot != nullptr);
const DotDimensionNumbers& dnums = dot->dot_dimension_numbers();
EXPECT_EQ(dnums.lhs_contracting_dimensions(0), 2);
EXPECT_EQ(dnums.lhs_contracting_dimensions(1), 3);
EXPECT_EQ(dnums.rhs_contracting_dimensions(0), 3);
EXPECT_EQ(dnums.rhs_contracting_dimensions(1), 4);
EXPECT_EQ(dnums.lhs_batch_dimensions(0), 1);
EXPECT_EQ(dnums.lhs_batch_dimensions(1), 0);
EXPECT_EQ(dnums.rhs_batch_dimensions(0), 0);
EXPECT_EQ(dnums.rhs_batch_dimensions(1), 1);
EXPECT_EQ(dot->sparse_operands(), 1);
EXPECT_EQ(dot->sparsity().front().index(), 0);
}
}
}
} | 2,111 |
#ifndef XLA_SERVICE_GPU_GEMM_ALGORITHM_PICKER_H_
#define XLA_SERVICE_GPU_GEMM_ALGORITHM_PICKER_H_
#include <functional>
#include <optional>
#include <string_view>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/autotune_results.pb.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
class GemmAlgorithmPicker : public HloModulePass {
public:
explicit GemmAlgorithmPicker(AutotuneConfig config) : config_(config) {}
absl::string_view name() const override { return "gemm-algorithm-picker"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
AutotuneConfig config_;
};
}
}
#endif
#include "xla/service/gpu/gemm_algorithm_picker.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/variant_visitor.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace gpu {
namespace {
using se::gpu::BlasLt;
absl::StatusOr<BlasLt::Epilogue> AsBlasLtEpilogue(
GemmBackendConfig_Epilogue epilogue) {
switch (epilogue) {
case GemmBackendConfig::DEFAULT:
return BlasLt::Epilogue::kDefault;
case GemmBackendConfig::RELU:
return BlasLt::Epilogue::kReLU;
case GemmBackendConfig::GELU:
return BlasLt::Epilogue::kGELU;
case GemmBackendConfig::GELU_AUX:
return BlasLt::Epilogue::kGELUWithAux;
case GemmBackendConfig::BIAS:
return BlasLt::Epilogue::kBias;
case GemmBackendConfig::BIAS_RELU:
return BlasLt::Epilogue::kBiasThenReLU;
case GemmBackendConfig::BIAS_GELU:
return BlasLt::Epilogue::kBiasThenGELU;
case GemmBackendConfig::BIAS_GELU_AUX:
return BlasLt::Epilogue::kBiasThenGELUWithAux;
default:
return Internal("Unsupported Epilogue.");
}
}
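// Profiles every candidate algorithm for a single GEMM instruction on
// redzone-guarded buffers and returns the best measurement as an
// AutotuneResult.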
class GemmAutotuner {
const AutotuneConfig& autotune_config_;
RedzoneBuffers rz_buffers_;
se::Stream* stream_ = nullptr;
bool deterministic_ops_ = false;
size_t solutions_limit_ = 0;
public:
explicit GemmAutotuner(const AutotuneConfig& autotune_config)
: autotune_config_(autotune_config) {}
absl::StatusOr<AutotuneResult> operator()(const HloInstruction* gemm,
const AutotuneCacheKey& key) {
if (autotune_config_.IsDeviceless()) {
return AutotuneResult{};
}
VLOG(3) << "Starting autotune of GemmThunk " << gemm->ToString();
TF_ASSIGN_OR_RETURN(stream_, autotune_config_.GetStream());
const DebugOptions& debug_options =
gemm->GetModule()->config().debug_options();
deterministic_ops_ = debug_options.xla_gpu_deterministic_ops() ||
debug_options.xla_gpu_exclude_nondeterministic_ops();
solutions_limit_ = debug_options.xla_gpu_autotune_max_solutions();
TF_ASSIGN_OR_RETURN(auto gemm_config, GemmConfig::For(gemm));
absl::MutexLock gpu_lock(&GetGpuMutex(stream_->parent()));
TF_ASSIGN_OR_RETURN(rz_buffers_, RedzoneBuffers::FromInstruction(
*gemm, autotune_config_, debug_options,
RedzoneBuffers::kAllInputsAllOutputs));
return IsCublasLtMatmul(*gemm) || IsCublasLtMatmulF8(*gemm)
? TuneGpuBlasLt(gemm, gemm_config)
: TuneGpuBlas(gemm, gemm_config);
}
private:
se::DeviceMemoryBase LhsBuffer() { return rz_buffers_.input_buffers().at(0); }
se::DeviceMemoryBase RhsBuffer() { return rz_buffers_.input_buffers().at(1); }
se::DeviceMemoryBase OutputBuffer() {
return rz_buffers_.output_buffers().at(0);
}
const Shape& GetOutputShape(const HloInstruction* gemm) {
return gemm->shape().IsTuple() ? gemm->shape().tuple_shapes(0)
: gemm->shape();
}
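// Autotunes a cuBLASLt matmul: builds a matmul plan matching the epilogue and
// bias/aux configuration, then profiles each algorithm returned for the plan.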
absl::StatusOr<AutotuneResult> TuneGpuBlasLt(const HloInstruction* gemm,
const GemmConfig& gemm_config) {
auto workspace_buffer =
rz_buffers_.output_buffers().at(gemm->shape().tuple_shapes_size() - 1);
GpuBackendConfig gpu_config =
gemm->backend_config<GpuBackendConfig>().value();
const GemmBackendConfig& backend_config = gpu_config.gemm_backend_config();
bool has_matrix_bias = gemm_config.beta != 0.;
TF_ASSIGN_OR_RETURN(
bool has_vector_bias,
gpublas_lt::EpilogueAddsVectorBias(backend_config.epilogue()));
TF_ASSIGN_OR_RETURN(
bool has_aux_output,
gpublas_lt::EpilogueHasAuxiliaryOutput(backend_config.epilogue()));
TF_ASSIGN_OR_RETURN(auto epilogue,
AsBlasLtEpilogue(backend_config.epilogue()));
se::DeviceMemoryBase a_scale_buffer, b_scale_buffer, c_scale_buffer,
d_scale_buffer, d_amax_buffer, bias_buffer, aux_buffer;
if (has_vector_bias) {
bias_buffer = rz_buffers_.input_buffers().at(has_matrix_bias ? 3 : 2);
}
if (has_aux_output) {
aux_buffer = rz_buffers_.output_buffers().at(1);
}
TF_ASSIGN_OR_RETURN(auto plan,
BlasLt::GetMatmulPlan(stream_, gemm_config, epilogue));
TF_ASSIGN_OR_RETURN(
auto algorithms,
plan->GetAlgorithms(128, workspace_buffer.size()));
auto tuned_func = [&](const BlasLt::MatmulAlgorithm& algorithm)
-> absl::StatusOr<se::blas::ProfileResult> {
TF_RETURN_IF_ERROR(plan->ExecuteOnStream(
stream_, LhsBuffer(), RhsBuffer(), OutputBuffer(), OutputBuffer(),
bias_buffer, aux_buffer, a_scale_buffer, b_scale_buffer,
c_scale_buffer, d_scale_buffer, d_amax_buffer, algorithm,
workspace_buffer));
se::blas::ProfileResult profile_result;
profile_result.set_warmup_run_executed(true);
TF_RETURN_IF_ERROR(plan->ExecuteOnStream(
stream_, LhsBuffer(), RhsBuffer(), OutputBuffer(), OutputBuffer(),
bias_buffer, aux_buffer, a_scale_buffer, b_scale_buffer,
c_scale_buffer, d_scale_buffer, d_amax_buffer, algorithm,
workspace_buffer, &profile_result));
return std::move(profile_result);
};
return GetBestAlgorithm<BlasLt::MatmulAlgorithm>(
gemm, algorithms, gemm_config.beta, tuned_func);
}
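// Autotunes a legacy cuBLAS GEMM: profiles the algorithms reported by the
// BLAS support and translates the winning index back into an algorithm id.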
absl::StatusOr<AutotuneResult> TuneGpuBlas(const HloInstruction* gemm,
const GemmConfig& gemm_config) {
auto workspace_buffer = rz_buffers_.output_buffers().at(1);
std::vector<se::blas::AlgorithmType> algorithms;
TF_ASSIGN_OR_RETURN(GemmConfig::DescriptorsTuple desc,
gemm_config.GetMatrixDescriptors(
LhsBuffer(), RhsBuffer(), OutputBuffer()));
auto blas = stream_->parent()->AsBlas();
if (blas == nullptr) {
return absl::InternalError("No BLAS support for stream");
}
blas->GetBlasGemmAlgorithms(stream_, desc.lhs, desc.rhs, &desc.output,
&gemm_config.alpha, &gemm_config.beta,
&algorithms);
AutotuneResult best_algorithm;
auto tuned_func = [&](const se::blas::AlgorithmType& algorithm)
-> absl::StatusOr<se::blas::ProfileResult> {
static_cast<void>(RunGemm(gemm_config, LhsBuffer(), RhsBuffer(),
OutputBuffer(), workspace_buffer,
deterministic_ops_, stream_, algorithm));
se::blas::ProfileResult profile_result;
profile_result.set_warmup_run_executed(true);
TF_RETURN_IF_ERROR(RunGemm(gemm_config, LhsBuffer(), RhsBuffer(),
OutputBuffer(), workspace_buffer,
deterministic_ops_, stream_, algorithm,
&profile_result));
return std::move(profile_result);
};
TF_ASSIGN_OR_RETURN(best_algorithm,
GetBestAlgorithm<se::blas::AlgorithmType>(
gemm, algorithms, gemm_config.beta, tuned_func));
if (best_algorithm.has_gemm()) {
int alg_idx = best_algorithm.gemm().algorithm();
best_algorithm.mutable_gemm()->set_algorithm(algorithms[alg_idx]);
}
return best_algorithm;
}
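// Benchmarks each algorithm with `run_benchmark`, discards candidates that
// fail, trip the redzone check, or disagree with the reference output, and
// returns the fastest remaining result with its index into `algorithms`.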
template <typename AlgoT, typename TunedFunc>
absl::StatusOr<AutotuneResult> GetBestAlgorithm(
const HloInstruction* gemm, absl::Span<const AlgoT> algorithms,
double beta, TunedFunc&& run_benchmark) {
static_assert(std::is_invocable_r_v<absl::StatusOr<se::blas::ProfileResult>,
TunedFunc, const AlgoT&>,
"Tuned function has incorrect prototype!");
if (!stream_->parent()->SynchronizeAllActivity()) {
return Internal("Failed to synchronize GPU for autotuning.");
}
tsl::profiler::ScopedAnnotation annotation([&] {
return absl::StrFormat("XlaAutotunerMeasurement:#hlo_op=%s#",
gemm->name());
});
auto& hlo_module_config = gemm->GetModule()->mutable_config();
const auto& output_shape = GetOutputShape(gemm);
se::DeviceMemoryBase reference_buffer;
if (autotune_config_.should_check_correctness()) {
TF_ASSIGN_OR_RETURN(reference_buffer,
rz_buffers_.RedzoneAllocator().AllocateBytes(
ShapeUtil::ByteSizeOf(output_shape)));
}
BufferComparator comparator(output_shape, hlo_module_config);
std::vector<AutotuneResult> results;
results.reserve(algorithms.size());
std::optional<int64_t> reference_algorithm;
auto num = algorithms.size();
if (solutions_limit_ > 0) num = std::min(num, solutions_limit_);
for (size_t i = 0; i < num; i++) {
const AlgoT& algorithm = algorithms[i];
if (autotune_config_.should_reinit_output_buffer() && beta != 0) {
int64_t rng_state = 0;
InitializeBuffer(stream_, output_shape.element_type(), &rng_state,
OutputBuffer());
}
TF_ASSIGN_OR_RETURN(auto profile_result, run_benchmark(algorithm));
AutotuneResult& result = results.emplace_back();
result.mutable_gemm()->set_algorithm(profile_result.algorithm());
if (!profile_result.is_valid()) {
result.mutable_failure()->set_kind(AutotuneResult::DISQUALIFIED);
continue;
}
VLOG(2) << "gemm algorithm " << profile_result.algorithm() << " took "
<< profile_result.elapsed_time_in_ms() << "ms";
*result.mutable_run_time() = tsl::proto_utils::ToDurationProto(
absl::Milliseconds(profile_result.elapsed_time_in_ms()));
if (!autotune_config_.should_check_correctness()) {
continue;
}
TF_ASSIGN_OR_RETURN(
se::RedzoneAllocator::RedzoneCheckStatus rz_check_status,
rz_buffers_.RedzoneAllocator().CheckRedzones());
if (!rz_check_status.ok()) {
result.mutable_failure()->set_kind(AutotuneResult::REDZONE_MODIFIED);
*result.mutable_failure()->mutable_msg() =
rz_check_status.RedzoneFailureMsg();
LOG(ERROR) << "Detected out-of-bounds write in gemm buffer";
CHECK(!autotune_config_.should_crash_on_check_failure());
continue;
}
if (!reference_algorithm) {
TF_RETURN_IF_ERROR(stream_->Memcpy(&reference_buffer, OutputBuffer(),
OutputBuffer().size()));
reference_algorithm = profile_result.algorithm();
} else {
TF_ASSIGN_OR_RETURN(
bool outputs_match,
comparator.CompareEqual(stream_, OutputBuffer(),
reference_buffer));
if (!outputs_match) {
LOG(ERROR) << "Results mismatch between different GEMM algorithms. "
<< "This is likely a bug/unexpected loss of precision.";
CHECK(!autotune_config_.should_crash_on_check_failure());
result.mutable_failure()->set_kind(AutotuneResult::WRONG_RESULT);
result.mutable_failure()->mutable_reference_gemm()->set_algorithm(
*reference_algorithm);
}
}
}
absl::StatusOr<AutotuneResult> best =
PickBestResult(results, gemm->ToString(), hlo_module_config);
if (best.ok()) {
for (size_t i = 0; i < results.size(); ++i) {
if (best->gemm().algorithm() == results[i].gemm().algorithm()) {
best->mutable_gemm()->set_algorithm(i);
return best;
}
}
return Internal("unknown best algorithm");
}
LOG(WARNING) << "Failed to find best cuBLAS algorithm, GEMM performance "
"might be suboptimal: "
<< best.status();
return AutotuneResult{};
}
};
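// Autotunes one cuBLAS GEMM custom call and records the chosen algorithm in
// its backend config. The algorithm is only updated for FP8 cuBLASLt matmuls,
// pre-Ampere CUDA devices, and ROCm; otherwise the instruction is left
// unchanged.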
absl::StatusOr<bool> RunOnInstruction(HloInstruction* gemm,
const AutotuneConfig& config) {
VLOG(3) << "Loading the autotune result of GemmThunk " << gemm->ToString();
GpuBackendConfig gpu_config =
gemm->backend_config<GpuBackendConfig>().value();
GemmBackendConfig& backend_config = *gpu_config.mutable_gemm_backend_config();
if (backend_config.alpha_real() == 0.0 &&
backend_config.alpha_imag() == 0.0 && backend_config.beta() == 0.0) {
VLOG(3) << "Skip degenerate gemm instruction auto tuning";
return false;
}
AutotuneCacheKey key(config.GetModelStr(), *gemm);
GemmAutotuner autotuner(config);
TF_ASSIGN_OR_RETURN(AutotuneResult algorithm,
AutotunerUtil::Autotune(
gemm, config, [&] { return autotuner(gemm, key); }));
auto old_algorithm = backend_config.selected_algorithm();
bool update_algorithm =
IsCublasLtMatmulF8(*gemm) ||
std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) {
return !cc.IsAtLeast(
se::CudaComputeCapability::AMPERE);
},
[](const se::RocmComputeCapability&) {
return true;
}},
config.GetGpuComputeCapability());
if (update_algorithm) {
int64_t new_algorithm{};
if (algorithm.has_gemm()) {
new_algorithm = algorithm.gemm().algorithm();
} else {
new_algorithm = se::blas::kDefaultAlgorithm;
}
if (new_algorithm == old_algorithm &&
backend_config.has_selected_algorithm()) {
return false;
}
backend_config.set_selected_algorithm(new_algorithm);
TF_RETURN_IF_ERROR(gemm->set_backend_config(gpu_config));
return true;
}
return false;
}
absl::StatusOr<bool> RunOnComputation(HloComputation* computation,
AutotuneConfig config) {
bool changed = false;
for (HloInstruction* instr : computation->instructions()) {
if (IsCublasGemm(*instr)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(instr, config));
changed |= result;
}
}
return changed;
}
}
absl::StatusOr<bool> GemmAlgorithmPicker::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_SCOPED_LOGGING_TIMER(
absl::StrCat("GemmAlgorithmPicker for ", module->name()));
if (module->config().debug_options().xla_gpu_autotune_level() == 0) {
VLOG(2) << "GEMM auto-tuning disabled, GemmAlgorithmPicker returning early";
return false;
}
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation, config_));
changed |= result;
}
return changed;
}
}
} | #include "xla/service/gpu/gemm_algorithm_picker.h"
#include <cstdint>
#include <variant>
#include <vector>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gemm_rewriter.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/platform.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/protobuf/dnn.pb.h"
namespace xla::gpu {
namespace {
namespace m = ::xla::match;
class GemmAlgorithmPickerTest : public HloTestBase,
public ::testing::WithParamInterface<bool> {
public:
GemmAlgorithmPickerTest() { AutotunerUtil::ClearAutotuneResults(); }
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_cublaslt(GetParam());
debug_options.set_xla_gpu_enable_triton_gemm(false);
return debug_options;
}
void SetUp() override {
const auto& gpu_cc = backend()
.default_stream_executor()
->GetDeviceDescription()
.gpu_compute_capability();
if (auto* rocm_cc = std::get_if<se::RocmComputeCapability>(&gpu_cc)) {
  if (GetDebugOptionsForTest().xla_gpu_enable_cublaslt() &&
      !rocm_cc->has_hipblaslt()) {
GTEST_SKIP() << "No gpublas-lt support on this architecture!";
}
}
}
};
TEST_P(GemmAlgorithmPickerTest, SetAlgorithm) {
auto comp = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (comp.IsAtLeast(se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Skipping this test for Ampere+ as it is supported and "
"recommended with "
"the Nvidia Volta+ GPUs.";
}
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[100,100]{1,0} parameter(0)
%arg1 = f32[100,100]{1,0} parameter(1)
ROOT %dot = f32[100,100]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
auto module_cfg = GetModuleConfigForTest();
TF_ASSERT_OK_AND_ASSIGN(auto m,
ParseAndReturnVerifiedModule(kHlo, module_cfg));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
stream_exec->GetDeviceDescription().gpu_compute_capability(),
12040),
m.get()));
changed = false;
DebugOptions opts;
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_algo_id = result.algorithm().algo_id();
int64_t new_algo_id = old_algo_id + 1;
result.mutable_gemm()->set_algorithm(new_algo_id);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo, module_cfg));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
stream_exec->GetDeviceDescription().gpu_compute_capability(),
12040),
m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
SCOPED_TRACE(m->ToString());
HloInstruction* dot;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&dot), 0)));
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
dot->backend_config<GpuBackendConfig>());
const GemmBackendConfig& config = gpu_config.gemm_backend_config();
EXPECT_EQ(config.selected_algorithm(), new_algo_id);
}
TEST_P(GemmAlgorithmPickerTest, GetAlgorithmWithoutDevice) {
auto comp = backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
if (comp.IsAtLeast(se::CudaComputeCapability::AMPERE)) {
GTEST_SKIP() << "Skipping this test for Ampere+ as it is supported and "
"recommended with "
"the Nvidia Volta+ GPUs.";
}
constexpr absl::string_view kHlo = R"(
HloModule module
ENTRY main {
%arg0 = f32[100,100]{1,0} parameter(0)
%arg1 = f32[100,100]{1,0} parameter(1)
ROOT %dot = f32[100,100]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
TF_ASSERT_OK_AND_ASSIGN(
auto m, ParseAndReturnVerifiedModule(kHlo, GetModuleConfigForTest()));
se::Platform* platform = PlatformUtil::GetDefaultPlatform().value();
TF_ASSERT_OK_AND_ASSIGN(std::vector<se::StreamExecutor*> executors,
PlatformUtil::GetStreamExecutors(platform));
ASSERT_GT(executors.size(), 0);
se::StreamExecutor* stream_exec = executors[0];
bool changed = false;
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
stream_exec->GetDeviceDescription().gpu_compute_capability(),
12040),
m.get()));
changed = false;
DebugOptions opts;
AutotuneConfig cfg{DeviceConfig{stream_exec, nullptr}, opts};
TF_ASSERT_OK_AND_ASSIGN(changed,
RunHloPass(GemmAlgorithmPicker(cfg), m.get()));
ASSERT_TRUE(changed);
AutotuneResults results;
TF_ASSERT_OK(AutotunerUtil::SerializeAutotuneResults(&results));
ASSERT_EQ(results.results_size(), 1);
auto& result = *results.mutable_results(0)->mutable_result();
int64_t old_algo_id = result.algorithm().algo_id();
int64_t new_algo_id = old_algo_id + 1;
result.mutable_gemm()->set_algorithm(new_algo_id);
AutotunerUtil::ClearAutotuneResults();
TF_ASSERT_OK(AutotunerUtil::LoadAutotuneResults(results));
auto module_cfg = GetModuleConfigForTest();
TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(kHlo, module_cfg));
changed = false;
DevicelessConfig deviceless_config{
stream_exec->GetDeviceDescription().model_str(),
stream_exec->GetDeviceDescription().cuda_compute_capability()};
AutotuneConfig deviceless_cfg{deviceless_config, opts};
TF_ASSERT_OK_AND_ASSIGN(
changed,
RunHloPass(
GemmRewriter(
stream_exec->GetDeviceDescription().gpu_compute_capability(),
12040),
m.get()));
changed = false;
TF_ASSERT_OK_AND_ASSIGN(
      changed, RunHloPass(GemmAlgorithmPicker(deviceless_cfg), m.get()));
ASSERT_TRUE(changed);
SCOPED_TRACE(m->ToString());
HloInstruction* dot;
ASSERT_THAT(m->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::CustomCall(&dot), 0)));
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
dot->backend_config<GpuBackendConfig>());
const GemmBackendConfig& config = gpu_config.gemm_backend_config();
EXPECT_EQ(config.selected_algorithm(), new_algo_id);
}
INSTANTIATE_TEST_SUITE_P(GemmAlgorithmPickerTestSuite, GemmAlgorithmPickerTest,
::testing::Bool());
}
} | 2,112 |
#ifndef XLA_SERVICE_GPU_VARIADIC_OP_SPLITTER_H_
#define XLA_SERVICE_GPU_VARIADIC_OP_SPLITTER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
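// Splits variadic ops with very large operand counts. Currently only
// kConcatenate is handled: concatenates with more than 128 operands are
// rewritten into a tree of concatenates with at most 128 operands each (see
// kMaxParameters in the implementation), presumably to keep the generated
// kernels within operand-count limits.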
class VariadicOpSplitter : public HloModulePass {
public:
absl::string_view name() const override { return "variadic-op-splitter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
}
}
#endif
#include "xla/service/gpu/variadic_op_splitter.h"
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
constexpr int32_t kMaxParameters = 128;
absl::StatusOr<bool> SplitConcatenate(HloInstruction* concat,
HloComputation* comp) {
auto operands = concat->operands();
std::vector<HloInstruction*> operands_to_split(operands.begin(),
operands.end());
while (operands_to_split.size() > 1) {
std::vector<HloInstruction*> new_operands;
absl::Span<HloInstruction*> operands_span(operands_to_split);
for (int64_t offset = 0; offset < operands_to_split.size();
offset += kMaxParameters) {
if (offset > 0 && offset + kMaxParameters > operands_to_split.size()) {
new_operands.insert(new_operands.end(),
operands_to_split.begin() + offset,
operands_to_split.end());
} else {
Shape new_shape = concat->shape();
int64_t concat_dimension_size = 0;
for (int64_t i = 0;
i < kMaxParameters && offset + i < operands_to_split.size(); ++i) {
concat_dimension_size +=
operands_to_split[i + offset]->shape().dimensions(
concat->concatenate_dimension());
}
new_shape.set_dimensions(concat->concatenate_dimension(),
concat_dimension_size);
auto new_concat = comp->AddInstruction(concat->CloneWithNewOperands(
new_shape, operands_span.subspan(offset, kMaxParameters)));
new_operands.push_back(new_concat);
}
}
operands_to_split = new_operands;
}
TF_RETURN_IF_ERROR(comp->ReplaceInstruction(concat, operands_to_split[0]));
return true;
}
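// Worked example (illustrative, mirroring the unit tests for this pass): with
// kMaxParameters == 128, a concatenate of 256 operands is rewritten in two
// rounds: first into two concatenates of 128 operands each, then into a final
// concatenate of those two results. A concatenate of 255 operands yields one
// 128-operand concatenate whose result is concatenated together with the
// remaining 127 operands, so the final concatenate again has 128 operands.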
std::vector<HloInstruction*> GetRelevantVariadicOps(HloComputation* comp) {
std::vector<HloInstruction*> ops;
for (HloInstruction* instr : comp->instructions()) {
if (instr->opcode() == HloOpcode::kConcatenate &&
instr->operand_count() > kMaxParameters) {
ops.push_back(instr);
}
}
return ops;
}
}
absl::StatusOr<bool> VariadicOpSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* comp :
module->MakeNonfusionComputations(execution_threads)) {
for (HloInstruction* op : GetRelevantVariadicOps(comp)) {
TF_ASSIGN_OR_RETURN(bool result, SplitConcatenate(op, comp));
changed |= result;
}
}
return changed;
}
}
} | #include "xla/service/gpu/variadic_op_splitter.h"
#include <cstdint>
#include <vector>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/literal_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using match::Concatenate;
class VariadicOpSplitterTest : public HloTestBase {};
TEST_F(VariadicOpSplitterTest, DontSplit) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule TestModule
ENTRY TestComputation {
p0 = f16[30,41] parameter(0)
p1 = f16[30,41] parameter(1)
ROOT result = f16[60, 41] concatenate(p0, p1), dimensions={0}
})")
.value();
EXPECT_FALSE(VariadicOpSplitter().Run(module.get()).value());
}
TEST_F(VariadicOpSplitterTest, SplitInto2) {
auto builder = HloComputation::Builder(TestName());
auto operand = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32_t>({42})));
std::vector<HloInstruction*> concat_operands(255, operand);
builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(S32, {255}), concat_operands, 0));
auto module = CreateNewVerifiedModule();
auto entry_computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(VariadicOpSplitter().Run(module.get()).value());
EXPECT_TRUE(Match(entry_computation->root_instruction(),
Concatenate().WithNumOperands(128).WithOperand(
0, Concatenate().WithNumOperands(128))));
}
TEST_F(VariadicOpSplitterTest, SplitInto3) {
auto builder = HloComputation::Builder(TestName());
auto operand = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32_t>({42})));
std::vector<HloInstruction*> concat_operands(256, operand);
builder.AddInstruction(HloInstruction::CreateConcatenate(
ShapeUtil::MakeShape(S32, {256}), concat_operands, 0));
auto module = CreateNewVerifiedModule();
auto entry_computation = module->AddEntryComputation(builder.Build());
EXPECT_TRUE(VariadicOpSplitter().Run(module.get()).value());
EXPECT_TRUE(Match(entry_computation->root_instruction(),
Concatenate(Concatenate().WithNumOperands(128),
Concatenate().WithNumOperands(128))));
}
}
}
} | 2,113 |
#ifndef XLA_SERVICE_GPU_EXECUTION_STREAM_ASSIGNMENT_H_
#define XLA_SERVICE_GPU_EXECUTION_STREAM_ASSIGNMENT_H_
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/runtime/thunk.h"
namespace xla::gpu {
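// Assigns an ExecutionStreamId to every instruction reachable from the
// module's entry computation. Synchronous instructions inherit the stream of
// the computation they belong to (stream 0 for the entry computation). Every
// async-start callsite allocates a fresh destination stream; the whole
// async-start/update/done chain records both the caller's source stream and
// that destination stream, and the called computation itself runs on the
// destination stream.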
class ExecutionStreamAssignment {
public:
explicit ExecutionStreamAssignment(const HloModule* module);
absl::StatusOr<ExecutionStreamId> GetSyncExecutionStreamId(
const HloInstruction* instruction) const;
struct AsyncExecutionStreamIds {
ExecutionStreamId source_stream_id;
ExecutionStreamId destination_stream_id;
};
absl::StatusOr<AsyncExecutionStreamIds> GetAsyncExecutionStreamIds(
const HloAsyncInstruction* instruction) const;
private:
absl::flat_hash_map<HloInstruction*, ExecutionStreamId> sync_instructions_;
absl::flat_hash_map<HloInstruction*, AsyncExecutionStreamIds>
async_instructions_;
};
inline bool operator==(
const ExecutionStreamAssignment::AsyncExecutionStreamIds& first,
const ExecutionStreamAssignment::AsyncExecutionStreamIds& second) {
return first.source_stream_id == second.source_stream_id &&
first.destination_stream_id == second.destination_stream_id;
}
}
#endif
#include "xla/service/gpu/execution_stream_assignment.h"
#include <deque>
#include <memory>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/call_graph.h"
#include "xla/service/gpu/runtime/thunk.h"
namespace xla::gpu {
ExecutionStreamAssignment::ExecutionStreamAssignment(const HloModule* module) {
std::unique_ptr<CallGraph> call_graph = CallGraph::Build(module);
ExecutionStreamId next_stream_id = ExecutionStreamId(1);
struct Pending {
Pending(HloComputation* node, ExecutionStreamId stream_id)
: node(node), stream_id(stream_id) {}
HloComputation* node;
ExecutionStreamId stream_id;
};
std::deque<Pending> queue;
queue.emplace_back(module->entry_computation(), ExecutionStreamId(0));
auto enqueue_called_computations = [&](const CallSite& callsite,
ExecutionStreamId stream) {
if (GetInstructionCallContext(callsite.instruction()->opcode()) ==
CallContext::kEmbedded) {
return;
}
for (HloComputation* computation : callsite.called_computations()) {
queue.emplace_back(computation, stream);
}
};
while (!queue.empty()) {
Pending pending = queue.front();
queue.pop_front();
for (HloInstruction* instruction : pending.node->instructions()) {
if (instruction->IsAsynchronous()) continue;
CHECK(sync_instructions_.try_emplace(instruction, pending.stream_id)
.second);
}
for (const CallSite& callsite :
call_graph->GetNode(pending.node).callsites()) {
if (callsite.instruction()->IsAsynchronous()) {
CHECK_EQ(callsite.instruction()->opcode(), HloOpcode::kAsyncStart);
const ExecutionStreamId async_stream_id = next_stream_id++;
enqueue_called_computations(callsite, async_stream_id);
AsyncExecutionStreamIds streams;
streams.source_stream_id = pending.stream_id;
streams.destination_stream_id = async_stream_id;
CHECK(async_instructions_.try_emplace(callsite.instruction(), streams)
.second);
} else {
enqueue_called_computations(callsite, pending.stream_id);
}
}
for (HloInstruction* instruction : pending.node->instructions()) {
if (!instruction->IsAsynchronous()) continue;
if (instruction->opcode() == HloOpcode::kAsyncStart) {
CHECK(async_instructions_.find(instruction) !=
async_instructions_.end());
} else {
HloInstruction* async_start =
Cast<HloAsyncInstruction>(instruction)->async_chain_start();
AsyncExecutionStreamIds async_start_streams =
async_instructions_.at(async_start);
CHECK(async_instructions_.try_emplace(instruction, async_start_streams)
.second);
}
}
}
}
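// Resulting assignment for a module whose entry computation launches two
// independent async fusions (an illustrative sketch matching the unit tests):
//   entry computation and its sync instructions     -> stream 0
//   first async chain (start/update/done)           -> {source 0, dest 1}
//   computation wrapped by the first async-start    -> stream 1
//   second async chain                              -> {source 0, dest 2}
//   computation wrapped by the second async-start   -> stream 2
// Computations reachable only through embedded calls (e.g. fusion bodies or
// reducer apply computations) are skipped entirely, so the accessors below
// report NotFound for their instructions.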
namespace {
absl::Status StreamNotFoundError(const HloInstruction* instruction) {
return absl::NotFoundError(absl::StrCat(
"No ExecutionStreamId found for ", instruction->ToString(),
"; this may happen if the Computation is not reachable from the module's "
"entrypoint, or if it's only reachable through a embedded calls."));
}
}
absl::StatusOr<ExecutionStreamId>
ExecutionStreamAssignment::GetSyncExecutionStreamId(
const HloInstruction* instruction) const {
CHECK(!instruction->IsAsynchronous());
auto stream = sync_instructions_.find(instruction);
if (stream == sync_instructions_.end()) {
return StreamNotFoundError(instruction);
}
return stream->second;
}
absl::StatusOr<ExecutionStreamAssignment::AsyncExecutionStreamIds>
ExecutionStreamAssignment::GetAsyncExecutionStreamIds(
const HloAsyncInstruction* instruction) const {
auto streams = async_instructions_.find(instruction);
if (streams == async_instructions_.end()) {
return StreamNotFoundError(instruction);
}
return streams->second;
}
} | #include "xla/service/gpu/execution_stream_assignment.h"
#include <memory>
#include <string_view>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
using ::tsl::testing::IsOkAndHolds;
using ::tsl::testing::StatusIs;
using AsyncExecutionStreamIds =
::xla::gpu::ExecutionStreamAssignment::AsyncExecutionStreamIds;
namespace xla::gpu {
namespace {
class ExecutionStreamAssignmentTest : public HloTestBase {
protected:
void ExpectExecutionStreamForSyncInstructions(
const ExecutionStreamAssignment& assignment, HloComputation* computation,
ExecutionStreamId stream) const {
for (const HloInstruction* instruction : computation->instructions()) {
if (instruction->IsAsynchronous()) continue;
EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),
IsOkAndHolds(stream));
}
}
};
TEST_F(ExecutionStreamAssignmentTest, AsyncFusion) {
const char* kModuleStr = R"(
HloModule m
leaf1 {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
leaf2 {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
ENTRY entry {
p0 = f32[2,2] parameter(0)
start1 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),
kind=kLoop, calls=leaf1
start2 = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0),
kind=kLoop, calls=leaf2
update1 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start1)
update2 = ((f32[2,2]), f32[2,2], s32[]) fusion-update(start2)
done1 = f32[2,2] fusion-done(update1)
done2 = f32[2,2] fusion-done(update2)
ROOT done = f32[2,2] add(done1, done2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ExecutionStreamAssignment assignment(module.get());
ExpectExecutionStreamForSyncInstructions(
assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0));
for (std::string_view instruction : {"start1", "update1", "done1"}) {
EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast<HloAsyncInstruction>(
FindInstruction(module.get(), instruction))),
                IsOkAndHolds(AsyncExecutionStreamIds{
                    /*source_stream_id=*/ExecutionStreamId(0),
                    /*destination_stream_id=*/ExecutionStreamId(1)}));
}
for (std::string_view instruction : {"start2", "update2", "done2"}) {
EXPECT_THAT(assignment.GetAsyncExecutionStreamIds(Cast<HloAsyncInstruction>(
FindInstruction(module.get(), instruction))),
                IsOkAndHolds(AsyncExecutionStreamIds{
                    /*source_stream_id=*/ExecutionStreamId(0),
                    /*destination_stream_id=*/ExecutionStreamId(2)}));
}
ExpectExecutionStreamForSyncInstructions(
assignment,
Cast<HloAsyncInstruction>(FindInstruction(module.get(), "start1"))
->async_wrapped_computation(),
ExecutionStreamId(1));
ExpectExecutionStreamForSyncInstructions(
assignment,
Cast<HloAsyncInstruction>(FindInstruction(module.get(), "start2"))
->async_wrapped_computation(),
ExecutionStreamId(2));
}
TEST_F(ExecutionStreamAssignmentTest, FusionComputations) {
const char* kModuleStr = R"(
HloModule m
reduce {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
p0 = f32[4] parameter(0)
c0 = f32[] constant(0)
ROOT reduce = f32[] reduce(p0, c0), dimensions={0}, to_apply=reduce
}
ENTRY entry {
p0 = f32[4] parameter(0)
ROOT done = f32[] fusion(p0), kind=kLoop, calls=fusion
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ExecutionStreamAssignment assignment(module.get());
ExpectExecutionStreamForSyncInstructions(
assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0));
for (std::string_view computation : {"reduce", "fusion"}) {
for (const HloInstruction* instruction :
FindComputation(module.get(), computation)->instructions()) {
EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),
StatusIs(absl::StatusCode::kNotFound));
}
}
}
TEST_F(ExecutionStreamAssignmentTest, UnreachableComputation) {
const char* kModuleStr = R"(
HloModule m
unreachable {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
ENTRY entry {
p0 = f32[2,2] parameter(0)
ROOT add = f32[2,2] add(p0, p0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(kModuleStr));
ExecutionStreamAssignment assignment(module.get());
ExpectExecutionStreamForSyncInstructions(
assignment, FindComputation(module.get(), "entry"), ExecutionStreamId(0));
for (const HloInstruction* instruction :
FindComputation(module.get(), "unreachable")->instructions()) {
EXPECT_THAT(assignment.GetSyncExecutionStreamId(instruction),
StatusIs(absl::StatusCode::kNotFound));
}
}
}
} | 2,114 |
#ifndef XLA_SERVICE_GPU_GPU_FUSIBLE_H_
#define XLA_SERVICE_GPU_GPU_FUSIBLE_H_
#include <cstddef>
#include <cstdint>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/instruction_fusion.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
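// Heuristics shared by the GPU fusion passes: queries about whether an
// instruction is fusible as a producer or consumer, whether a fusion fits
// into emitter and hardware budgets, whether two fusions may be merged into a
// multi-output fusion, and which fusion kind to choose.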
bool IfFusedReadsElementsMultipleTimes(const HloInstruction& instr);
bool IsExpensiveToRepeat(const HloInstruction& instr);
struct FusionInfoCache {
public:
void Invalidate(const HloInstruction* instr) {
shared_memory_usage.erase(instr);
num_unnested_reductions.erase(instr);
}
absl::flat_hash_map<const HloInstruction*, int64_t> shared_memory_usage;
absl::flat_hash_map<const HloInstruction*, int64_t> num_unnested_reductions;
};
std::vector<HloComputation*> GetFusibleComputations(
const HloModule& module,
const absl::flat_hash_set<absl::string_view>& execution_threads);
int64_t SharedMemoryUsage(const HloInstruction& instr,
FusionInfoCache* cache = nullptr);
inline constexpr int64_t MaxOperandsAndOutputsPerFusion() { return 96; }
bool IsPhysicallyTransposing(const HloInstruction& instr);
bool TransposesMinorDimension(const HloInstruction* instr);
bool IsReduceInputFusion(const HloInstruction& instr);
bool IsInputFusibleReduction(const HloInstruction& instr);
bool IsNestableVariadicReduction(const HloInstruction& instr);
bool IsInputFusibleScatter(const HloInstruction& instr);
FusionDecision FusionFitsInBudget(const HloInstruction& instr1,
const HloInstruction& instr2,
const se::DeviceDescription& device_info,
bool is_consumer_producer_fusion = false,
FusionInfoCache* cache = nullptr);
bool CreatesHeavyComputation(const HloInstruction& producer,
const HloInstruction& consumer);
const HloInstruction* GetRealHeroForMultiOutputFusion(
const HloInstruction& instr);
FusionDecision FusionHeroesAreCompatible(const HloInstruction* hero1,
const HloInstruction* hero2);
FusionDecision ShapesCompatibleForMultiOutputFusion(
const HloInstruction& instr1, const HloInstruction& instr2);
FusionDecision CanEmitInputFusedScatter(const HloInstruction& producer,
const HloInstruction& consumer);
FusionDecision IsProducerConsumerFusible(const HloInstruction& producer,
const HloInstruction& consumer);
FusionDecision IsProducerMultiOutputFusible(const HloInstruction& producer);
bool IsFusibleAsMultiOutputFusionRoot(const HloInstruction& instr);
HloInstruction::FusionKind ChooseFusionKind(const HloInstruction& producer,
const HloInstruction& consumer);
bool IsConsumerTheOnlyNonRootUser(const HloInstruction& instr,
const HloInstruction& consumer);
size_t GetInstrCountOfFusible(const HloInstruction& instr);
absl::InlinedVector<const HloInstruction*, 2> GetOutputsOfFusible(
const HloInstruction& instr);
size_t GetOutputSizeOfFusible(const HloInstruction& instr);
std::vector<const HloInstruction*> GetFusionRoots(
const HloComputation& computation);
bool IsGenericTritonFusion(const HloInstruction& instr);
bool MayPreventVectorization(const HloFusionAdaptor& fusion);
}
}
#endif
#include "xla/service/gpu/gpu_fusible.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <stack>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/permutation_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
bool HasAnyTiledTransposeRoot(const HloComputation& computation) {
return absl::c_any_of(GetFusionRoots(computation),
[&](const HloInstruction* instr) {
return GetDescriptionForTiledTransposeEmitter(
*instr, FindNonTrivialHero(*instr))
.has_value();
});
}
}
bool IfFusedReadsElementsMultipleTimes(const HloInstruction& instr) {
CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused.";
if (instr.opcode() == HloOpcode::kGather ||
instr.opcode() == HloOpcode::kBroadcast) {
return ShapeUtil::ElementsIn(instr.shape()) >
ShapeUtil::ElementsIn(instr.operand(0)->shape());
}
if (instr.opcode() == HloOpcode::kReduceWindow) {
for (const auto& dim : instr.window().dimensions()) {
if (dim.size() > dim.stride()) {
return true;
}
}
}
return false;
}
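// Example for the check above (shapes chosen for illustration): a broadcast
// from f32[32] to f32[32,1024] produces more elements than it reads, so a
// producer fused into it would be recomputed for many output elements; a
// reduce-window whose window size exceeds its stride (say size 3, stride 1)
// also revisits each input element several times. Gathers are treated the
// same way as broadcasts, via the element-count comparison.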
bool IsExpensiveToRepeat(const HloInstruction& instr) {
CHECK_NE(instr.opcode(), HloOpcode::kFusion) << "`instr` has to be unfused.";
constexpr int kMaxInputsPerOutput = 10;
if (instr.opcode() == HloOpcode::kReduce &&
!IsReductionFromOrToContiguousDimensions(instr)) {
int64_t reduction_ratio = ShapeUtil::ElementsIn(instr.operand(0)->shape()) /
ShapeUtil::ElementsIn(instr.shape());
if (reduction_ratio > kMaxInputsPerOutput) return true;
}
if (instr.opcode() == HloOpcode::kReduceWindow) {
int64_t reduction_ratio = 1;
for (const auto& dim : instr.window().dimensions())
reduction_ratio *= dim.size();
if (reduction_ratio > kMaxInputsPerOutput) return true;
}
return false;
}
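// Example for the heuristic above (illustrative): a non-contiguous reduction
// from f32[1000,1000] down to f32[1000] consumes 1000 input elements per
// output element, far above kMaxInputsPerOutput, so duplicating it into
// multiple consumers is considered too expensive. The same ratio test is
// applied to the window size of reduce-window ops.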
bool IsPhysicallyTransposing(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kFusion) {
for (const HloInstruction* fused_instr : instr.fused_instructions()) {
if (IsPhysicallyTransposing(*fused_instr)) {
return true;
}
}
}
return instr.opcode() == HloOpcode::kCopy ||
(instr.opcode() == HloOpcode::kTranspose &&
!ShapeUtil::TransposeIsBitcast(instr.operand(0)->shape(),
instr.shape(), instr.dimensions()));
}
namespace {
std::pair<int64_t, int64_t> MostMinorNonTrivialDimension(const Shape& shape) {
int64_t position_of_first_non_trivial_dim = 0;
for (int64_t dim : shape.layout().minor_to_major()) {
if (shape.dimensions()[dim] > 1) {
return {dim, position_of_first_non_trivial_dim};
}
++position_of_first_non_trivial_dim;
}
return {-1, position_of_first_non_trivial_dim};
}
}
bool TransposesMinorDimension(const HloInstruction* instr) {
switch (instr->opcode()) {
case HloOpcode::kFusion:
return absl::c_any_of(instr->fused_instructions(),
TransposesMinorDimension);
case HloOpcode::kCopy: {
int64_t first_non_trivial_operand_dim =
MostMinorNonTrivialDimension(instr->operand(0)->shape()).first;
int64_t first_non_trivial_output_dim =
MostMinorNonTrivialDimension(instr->shape()).first;
return first_non_trivial_operand_dim != first_non_trivial_output_dim;
}
case HloOpcode::kTranspose: {
auto position_in_minor_to_major = InversePermutation(
instr->operand(0)->shape().layout().minor_to_major());
int64_t position_of_first_non_trivial_dim =
MostMinorNonTrivialDimension(instr->operand(0)->shape()).second;
for (int64_t output_dim : instr->shape().layout().minor_to_major()) {
if (instr->shape().dimensions()[output_dim] == 1) {
continue;
}
int64_t operand_dim = instr->dimensions().at(output_dim);
return position_in_minor_to_major[operand_dim] >
position_of_first_non_trivial_dim;
}
return false;
}
default:
return false;
}
}
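// Example for the check above, matching the layouts in the unit tests: with a
// row-major f32[10,20,30,40]{3,2,1,0} operand, a transpose with
// dimensions={0,1,3,2} swaps the two most-minor dimensions and returns true,
// while dimensions={0,2,1,3} only permutes major dimensions and returns
// false. Size-1 dimensions are ignored when locating the most-minor
// non-trivial dimension, and fusions report true if any fused instruction
// does.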
bool IsReduceInputFusion(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kFusion &&
absl::c_any_of(GetFusionRoots(*instr.called_computations()[0]),
[](const HloInstruction* root) {
return IsRealReductionHero(*root,
FindNonTrivialHero(*root));
});
}
bool IsInputFusibleReduction(const HloInstruction& instr) {
return IsReduceInputFusion(instr) ||
IsReductionFromOrToContiguousDimensions(instr);
}
bool IsNestableVariadicReduction(const HloInstruction& instr) {
return instr.shape().IsTuple() &&
((instr.opcode() == HloOpcode::kReduce &&
!IsReductionFromOrToContiguousDimensions(instr)) ||
(instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kLoop &&
instr.fused_expression_root()->opcode() == HloOpcode::kReduce));
}
bool IsInputFusibleTranspose(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kBitcast || instr.IsCustomFusion()) {
return false;
}
if (instr.opcode() == HloOpcode::kFusion) {
return HasAnyTiledTransposeRoot(*instr.fused_instructions_computation());
}
return GetDescriptionForTiledTransposeEmitter(instr, instr).has_value();
}
const HloInstruction* GetRealHeroForMultiOutputFusion(
const HloInstruction& instr) {
if (instr.opcode() != HloOpcode::kFusion) {
return &instr;
}
auto fused_expression_root = instr.fused_expression_root();
if (!instr.IsMultiOutputFusion()) {
const auto& hero = FindNonTrivialHero(*fused_expression_root);
if (IsRealReductionHero(*fused_expression_root, hero) ||
GetDescriptionForTiledTransposeEmitter(*fused_expression_root, hero)
.has_value()) {
return &hero;
}
return fused_expression_root;
}
for (auto* inst : fused_expression_root->mutable_operands()) {
const auto& hero = FindNonTrivialHero(*inst);
if (IsRealReductionHero(*inst, hero) ||
GetDescriptionForTiledTransposeEmitter(*inst, hero).has_value()) {
return &hero;
}
}
return fused_expression_root->operands()[0];
}
FusionDecision FusionHeroesAreCompatible(const HloInstruction* hero1,
const HloInstruction* hero2) {
auto hero1_is_unnested_reduce =
IsReductionFromOrToContiguousDimensions(*hero1);
auto tiled_transpose_hero1 =
GetDescriptionForTiledTransposeEmitter(*hero1, *hero1);
bool hero1_is_unnested_transpose = tiled_transpose_hero1.has_value();
bool hero2_is_unnested_reduce =
IsReductionFromOrToContiguousDimensions(*hero2);
auto tiled_transpose_hero2 =
GetDescriptionForTiledTransposeEmitter(*hero2, *hero2);
bool hero2_is_unnested_transpose = tiled_transpose_hero2.has_value();
if (hero1_is_unnested_reduce && hero2_is_unnested_reduce &&
!AreReductionsMultiOutputFusionCompatible(hero2, hero1)) {
return "tiled reductions with different shapes";
} else if (hero1_is_unnested_transpose && hero2_is_unnested_transpose &&
!tiled_transpose_hero1->IsEquivalent(*tiled_transpose_hero2)) {
return "tiled transposes with different shapes";
} else if ((hero1_is_unnested_transpose && hero2_is_unnested_reduce) ||
(hero1_is_unnested_reduce && hero2_is_unnested_transpose)) {
return "MOF-fusion of a transpose and a reduction";
}
if (hero1_is_unnested_transpose || hero2_is_unnested_transpose) {
auto check_path_of_intermediate_ops = [](HloInstruction* param) {
if (param->user_count() != 1) {
return false;
}
HloInstruction* hlo = param->users()[0];
while (hlo->user_count() > 0) {
if (!IsIntermediate(hlo)) {
return false;
}
hlo = hlo->users()[0];
}
return true;
};
HloInstruction* fusion1 = hero1->parent()->FusionInstruction();
HloInstruction* fusion2 = hero2->parent()->FusionInstruction();
if (fusion1 != nullptr && fusion2 != nullptr) {
if (hero1_is_unnested_transpose && fusion2->IsUserOf(fusion1)) {
int64_t operand_idx = fusion2->operand_index(fusion1);
auto hlo = fusion2->fused_parameter(operand_idx);
if (!check_path_of_intermediate_ops(hlo)) {
return "tiled transpose would become untiled";
}
} else if (hero2_is_unnested_transpose && fusion1->IsUserOf(fusion2)) {
int64_t operand_idx = fusion1->operand_index(fusion2);
auto hlo = fusion1->fused_parameter(operand_idx);
if (!check_path_of_intermediate_ops(hlo)) {
return "tiled transpose would become untiled";
}
}
}
}
return {};
}
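// Summary of the rules above: two reduction heroes must be multi-output
// fusion compatible, two tiled-transpose heroes must describe equivalent
// tilings, and mixing a tiled transpose with a tiled reduction is rejected.
// If exactly one hero is a tiled transpose and one fusion feeds the other,
// the fused path from the shared parameter must consist only of intermediate
// ops (as checked by IsIntermediate); otherwise the transpose could no longer
// be emitted in its tiled form.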
FusionDecision ShapesCompatibleForMultiOutputFusion(
const HloInstruction& instr1, const HloInstruction& instr2) {
auto get_loop_shape = [&](const HloInstruction* element_instr) {
const auto& hero = element_instr->parent()->IsFusionComputation()
? FindNonTrivialHero(*element_instr)
: *element_instr;
if (IsReductionFromOrToContiguousDimensions(*element_instr) ||
GetDescriptionForTiledTransposeEmitter(*element_instr, hero)
.has_value()) {
return hero.operand(0)->shape();
}
return element_instr->shape();
};
const HloInstruction* hero1 = GetRealHeroForMultiOutputFusion(instr1);
const HloInstruction* hero2 = GetRealHeroForMultiOutputFusion(instr2);
if (auto compatible = FusionHeroesAreCompatible(hero1, hero2); !compatible) {
return compatible;
}
const Shape& l1 = get_loop_shape(hero1);
const Shape& l2 = get_loop_shape(hero2);
bool accept_unequal_shape = !l1.IsTuple() && !l2.IsTuple();
if (!ShapeUtil::EqualIgnoringElementType(l1, l2) &&
(!accept_unequal_shape ||
!ShapeUtil::IsReshapeOrTransposeBitcast(l1, l2,
true))) {
return "different loop shapes";
}
return {};
}
bool IsInputFusibleScatter(const HloInstruction& instr) {
if (instr.opcode() == HloOpcode::kScatter ||
(instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kInput &&
instr.fused_expression_root()->opcode() == HloOpcode::kScatter)) {
return true;
}
return false;
}
bool IsInputFusible(const HloInstruction& instr) {
return instr.IsFusible() &&
(IsInputFusibleReduction(instr) || IsInputFusibleScatter(instr) ||
IsInputFusibleTranspose(instr));
}
bool IsUniversallyLoopFusible(const HloInstruction& instr) {
if (instr.IsElementwise() && instr.operand_count() > 0 &&
instr.opcode() != HloOpcode::kCopy) {
return true;
}
switch (instr.opcode()) {
case HloOpcode::kCopy:
return !GetDescriptionForTiledTransposeEmitter(instr, instr).has_value();
case HloOpcode::kFusion:
return instr.fusion_kind() == HloInstruction::FusionKind::kLoop;
case HloOpcode::kBitcast:
case HloOpcode::kBroadcast:
case HloOpcode::kConcatenate:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kGather:
case HloOpcode::kPad:
case HloOpcode::kReduceWindow:
case HloOpcode::kReshape:
case HloOpcode::kReverse:
case HloOpcode::kSlice:
case HloOpcode::kTranspose:
return true;
default:
return false;
}
}
bool IsLoopFusibleAsConsumer(const HloInstruction& instr) {
if (!instr.IsFusible()) return false;
if (instr.opcode() == HloOpcode::kBitcast) return false;
if (instr.opcode() == HloOpcode::kReduce) return true;
if (!IsInputFusible(instr) && instr.opcode() == HloOpcode::kFusion &&
instr.fusion_kind() == HloInstruction::FusionKind::kInput) {
return true;
}
return IsUniversallyLoopFusible(instr);
}
bool IsLoopFusibleAsProducer(const HloInstruction& instr) {
if (!instr.IsFusible()) return false;
switch (instr.opcode()) {
case HloOpcode::kIota:
case HloOpcode::kConstant:
return true;
case HloOpcode::kReduce:
return !instr.shape().IsTuple();
default:
return IsUniversallyLoopFusible(instr);
}
}
static bool AllSatisfy(const HloInstruction& instr,
const HloPredicate& predicate) {
if (instr.opcode() != HloOpcode::kFusion) {
return predicate(&instr);
}
return absl::c_all_of(
instr.fused_instructions(), [&](const HloInstruction* i) {
return i->opcode() == HloOpcode::kParameter || predicate(i);
});
}
FusionDecision CanEmitInputFusedScatter(const HloInstruction& producer,
const HloInstruction& consumer) {
if (IsInputFusibleScatter(producer)) {
return "do not fuse into the output of scatter";
}
if (!IsInputFusibleScatter(consumer)) {
return {};
}
const HloInstruction* inplace_operand;
if (consumer.opcode() == HloOpcode::kFusion) {
const HloInstruction* scatter = consumer.fused_expression_root();
CHECK_EQ(scatter->opcode(), HloOpcode::kScatter);
CHECK_EQ(scatter->operand(0)->opcode(), HloOpcode::kParameter);
inplace_operand = consumer.operand(scatter->operand(0)->parameter_number());
} else {
inplace_operand = consumer.operand(0);
}
if (inplace_operand == &producer) {
return "do not fuse into the in-place operand of scatter";
}
if (absl::c_linear_search(producer.operands(), inplace_operand)) {
return "Producer uses the in-place operand of a scatter";
}
return {};
}
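// The checks above preserve scatter's in-place update semantics: a scatter
// may not act as the producer (nothing is fused onto its output), and when
// the consumer is a scatter or scatter fusion, the producer may neither be
// the scatter's in-place operand nor read that operand, most likely because
// the fused computation would otherwise alias a buffer that is being updated
// in place.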
FusionDecision IsProducerConsumerFusible(const HloInstruction& producer,
const HloInstruction& consumer) {
if (!IsLoopFusibleAsProducer(producer) &&
!IsInputFusibleTranspose(producer)) {
return "the producer is not loop-fusible";
}
if (IsInputFusibleReduction(producer)) {
if (!producer.GetModule()
->config()
.debug_options()
.xla_gpu_enable_reduction_epilogue_fusion()) {
return "Reduction epilogue fusion is not enabled.";
}
const HloInstruction& reduce_hero =
producer.opcode() == HloOpcode::kFusion
? FindNonTrivialHero(*producer.fused_expression_root())
: producer;
if (!ReductionIsRaceFree(
reduce_hero.GetModule()->config(),
GetReductionKindAndContiguousComponents(reduce_hero))) {
return "Reduction output fusion only works for race free reductions";
}
if (!AllSatisfy(consumer, [](const HloInstruction* hlo) {
return IsIntermediate(hlo, 1);
})) {
return "Reductions from/to continuous dims epilogue not fusible";
}
if (producer.user_count() > 1) {
return "reduction output fusion only works for single user";
}
}
if (auto can_fuse = CanEmitInputFusedScatter(producer, consumer); !can_fuse) {
return can_fuse;
}
if (!IsInputFusible(consumer) && !IsLoopFusibleAsConsumer(consumer)) {
return "the consumer is not input-fusible and not loop-fusible";
}
if (producer.IsMultiOutputFusion()) {
return "the producer is not fusible as it is a multi-output fusion";
}
if (producer.opcode() == HloOpcode::kConstant &&
(!ShapeUtil::IsEffectiveScalar(producer.shape()) ||
consumer.opcode() != HloOpcode::kFusion)) {
return "not fusing constant";
}
return InstructionFusion::ShouldFuseInPlaceOp(&producer, &consumer);
}
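// Decision order above: the producer must be loop-fusible or a tiled
// transpose; reduction producers additionally require the reduction-epilogue
// flag, a race-free reduction, a consumer made of intermediate ops, and a
// single user; the scatter constraints are re-checked; the consumer must be
// input- or loop-fusible; multi-output producers are rejected; constants are
// only fused when they are effective scalars feeding a fusion; and finally
// the generic in-place-op check from InstructionFusion applies.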
FusionDecision IsProducerMultiOutputFusible(const HloInstruction& producer) {
if (producer.IsMultiOutputFusion()) {
return "Producer is a multi-output fusion";
} | #include "xla/service/gpu/gpu_fusible.h"
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
using ::testing::ElementsAre;
using GpuFusibleTest = HloTestBase;
const char kModulePrefix[] = R"(
HloModule test_module
scalar_add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
})";
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_ElementwiseProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
p0 = f32[2,2,2]{2,1,0} parameter(0)
c0 = f32[] constant(0)
exp = f32[2,2,2]{2,1,0} exponential(p0)
ROOT reduce = f32[2,2]{1,0} reduce(exp, c0), dimensions={2}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* exp =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(exp->opcode(), HloOpcode::kExp);
EXPECT_FALSE(IsPhysicallyTransposing(*exp));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_MixedLayoutProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
copy = f16[128,1024,32,32]{1,3,2,0} copy(p1.1)
c0 = f16[] constant(0)
broadcast = f16[128,1024,32,32]{1,3,2,0} broadcast(c0), dimensions={}
greater-than = pred[128,1024,32,32]{1,3,2,0} compare(copy, broadcast), direction=GT
ROOT root = f16[128,1024,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1024]{0}, f16[128,1024,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(1);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);
EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest,
IsPhysicallyTransposing_MixedLayoutProducerWithTrivialDim) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
mixed_input_layouts_computation {
p0.1 = f16[128,1,32,32]{1,3,2,0} parameter(0)
p1.1 = f16[128,1,32,32]{3,2,1,0} parameter(1)
bitcast = f16[128,1,32,32]{1,3,2,0} bitcast(p1.1)
c0 = f16[] constant(0)
broadcast = f16[128,1,32,32]{1,3,2,0} broadcast(c0), dimensions={}
greater-than = pred[128,1,32,32]{1,3,2,0} compare(bitcast, broadcast), direction=GT
ROOT root = f16[128,1,32,32]{1,3,2,0} select(greater-than, p0.1, broadcast)
}
fused_reduce {
p0.2 = f16[128,1,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1,32,32]{1,3,2,0} parameter(0)
p1 = f16[128,1,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=mixed_input_layouts_computation
reduce_fusion = f32[1]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
ROOT root = (f32[1]{0}, f16[128,1,32,32]{1,3,2,0}) tuple(reduce_fusion, loop_fusion)
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(1);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kSelect);
EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_CopyProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce {
p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
c0.1 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
copy = f32[128,1024,32,32]{1,3,2,0} copy(p0)
ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* copy =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(copy->opcode(), HloOpcode::kCopy);
EXPECT_TRUE(IsPhysicallyTransposing(*copy));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_PhysicalTranspose) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduce {
p0.1 = f32[1024,128,32,32]{3,2,1,0} parameter(0)
c0.1 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(p0.1, c0.1), dimensions={1,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
copy = f32[1024,128,32,32]{3,2,1,0} transpose(p0), dimensions={1,0,2,3}
ROOT reduce_fusion = f32[1024]{0} fusion(copy), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* transpose =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(transpose->opcode(), HloOpcode::kTranspose);
EXPECT_TRUE(IsPhysicallyTransposing(*transpose));
}
TEST_F(GpuFusibleTest, IsPhysicallyTransposing_LayoutChangingFusionProducer) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
layout_changing_computation {
p0.1 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
p1.1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
c0 = f16[] constant(0)
broadcast = f16[128,1024,32,32]{3,2,1,0} broadcast(c0), dimensions={}
greater-than = pred[128,1024,32,32]{3,2,1,0} compare(p1.1, broadcast), direction=GT
select = f16[128,1024,32,32]{3,2,1,0} select(greater-than, p0.1, broadcast)
ROOT root = f16[128,1024,32,32]{1,3,2,0} copy(select)
}
fused_reduce {
p0.2 = f16[128,1024,32,32]{1,3,2,0} parameter(0)
convert = f32[128,1024,32,32]{1,3,2,0} convert(p0.2)
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(convert, c0.2), dimensions={0,2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f16[128,1024,32,32]{3,2,1,0} parameter(0)
p1 = f16[128,1024,32,32]{3,2,1,0} parameter(1)
loop_fusion = f16[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=layout_changing_computation
ROOT reduce_fusion = f32[1024]{0} fusion(loop_fusion), kind=kInput, calls=fused_reduce
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kCopy);
EXPECT_TRUE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest,
IsPhysicallyTransposing_ConsiderMaximumTrueRanksParamsOnly) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
broadcasting_computation {
p0.1 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
p1.1 = f32[1,128,1,1]{3,2,1,0} parameter(1)
reshape = f32[128]{0} reshape(p1.1)
broadcast = f32[128,1024,32,32]{1,3,2,0} broadcast(reshape), dimensions={0}
ROOT add = f32[128,1024,32,32]{1,3,2,0} add(p0.1, broadcast)
}
ENTRY entry {
p0 = f32[128,1024,32,32]{1,3,2,0} parameter(0)
p1 = f32[1,128,1,1]{3,2,1,0} parameter(1)
loop_fusion = f32[128,1024,32,32]{1,3,2,0} fusion(p0, p1), kind=kLoop, calls=broadcasting_computation
c0.2 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(loop_fusion, c0.2), dimensions={0,2,3}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* loop_fusion =
module->entry_computation()->root_instruction()->operand(0);
ASSERT_EQ(loop_fusion->fused_expression_root()->opcode(), HloOpcode::kAdd);
EXPECT_FALSE(IsPhysicallyTransposing(*loop_fusion));
}
TEST_F(GpuFusibleTest, TransposesMinorDimension) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1)
transpose_minor_default = f32[10,20,40,30]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2}
no_transpose_minor_default = f32[10,20,40,30]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_major_default = f32[10,30,20,40]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3}
transpose_minor_non_default = f32[10,30,20,40]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3}
no_transpose_minor_non_default = f32[10,20,40,30]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2}
transpose_major_non_default = f32[10,20,40,30]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2}
ROOT r = tuple(transpose_minor_default, no_transpose_minor_default, transpose_major_default,
transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(4)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));
}
TEST_F(GpuFusibleTest, TransposesMinorDimensionSkipTrivialDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1)
transpose_minor_default = f32[10,20,1,1]{3,2,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_nontrivial_minor_default = f32[10,1,20,1]{3,2,1,0} transpose(default_layout), dimensions={0,2,1,3}
no_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} transpose(default_layout), dimensions={0,1,3,2}
transpose_one_major_default = f32[1,20,10,1]{3,2,1,0} transpose(default_layout), dimensions={2,1,0,3}
transpose_two_major_default = f32[20,10,1,1]{3,2,1,0} transpose(default_layout), dimensions={1,0,2,3}
transpose_minor_non_default = f32[10,1,20,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,2,1,3}
no_transpose_minor_non_default = f32[10,20,1,1]{1,2,0,3} transpose(non_default_layout), dimensions={0,1,3,2}
transpose_major_non_default = f32[10,20,1,1]{1,2,3,0} transpose(non_default_layout), dimensions={0,1,3,2}
ROOT r = tuple(transpose_minor_default, transpose_nontrivial_minor_default, no_transpose_minor_default, transpose_one_major_default, transpose_two_major_default,
transpose_minor_non_default, no_transpose_minor_non_default, transpose_major_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(3)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(4)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(5)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(6)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(7)));
}
TEST_F(GpuFusibleTest, CopyTransposesMinorDimension) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,30,40]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,30,40]{1,2,3,0} parameter(1)
copy_transpose_minor_default = f32[10,20,30,40]{2,3,1,0} copy(default_layout)
copy_no_transpose_minor_default = f32[10,20,30,40]{3,2,1,0} copy(default_layout)
copy_transpose_minor_non_default = f32[10,20,30,40]{2,1,3,0} copy(non_default_layout)
copy_no_transpose_minor_non_default = f32[10,20,30,40]{1,2,3,0} copy(non_default_layout)
ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default,
copy_transpose_minor_non_default, copy_no_transpose_minor_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));
}
TEST_F(GpuFusibleTest, CopyTransposesMinorDimensionSkipTrivialDimensions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
default_layout = f32[10,20,1,1]{3,2,1,0} parameter(0)
non_default_layout = f32[10,20,1,1]{1,2,3,0} parameter(1)
copy_transpose_minor_default = f32[10,20,1,1]{2,3,1,0} copy(default_layout)
copy_no_transpose_minor_default = f32[10,20,1,1]{3,2,1,0} copy(default_layout)
copy_transpose_minor_non_default = f32[10,20,1,1]{2,0,3,1} copy(non_default_layout)
copy_no_transpose_minor_non_default = f32[10,20,1,1]{1,2,3,0} copy(non_default_layout)
ROOT r = tuple(copy_transpose_minor_default, copy_no_transpose_minor_default,
copy_transpose_minor_non_default, copy_no_transpose_minor_non_default)
})"));
auto* tuple = (*module)->entry_computation()->root_instruction();
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(0)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(1)));
EXPECT_TRUE(TransposesMinorDimension(tuple->operand(2)));
EXPECT_FALSE(TransposesMinorDimension(tuple->operand(3)));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_ReductionToVector) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
c0 = f32[] parameter(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(1)
ROOT reduce = f32[512]{0} reduce(p1, c0), dimensions={0,2,3}, to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_ElementalReduction) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
ENTRY entry {
c0 = f32[] parameter(0)
p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(1)
ROOT reduce = f32[512,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={3,0},
to_apply=scalar_add
})"))
.value();
SCOPED_TRACE(module->ToString());
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kReduce);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputInputReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = f32[128,512]{1,0} fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_SingleOutputLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0)
ROOT reduce = f32[8,5,1,1]{3,2,1,0} reduce(p1, c0), dimensions={1,3}, to_apply=scalar_add
}
ENTRY entry {
p0 = f32[8,512,5,16,1,1]{5,4,3,2,1,0} parameter(0)
ROOT fusion = f32[8,5,1,1]{3,2,1,0} fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputInputReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce.0 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
reduce.1 = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
ROOT root = (f32[128,512]{1,0}, f32[128,512]{1,0}) tuple(reduce.0, reduce.1)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[128,512]{1,0}, f32[128,512]{1,0}) fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest,
IsReduceInputFusion_MultiOutputInputReduceFusionWithExtraOutputs) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce = f32[128,512]{1,0} reduce(p1, c0), dimensions={2,3}, to_apply=scalar_add
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)
ROOT root = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[128,512]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_TRUE(IsReduceInputFusion(*reduce));
EXPECT_TRUE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, IsReduceInputFusion_MultiOutputLoopReduceFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce.0 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
reduce.1 = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
ROOT root = (f32[512,28]{1,0}, f32[512,28]{1,0}) tuple(reduce.0, reduce.1)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[512,28]{1,0}, f32[512,28]{1,0}) fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest,
IsReduceInputFusion_MultiOutputLoopFusionReduceAndElementwiseOp) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_reduction {
c0 = f32[] constant(0)
p1 = f32[128,512,28,28]{3,2,1,0} parameter(0)
reduce = f32[512,28]{1,0} reduce(p1, c0), dimensions={0,2}, to_apply=scalar_add
mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1)
ROOT root = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(reduce, mul)
}
ENTRY entry {
p0 = f32[128,512,28,28]{3,2,1,0} parameter(0)
ROOT fusion = (f32[512,28]{1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_reduction
})"))
.value();
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
ASSERT_EQ(reduce->opcode(), HloOpcode::kFusion);
EXPECT_FALSE(IsReduceInputFusion(*reduce));
EXPECT_FALSE(IsInputFusibleReduction(*reduce));
}
TEST_F(GpuFusibleTest, CustomFusionIsNotFusibleAsConsumer) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
triton_fusion {
p0 = f16[20,3]{1,0} parameter(0)
p1 = f16[3,40]{1,0} parameter(1)
dot = f16[20,40]{1,0} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT c = f16[20,40]{0,1} copy(dot)
}
ENTRY e {
p0 = f16[20,3]{1,0} parameter(0)
n = f16[20,3]{1,0} negate(p0)
p1 = f16[3,40]{1,0} parameter(1)
ROOT r = f16[20,40]{0,1} fusion(n, p1),
kind=kCustom,
calls=triton_fusion
})"));
const HloInstruction* root = module->entry_computation()->root_instruction();
EXPECT_FALSE(IsFusibleAsMultiOutputFusionRoot(*root));
}
TEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p0.1)
ROOT transpose = f32[32,64]{1,0} transpose(neg), dimensions={1,0}
}
fused_computation_2 {
p0.2 = f32[32,64]{1,0} parameter(0)
neg = f32[32,64]{1,0} negate(p0.2)
ROOT add = f32[32,64]{1,0} add(neg, neg)
}
ENTRY entry {
p0 = f32[64,32]{1,0} parameter(0)
fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
ROOT fusion.2 = f32[32,64]{1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_2 = fusion_1->operand(0);
EXPECT_TRUE(FusionHeroesAreCompatible(fusion_1->fused_expression_root(),
fusion_2->fused_expression_root()));
EXPECT_TRUE(FusionHeroesAreCompatible(fusion_2->fused_expression_root(),
fusion_1->fused_expression_root()));
}
TEST_F(GpuFusibleTest, FusionHeroesAreCompatible_TransposeFusionNotCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[64,32]{1,0} parameter(0)
neg = f32[64,32]{1,0} negate(p0.1)
ROOT transpose = f32[32,64]{1,0} transpose(neg), dimensions={1,0}
}
fused_computation_2 {
p0.2 = f32[32,64]{1,0} parameter(0)
broadcast = f32[32,64,4]{2,1,0} broadcast(p0.2), dimensions={0,1}
ROOT add = f32[32,64,4]{2,1,0} add(broadcast, broadcast)
}
ENTRY entry {
p0 = f32[64,32]{1,0} parameter(0)
fusion.1 = f32[32,64]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_1
ROOT fusion.2 = f32[32,64,4]{2,1,0} fusion(fusion.1), kind=kLoop, calls=fused_computation_2
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction();
const HloInstruction* fusion_2 = fusion_1->operand(0);
EXPECT_FALSE(FusionHeroesAreCompatible(fusion_1->fused_expression_root(),
fusion_2->fused_expression_root()));
EXPECT_FALSE(FusionHeroesAreCompatible(fusion_2->fused_expression_root(),
fusion_1->fused_expression_root()));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_LoopFusions) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
ROOT div = f32[6400]{0} divide(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_IgnoreFpPrecision) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
ROOT convert = f16[6400]{0} convert(p0.2)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f16[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f16[6400]{0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_BitcastCompatible) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
fused_computation_2 {
p0.2 = f32[6400]{0} parameter(0)
bitcast = f32[1,6400]{1,0} bitcast(p0.2)
ROOT convert = f16[1,6400]{1,0} convert(bitcast)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f16[1,6400]{1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
ROOT root = (f32[6400]{0}, f16[1,6400]{1,0}) tuple(fusion.1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Reduce) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(0)
reduce = f32[] reduce(p0, const.2), dimensions={0}, to_apply=scalar_add
ROOT root = (f32[6400]{0}, f32[]) tuple(fusion.1, reduce)
})"))
.value();
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* reduce =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *reduce));
}
TEST_F(GpuFusibleTest, ShapesCompatibleForMultiOutputFusion_Elementwise) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[6400]{0} parameter(0)
ROOT mul = f32[6400]{0} multiply(p0.1, p0.1)
}
ENTRY entry {
p0 = f32[6400]{0} parameter(0)
fusion.1 = f32[6400]{0} fusion(p0), kind=kLoop, calls=fused_computation_1
const.2 = f32[] constant(1)
broadcast = f32[6400]{0} broadcast(const.2), dimensions={}
div = f32[6400]{0} divide(p0, broadcast)
ROOT root = (f32[6400]{0}, f32[6400]{0}) tuple(fusion.1, div)
})"))
.value();
const HloInstruction* fusion =
module->entry_computation()->root_instruction()->operand(0);
const HloInstruction* div =
module->entry_computation()->root_instruction()->operand(1);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion, *div));
}
TEST_F(GpuFusibleTest,
ShapesCompatibleForMultiOutputFusion_MultiOutputLoopFusion) {
auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"(
fused_computation_1 {
p0.1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
mul = f32[8,1,5,16,1,1]{5,4,3,2,1,0} multiply(p0.1, p0.1)
exp = f32[8,1,5,16,1,1]{5,4,3,2,1,0} exponential(p0.1)
ROOT tuple = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(mul, exp)
}
fused_computation_2 {
p0.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
const.2 = f32[] constant(0)
broadcast = f32[8,1,5,16,1,1]{5,4,3,2,1,0} broadcast(const.2), dimensions={}
ROOT add = f32[8,1,5,16,1,1]{5,4,3,2,1,0} add(p0.2, broadcast)
}
ENTRY entry {
p0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} parameter(0)
fusion.1 = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) fusion(p0), kind=kLoop, calls=fused_computation_1
fusion.2 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation_2
gte0 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=0
gte1 = f32[8,1,5,16,1,1]{5,4,3,2,1,0} get-tuple-element(fusion.1), index=1
ROOT root = (f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}, f32[8,1,5,16,1,1]{5,4,3,2,1,0}) tuple(gte0, gte1, fusion.2)
})"))
.value();
const HloInstruction* fusion_1 =
module->entry_computation()->root_instruction()->operand(0)->operand(0);
const HloInstruction* fusion_2 =
module->entry_computation()->root_instruction()->operand(2);
EXPECT_NE(fusion_1, fusion_2);
EXPECT_TRUE(ShapesCompatibleForMultiOutputFusion(*fusion_1, *fusion_2));
}
TEST_F(GpuFusibleTest,
       ShapesCompatibleForMultiOutputFu
#ifndef XLA_SERVICE_GPU_GEMM_FUSION_AUTOTUNER_H_
#define XLA_SERVICE_GPU_GEMM_FUSION_AUTOTUNER_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla.pb.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
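// HLO pass that autotunes GEMM fusions: for each fusion it compiles and
// profiles candidate backends (Triton tilings, cuBLAS, cuDNN plans) and
// records the winning choice so later passes can apply it.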
class GemmFusionAutotuner : public HloModulePass {
public:
explicit GemmFusionAutotuner(const AutotuneConfig& config,
const int32_t toolkit_version,
tsl::thread::ThreadPool* thread_pool,
const MultiProcessKeyValueStore& key_value_store)
: config_(config),
toolkit_version_(toolkit_version),
thread_pool_(thread_pool),
key_value_store_(key_value_store) {}
absl::string_view name() const override { return "triton-autotuner"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
const AutotuneConfig config_;
const int32_t toolkit_version_;
tsl::thread::ThreadPool* thread_pool_;
MultiProcessKeyValueStore key_value_store_;
};
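// Implementation helper behind the pass: generates candidate configs,
// compiles them ahead of time, profiles the resulting executables, and
// writes the best result per fusion into the autotune cache.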
class GemmFusionAutotunerImpl {
public:
GemmFusionAutotunerImpl(const AutotuneConfig config,
const int32_t toolkit_version,
const DebugOptions debug_options,
tsl::thread::ThreadPool* thread_pool)
: config_(std::move(config)),
toolkit_version_(toolkit_version),
debug_options_(std::move(debug_options)),
thread_pool_(thread_pool) {}
struct CuBlasConfig {
bool operator<(const CuBlasConfig& other) const;
};
struct CuDnnConfig {
int64_t plan_id;
bool operator<(const CuDnnConfig& other) const;
};
using Config = std::variant<CuBlasConfig, CuDnnConfig, TritonGemmConfig>;
using TilingConfigs =
std::vector<std::pair<const HloFusionInstruction*, std::vector<Config>>>;
struct ExecutableCandidate {
Config config;
std::unique_ptr<Executable> executable;
};
absl::StatusOr<std::vector<Config>> GenerateConfigs(
const HloFusionInstruction& fusion);
absl::StatusOr<std::vector<TritonGemmConfig>> GenerateTritonConfigs(
const HloDotInstruction& dot);
absl::StatusOr<absl::flat_hash_map<const HloFusionInstruction*,
std::vector<ExecutableCandidate>>>
CompileAll(AutotunerCompileUtil& compile_util, const TilingConfigs& task);
absl::StatusOr<std::vector<AutotuneResult>> Profile(
AutotunerCompileUtil& compile_util, const HloFusionInstruction& fusion,
absl::Span<const ExecutableCandidate> candidates);
absl::Status Autotune(
AutotunerCompileUtil& compile_util, const TilingConfigs& gemm_config_sets,
absl::flat_hash_map<AutotuneCacheKey, uint64_t> fusion_count_map);
const AutotuneConfig& GetConfig() const { return config_; }
bool IsAutotuningEnabled() const;
static std::string ToString(const Config& config);
private:
se::CudaComputeCapability GetComputeCapability() const {
return std::get<se::CudaComputeCapability>(
config_.GetGpuComputeCapability());
}
std::vector<TritonGemmConfig> GetDefaultTritonConfigs() const;
std::vector<TritonGemmConfig> GetExhaustiveTritonConfigs() const;
const AutotuneConfig config_;
const int32_t toolkit_version_;
const DebugOptions debug_options_;
tsl::thread::ThreadPool* thread_pool_;
std::vector<TritonGemmConfig> triton_configs_;
};
}
}
#endif
#include "xla/service/gpu/gemm_fusion_autotuner.h"
#include <algorithm>
#include <array>
#include <atomic>
#include <cstdint>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "third_party/gpus/cuda/include/cublas_v2.h"
#include "xla/autotuning.pb.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/float_normalization.h"
#include "xla/service/gpu/autotuner_compile_util.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/buffer_comparator.h"
#include "xla/service/gpu/cudnn_fusion_compiler.h"
#include "xla/service/gpu/fusion_wrapper.h"
#include "xla/service/gpu/gemm_rewriter.h"
#include "xla/service/gpu/gpu_float_support.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/instruction_fusion.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/priority_fusion.h"
#include "xla/service/gpu/split_k_gemm_rewriter.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shaped_buffer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/redzone_allocator.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/tsl/util/proto/proto_utils.h"
#include "xla/util.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/bits.h"
#include "tsl/platform/blocking_counter.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
#include "tsl/profiler/lib/scoped_annotation.h"
namespace xla {
namespace gpu {
using Config = GemmFusionAutotunerImpl::Config;
using TilingConfigs = GemmFusionAutotunerImpl::TilingConfigs;
using ProfilingOutput = AutotunerCompileUtil::ProfilingOutput;
namespace {
constexpr int kMinTileSize = 16;
constexpr TritonGemmConfig kDefaultGemmTiling = {32, 32, 32, 1, 1, 4};
constexpr int kMaxWavesForSplitK = 5;
constexpr std::array<int, 6> kBlockSizes = {16, 32, 64, 128, 256, 512};
constexpr std::array<int, 4> kNumStages = {1, 2, 3, 4};
constexpr std::array<int, 4> kNumWarps = {2, 4, 8, 16};
constexpr std::array<int, 5> kSplitK = {1, 2, 4, 8, 16};
constexpr std::array<int, 5> kNumCtas = {1, 2, 4, 8, 16};
using AutoTuneCacheKeyCount = absl::flat_hash_map<AutotuneCacheKey, uint64_t>;
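// Applies cached autotuning decisions to fusions: fills in the chosen Triton
// tiling, rewrites cuBLAS winners into plain calls, tags cuDNN winners with
// their plan id, and splits K when the selected tiling requires it.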
class GemmFusionAutotunerVisitor : public DfsHloRewriteVisitor {
public:
explicit GemmFusionAutotunerVisitor(const AutotuneConfig& config)
: config_(config) {}
absl::Status HandleFusion(HloInstruction* hlo) override {
TF_ASSIGN_OR_RETURN(auto gpu_config,
hlo->backend_config<GpuBackendConfig>());
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
if (backend_config.kind() != kTritonGemmFusionKind &&
backend_config.kind() != kCuDnnFusionKind) {
return absl::OkStatus();
}
VLOG(4) << "Processing " << hlo->ToString();
if (!backend_config.has_triton_gemm_config() &&
!backend_config.has_cudnn_fusion_config()) {
TF_ASSIGN_OR_RETURN(
AutotuneResult autotune_result,
AutotunerUtil::Autotune(
hlo, config_, [&]() -> absl::StatusOr<AutotuneResult> {
if (config_.IsDeviceless()) {
return absl::InternalError(absl::StrCat(
"Expect autotune result cache hit for deviceless "
"compilation (HLO: ",
hlo->ToString(), ")"));
}
return absl::InternalError("Expect autotune result cache hit.");
}));
VLOG(4) << "Result: " << autotune_result.ShortDebugString();
if (autotune_result.has_triton()) {
*backend_config.mutable_triton_gemm_config() = autotune_result.triton();
TF_RETURN_IF_ERROR(hlo->set_backend_config(gpu_config));
} else if (autotune_result.has_gemm()) {
HloComputation* const computation = hlo->parent();
HloInstruction* const call = computation->AddInstruction(
HloInstruction::CreateCall(hlo->shape(), hlo->operands(),
hlo->fused_instructions_computation()));
TF_RETURN_IF_ERROR(computation->ReplaceInstruction(hlo, call));
hlo = call;
} else {
CHECK(autotune_result.has_algorithm());
backend_config.set_kind(std::string(kCuDnnFusionKind));
backend_config.mutable_cudnn_fusion_config()->set_plan_id(
autotune_result.algorithm().algo_id());
TF_RETURN_IF_ERROR(hlo->set_backend_config(gpu_config));
}
}
if (backend_config.has_triton_gemm_config()) {
TF_ASSIGN_OR_RETURN(
const TritonGemmConfig config,
TritonGemmConfig::FromProto(backend_config.triton_gemm_config()));
if (config.split_k > 1) {
TF_RETURN_IF_ERROR(MakeDotSplitKBatch(hlo, config));
}
}
MarkAsChanged();
return absl::OkStatus();
}
private:
AutotuneConfig config_;
};
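// Walks all non-fusion computations and, for every GEMM fusion that still
// lacks a backend config and has no cached autotune result, collects the set
// of candidate configs to try (deduplicated by autotune cache key).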
class GemmConfigSetCollector : public ConstDfsHloVisitorWithDefault {
public:
explicit GemmConfigSetCollector(GemmFusionAutotunerImpl* impl)
: impl_(impl) {}
absl::StatusOr<TilingConfigs> CollectGemmConfigSets(
const HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads = {}) {
error_out_on_cache_miss_ =
module->config()
.debug_options()
.xla_gpu_require_complete_aot_autotune_results();
gemm_config_sets_.clear();
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_RETURN_IF_ERROR(computation->Accept(this));
}
return std::move(gemm_config_sets_);
}
AutoTuneCacheKeyCount GetFusionsCount() {
return std::move(fusion_count_map_);
}
absl::Status HandleFusion(const HloInstruction* hlo) override {
const HloFusionInstruction* fusion = Cast<HloFusionInstruction>(hlo);
TF_ASSIGN_OR_RETURN(auto gpu_config,
hlo->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
AutotuneCacheKey key = AutotunerUtil::GetKey(hlo, impl_->GetConfig());
auto [iterator, inserted] = fusion_count_map_.insert({key, 1});
if (!inserted) {
++(iterator->second);
}
TF_ASSIGN_OR_RETURN(bool is_in_cache,
AutotunerUtil::IsInCache(key, impl_->GetConfig()));
if (is_in_cache || handled_fusions_.contains(key)) {
return absl::OkStatus();
}
bool missing_config = (backend_config.kind() == kTritonGemmFusionKind &&
!backend_config.has_triton_gemm_config()) ||
(backend_config.kind() == kCuDnnFusionKind &&
!backend_config.has_cudnn_fusion_config());
if (missing_config) {
if (error_out_on_cache_miss_) {
return absl::NotFoundError(absl::StrCat(
"Complete autotuning results are required, but no cache result "
"found for key: ",
key.ToString()));
}
TF_ASSIGN_OR_RETURN(std::vector<Config> configs,
impl_->GenerateConfigs(*fusion));
gemm_config_sets_.push_back({fusion, std::move(configs)});
}
handled_fusions_.insert(key);
return absl::OkStatus();
}
absl::Status DefaultAction(const HloInstruction* hlo) override {
return absl::OkStatus();
}
private:
bool error_out_on_cache_miss_;
GemmFusionAutotunerImpl* impl_;
TilingConfigs gemm_config_sets_;
AutoTuneCacheKeyCount fusion_count_map_;
absl::flat_hash_set<AutotuneCacheKey> handled_fusions_;
};
struct TileSizeLimit {
int block_m = 0;
int block_n = 0;
int block_k = 0;
};
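// Upper bounds for tile sizes: the next power of two of each GEMM dimension
// (M, N, K), but never smaller than kMinTileSize.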
absl::StatusOr<TileSizeLimit> GetLimits(const HloDotInstruction& dot) {
TF_ASSIGN_OR_RETURN(int64_t non_contracting_index_lhs,
NonContractingDimensionIndex(dot, 0));
TF_ASSIGN_OR_RETURN(int64_t non_contracting_index_rhs,
NonContractingDimensionIndex(dot, 1));
TF_ASSIGN_OR_RETURN(int64_t contracting_index,
ContractingDimensionIndex(dot, 1));
const int max_m = tsl::NextPowerOfTwoS64(
dot.operand(0)->shape().dimensions(non_contracting_index_lhs));
const int max_n = tsl::NextPowerOfTwoS64(
dot.operand(1)->shape().dimensions(non_contracting_index_rhs));
const int max_k = tsl::NextPowerOfTwoS64(
dot.operand(1)->shape().dimensions(contracting_index));
return TileSizeLimit{
std::max(max_m, kMinTileSize),
std::max(max_n, kMinTileSize),
std::max(max_k, kMinTileSize),
};
}
int GetLogEveryN() { return VLOG_IS_ON(3) ? 100 : 1000; }
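// Extracts the fusion into a standalone module and applies the candidate
// Triton config to it. For split-K configs the dot is additionally rewritten
// into a batched form, followed by float normalization and fusion passes.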
absl::StatusOr<std::unique_ptr<HloModule>> TritonGemmAutotuneExtractor(
const TritonGemmConfig& config,
const se::DeviceDescription& gpu_device_info,
const HloFusionInstruction* fusion, DebugOptions debug_opts,
bool allow_filtering_kernels_spilling_registers) {
std::unique_ptr<HloModule> new_module =
ExtractInstructionIntoNewModule(*fusion);
debug_opts.clear_xla_gpu_enable_command_buffer();
if (!allow_filtering_kernels_spilling_registers) {
debug_opts.set_xla_gpu_filter_kernels_spilling_registers_on_autotuning(
false);
}
new_module->mutable_config().set_debug_options(debug_opts);
HloComputation* entry_computation = new_module->entry_computation();
HloInstruction* cloned_dot_fusion = entry_computation->root_instruction();
TF_ASSIGN_OR_RETURN(auto gpu_config,
cloned_dot_fusion->backend_config<GpuBackendConfig>());
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
*backend_config.mutable_triton_gemm_config() = config.ToProto();
TF_RETURN_IF_ERROR(cloned_dot_fusion->set_backend_config(gpu_config));
if (config.split_k > 1) {
TF_RETURN_IF_ERROR(MakeDotSplitKBatch(cloned_dot_fusion, config));
GpuFloatSupport bf16_support(gpu_device_info.cuda_compute_capability(),
BF16);
FloatNormalization float_normalization(&bf16_support);
TF_RETURN_IF_ERROR(float_normalization.Run(new_module.get()).status());
auto shape_size_function = [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
GpuPriorityFusion priority_fusion(
nullptr, gpu_device_info,
GpuHloCostAnalysis::Options{shape_size_function,
{},
true});
TF_RETURN_IF_ERROR(priority_fusion.Run(new_module.get()).status());
FusionWrapper fusion_wrapper;
TF_RETURN_IF_ERROR(fusion_wrapper.Run(new_module.get()).status());
}
return new_module;
}
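// Builds the cuBLAS reference module: extracts the fused computation and runs
// the GEMM rewriter plus instruction fusion on it. Multi-pass BF16 dot
// algorithms (X3/X6) are replaced with plain F32 before rewriting.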
absl::StatusOr<std::unique_ptr<HloModule>> CublasGemmAutotuneExtractor(
const AutotuneConfig& config, const int32_t toolkit_version,
const HloFusionInstruction* fusion, const DebugOptions& debug_opts) {
const HloComputation* fusion_computation =
fusion->called_computations().at(0);
std::unique_ptr<HloModule> new_module =
ExtractComputationIntoNewModule(*fusion_computation);
new_module->mutable_config().set_debug_options(debug_opts);
auto* dot = hlo_query::GetFirstInstructionWithOpcode(
*new_module->entry_computation(), HloOpcode::kDot);
if (dot->precision_config().algorithm() ==
PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3 ||
dot->precision_config().algorithm() ==
PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6) {
dot->mutable_precision_config()->set_algorithm(
PrecisionConfig::ALG_DOT_F32_F32_F32);
}
for (bool fp8 : {true, false}) {
GemmRewriter rewriter(config.GetGpuComputeCapability(), toolkit_version,
fp8);
GpuInstructionFusion fusion_pass(
false, config.GetExecutor()->GetDeviceDescription());
TF_RETURN_IF_ERROR(rewriter.Run(new_module.get()).status());
TF_RETURN_IF_ERROR(fusion_pass.Run(new_module.get()).status());
}
return new_module;
}
absl::StatusOr<std::unique_ptr<HloModule>> FusionExtractor(
const HloFusionInstruction& fusion, const DebugOptions& debug_opts) {
std::unique_ptr<HloModule> module = ExtractInstructionIntoNewModule(fusion);
module->mutable_config().set_debug_options(debug_opts);
return module;
}
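// Extracts the fusion and marks it as a cuDNN fusion with the given plan id.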
absl::StatusOr<std::unique_ptr<HloModule>> CuDnnFusionExtractor(
const HloFusionInstruction& fusion, const DebugOptions& debug_opts,
const int plan_id) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
FusionExtractor(fusion, debug_opts));
GpuBackendConfig gpu_config;
FusionBackendConfig& backend_config =
*gpu_config.mutable_fusion_backend_config();
backend_config.set_kind(std::string(kCuDnnFusionKind));
backend_config.mutable_cudnn_fusion_config()->set_plan_id(plan_id);
TF_RETURN_IF_ERROR(
module->entry_computation()->root_instruction()->set_backend_config(
gpu_config));
return module;
}
bool IsFusionKind(const HloInstruction& hlo, absl::string_view kind) {
auto gpu_config = hlo.backend_config<GpuBackendConfig>();
if (!gpu_config.ok()) {
return false;
}
return gpu_config->fusion_backend_config().kind() == kind;
}
int GetCuDnnPlanCount(const HloInstruction& hlo,
const AutotuneConfig& autotune_config) {
if (auto gpu_config = hlo.backend_config<GpuBackendConfig>();
!gpu_config.ok() ||
gpu_config->fusion_backend_config().has_cudnn_fusion_config()) {
return {};
}
return CuDnnFusionCompiler::GetAvailablePlanCount(
*autotune_config.GetExecutor(), *DynCast<HloFusionInstruction>(&hlo));
}
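// Converts a candidate config into an AutotuneResult proto: the default
// cuBLAS algorithm, a cuDNN plan id, or a Triton tiling.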
AutotuneResult FromConfig(const Config& config) {
AutotuneResult res;
if (std::holds_alternative<GemmFusionAutotunerImpl::CuBlasConfig>(config)) {
res.mutable_gemm()->set_algorithm(CUBLAS_GEMM_DEFAULT);
} else if (std::holds_alternative<GemmFusionAutotunerImpl::CuDnnConfig>(
config)) {
res.mutable_algorithm()->set_algo_id(
std::get<GemmFusionAutotunerImpl::CuDnnConfig>(config).plan_id);
} else if (std::holds_alternative<TritonGemmConfig>(config)) {
*res.mutable_triton() = std::get<TritonGemmConfig>(config).ToProto();
} else {
LOG(FATAL) << "Unsupported config type: " << config.index();
}
return res;
}
absl::Status DumpOriginalFusion(AutotunerCompileUtil& util,
const HloFusionInstruction& fusion,
int fusion_id) {
TF_ASSIGN_OR_RETURN(std::unique_ptr<HloModule> module,
util.ExtractModule([&](const DebugOptions& debug_opts) {
return FusionExtractor(fusion, debug_opts);
}));
module->set_name(std::string(fusion.name()));
DumpToFileInDirOrStdout(
*fusion.GetModule(),
"",
absl::StrCat("gemm_fusion_", fusion_id, ".", module->name(), ".txt"),
module->ToString());
return absl::OkStatus();
}
absl::Status DumpAutotunedFusion(const AutotuneConfig& autotune_config,
const int32_t toolkit_version,
AutotunerCompileUtil& util,
const AutotuneResult result,
const HloFusionInstruction* fusion,
int fusion_id) {
TritonGemmConfig triton_gemm_config;
if (result.has_triton()) {
TF_ASSIGN_OR_RETURN(triton_gemm_config,
TritonGemmConfig::FromProto(result.triton()));
}
const se::DeviceDescription& device_desc =
autotune_config.GetExecutor()->GetDeviceDescription();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
util.ExtractModule([&](const DebugOptions& debug_opts) {
if (result.has_algorithm()) {
return CuDnnFusionExtractor(*fusion, debug_opts,
result.algorithm().algo_id());
} else if (result.has_triton()) {
return TritonGemmAutotuneExtractor(
triton_gemm_config, device_desc, fusion, debug_opts,
true);
} else if (result.has_gemm()) {
return CublasGemmAutotuneExtractor(autotune_config, toolkit_version,
fusion, debug_opts);
} else {
LOG(FATAL) << "Unknown result type: " << result.DebugString();
}
}));
module->set_name(std::string(fusion->name()));
DumpToFileInDirOrStdout(
*fusion->GetModule(),
"",
absl::StrCat("gemm_fusion_", fusion_id, ".", module->name(),
".optimized.txt"),
module->ToString());
return absl::OkStatus();
}
std::string Serialize(const Config& config) {
if (auto triton_config = std::get_if<TritonGemmConfig>(&config)) {
tsl::protobuf::TextFormat::Printer printer;
printer.SetSingleLineMode(true);
std::string result;
printer.PrintToString(triton_config->ToProto(), &result);
return result;
}
return GemmFusionAutotunerImpl::ToString(config);
}
}
bool GemmFusionAutotunerImpl::CuBlasConfig::operator<(
const CuBlasConfig& other) const {
return false;
}
bool GemmFusionAutotunerImpl::CuDnnConfig::operator<(
const CuDnnConfig& other) const {
return plan_id < other.plan_id;
}
bool GemmFusionAutotunerImpl::IsAutotuningEnabled() const {
return debug_options_.xla_gpu_autotune_level() > 0 &&
!debug_options_.xla_gpu_deterministic_ops();
}
std::string GemmFusionAutotunerImpl::ToString(const Config& config) {
if (std::holds_alternative<TritonGemmConfig>(config)) {
return std::get<TritonGemmConfig>(config).ToString();
} else if (std::holds_alternative<CuDnnConfig>(config)) {
return absl::StrFormat("cuDNN plan %d",
std::get<CuDnnConfig>(config).plan_id);
} else if (std::holds_alternative<CuBlasConfig>(config)) {
return "reference (cublas)";
} else {
LOG(FATAL) << "Unsupported config type: " << config.index();
}
}
absl::StatusOr<std::vector<Config>> GemmFusionAutotunerImpl::GenerateConfigs(
const HloFusionInstruction& fusion) {
const HloDotInstruction* dot =
Cast<HloDotInstruction>(hlo_query::GetFirstInstructionWithOpcode(
*fusion.called_computations().at(0), HloOpcode::kDot));
std::vector<Config> configs;
if (algorithm_util::IsSupportedByCublasOrCublasLt(
dot->precision_config().algorithm()) &&
!dot->sparse_operands() && IsAutotuningEnabled()) {
configs.push_back(CuBlasConfig{});
}
bool is_hopper =
!config_.IsDeviceless() && GetComputeCapability().IsAtLeastHopper();
bool is_cudnn_enabled =
debug_options_.xla_gpu_cudnn_gemm_fusion_level() > 0 && is_hopper &&
GetDnnVersionInfoOrDefault(config_.GetExecutor()).major_version() >= 9;
if ((IsFusionKind(fusion, kCuDnnFusionKind) && IsAutotuningEnabled()) ||
(IsFusionKind(fusion, kTritonGemmFusionKind) && is_cudnn_enabled &&
algorithm_util::IsSupportedByCudnn(
dot->precision_config().algorithm()) &&
!dot->sparse_operands() && IsAutotuningEnabled())) {
const int plan_count = GetCuDnnPlanCount(fusion, config_);
for (int plan_id = 0; plan_id < plan_count; ++plan_id) {
configs.push_back(CuDnnConfig{plan_id});
}
}
if (IsFusionKind(fusion, kCuDnnFusionKind)) {
if (!IsAutotuningEnabled()) {
configs.push_back(CuDnnConfig{-1});
}
return configs;
}
TF_ASSIGN_OR_RETURN(std::vector<TritonGemmConfig> triton_configs,
GenerateTritonConfigs(*dot));
for (TritonGemmConfig& config : triton_configs) {
configs.push_back(std::move(config));
}
return configs;
}
absl::StatusOr<std::vector<TritonGemmConfig>>
GemmFusionAutotunerImpl::GenerateTritonConfigs(const HloDotInstruction& dot) {
std::vector<const HloInstruction*> converts =
HloFindAll({&dot}, [&](const HloInstruction* node) {
return node->opcode() == HloOpcode::kConvert;
});
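  // Determine the narrowest element type involved in the dot, taking any
  // converts feeding it into account.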
int minBitWidth = primitive_util::BitWidth(dot.shape().element_type());
for (auto convert : converts) {
auto in_type = convert->operand(0)->shape().element_type();
auto out_type = convert->shape().element_type();
minBitWidth = std::min({minBitWidth, primitive_util::BitWidth(in_type),
primitive_util::BitWidth(out_type)});
}
std::vector<TritonGemmConfig> result_configs;
TF_ASSIGN_OR_RETURN(TileSizeLimit limits, GetLimits(dot));
if (triton_configs_.empty()) {
triton_configs_ = !IsAutotuningEnabled()
? std::vector(1, kDefaultGemmTiling)
: debug_options_.xla_gpu_exhaustive_tiling_search()
? GetExhaustiveTritonConfigs()
: GetDefaultTritonConfigs();
}
constexpr int kMinGemmElements = 32 * 32;
bool small_dot =
ShapeUtil::ElementsIn(dot.operand(0)->shape()) <= kMinGemmElements &&
ShapeUtil::ElementsIn(dot.operand(1)->shape()) <= kMinGemmElements;
std::vector<TritonGemmConfig> triton_configs =
small_dot ? std::vector(1, kDefaultGemmTiling) : triton_configs_;
const int kCoreCount =
!config_.IsDeviceless()
? config_.GetExecutor()->GetDeviceDescription().core_count()
: 100;
const int64_t kSufficientNumb
#include "xla/service/gpu/gemm_fusion_autotuner.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/autotuning.pb.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/pjrt/distributed/key_value_store_interface.h"
#include "xla/service/call_inliner.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/autotuner_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gemm_fusion.h"
#include "xla/service/gpu/gemm_rewriter.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_pass_pipeline.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/test_utils.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/tools/hlo_decomposer.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/path.h"
#include "tsl/platform/status_matchers.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/threadpool.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
using HloExtractionTest = HloTestBase;
TEST_F(HloExtractionTest, InstructionExtractionIsCorrect) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
triton_gemm_dot {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
c0 = f32[10,10] convert(p0)
ROOT dot.0 = f32[10,10] dot(c0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
s = f32[10,10] sqrt(p1)
d = f32[10,10] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
ROOT r = f32[10,10] add(d, s)
})")
.value();
std::unique_ptr<HloModule> extracted_module = ExtractInstructionIntoNewModule(
*module->entry_computation()->root_instruction()->operand(0));
module.release();
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
GmockMatch(m::Fusion(m::Parameter(), m::Parameter())));
EXPECT_EQ(extracted_module->entry_computation()->instruction_count(), 3);
TF_EXPECT_OK(VerifyHloModule(extracted_module.get(),
true,
false));
}
TEST_F(HloExtractionTest, ComputationExtractionIsCorrect) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
triton_gemm_dot {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
c0 = f32[10,10] convert(p0)
ROOT dot.0 = f32[10,10] dot(c0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY entry {
p0 = s8[10,10] parameter(0)
p1 = f32[10,10] parameter(1)
s = f32[10,10] sqrt(p1)
d = f32[10,10] fusion(p0, p1),
kind=kCustom, calls=triton_gemm_dot
ROOT r = f32[10,10] add(d, s)
})")
.value();
std::unique_ptr<HloModule> extracted_module =
ExtractComputationIntoNewModule(*module->entry_computation()
->root_instruction()
->operand(0)
->fused_instructions_computation());
module.release();
EXPECT_THAT(extracted_module->entry_computation()->root_instruction(),
GmockMatch(m::Dot(m::Convert(m::Parameter()), m::Parameter())));
EXPECT_EQ(extracted_module->entry_computation()->instruction_count(), 4);
TF_EXPECT_OK(VerifyHloModule(extracted_module.get(),
true,
false));
}
class StatelessAutotunerTest : public HloTestBase {
public:
StatelessAutotunerTest()
: HloTestBase(true,
false) {}
int32_t GetToolkitVersion() const { return CUDA_VERSION; }
void SetUp() override {
AutotunerUtil::ClearAutotuneResults();
HloTestBase::SetUp();
}
void TearDown() override {
AutotunerUtil::ClearAutotuneResults();
HloTestBase::TearDown();
}
};
class GemmFusionAutotunerTest : public StatelessAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
StatelessAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_enable_triton_gemm(true);
debug_options.set_xla_gpu_cublas_fallback(false);
debug_options.set_xla_gpu_cudnn_gemm_fusion_level(0);
return debug_options;
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
void CheckTritonAutotuning(absl::string_view hlo,
absl::string_view expected) {
HloPassPipeline pipeline("gemm_rewrite");
pipeline.AddPass<GemmFusion>(backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability());
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
DebugOptions opts;
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(
AutotuneConfig{DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
opts},
GetToolkitVersion(), &thread_pool, key_value_store);
RunAndFilecheckHloRewrite(
hlo, std::move(pipeline), expected, [](const HloModule* m) {
VLOG(5) << m->ToString();
const HloInstruction* dot_fusion =
m->entry_computation()->root_instruction();
if (dot_fusion->opcode() == HloOpcode::kReduce) {
dot_fusion = dot_fusion->operand(0);
}
CHECK_EQ(dot_fusion->opcode(), HloOpcode::kFusion);
if (!dot_fusion->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.has_cudnn_fusion_config()) {
CHECK_GT(dot_fusion->backend_config<GpuBackendConfig>()
.value()
.fusion_backend_config()
.triton_gemm_config()
.block_m(),
0);
}
});
}
};
class GemmFusionAutotunerTestWithMorePreciseReduction
: public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_triton_gemm_disable_reduced_precision_reduction(
true);
return debug_options;
}
};
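// Test helper: builds a deviceless autotuner for the given compute capability
// and returns the Triton tile configs it would consider for the dot.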
absl::StatusOr<std::vector<TritonGemmConfig>> GetPossibleMatmulAutotuneConfigs(
const HloDotInstruction& dot,
const se::CudaComputeCapability& compute_capability,
const int32_t toolkit_version, const DebugOptions& debug_options) {
DevicelessConfig test_config{"", compute_capability};
AutotuneConfig autotune_config{test_config, debug_options};
GemmFusionAutotunerImpl autotuner(autotune_config, toolkit_version,
debug_options, nullptr);
return autotuner.GenerateTritonConfigs(dot);
}
TEST_F(GemmFusionAutotunerTest, AmpereUsesMoreThanTwoStages) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.num_stages > 2; }));
}
TEST_F(GemmFusionAutotunerTest, SmallOutputCanUseLargeSplitK) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[1024,1024] parameter(0)
p1 = f32[1024,1024] parameter(1)
ROOT r = f32[1024,1024] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.split_k >= 4; }));
}
TEST_F(GemmFusionAutotunerTest, LargeOutputDoesNotUseLargeSplitK) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
ENTRY e {
p0 = f32[20480,20480] parameter(0)
p1 = f32[20480,20480] parameter(1)
ROOT r = f32[20480,20480] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_FALSE(std::any_of(
configs.begin(), configs.end(),
[](const TritonGemmConfig& config) { return config.split_k > 1; }));
}
TEST_F(GemmFusionAutotunerTest, Int8FusedGemm) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[128,64] parameter(0)
c = f16[128,64] convert(x)
y = f16[64,6144] parameter(1)
ROOT out = f16[128,6144] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{5e-3, 5e-3}));
}
TEST_F(GemmFusionAutotunerTest, Int8FusedGemm256) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[128,256] parameter(0)
c = f16[128,256] convert(x)
y = f16[256,6144] parameter(1)
ROOT out = f16[128,6144] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-2, 1e-2}));
}
TEST_F(GemmFusionAutotunerTest, SelectsSplitK) {
const std::string kHloText = R"(
HloModule t
ENTRY e {
p0 = s8[7,8192] parameter(0)
p0c = bf16[7,8192] convert(p0)
p1 = bf16[8192,18] parameter(1)
ROOT dot.0 = bf16[7,18] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: reduce
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: kCustom
; CHECK-NEXT: kLoop
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{4, 1e-1}));
}
TEST_F(GemmFusionAutotunerTestWithMorePreciseReduction, SelectsSplitK) {
constexpr absl::string_view kHloText = R"(
HloModule t
ENTRY e {
p0 = s8[7,8192] parameter(0)
p0c = bf16[7,8192] convert(p0)
p1 = bf16[8192,18] parameter(1)
ROOT dot.0 = bf16[7,18] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: reduce
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK-NEXT: kCustom
; CHECK-NEXT: kLoop
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-2, 1e-2}));
}
TEST_F(GemmFusionAutotunerTest, ApplySplitKWithoutAlteringTiling) {
const std::string kHloText = R"(
triton_dot {
p0 = f16[55,120] parameter(0)
p1 = f16[120,20] parameter(1)
ROOT dot = f16[55,20] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f16[55,120]{1,0} parameter(0)
p1 = f16[120,20]{1,0} parameter(1)
ROOT _ = f16[55,20] fusion(p0, p1), kind=kCustom, calls=triton_dot,
backend_config={"fusion_backend_config":{kind: "__triton_gemm", triton_gemm_config: {"block_m":16,"block_n":64,"block_k":32,"split_k":3,"num_stages":1,"num_warps":2,"num_ctas":1}}}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: f16[3,55,20]
; CHECK: {"block_m":16,"block_n":64,"block_k":32,"split_k":3,"num_stages":1,"num_warps":2,"num_ctas":1}
; CHECK: f16[55,20]{1,0} {{(reduce|fusion)}}
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
TEST_F(GemmFusionAutotunerTest, DoNotRunAutotuningKernelSpillingRegisters) {
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = s8[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.p0 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
%convert.p1 = f16[4,12288]{1,0} convert(s8[4,12288]{1,0} %p1)
%dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %convert.p1, f16[12288,1536]{1,0} %convert.p0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT %convert = s8[4,1536]{1,0} convert(f16[4,1536]{1,0} %dot)
}
ENTRY %e {
%get-tuple-element.7020 = s8[12288,1536]{1,0} parameter(0)
%convert = s8[4,12288]{1,0} parameter(1)
ROOT %triton = s8[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %get-tuple-element.7020, s8[4,12288]{1,0} %convert), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"256","block_n":"256","block_k":"32","split_k":"1","num_stages":"1","num_warps":"16","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
EXPECT_THAT(
backend().compiler()->RunBackend(std::move(module),
backend().default_stream_executor(),
{nullptr,
nullptr,
{},
true}),
::testing::AnyOf(
tsl::testing::StatusIs(
tsl::error::CANCELLED,
absl::StrFormat(
"Compilation result discarded due to register spilling")),
tsl::testing::StatusIs(
tsl::error::RESOURCE_EXHAUSTED,
absl::StrFormat("Register allocation failed"))));
}
TEST_F(GemmFusionAutotunerTest,
DoNotFilterOutAutotuningKernelSpillingRegisters) {
if (GetCudaComputeCapability().IsAtLeastHopper()) {
GTEST_SKIP() << "Hopper and newer runs out of registers for such HLOs";
}
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = s8[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.p0 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
%convert.p1 = f16[4,12288]{1,0} convert(s8[4,12288]{1,0} %p1)
%dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %convert.p1, f16[12288,1536]{1,0} %convert.p0), lhs_contracting_dims={1}, rhs_contracting_dims={0}
ROOT %convert = s8[4,1536]{1,0} convert(f16[4,1536]{1,0} %dot)
}
ENTRY %e {
%get-tuple-element.7020 = s8[12288,1536]{1,0} parameter(0)
%convert = s8[4,12288]{1,0} parameter(1)
ROOT %triton = s8[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %get-tuple-element.7020, s8[4,12288]{1,0} %convert), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"256","block_n":"256","block_k":"32","split_k":"1","num_stages":"1","num_warps":"16","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
HloModuleConfig config = module->config();
DebugOptions debug_options = config.debug_options();
debug_options.set_xla_gpu_filter_kernels_spilling_registers_on_autotuning(
false);
config.set_debug_options(debug_options);
module->set_config(config);
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
true})
.value();
EXPECT_NE(executable, nullptr);
}
TEST_F(GemmFusionAutotunerTest, RunAutotuningKernelNotSpillingRegisters) {
const std::string kHloText = R"(
HloModule m
%triton_gemm_dot {
%p1 = f16[4,12288]{1,0} parameter(1)
%p0 = s8[12288,1536]{1,0} parameter(0)
%convert.10406 = f16[12288,1536]{1,0} convert(s8[12288,1536]{1,0} %p0)
ROOT %dot = f16[4,1536]{1,0} dot(f16[4,12288]{1,0} %p1, f16[12288,1536]{1,0} %convert.10406), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY %e {
%p0 = s8[12288,1536]{1,0} parameter(0)
%p1 = f16[4,12288]{1,0} parameter(1)
ROOT %triton_dot = f16[4,1536]{1,0} fusion(s8[12288,1536]{1,0} %p0, f16[4,12288]{1,0} %p1), kind=kCustom, calls=%triton_gemm_dot,
backend_config={"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"16","block_n":"32","block_k":"32","split_k":"1","num_stages":"1","num_warps":"2","num_ctas":"1"}}}
})";
auto module = ParseAndReturnVerifiedModule(kHloText).value();
std::unique_ptr<Executable> executable =
backend()
.compiler()
->RunBackend(std::move(module), backend().default_stream_executor(),
{nullptr,
nullptr,
{},
true})
.value();
EXPECT_NE(executable, nullptr);
}
using GemmFusionAutotunerDumpTest = GemmFusionAutotunerTest;
TEST_F(GemmFusionAutotunerDumpTest, Fp8CublasltFallbackSupport) {
const std::string kHloText = R"(
HloModule o
gemm_fusion {
p0 = f8e4m3fn[64,6144]{1,0} parameter(0)
p1 = f8e4m3fn[64,6144]{1,0} parameter(1)
ROOT %dot.0 = f32[64,64]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY main {
p0 = f8e4m3fn[64,6144]{1,0} parameter(0)
p1 = f8e4m3fn[64,6144]{1,0} parameter(1)
ROOT %dot.0 = f32[64,64]{1,0} fusion(p0, p1), kind=kCustom, calls=gemm_fusion, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm"},"force_earliest_schedule":false}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
DebugOptions opts;
AutotuneConfig autotune_config{
DeviceConfig{backend().default_stream_executor(),
backend().memory_allocator()},
opts};
AutotuneCacheKey cache_key(autotune_config.GetModelStr(),
*module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(AutotuneResults autotune_results_override,
ParseTextProto<AutotuneResults>(R"pb(
version: 3
results {
device: "..."
hlo: "..."
result {
gemm { algorithm: -1 }
run_time { nanos: 14 }
}
})pb"));
autotune_results_override.mutable_results(0)->set_device(
std::string(cache_key.GetModelStr()));
autotune_results_override.mutable_results(0)->set_hlo(
std::string(cache_key.GetHlo()));
CHECK_OK(AutotunerUtil::LoadAutotuneResults(autotune_results_override));
HloPassPipeline pipeline("gemm_autotune");
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(autotune_config, GetToolkitVersion(),
&thread_pool, key_value_store);
pipeline.AddPass<CallInliner>();
for (bool fp8_rewrite : {true, false}) {
pipeline.AddPass<GemmRewriter>(autotune_config.GetGpuComputeCapability(),
GetToolkitVersion(), fp8_rewrite);
}
TF_EXPECT_OK(HloTestBase::RunHloPass(&pipeline, module.get()));
const bool is_at_least_hopper =
std::holds_alternative<se::CudaComputeCapability>(
autotune_config.GetGpuComputeCapability()) &&
std::get<se::CudaComputeCapability>(
autotune_config.GetGpuComputeCapability())
.IsAtLeastHopper();
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
      RunFileCheck(module->ToString(), is_at_least_hopper
                                           ? "// CHECK: __cublas$lt"
                                           : "// CHECK: __cublas$gemm"));
EXPECT_TRUE(filecheck_matches);
}
TEST_F(GemmFusionAutotunerDumpTest, DumpingWorks) {
HloModuleConfig config;
DebugOptions options = GetDebugOptionsForTest();
options.set_xla_gpu_cublas_fallback(true);
options.set_xla_gpu_dump_autotuned_gemm_fusions(true);
std::string output_directory;
if (!tsl::io::GetTestUndeclaredOutputsDir(&output_directory)) {
output_directory = tsl::testing::TmpDir();
}
options.set_xla_dump_to(output_directory);
config.set_debug_options(options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion1 {
p0 = f32[3333,3333] parameter(0)
s = f32[3333,3333] sine(p0)
p1 = f32[3333,3333] parameter(1)
c = f32[3333,3333] cosine(p1)
ROOT dot = f32[3333,3333] dot(s, c),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[3333,3333] parameter(0)
p1 = f32[3333,3333] parameter(1)
ROOT rr = f32[3333,3333] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__triton_gemm"}}
})",
config));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module,
GetOptimizedModule(std::move(module)));
std::string dump;
TF_EXPECT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(output_directory,
FilenameFor(*optimized_module, "",
"gemm_fusion_0.rr.txt")),
&dump));
EXPECT_TRUE(*RunFileCheck(dump, R"(
CHECK: HloModule rr
CHECK-NOT: cublas
CHECK: __triton_gemm
CHECK-NOT: block_m
)"));
dump.clear();
TF_EXPECT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(
output_directory,
FilenameFor(*optimized_module, "",
"gemm_fusion_0.rr.optimized.txt")),
&dump));
EXPECT_TRUE(*RunFileCheck(dump, R"(
CHECK: HloModule rr
CHECK-NOT: triton
CHECK: cublas
)"));
}
TEST_F(GemmFusionAutotunerTest, AutotuneCuDnnFusion) {
const std::string kHlo = R"(
fusion1 {
p0 = f32[3,28,32] parameter(0)
p1 = f32[3,28,32] parameter(1)
ROOT d = f32[3,32,32] dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[3,28,32] parameter(0)
p1 = f32[3,28,32] parameter(1)
ROOT _ = f32[3,32,32] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})";
CheckTritonAutotuning(kHlo, R"(
)");
}
class GemmFusionAutotunerLevelTest : public StatelessAutotunerTest,
public ::testing::WithParamInterface<int> {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
StatelessAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_autotune_level(GetParam());
debug_options.set_xla_gpu_cublas_fallback(false);
return debug_options;
}
};
TEST_P(GemmFusionAutotunerLevelTest, AllAutotuningLevelsWorkCorrectly) {
const std::string kHloText = R"(
HloModule m
ENTRY e {
p0 = pred[64,10] parameter(0)
p0c = f32[64,10] convert(p0)
p1 = f32[10,128] parameter(1)
ROOT r = f32[64,128] dot(p0c, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
MatchOptimizedHlo(kHloText, R"(
; CHECK: kind=kCustom
; CHECK-SAME: block_m
)");
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
TEST_P(GemmFusionAutotunerLevelTest, Deviceless) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[16,16] parameter(0)
c = f16[16,16] convert(x)
y = f16[16,16] parameter(1)
ROOT out = f16[16,16] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
HloPassPipeline pipeline("gemm_rewrite_deviceless");
pipeline.AddPass<GemmFusion>(backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability());
tsl::thread::ThreadPool thread_pool(tsl::Env::Default(), "",
tsl::port::MaxParallelism());
DebugOptions opts;
MultiProcessKeyValueStore key_value_store;
pipeline.AddPass<GemmFusionAutotuner>(
AutotuneConfig{DevicelessConfig{backend()
.default_stream_executor()
->GetDeviceDescription()
.model_str(),
backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability()},
opts},
GetToolkitVersion(), &thread_pool, key_value_store);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(hlo));
if (GetDebugOptionsForTest().xla_gpu_autotune_level() == 0) {
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pipeline, module.get()));
EXPECT_TRUE(changed);
TF_ASSERT_OK_AND_ASSIGN(
bool filecheck_matches,
RunFileCheck(
module->ToString(HloPrintOptions{}.set_print_operand_shape(false)),
R"(
)"));
EXPECT_TRUE(filecheck_matches);
} else {
EXPECT_THAT(HloTestBase::RunHloPass(&pipeline, module.get()),
tsl::testing::StatusIs(
tsl::error::INTERNAL,
::testing::HasSubstr(
"Expect autotune result cache hit for deviceless")));
}
}
INSTANTIATE_TEST_SUITE_P(GemmFusionAutotunerLevelSweep,
GemmFusionAutotunerLevelTest, ::testing::Range(0, 5));
class GemmFusionAutotunerExhaustiveTest : public GemmFusionAutotunerTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
GemmFusionAutotunerTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_exhaustive_tiling_search(true);
return debug_options;
}
};
TEST_F(GemmFusionAutotunerExhaustiveTest, DISABLED_CompileOnly) {
const std::string hlo = R"(
HloModule module
ENTRY e {
x = s8[16,16] parameter(0)
c = f16[16,16] convert(x)
y = f16[16,16] parameter(1)
ROOT out = f16[16,16] dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
CheckTritonAutotuning(hlo, R"(
)");
}
TEST_F(GemmFusionAutotunerExhaustiveTest, SkipsCrashingTileKConfig) {
std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(R"(
HloModule module
ENTRY e {
x = s8[33,33]{1,0} parameter(0)
c = f16[33,33]{1,0} convert(x)
y = f16[33,33]{1,0} parameter(1)
ROOT out = f16[33,33]{1,0} dot(c, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)")
.value();
const se::CudaComputeCapability compute_capability{
se::CudaComputeCapability::AMPERE, 0};
TF_ASSERT_OK_AND_ASSIGN(
const std::vector<TritonGemmConfig> configs,
GetPossibleMatmulAutotuneConfigs(
*Cast<HloDotInstruction>(
module->entry_computation()->root_instruction()),
compute_capability, GetToolkitVersion(), GetDebugOptionsForTest()));
EXPECT_TRUE(std::all_of(
configs.begin(), configs.end(),
      [](const TritonGemmConfig& config) { return con
#ifndef XLA_SERVICE_GPU_REDUCTION_SPLITTER_H_
#define XLA_SERVICE_GPU_REDUCTION_SPLITTER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
class ReductionSplitter : public HloModulePass {
public:
explicit ReductionSplitter(bool ignore_small_dims)
: ignore_small_dims_(ignore_small_dims) {}
absl::string_view name() const override { return "reduction-splitter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
bool ignore_small_dims_;
};
}
}
#endif
#include "xla/service/gpu/reduction_splitter.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <memory>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
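// Splits a reduction over several non-contiguous dimensions into two chained
// reductions: the first reduces only the largest reduced dimension, the
// second reduces the remaining (re-indexed) dimensions.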
class ReductionSplitterVisitor : public DfsHloRewriteVisitor {
public:
explicit ReductionSplitterVisitor(bool ignore_small_dims)
: ignore_small_dims_(ignore_small_dims) {}
absl::Status HandleReduce(HloInstruction *reduce) override {
VLOG(4) << "Input: " << reduce->ToString();
if (IsReductionFromOrToContiguousDimensions(*reduce)) {
VLOG(4) << "Reduction with contiguous dimensions. Return.";
return absl::OkStatus();
}
if (reduce->dimensions().size() < 2) {
return absl::OkStatus();
}
if (!reduce->shape().IsArray()) {
return absl::OkStatus();
}
HloInstruction *operand = reduce->mutable_operand(0);
const Shape &shape = operand->shape();
CHECK(shape == LayoutUtil::GetWithDefaultLayout(shape))
<< "Default layout should be enforced on reduction operand";
for (int64_t i = 0; i < reduce->dimensions().size(); ++i) {
for (int64_t j = i + 1; j < reduce->dimensions().size(); ++j) {
CHECK(abs(reduce->dimensions(i) - reduce->dimensions(j)) > 1)
<< "Reduction dimensions must not be consecutive";
}
}
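    // Pick the reduced dimension with the most elements: the first reduce will
    // collapse it, and a second reduce then handles the remaining dimensions.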
int64_t max_shape_dim = 0;
int64_t max_reduce_dim = 0;
const auto &input_shape = reduce->operand(0)->shape();
for (int64_t i = 0; i < reduce->dimensions().size(); ++i) {
if (input_shape.dimensions(reduce->dimensions(i)) > max_shape_dim) {
max_reduce_dim = reduce->dimensions(i);
max_shape_dim = input_shape.dimensions(max_reduce_dim);
}
}
if (ignore_small_dims_ && max_shape_dim <= 8) {
return absl::OkStatus();
}
VLOG(3) << "Splitting reduction " << reduce->name() << " at dimension "
<< max_reduce_dim;
std::vector<int64_t> pre_reduce_dims;
pre_reduce_dims.push_back(max_reduce_dim);
std::vector<int64_t> pre_reduce_shape_dims(input_shape.dimensions().begin(),
input_shape.dimensions().end());
pre_reduce_shape_dims.erase(pre_reduce_shape_dims.begin() + max_reduce_dim);
Shape pre_reduce_shape = ShapeUtil::MakeShape(
reduce->shape().element_type(), pre_reduce_shape_dims);
std::unique_ptr<HloInstruction> pre_reduce = HloInstruction::CreateReduce(
pre_reduce_shape, reduce->mutable_operand(0),
reduce->mutable_operand(1), pre_reduce_dims, reduce->to_apply());
pre_reduce->set_metadata(reduce->metadata());
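    // The second reduce operates on the pre-reduced shape: drop the dimension
    // that was already reduced and shift down every reduce dimension that came
    // after it.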
std::vector<int64_t> final_reduce_dims(reduce->dimensions().begin(),
reduce->dimensions().end());
final_reduce_dims.erase(
std::remove(final_reduce_dims.begin(), final_reduce_dims.end(),
max_reduce_dim),
final_reduce_dims.end());
for (int64_t i = 0; i < final_reduce_dims.size(); ++i) {
if (final_reduce_dims[i] > max_reduce_dim) {
final_reduce_dims[i]--;
}
}
std::unique_ptr<HloInstruction> final_reduce = HloInstruction::CreateReduce(
reduce->shape(),
reduce->parent()->AddInstruction(std::move(pre_reduce)),
reduce->mutable_operand(1), final_reduce_dims, reduce->to_apply());
return ReplaceWithNewInstruction(reduce, std::move(final_reduce));
}
private:
bool ignore_small_dims_;
};
absl::StatusOr<bool> ReductionSplitter::Run(
HloModule *module,
const absl::flat_hash_set<absl::string_view> &execution_threads) {
TF_ASSIGN_OR_RETURN(bool changed,
ReductionSplitterVisitor(ignore_small_dims_)
.RunOnModule(module, execution_threads));
return changed;
}
}
} | #include "xla/service/gpu/reduction_splitter.h"
#include <cstdint>
#include <vector>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class ReductionSplitterTest : public HloTestBase {};
TEST_F(ReductionSplitterTest, SplitReductionAtDimensionTwo) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f16[6,16,512,64]{3,2,1,0} parameter(0)
transpose.1781 = f16[6,512,16,64]{3,1,2,0} transpose(param_0), dimensions={0,2,1,3}
convert.6986 = f32[6,512,16,64]{3,1,2,0} convert(transpose.1781)
bitcast.2136 = f32[6,16,512,64]{3,2,1,0} bitcast(convert.6986)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,64]{1,0} reduce(bitcast.2136, constant_11111), dimensions={0,2}, to_apply=add_computation
}
)")
.value();
ASSERT_TRUE(
ReductionSplitter(true).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root_reduction =
module->entry_computation()->root_instruction();
ASSERT_THAT(root_reduction,
GmockMatch(m::Reduce(m::Reduce(), m::Constant())));
auto* pre_reduction = root_reduction->operand(0);
EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({2}));
EXPECT_THAT(pre_reduction->shape(), ShapeUtil::MakeShape(F32, {6, 16, 64}));
EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({0}));
EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64}));
}
TEST_F(ReductionSplitterTest, SplitReductionAtDimensionZero) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[1024,16,512,64,128]{4,3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,64]{1,0} reduce(param_0, constant_11111), dimensions={2,0,4}, to_apply=add_computation
}
)")
.value();
ASSERT_TRUE(
ReductionSplitter(false).Run(module.get()).value());
SCOPED_TRACE(module->ToString());
const HloInstruction* root_reduction =
module->entry_computation()->root_instruction();
ASSERT_THAT(root_reduction,
GmockMatch(m::Reduce(m::Reduce(), m::Constant())));
auto* pre_reduction = root_reduction->operand(0);
EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({0}));
EXPECT_THAT(pre_reduction->shape(),
ShapeUtil::MakeShape(F32, {16, 512, 64, 128}));
EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({1, 3}));
EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64}));
}
TEST_F(ReductionSplitterTest, DontSplitReductionWithSmallDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[16,8,1024,8]{3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[16,1024]{1,0} reduce(param_0, constant_11111), dimensions={3,1}, to_apply=add_computation
}
)")
.value();
EXPECT_FALSE(
ReductionSplitter(true).Run(module.get()).value());
EXPECT_TRUE(
ReductionSplitter(false).Run(module.get()).value());
}
TEST_F(ReductionSplitterTest, DontSplitReductionsWithContiguousDimensions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
add_computation {
x = f32[] parameter(0)
y = f32[] parameter(1)
ROOT add = f32[] add(x, y)
}
ENTRY entry_computation {
param_0 = f32[128,128,64,128]{3,2,1,0} parameter(0)
constant_11111 = f32[] constant(0)
ROOT reduce.982 = f32[128,64]{1,0} reduce(param_0, constant_11111), dimensions={3,0}, to_apply=add_computation
}
)")
.value();
EXPECT_FALSE(
ReductionSplitter(false).Run(module.get()).value());
}
}
}
} | 2,117 |
#ifndef XLA_SERVICE_GPU_GPU_SORT_REWRITER_H_
#define XLA_SERVICE_GPU_GPU_SORT_REWRITER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
namespace gpu {
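// Rewrites sort ops that use a simple key comparison (optionally with one
// value operand and the stable-sort iota tie-breaker pattern) into a custom
// call to CUB's device radix sort, provided the sorted dimension is the
// minor-most one and the tensor is large enough to benefit.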
class GpuSortRewriter : public HloModulePass {
public:
absl::string_view name() const override { return "gpu-sort-rewriter"; }
static int SortSizeThreshold() { return sort_size_threshold_; }
static void SetSortSizeThresholdForTestingOnly(int threshold) {
sort_size_threshold_ = threshold;
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
absl::StatusOr<bool> RunOnInstruction(HloSortInstruction* sort_op);
absl::StatusOr<bool> RunOnComputation(HloComputation* computation);
static inline int sort_size_threshold_ = 33000;
};
}
}
#endif
#include "xla/service/gpu/gpu_sort_rewriter.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/runtime/cub_sort_thunk.h"
#include "xla/service/stable_sort_expander.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
struct SortComputationAnalysis {
int key_operand;
bool descending;
};
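// An HLO sort comparator takes two scalar parameters per sort operand:
// parameters 2*i and 2*i+1 are the "lhs" and "rhs" elements of operand i. The
// helpers below recover which operand a compare refers to from these
// parameter numbers.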
std::pair<int64_t, int64_t> ParametersFromCmpOperands(
const HloCompareInstruction* cmp_op) {
if (cmp_op == nullptr) {
return std::pair<int64_t, int64_t>(-1, -1);
}
const HloParameterInstruction* param0 =
DynCast<HloParameterInstruction>(cmp_op->operand(0));
const HloParameterInstruction* param1 =
DynCast<HloParameterInstruction>(cmp_op->operand(1));
return (param0 && param1) ? std::make_pair(param0->parameter_number(),
param1->parameter_number())
: std::pair<int64_t, int64_t>(-1, -1);
}
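// Maps a compare over one parameter pair to (key operand, sort order). For
// example, compare(p0, p1) with direction=LT sorts operand 0 ascending, while
// compare(p3, p2) with direction=LT swaps the pair and therefore sorts
// operand 1 descending.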
std::optional<SortComputationAnalysis> AnalyzeCompareOp(
const HloInstruction* maybe_compare_op) {
const HloCompareInstruction* compare =
DynCast<HloCompareInstruction>(maybe_compare_op);
if (compare == nullptr || compare->direction() == ComparisonDirection::kEq ||
compare->direction() == ComparisonDirection::kNe) {
return std::nullopt;
}
auto [index0, index1] = ParametersFromCmpOperands(compare);
if (index0 == -1 || index1 == -1) {
return std::nullopt;
}
int first_index = std::min(index0, index1);
if (first_index % 2 != 0 || std::max(index0, index1) != first_index + 1) {
return std::nullopt;
}
bool descending = compare->direction() == ComparisonDirection::kGt ||
compare->direction() == ComparisonDirection::kGe;
bool reverse = first_index != index0;
return SortComputationAnalysis{first_index / 2, descending != reverse};
}
std::optional<SortComputationAnalysis> AnalyzeComplexSortComputation(
const HloSortInstruction& sort_op) {
auto computation = sort_op.called_computations().front();
if (computation->num_parameters() != 4) {
return std::nullopt;
}
int64_t iota_operand_index =
StableSortExpander::IotaOperandIndexForStableSort(sort_op);
if (iota_operand_index < 0) {
return std::nullopt;
}
auto root = computation->root_instruction();
if (root->opcode() != HloOpcode::kSelect) {
return std::nullopt;
}
auto iota_cmp = DynCast<HloCompareInstruction>(root->operand(1));
auto [iotap0, iotap1] = ParametersFromCmpOperands(iota_cmp);
if (iota_cmp == nullptr ||
iota_cmp->direction() != ComparisonDirection::kLt ||
iotap0 != iota_operand_index * 2 ||
iotap1 != iota_operand_index * 2 + 1) {
return std::nullopt;
}
auto eq_cmp = DynCast<HloCompareInstruction>(root->operand(0));
if (eq_cmp == nullptr || eq_cmp->direction() != ComparisonDirection::kEq) {
return std::nullopt;
}
auto [p0, p1] = ParametersFromCmpOperands(eq_cmp);
if (p0 < 0 || p1 < 0) {
auto cmp = DynCast<HloCompareInstruction>(eq_cmp->operand(0));
auto cmp_reverse = DynCast<HloCompareInstruction>(eq_cmp->operand(1));
auto [a, b] = ParametersFromCmpOperands(cmp);
auto [p, q] = ParametersFromCmpOperands(cmp_reverse);
if (cmp == nullptr || cmp_reverse == nullptr || a < 0 || b < 0 || a != q ||
b != p || cmp->direction() != cmp_reverse->direction() ||
cmp->direction() == Comparison::Direction::kEq ||
cmp->direction() == Comparison::Direction::kNe) {
return std::nullopt;
}
}
return AnalyzeCompareOp(root->operand(2));
}
std::optional<SortComputationAnalysis> AnalyzeSortOp(
const HloSortInstruction& sort_op) {
auto computation = sort_op.called_computations().front();
auto result = AnalyzeCompareOp(computation->root_instruction());
if (!result.has_value()) {
result = AnalyzeComplexSortComputation(sort_op);
}
return result;
}
absl::StatusOr<std::unique_ptr<CubSortRunnerInterface>> CreateRunner(
HloSortInstruction* sort_op, const SortComputationAnalysis& sort_config) {
int value_index = 1 - sort_config.key_operand;
return CubSortRunnerInterface::Create(
sort_op->operand(sort_config.key_operand)->shape().element_type(),
sort_op->operand_count() == 2
? std::optional(sort_op->operand(value_index)->shape().element_type())
: std::nullopt);
}
bool IsCubCompatibleSort(HloSortInstruction* sort_op) {
VLOG(1) << "Sort instruction: " << sort_op->name();
if (sort_op->operand_count() != 1 && sort_op->operand_count() != 2) {
VLOG(2) << "Unsupported operand count: " << sort_op->operand_count();
return false;
}
const Shape& operand_shape = sort_op->operand(0)->shape();
if (sort_op->sort_dimension() != operand_shape.rank() - 1) {
VLOG(2) << "Sort dimension should be the minor one";
return false;
}
if (Product(operand_shape.dimensions()) <
GpuSortRewriter::SortSizeThreshold()) {
VLOG(2) << "Tensor shape size is too small to see an improvement";
return false;
}
auto sort_config = AnalyzeSortOp(*sort_op);
if (!sort_config.has_value()) {
VLOG(2) << "Only simple compare computations are supported";
return false;
}
if (!CreateRunner(sort_op, *sort_config).ok()) {
VLOG(2) << "Unsupported operand types (no compiled CUB kernels)";
return false;
}
VLOG(2) << "Sort operation is compatible";
return true;
}
HloInstruction* UnpackResultPair(HloSortInstruction* sort_op,
HloInstruction* custom_call, bool swap) {
HloComputation* parent = sort_op->parent();
HloInstruction* gte0 =
parent->AddInstruction(HloInstruction::CreateGetTupleElement(
sort_op->operand(0)->shape(), custom_call, swap ? 1 : 0));
HloInstruction* gte1 =
parent->AddInstruction(HloInstruction::CreateGetTupleElement(
sort_op->operand(1)->shape(), custom_call, swap ? 0 : 1));
return parent->AddInstruction(HloInstruction::CreateTuple({gte0, gte1}));
}
}
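// Replaces a compatible sort with a kCubDeviceRadixSortTarget custom call
// whose result tuple holds the sorted keys, optionally the reordered values,
// and a scratch buffer; get-tuple-element instructions then reproduce the
// original sort's outputs.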
absl::StatusOr<bool> GpuSortRewriter::RunOnInstruction(
HloSortInstruction* sort_op) {
SortComputationAnalysis sort_config = AnalyzeSortOp(*sort_op).value();
const Shape& operand_shape = sort_op->operand(0)->shape();
int64_t batch_size = Product(operand_shape.dimensions()) /
operand_shape.dimensions(sort_op->sort_dimension());
TF_ASSIGN_OR_RETURN(auto runner, CreateRunner(sort_op, sort_config));
TF_ASSIGN_OR_RETURN(
int64_t scratch_size,
runner->GetScratchSize(Product(operand_shape.dimensions()), batch_size));
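  // Batched sorts also need room for per-batch offsets (presumably the segment
  // offsets consumed by CUB's segmented sort): round the scratch size up to
  // int alignment and reserve batch_size + 1 extra ints.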
if (batch_size > 1) {
scratch_size += sizeof(int) - scratch_size % sizeof(int);
scratch_size += (batch_size + 1) * sizeof(int);
}
HloInstruction* keys = sort_op->mutable_operand(0);
HloInstruction* values = nullptr;
if (sort_op->operand_count() == 2) {
values = sort_op->mutable_operand(1);
if (sort_config.key_operand == 1) {
std::swap(keys, values);
}
}
std::vector<Shape> shapes{keys->shape()};
std::vector<HloInstruction*> operands{keys};
if (values != nullptr) {
shapes.push_back(values->shape());
operands.push_back(values);
}
shapes.push_back(ShapeUtil::MakeShape(U8, {scratch_size}));
Shape call_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(shapes));
HloInstruction* custom_call =
sort_op->parent()->AddInstruction(HloInstruction::CreateCustomCall(
call_shape, absl::MakeSpan(operands), kCubDeviceRadixSortTarget));
xla::SortOptions backend_config;
backend_config.set_descending(sort_config.descending);
TF_RETURN_IF_ERROR(custom_call->set_backend_config(backend_config));
HloInstruction* replacement;
if (sort_op->operand_count() == 1) {
replacement =
sort_op->parent()->AddInstruction(HloInstruction::CreateGetTupleElement(
sort_op->shape(), custom_call, 0));
} else {
replacement = UnpackResultPair(sort_op, custom_call,
sort_config.key_operand == 1);
}
TF_RETURN_IF_ERROR(
sort_op->parent()->ReplaceInstruction(sort_op, replacement));
return true;
}
absl::StatusOr<bool> GpuSortRewriter::RunOnComputation(
HloComputation* computation) {
std::vector<HloSortInstruction*> sort_ops;
for (auto* inst : computation->instructions()) {
HloSortInstruction* sort = DynCast<HloSortInstruction>(inst);
if (sort != nullptr && IsCubCompatibleSort(sort)) {
sort_ops.push_back(sort);
}
}
bool changed = false;
for (auto* sort : sort_ops) {
TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(sort));
changed |= result;
}
return changed;
}
absl::StatusOr<bool> GpuSortRewriter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
XLA_VLOG_LINES(2, "GpuSortRewriter::Run(), before:\n" + module->ToString());
bool changed = false;
for (HloComputation* computation :
module->MakeNonfusionComputations(execution_threads)) {
TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation));
changed |= result;
}
XLA_VLOG_LINES(2, "GpuSortRewriter::Run(), after:\n" + module->ToString());
return changed;
}
}
} | #include "xla/service/gpu/gpu_sort_rewriter.h"
#include <utility>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
namespace m = ::xla::match;
class GpuSortRewriterTest : public HloTestBase {
public:
void SetUp() override {
HloTestBase::SetUp();
GpuSortRewriter::SetSortSizeThresholdForTestingOnly(1000);
}
bool RunModuleAndPass(HloModule* module) {
auto cloned = module->Clone();
bool changed = GpuSortRewriter().Run(module).value();
if (changed) {
EXPECT_TRUE(RunAndCompare(std::move(cloned), ErrorSpec{0, 0}));
}
return changed;
}
void ExpectDirection(const HloInstruction* instruction, bool descending) {
auto config = instruction->backend_config<xla::SortOptions>();
EXPECT_EQ(config->descending(), descending);
}
};
TEST_F(GpuSortRewriterTest, SortKeysLessThan) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[1000] parameter(0)
ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
false);
}
TEST_F(GpuSortRewriterTest, SortKeysGreaterThan) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %gt = pred[] compare(%lhs, %rhs), direction=GT
}
ENTRY %main {
%input = f32[1000] parameter(0)
ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
true);
}
TEST_F(GpuSortRewriterTest, SortKeysGreaterThanSwapped) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(1)
%rhs = f32[] parameter(0)
ROOT %gt = pred[] compare(%lhs, %rhs), direction=GT
}
ENTRY %main {
%input = f32[1000] parameter(0)
ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
false);
}
TEST_F(GpuSortRewriterTest, SortPairs) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs_key = u32[] parameter(0)
%rhs_key = u32[] parameter(1)
%lhs_value = f32[] parameter(2)
%rhs_value = f32[] parameter(3)
ROOT %lt = pred[] compare(%lhs_key, %rhs_key), direction=LT
}
ENTRY %main {
%input_keys = u32[1000] parameter(0)
%input_values = f32[1000] parameter(1)
ROOT %sort = (u32[1000], f32[1000]) sort(%input_keys, %input_values),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0),
m::GetTupleElement(m::CustomCall(), 1))));
}
TEST_F(GpuSortRewriterTest, SortPairsSwapped) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs_value = f32[] parameter(0)
%rhs_value = f32[] parameter(1)
%lhs_key = u32[] parameter(2)
%rhs_key = u32[] parameter(3)
ROOT %lt = pred[] compare(%lhs_key, %rhs_key), direction=LT
}
ENTRY %main {
%input_values = f32[1000] parameter(0)
%input_keys = u32[1000] parameter(1)
ROOT %sort = (f32[1000], u32[1000]) sort(%input_values, %input_keys),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 1),
m::GetTupleElement(m::CustomCall(), 0))));
}
TEST_F(GpuSortRewriterTest, NoRewriteManyTensors) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
%unused1 = f64[] parameter(2)
%unused2 = f64[] parameter(3)
%unused3 = u64[] parameter(4)
%unused4 = u64[] parameter(5)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input1 = f32[1000] parameter(0)
%input2 = f64[1000] parameter(1)
%input3 = u64[1000] parameter(2)
ROOT %sort = (f32[1000], f64[1000], u64[1000]) sort(%input1, %input2, %input3),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(GpuSortRewriterTest, NoRewriteNonMinorSortDimension) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[1000,4] parameter(0)
ROOT %sort = f32[1000,4] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(GpuSortRewriterTest, NoRewriteUnsupportedType) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = pred[] parameter(0)
%rhs = pred[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = pred[1000] parameter(0)
ROOT %sort = pred[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(GpuSortRewriterTest, NoRewriteComplexComparer) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%lhs_scaled = f32[] multiply(%lhs, f32[] constant(2))
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs_scaled, %rhs), direction=LT
}
ENTRY %main {
%input = f32[1000] parameter(0)
ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(GpuSortRewriterTest, NoRewriteMixedKeysValues) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs_key = u32[] parameter(0)
%rhs_key = u32[] parameter(1)
%lhs_value = u32[] parameter(2)
%rhs_value = u32[] parameter(3)
ROOT %mixed = pred[] compare(%rhs_key, %lhs_value), direction=LT
}
ENTRY %main {
%input_keys = u32[1000] parameter(0)
%input_values = u32[1000] parameter(1)
ROOT %sort = (u32[1000], u32[1000]) sort(%input_keys, %input_values),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(GpuSortRewriterTest, NoRewriteSmallSize) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[100] parameter(0)
ROOT %sort = f32[100] sort(%input), dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_FALSE(RunModuleAndPass(module.get()));
}
TEST_F(GpuSortRewriterTest, SortWithBatchDim) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[10,100] parameter(0)
ROOT %sort = f32[10,100] sort(%input), dimensions={1}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
false);
}
TEST_F(GpuSortRewriterTest, SortWithMultipleBatchDims) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = f32[] parameter(0)
%rhs = f32[] parameter(1)
ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT
}
ENTRY %main {
%input = f32[10,10,10] parameter(0)
ROOT %sort = f32[10,10,10] sort(%input), dimensions={2}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(
module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(
m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0)));
ExpectDirection(module->entry_computation()->root_instruction()->operand(0),
false);
}
TEST_F(GpuSortRewriterTest, SortPairsIotaComparerSimple) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = u16[] parameter(0)
%rhs = u16[] parameter(1)
%lhs_index = s32[] parameter(2)
%rhs_index = s32[] parameter(3)
cmp_indices = pred[] compare(%lhs_index, %rhs_index), direction=LT
cmp_lr = pred[] compare(%lhs, %rhs), direction=GT
cmp_eq = pred[] compare(%lhs, %rhs), direction=EQ
ROOT %lt = pred[] select(cmp_eq, cmp_indices, cmp_lr)
}
ENTRY %main {
%inputs = u16[1000] parameter(0)
%iota = s32[1000] iota(), iota_dimension=0
ROOT %sort = (u16[1000], s32[1000]) sort(%inputs, %iota),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0),
m::GetTupleElement(m::CustomCall(), 1))));
}
TEST_F(GpuSortRewriterTest, SortPairsIotaComparerLikeStableSortExpander) {
constexpr char kHlo[] = R"(
HloModule TestModule
%compare {
%lhs = u16[] parameter(0)
%rhs = u16[] parameter(1)
%lhs_index = s32[] parameter(2)
%rhs_index = s32[] parameter(3)
cmp_indices = pred[] compare(%lhs_index, %rhs_index), direction=LT
cmp_lr = pred[] compare(%lhs, %rhs), direction=GT
cmp_rl = pred[] compare(%rhs, %lhs), direction=GT
cmp_eq = pred[] compare(cmp_lr, cmp_rl), direction=EQ
ROOT %lt = pred[] select(cmp_eq, cmp_indices, cmp_lr)
}
ENTRY %main {
%inputs = u16[1000] parameter(0)
%iota = s32[1000] iota(), iota_dimension=0
ROOT %sort = (u16[1000], s32[1000]) sort(%inputs, %iota),
dimensions={0}, to_apply=%compare
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
EXPECT_TRUE(RunModuleAndPass(module.get()));
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0),
m::GetTupleElement(m::CustomCall(), 1))));
}
}
}
} | 2,118 |
#ifndef XLA_SERVICE_GPU_KERNELS_TOPK_KERNEL_H_
#define XLA_SERVICE_GPU_KERNELS_TOPK_KERNEL_H_
#include <stddef.h>
#include "absl/status/status.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu {
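// Runs the custom TopK kernel. `data` is treated as `batch_size` rows of
// `num_elements` values each; the top `k` values and their indices of every
// row are written to `top_elements` and `top_indices` (k * batch_size entries
// each).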
absl::Status RunTopk(se::Stream* stream, PrimitiveType dtype,
se::DeviceMemoryBase data, size_t num_elements,
se::DeviceMemoryBase top_elements,
se::DeviceMemoryBase top_indices, size_t k,
size_t batch_size);
}
#endif
#include "xla/service/gpu/kernels/topk_kernel.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/kernels/topk_kernel_common.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/typed_kernel_factory.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
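// Threads per block for one batch row: 512 * (16 / k), clamped to the kernel's
// per-block limit and to the largest power of two that still gives every
// thread at least bit_ceil(k) input elements to scan.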
size_t NumThreads(size_t n, size_t k, size_t batch_size) {
size_t simultaneous_threads_per_block = 512 * (16 / k);
size_t threads_per_block =
std::min(simultaneous_threads_per_block, kTopKMaxThreadsPerBlock);
size_t min_slice = absl::bit_floor(n / absl::bit_ceil(k));
return std::min(threads_per_block, min_slice);
}
template <typename T>
absl::StatusOr<void*> GetKernel(int n, int k) {
if (k <= 1) return GetTopKKernelForK<T, 1>(n);
if (k <= 2) return GetTopKKernelForK<T, 2>(n);
if (k <= 4) return GetTopKKernelForK<T, 4>(n);
if (k <= 8) return GetTopKKernelForK<T, 8>(n);
if (k <= 16) return GetTopKKernelForK<T, 16>(n);
return absl::UnimplementedError(absl::StrCat("Unsupported K: ", k));
}
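// Launches one block per batch row with NumThreads() threads each; dynamic
// shared memory is sized as bit_ceil(k) * sizeof(uint64_t) * wavefront size
// bytes.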
template <typename T>
absl::Status TypedTopK(se::Stream* stream, se::DeviceMemoryBase data,
size_t num_elements, se::DeviceMemoryBase top_elements,
se::DeviceMemoryBase top_indices, size_t k,
size_t batch_size) {
constexpr size_t max_kv_size = sizeof(uint64_t);
int shmem_size = absl::bit_ceil(k) * max_kv_size * GetTopKWaveFrontSize<T>();
int num_threads = NumThreads(num_elements, k, batch_size);
if (num_threads == 0) {
return absl::FailedPreconditionError(
"Invalid kernel parameters. This is likely a bug in the "
"TopkSpecializer.");
}
se::StreamExecutor* executor = stream->parent();
se::DeviceMemory<T> data_typed(data);
se::DeviceMemory<T> top_elements_typed(top_elements);
se::DeviceMemory<uint32_t> top_indices_typed(top_indices);
TF_ASSIGN_OR_RETURN(void* kernel_symbol, GetKernel<T>(num_elements, k));
TF_ASSIGN_OR_RETURN(
auto kernel,
(se::TypedKernelFactory<se::DeviceMemory<T>, size_t, se::DeviceMemory<T>,
se::DeviceMemory<uint32_t>,
size_t>::Create(executor, "topk",
kernel_symbol)));
TF_RETURN_IF_ERROR(stream->ThenLaunch(
se::ThreadDim(num_threads, 1, 1), se::BlockDim(batch_size, 1, 1),
shmem_size, kernel, data_typed, num_elements, top_elements_typed,
top_indices_typed, k));
return absl::OkStatus();
}
}
absl::Status RunTopk(se::Stream* stream, PrimitiveType dtype,
se::DeviceMemoryBase data, size_t num_elements,
se::DeviceMemoryBase top_elements,
se::DeviceMemoryBase top_indices, size_t k,
size_t batch_size) {
VLOG(2) << "TopK: " << primitive_util::LowercasePrimitiveTypeName(dtype)
<< ", n: " << num_elements << ", k: " << k << ", bs: " << batch_size;
switch (dtype) {
case PrimitiveType::F32:
return TypedTopK<float>(stream, data, num_elements, top_elements,
top_indices, k, batch_size);
case PrimitiveType::BF16:
return TypedTopK<bfloat16>(stream, data, num_elements, top_elements,
top_indices, k, batch_size);
default:
return absl::UnimplementedError("GpuTopK not implemented for this dtype");
}
}
} | #include "xla/service/gpu/kernels/topk_kernel.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <functional>
#include <tuple>
#include <vector>
#include "absl/log/check.h"
#include "absl/random/random.h"
#include "absl/strings/substitute.h"
#include "absl/time/time.h"
#include "xla/stream_executor/device_memory_handle.h"
#include "xla/stream_executor/gpu/gpu_init.h"
#include "xla/stream_executor/gpu/gpu_timer.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla::gpu {
namespace {
using se::gpu::GpuStreamHandle;
using ::testing::Combine;
using ::testing::Values;
template <typename T>
std::vector<T> RandomVecRange(int num_elements, T start, T end) {
std::vector<T> local;
local.reserve(num_elements);
thread_local absl::BitGen gen;
for (int i = 0; i < num_elements; ++i) {
local.push_back(absl::Uniform<T>(gen, start, end));
}
return local;
}
template <typename T>
std::vector<T> RandomVec(int num_elements) {
return RandomVecRange(num_elements, static_cast<T>(0),
static_cast<T>(num_elements));
}
template <typename T>
std::vector<T> RandomVecNegative(int num_elements) {
return RandomVecRange(num_elements, -static_cast<T>(num_elements),
static_cast<T>(0));
}
PrimitiveType Get(float) { return PrimitiveType::F32; }
PrimitiveType Get(bfloat16) { return PrimitiveType::BF16; }
se::StreamExecutor* GetGpuExecutor() {
auto* platform =
se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value();
return platform->ExecutorForDevice(0).value();
}
using TopkTest = ::testing::TestWithParam<std::tuple<int, int, int, int>>;
TEST_P(TopkTest, TopKFloat) {
using T = float;
auto* executor = GetGpuExecutor();
auto stream = executor->CreateStream().value();
const auto [n_kb, k, batch_size, offset] = GetParam();
const size_t n = n_kb * 1024 + offset;
stream_executor::DeviceMemoryHandle input_buffer(
executor, executor->AllocateArray<T>(n * batch_size));
stream_executor::DeviceMemoryHandle output_values(
executor, executor->AllocateArray<T>(k * batch_size));
stream_executor::DeviceMemoryHandle output_indices(
executor, executor->AllocateArray<uint32_t>(k * batch_size));
ASSERT_TRUE(!(input_buffer.memory().is_null() ||
output_values.memory().is_null() ||
output_indices.memory().is_null()));
auto source = RandomVec<T>(n * batch_size);
CHECK_OK(stream->Memcpy(input_buffer.memory_ptr(), source.data(),
n * batch_size * sizeof(T)));
ASSERT_TRUE(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n,
output_values.memory(), output_indices.memory(), k,
batch_size)
.ok());
std::vector<T> got(k);
ASSERT_TRUE(stream->BlockHostUntilDone().ok());
for (int i = 0; i < batch_size; i++) {
CHECK_OK(stream->Memcpy(
got.data(),
se::DeviceMemory<T>(output_values.memory()).GetSlice(k * i, k),
k * sizeof(T)));
std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1));
std::sort(slice.begin(), slice.end(), std::greater<T>());
slice.resize(k);
EXPECT_THAT(got, ::testing::ElementsAreArray(slice))
<< " k=" << k << ", batch_size=" << batch_size << " i=" << i;
}
}
TEST_P(TopkTest, TopKPackedNegative) {
using T = float;
auto* executor = GetGpuExecutor();
auto stream = executor->CreateStream().value();
const auto [n_kb, k, batch_size, offset] = GetParam();
const size_t n = n_kb * 1024 + offset;
stream_executor::DeviceMemoryHandle input_buffer(
executor, executor->AllocateArray<T>(n * batch_size));
stream_executor::DeviceMemoryHandle output_values(
executor, executor->AllocateArray<T>(k * batch_size));
stream_executor::DeviceMemoryHandle output_indices(
executor, executor->AllocateArray<uint32_t>(k * batch_size));
ASSERT_TRUE(!(input_buffer.memory().is_null() ||
output_values.memory().is_null() ||
output_indices.memory().is_null()));
auto source = RandomVecNegative<T>(n * batch_size);
CHECK_OK(stream->Memcpy(input_buffer.memory_ptr(), source.data(),
n * batch_size * sizeof(T)));
ASSERT_TRUE(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n,
output_values.memory(), output_indices.memory(), k,
batch_size)
.ok());
std::vector<T> got(k);
ASSERT_TRUE(stream->BlockHostUntilDone().ok());
for (int i = 0; i < batch_size; i++) {
CHECK_OK(stream->Memcpy(
got.data(),
se::DeviceMemory<T>(output_values.memory()).GetSlice(k * i, k),
k * sizeof(T)));
std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1));
std::sort(slice.begin(), slice.end(), std::greater<T>());
slice.resize(k);
EXPECT_THAT(got, ::testing::ElementsAreArray(slice))
<< " k=" << k << ", batch_size=" << batch_size << " i=" << i;
}
}
INSTANTIATE_TEST_SUITE_P(TopkTests, TopkTest,
Combine(
Values(1, 8, 12, 64, 128),
Values(1, 2, 8, 16, 7, 12),
Values(1, 16, 64, 128),
Values(0, 7, 4)),
[](const auto& info) {
return absl::Substitute(
"n$0KiB_k$1_batch_size$2_offset$3",
std::get<0>(info.param), std::get<1>(info.param),
std::get<2>(info.param),
std::get<3>(info.param));
});
template <size_t K>
void BM_SmallTopk(benchmark::State& state) {
using T = float;
size_t k = K;
size_t batch_size = state.range(0);
size_t n = state.range(1) * 1024;
state.SetLabel(
absl::Substitute("n=$0Ki k=$1 batch_size=$2", n / 1024, k, batch_size));
auto* executor = GetGpuExecutor();
auto stream = executor->CreateStream().value();
stream_executor::DeviceMemoryHandle input_buffer(
executor, executor->AllocateArray<T>(n * batch_size));
stream_executor::DeviceMemoryHandle output_values(
executor, executor->AllocateArray<T>(k * batch_size));
stream_executor::DeviceMemoryHandle output_indices(
executor, executor->AllocateArray<uint32_t>(k * batch_size));
if (input_buffer.memory().is_null() || output_values.memory().is_null() ||
output_indices.memory().is_null()) {
state.SkipWithError("Unable to allocate GPU memory: aborting benchmark");
return;
}
auto source = RandomVec<T>(n);
for (size_t i = 0; i < batch_size; i++) {
auto slice = se::DeviceMemory<T>(input_buffer.memory()).GetSlice(i * n, n);
CHECK_OK(stream->Memcpy(&slice, source.data(), n * sizeof(T)));
}
for (auto _ : state) {
CHECK_OK(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n,
output_values.memory(), output_indices.memory(), k,
batch_size));
    auto timer = se::gpu::GpuTimer::Create(stream.get(), true);
CHECK_OK(timer.status());
CHECK_OK(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n,
output_values.memory(), output_indices.memory(), k,
batch_size));
auto timer_duration = timer.value().GetElapsedDuration();
CHECK_OK(timer_duration.status());
state.SetIterationTime(absl::ToDoubleSeconds(timer_duration.value()));
}
size_t items_processed = batch_size * n * state.iterations();
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(T));
}
BENCHMARK(BM_SmallTopk<1>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
BENCHMARK(BM_SmallTopk<2>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
BENCHMARK(BM_SmallTopk<4>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
BENCHMARK(BM_SmallTopk<8>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
BENCHMARK(BM_SmallTopk<16>)->RangePair(1, 1024, 16, 1024)->UseManualTime();
}
} | 2,119 |
#ifndef XLA_SERVICE_GPU_KERNELS_CUTLASS_GEMM_FUSION_H_
#define XLA_SERVICE_GPU_KERNELS_CUTLASS_GEMM_FUSION_H_
#include <optional>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/stream_executor/device_description.h"
namespace xla::gpu {
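// Fusion patterns that recognize dot instructions eligible for custom CUTLASS
// kernels: a plain row-major f32 GEMM, a GEMM whose result feeds a
// dynamic-update-slice, and a GEMM with an upcast (convert) on one operand.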
class CutlassGemmPattern : public CustomKernelFusionPattern {
public:
std::optional<Match> TryMatch(const se::DeviceDescription& device,
HloInstruction* instr) const override;
};
class CutlassGemmWithDynamicUpdateSlicePattern
: public CustomKernelFusionPattern {
public:
std::optional<Match> TryMatch(const se::DeviceDescription& device,
HloInstruction* instr) const override;
};
class CutlassGemmWithUpcastPattern : public CustomKernelFusionPattern {
public:
std::optional<Match> TryMatch(const se::DeviceDescription& device,
HloInstruction* instr) const override;
};
}
#endif
#include "xla/service/gpu/kernels/cutlass_gemm_fusion.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/service/gpu/kernels/cutlass_gemm.h"
#include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
namespace m = match;
struct RootWithWorkspace {
HloInstruction* root;
HloInstruction* workspace;
};
static RootWithWorkspace MatchRootWithWorkspace(HloInstruction* root) {
RootWithWorkspace result;
if (Match(root,
m::Tuple(m::Op(&result.root),
m::CustomCall(&result.workspace,
{CustomKernelFusionPattern::kWorkspace})))) {
return result;
}
return {root, nullptr};
}
struct GemmWithUpcast {
explicit GemmWithUpcast(HloDotInstruction* dot) : dot(dot) {}
HloInstruction* dot;
HloInstruction* lhs_upcast = nullptr;
HloInstruction* rhs_upcast = nullptr;
};
struct GemmWithDynamicSlice {
explicit GemmWithDynamicSlice(HloDynamicUpdateSliceInstruction* update_slice)
: update_slice(update_slice) {}
std::vector<HloInstruction*> Instrs() {
if (bitcast == nullptr) {
return {dot, update_slice};
}
return {dot, bitcast, update_slice};
}
HloInstruction* dot = nullptr;
HloInstruction* bitcast = nullptr;
HloInstruction* update_slice = nullptr;
};
}
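// A "row-major GEMM" here means both operands are rank-2 and the dot contracts
// lhs dimension 1 against rhs dimension 0, i.e. a plain [m,k] x [k,n] matmul
// with no batch dimensions.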
static absl::Status MatchRowMajorGemm(HloDotInstruction* dot) {
if (dot->operand(0)->shape().dimensions_size() != 2 ||
dot->operand(1)->shape().dimensions_size() != 2) {
return absl::InternalError("operands must have rank 2");
}
auto& dot_dims = dot->dot_dimension_numbers();
if (dot_dims.lhs_contracting_dimensions().size() != 1 ||
dot_dims.lhs_contracting_dimensions()[0] != 1) {
return absl::InternalError("lhs contracting dimensions must be 1");
}
if (dot_dims.rhs_contracting_dimensions().size() != 1 ||
dot_dims.rhs_contracting_dimensions()[0] != 0) {
return absl::InternalError("rhs contracting dimensions must be 0");
}
return absl::OkStatus();
}
static absl::Status MatchSimpleGemm(
HloDotInstruction* dot, absl::Span<const PrimitiveType> support_dtypes) {
TF_RETURN_IF_ERROR(MatchRowMajorGemm(dot));
for (PrimitiveType dtype : support_dtypes) {
if (dot->operand(0)->shape().element_type() == dtype &&
dot->operand(1)->shape().element_type() == dtype &&
dot->shape().element_type() == dtype) {
return absl::OkStatus();
}
}
return absl::InternalError("unsupported operands type");
}
static absl::StatusOr<GemmWithUpcast> MatchGemmWithUpcast(
HloDotInstruction* dot) {
TF_RETURN_IF_ERROR(MatchRowMajorGemm(dot));
GemmWithUpcast match(dot);
if (Match(const_cast<HloInstruction*>(dot->operand(0)),
m::Convert(&match.lhs_upcast, m::Op()))) {
return match;
}
if (Match(const_cast<HloInstruction*>(dot->operand(1)),
m::Convert(&match.rhs_upcast, m::Op()))) {
return match;
}
return absl::InternalError("unsupported gemm with upcasing");
}
template <typename Pattern>
auto OptionalBitcast(HloInstruction** optional_bitcast, Pattern pattern) {
return m::AnyOf<HloInstruction>(m::Bitcast(optional_bitcast, pattern),
std::move(pattern));
}
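// Matches a dot whose result, possibly through a bitcast, is the update
// operand of a dynamic-update-slice, so the fused GEMM can write its output
// directly into the updated buffer.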
static absl::StatusOr<GemmWithDynamicSlice> MatchGemmWithDynamicUpdateSlice(
HloDynamicUpdateSliceInstruction* update_slice) {
GemmWithDynamicSlice match(update_slice);
if (!Match(const_cast<HloInstruction*>(update_slice->update()),
OptionalBitcast(&match.bitcast,
m::Dot(&match.dot, m::Op(), m::Op())))) {
return absl::InternalError("failed to match update slice instr");
}
TF_RETURN_IF_ERROR(MatchRowMajorGemm(Cast<HloDotInstruction>(match.dot)));
return match;
}
static bool AreInstructionsOnTheSameStream(
absl::Span<const HloInstruction* const> instructions) {
absl::flat_hash_set<int64_t> stream_set;
for (const HloInstruction* inst : instructions) {
auto gpu_config = inst->backend_config<GpuBackendConfig>();
if (!gpu_config.ok()) {
continue;
}
stream_set.insert(gpu_config->operation_queue_id());
if (stream_set.size() > 1) {
return false;
}
}
return true;
}
std::optional<CustomKernelFusionPattern::Match> CutlassGemmPattern::TryMatch(
const se::DeviceDescription& device, HloInstruction* instr) const {
auto* dot = DynCast<HloDotInstruction>(instr);
if (!dot) return std::nullopt;
auto matched = MatchSimpleGemm(dot, {PrimitiveType::F32});
if (!matched.ok()) return std::nullopt;
CustomFusionConfig config;
config.set_name("cutlass_gemm");
return Match{config, {instr}};
}
std::optional<CustomKernelFusionPattern::Match>
CutlassGemmWithDynamicUpdateSlicePattern::TryMatch(
const se::DeviceDescription& device, HloInstruction* instr) const {
auto* update_slice = DynCast<HloDynamicUpdateSliceInstruction>(instr);
if (!update_slice) return std::nullopt;
auto matched = MatchGemmWithDynamicUpdateSlice(update_slice);
if (!matched.ok() || !AreInstructionsOnTheSameStream(matched->Instrs()))
return std::nullopt;
CustomFusionConfig config;
config.set_name("cutlass_gemm_with_dynamic_update_slice");
Match match(config, matched->Instrs());
match.AddReplacement(matched->dot, [=](HloFusionInstruction* fusion) {
HloComputation* parent = fusion->parent();
auto* dus = Cast<HloDynamicUpdateSliceInstruction>(matched->update_slice);
bool has_bitcast = matched->bitcast != nullptr;
const Shape dus_shape =
has_bitcast ? matched->bitcast->shape() : matched->dot->shape();
auto* slice = parent->AddInstruction(HloInstruction::CreateDynamicSlice(
dus_shape, fusion, dus->index_operands(), dus_shape.dimensions()));
return parent->AddInstruction(
HloInstruction::CreateBitcast(matched->dot->shape(), slice));
});
return match;
}
std::optional<CustomKernelFusionPattern::Match>
CutlassGemmWithUpcastPattern::TryMatch(const se::DeviceDescription& device,
HloInstruction* instr) const {
auto* dot = DynCast<HloDotInstruction>(instr);
if (!dot) return std::nullopt;
auto matched = MatchGemmWithUpcast(dot);
if (!matched.ok()) return std::nullopt;
DCHECK(matched->lhs_upcast == nullptr || matched->rhs_upcast == nullptr);
CustomFusionConfig config;
config.set_name("cutlass_gemm_with_upcast");
return matched->lhs_upcast ? Match{config, {matched->lhs_upcast, instr}}
: Match{config, {matched->rhs_upcast, instr}};
}
class CutlassGemmFusion : public CustomKernelFusion {
public:
absl::StatusOr<std::vector<CustomKernel>> LoadKernels(
const se::DeviceDescription& device,
const HloComputation* computation) const final {
auto* dot = DynCast<HloDotInstruction>(computation->root_instruction());
if (dot == nullptr) {
return absl::InternalError(
"cutlass_gemm requires ROOT operation to be a dot");
}
TF_RETURN_IF_ERROR(MatchSimpleGemm(dot, {PrimitiveType::F32}));
auto dtype = dot->shape().element_type();
auto* lhs = Cast<HloParameterInstruction>(dot->operand(0));
auto* rhs = Cast<HloParameterInstruction>(dot->operand(1));
kernel::gemm_universal::ArgsIndices indices = {
lhs->parameter_number(), rhs->parameter_number(),
computation->num_parameters()};
auto& lhs_shape = lhs->shape();
auto& rhs_shape = rhs->shape();
size_t m = lhs_shape.dimensions(0);
size_t k = lhs_shape.dimensions(1);
size_t n = rhs_shape.dimensions(1);
TF_ASSIGN_OR_RETURN(
auto kernel,
kernel::gemm_universal::GetCutlassGemmKernel(
"cutlass_gemm", dtype, m, n, k, indices, {}, device));
return std::vector<CustomKernel>{std::move(kernel)};
}
};
class CutlassGemmWithUpcastFusion : public CustomKernelFusion {
public:
absl::StatusOr<std::vector<CustomKernel>> LoadKernels(
const se::DeviceDescription& device,
const HloComputation* computation) const final {
auto* dot = DynCast<HloDotInstruction>(computation->root_instruction());
if (dot == nullptr) {
return absl::InternalError(
"cutlass_gemm requires ROOT operation to be a dot");
}
TF_ASSIGN_OR_RETURN(auto matched, MatchGemmWithUpcast(dot));
if (matched.lhs_upcast != nullptr)
return absl::InternalError("only rhs upcasting is implemented");
auto dot_dtype = dot->shape().element_type();
auto upcast_dtype = matched.rhs_upcast->shape().element_type();
if (dot_dtype != PrimitiveType::BF16 || upcast_dtype != PrimitiveType::S8)
return absl::InternalError("unsupported upcasting pattern");
return absl::UnimplementedError("requires CUTLASS 3.3.0");
}
};
class CutlassGemmWithDynamicUpdateSliceFusion : public CustomKernelFusion {
public:
absl::StatusOr<std::vector<CustomKernel>> LoadKernels(
const se::DeviceDescription& device,
const HloComputation* computation) const final {
auto [root, workspace] =
MatchRootWithWorkspace(computation->root_instruction());
auto* dus = DynCast<HloDynamicUpdateSliceInstruction>(root);
if (dus == nullptr) {
return absl::InternalError(
"cutlass_gemm_with_dynamic_update_slice requires ROOT operation to "
"be a dynamic update slice");
}
TF_ASSIGN_OR_RETURN(auto matched, MatchGemmWithDynamicUpdateSlice(dus));
TF_RETURN_IF_ERROR(
MatchSimpleGemm(Cast<HloDotInstruction>(matched.dot),
{PrimitiveType::F32, PrimitiveType::BF16}));
auto dtype = matched.dot->shape().element_type();
auto* lhs = Cast<HloParameterInstruction>(matched.dot->operand(0));
auto* rhs = Cast<HloParameterInstruction>(matched.dot->operand(1));
auto* out = Cast<HloParameterInstruction>(matched.update_slice->operand(0));
kernel::gemm_universal::ArgsIndices args_indices = {
lhs->parameter_number(), rhs->parameter_number(),
out->parameter_number(), workspace != nullptr};
auto* offset =
Cast<HloParameterInstruction>(matched.update_slice->operand(2));
kernel::gemm_universal::DynamicSliceIndices slices;
slices.out = offset->parameter_number();
auto& lhs_shape = lhs->shape();
auto& rhs_shape = rhs->shape();
size_t m = lhs_shape.dimensions(0);
size_t k = lhs_shape.dimensions(1);
size_t n = rhs_shape.dimensions(1);
TF_ASSIGN_OR_RETURN(
auto kernel, kernel::gemm_universal::GetCutlassGemmKernel(
"cutlass_gemm_with_dynamic_update_slice", dtype, m, n,
k, args_indices, slices, device));
return std::vector<CustomKernel>{std::move(kernel)};
}
};
}
XLA_REGISTER_CUSTOM_FUSION_PATTERN(
::xla::gpu::CutlassGemmWithDynamicUpdateSlicePattern);
XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm", ::xla::gpu::CutlassGemmFusion);
XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm_with_upcast",
::xla::gpu::CutlassGemmWithUpcastFusion);
XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm_with_dynamic_update_slice",
::xla::gpu::CutlassGemmWithDynamicUpdateSliceFusion); | #include "xla/service/gpu/kernels/cutlass_gemm_fusion.h"
#include <cstdint>
#include <utility>
#include "xla/array.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/error_spec.h"
#include "xla/literal_util.h"
#include "xla/service/gpu/custom_kernel_fusion_rewriter.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
class CutlassFusionTest : public HloTestBase {};
TEST_F(CutlassFusionTest, RowMajorGemm) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: f32[15,19], p1: f32[19,17]) -> f32[15,17] {
%p0 = f32[15,19]{1,0} parameter(0)
%p1 = f32[19,17]{1,0} parameter(1)
ROOT %r = f32[15,17]{1,0} dot(%p0, %p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm {{.*}} {
; CHECK: [[P0:%[^ ]+]] = f32[15,19]{1,0} parameter(0)
; CHECK: [[P1:%[^ ]+]] = f32[19,17]{1,0} parameter(1)
; CHECK: ROOT [[DOT:%[^ ]+]] = f32[15,17]{1,0} dot([[P0]], [[P1]]),
; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0}
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[15,17]{1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"cutlass_gemm"}
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmWithUpcast) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: bf16[15,19], p1: s8[19,17]) -> bf16[15,17] {
%p0 = bf16[15,19]{1,0} parameter(0)
%p1 = s8[19,17]{1,0} parameter(1)
%c1 = bf16[19,17]{1,0} convert(%p1)
ROOT %r = bf16[15,17]{1,0} dot(%p0, %c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_upcast {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = bf16[15,19]{1,0} parameter
; CHECK-DAG: [[P1:%[^ ]+]] = s8[19,17]{1,0} parameter
; CHECK: [[C1:%[^ ]+]] = bf16[19,17]{1,0} convert([[P1]])
; CHECK: ROOT [[DOT:%[^ ]+]] = bf16[15,17]{1,0} dot([[P0]], [[C1]]),
; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0}
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = bf16[15,17]{1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_upcast,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{"name":"cutlass_gemm_with_upcast"}
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithUpcastPattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSlice) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: f32[2,2,2], p1: f32[2,2], i: s32[]) -> f32[2,2,2] {
%p0 = f32[2,2,2]{2,1,0} parameter(0)
%p1 = f32[2,2]{1,0} parameter(1)
%i = s32[] parameter(2)
%dot = f32[2,2]{1,0} dot(%p1, %p1),
lhs_contracting_dims={1},
rhs_contracting_dims={0}
%bc = f32[1,2,2]{2,1,0} bitcast(%dot)
ROOT %r = f32[2,2,2]{2,1,0} dynamic-update-slice(%p0, %bc, %i, %i, %i)
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter
; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2,2]{2,1,0} parameter
; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter
; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]])
; CHECK-DAG: [[CAST:%[^ ]+]] = f32[1,2,2]{2,1,0} bitcast([[DOT]])
; CHECK: ROOT [[DUS:%[^ ]+]] = f32[2,2,2]{2,1,0} dynamic-update-slice(
; CHECK: [[P1]], [[CAST]], [[P2]], [[P2]], [[P2]]
; CHECK: )
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[2,2,2]{2,1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{
; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice"
; CHECK: }
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceMultipleUses) {
const char* hlo = R"(
HloModule test
ENTRY %main {
%p0 = f32[2,2,2]{2,1,0} parameter(0)
%p1 = f32[2,2]{1,0} parameter(1)
%i = s32[] parameter(2)
%dot = f32[2,2]{1,0} dot(%p1, %p1),
lhs_contracting_dims={1},
rhs_contracting_dims={0}
%add = f32[2,2]{1,0} add(%dot, %dot)
%cast = f32[1,2,2]{2,1,0} bitcast(%dot)
%dus = f32[2,2,2]{2,1,0} dynamic-update-slice(%p0, %cast, %i, %i, %i)
ROOT %r = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(%add, %dus)
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} {
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter
; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2,2]{2,1,0} parameter
; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter
; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]])
; CHECK-DAG: [[CAST:%[^ ]+]] = f32[1,2,2]{2,1,0} bitcast([[DOT]])
; CHECK: ROOT [[DUS:%[^ ]+]] = f32[2,2,2]{2,1,0} dynamic-update-slice(
; CHECK: [[P1]], [[CAST]], [[P2]], [[P2]], [[P2]]
; CHECK: )
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: [[OFFSET:%[^ ]+]] = s32[] parameter(2)
; CHECK: [[FUSION:%[^ ]+]] = f32[2,2,2]{2,1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{
; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice"
; CHECK: }
; CHECK: }
; CHECK: [[SLICE:%[^ ]+]] = f32[1,2,2]{2,1,0} dynamic-slice(
; CHECK: [[FUSION]], [[OFFSET]], [[OFFSET]], [[OFFSET]]),
; CHECK: dynamic_slice_sizes={1,2,2}
; CHECK: [[CAST:%[^. ]+]] = f32[2,2]{1,0} bitcast([[SLICE]])
; CHECK: [[ADD:%[^. ]+]] = f32[2,2]{1,0} add([[CAST]], [[CAST]])
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceWithoutBitcast) {
const char* hlo = R"(
HloModule test
ENTRY %main (p0: f32[4,2], p1: f32[2,2], i: s32[]) -> f32[4,2] {
%p0 = f32[4,2]{1,0} parameter(0)
%p1 = f32[2,2]{1,0} parameter(1)
%i = s32[] parameter(2)
%dot = f32[2,2]{1,0} dot(%p1, %p1),
lhs_contracting_dims={1},
rhs_contracting_dims={0}
ROOT %r = f32[4,2]{1,0} dynamic-update-slice(%p0, %dot, %i, %i)
}
)";
const char* expected = R"(
; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} {
; CHECK-DAG: [[P1:%[^ ]+]] = f32[4,2]{1,0} parameter
; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter
; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]])
; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter
; CHECK: ROOT [[DUS:%[^ ]+]] = f32[4,2]{1,0} dynamic-update-slice([[P1]], [[DOT]], [[P2]], [[P2]])
; CHECK: }
; CHECK: ENTRY %main {{.*}} {
; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[4,2]{1,0} fusion
; CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice,
; CHECK: backend_config={
; CHECK: "kind":"__custom_fusion",
; CHECK: "custom_fusion_config":{
; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice"
; CHECK: }
; CHECK: }
; CHECK: }
)";
CustomKernelFusionPatternRegistry patterns;
patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>();
auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo();
CustomKernelFusionRewriter pass(&device, &patterns);
RunAndFilecheckHloRewrite(hlo, std::move(pass), expected);
}
TEST_F(CutlassFusionTest, RowMajorGemmKernel) {
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
arg0 = f32[100,784]{1,0} parameter(0)
arg1 = f32[784,10]{1,0} parameter(1)
gemm = (f32[100,10]{1,0}, s8[0]{0}) custom-call(arg0, arg1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
ROOT get-tuple-element = f32[100,10]{1,0} get-tuple-element((f32[100,10]{1,0}, s8[0]{0}) gemm), index=0
})";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm {
arg0 = f32[100,784]{1,0} parameter(0)
arg1 = f32[784,10]{1,0} parameter(1)
ROOT dot = f32[100,10]{1,0} dot(arg0, arg1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
arg0 = f32[100,784]{1,0} parameter(0)
arg1 = f32[784,10]{1,0} parameter(1)
ROOT _ = f32[100,10]{1,0} fusion(arg0, arg1), kind=kCustom, calls=cutlass_gemm,
backend_config={"fusion_backend_config":{kind: "__custom_fusion", custom_fusion_config: {"name":"cutlass_gemm"}}}
})";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
error_spec, false));
}
TEST_F(CutlassFusionTest, RowMajorGemmWithUpcastKernel) {
GTEST_SKIP() << "Requires CUTLASS 3.3.0+";
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
p0 = bf16[16,32]{1,0} parameter(0)
p1 = s8[32,8]{1,0} parameter(1)
c1 = bf16[32,8]{1,0} convert(p1)
gemm = (bf16[16,8]{1,0}, s8[0]{0}) custom-call(p0, c1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
ROOT get-tuple-element = bf16[16,8]{1,0} get-tuple-element(gemm), index=0
})";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm_with_upcast {
p0 = bf16[16,32]{1,0} parameter(0)
p1 = s8[32,8]{1,0} parameter(1)
c1 = bf16[32,8]{1,0} convert(p1)
ROOT dot = bf16[16,8]{1,0} dot(p0, c1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[16,32]{1,0} parameter(0)
p1 = s8[32,8]{1,0} parameter(1)
ROOT _ = bf16[16,8]{1,0} fusion(p0, p1), kind=kCustom, calls=cutlass_gemm_with_upcast,
backend_config={"fusion_backend_config":{kind: "__custom_fusion", custom_fusion_config: {"name":"cutlass_gemm_with_upcast"}}}
})";
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
error_spec, false));
}
TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceKernel) {
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
p0 = bf16[2,8,8]{2,1,0} parameter(0)
p1 = bf16[8,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
gemm.tuple = (bf16[8,8]{1,0}, s8[0]{0}) custom-call(p1, p1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
gemm = bf16[8,8]{1,0} get-tuple-element(gemm.tuple), index=0
cast = bf16[1,8,8]{2,1,0} bitcast(gemm)
ROOT r = bf16[2,8,8]{2,1,0} dynamic-update-slice(p0, cast, p2, p3, p3)
})";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm {
p0.1 = bf16[8,8]{1,0} parameter(0)
p1.1 = bf16[2,8,8]{2,1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
dot.1 = bf16[8,8]{1,0} dot(p0.1, p0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
bc.1 = bf16[1,8,8]{2,1,0} bitcast(dot.1)
r.1 = bf16[2,8,8]{2,1,0} dynamic-update-slice(p1.1, bc.1, p2, p3, p3)
workspace = u8[1024]{0} custom-call(),
custom_call_target="__custom_kernel_fusion$workspace",
api_version=API_VERSION_TYPED_FFI
ROOT tuple = (bf16[2,8,8]{2,1,0}, u8[1024]{0}) tuple(r.1, workspace)
}
ENTRY e {
p0 = bf16[2,8,8]{2,1,0} parameter(0)
p1 = bf16[8,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
r.0 = (bf16[2,8,8]{2,1,0}, u8[1024]{0}) fusion(p1, p0, p2, p3), kind=kCustom,
calls=%cutlass_gemm,
backend_config={"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm_with_dynamic_update_slice"}}}
ROOT %get-tuple-element = bf16[2,8,8]{2,1,0} get-tuple-element(r.0), index=0
})";
Array3D<bfloat16> p0_arr(2, 8, 8);
Array2D<bfloat16> p1_arr(8, 8);
p1_arr.Each([](int64_t i, int64_t j, bfloat16* out) {
*out = bfloat16{1.0f * i * j};
});
Array<int32_t> p2_arr({}, 1);
Array<int32_t> p3_arr({}, 0);
auto p0 = LiteralUtil::CreateFromArray(p0_arr);
auto p1 = LiteralUtil::CreateFromArray(p1_arr);
auto p2 = LiteralUtil::CreateFromArray(p2_arr);
auto p3 = LiteralUtil::CreateFromArray(p3_arr);
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
{&p0, &p1, &p2, &p3}, error_spec,
false));
}
TEST_F(CutlassFusionTest,
RowMajorGemmWithDynamicUpdateSliceKernelWithoutBitcast) {
ErrorSpec error_spec{1e-3, 1e-3};
const char* hlo_text_cublas = R"(
HloModule cublas
ENTRY e {
p0 = bf16[16,8]{1,0} parameter(0)
p1 = bf16[8,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
gemm.tuple = (bf16[8,8]{1,0}, s8[0]{0}) custom-call(p1, p1),
custom_call_target="__cublas$gemm",
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}}
gemm = bf16[8,8]{1,0} get-tuple-element(gemm.tuple), index=0
ROOT r = bf16[16,8]{1,0} dynamic-update-slice(p0, gemm, p2, p3)
}
)";
const char* hlo_text_custom_fusion = R"(
HloModule cutlass
cutlass_gemm {
p0.1 = bf16[8,8]{1,0} parameter(0)
p1.1 = bf16[16,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
dot.1 = bf16[8,8]{1,0} dot(p0.1, p0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
r.1 = bf16[16,8]{1,0} dynamic-update-slice(p1.1, dot.1, p2, p3)
workspace = u8[1024]{0} custom-call(),
custom_call_target="__custom_kernel_fusion$workspace",
api_version=API_VERSION_TYPED_FFI
ROOT tuple = (bf16[16,8]{1,0}, u8[1024]{0}) tuple(r.1, workspace)
}
ENTRY e {
p0 = bf16[16,8]{1,0} parameter(0)
p1 = bf16[8,8]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
r.0 = (bf16[16,8]{1,0}, u8[1024]{0}) fusion(p1, p0, p2, p3), kind=kCustom,
calls=%cutlass_gemm,
backend_config={"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm_with_dynamic_update_slice"}}}
ROOT %get-tuple-element = bf16[16,8]{1,0} get-tuple-element(r.0), index=0
})";
Array2D<bfloat16> p0_arr(16, 8);
Array2D<bfloat16> p1_arr(8, 8);
p1_arr.Each([](int64_t i, int64_t j, bfloat16* out) {
*out = bfloat16{1.0f * i * j};
});
Array<int32_t> p2_arr({}, 0);
Array<int32_t> p3_arr({}, 1);
auto p0 = LiteralUtil::CreateFromArray(p0_arr);
auto p1 = LiteralUtil::CreateFromArray(p1_arr);
auto p2 = LiteralUtil::CreateFromArray(p2_arr);
auto p3 = LiteralUtil::CreateFromArray(p3_arr);
EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion,
{&p0, &p1, &p2, &p3}, error_spec,
false));
}
}
#ifndef XLA_SERVICE_GPU_KERNELS_TOPK_CUSTOM_KERNEL_H_
#define XLA_SERVICE_GPU_KERNELS_TOPK_CUSTOM_KERNEL_H_
#include <cstddef>
#include <string>
#include "absl/status/statusor.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu::kernel::topk {
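// Returns a custom kernel computing the top-k elements (values and indices)
// for each of `batch_size` rows of `num_elements` elements of type `dtype`.
//
// Illustrative usage (a sketch; the kernel name is arbitrary):
//
//   absl::StatusOr<CustomKernel> topk =
//       GetTopKKernel("topk_f32", PrimitiveType::F32,
//                     /*num_elements=*/1024, /*k=*/8, /*batch_size=*/16);
//
// The packed kernel arguments are, in order: the input data, the top-k values
// output, and the top-k indices output.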
absl::StatusOr<CustomKernel> GetTopKKernel(std::string name,
PrimitiveType dtype,
size_t num_elements, size_t k,
size_t batch_size);
}
#endif
#include "xla/service/gpu/kernels/topk_custom_kernel.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
#include "xla/service/gpu/kernels/topk_kernel_common.h"
#endif
namespace xla::gpu::kernel::topk {
#if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM)
namespace {
using KernelArgsPacking = se::MultiKernelLoaderSpec::KernelArgsPacking;
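// Estimates the number of threads per block: smaller k values allow more
// parallel k-slices per block (scaled from a 512-thread estimate), capped by
// kTopKMaxThreadsPerBlock and by the number of k-sized slices that fit in an
// n-element row.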
size_t EstimateOptimalNumThreads(size_t n, size_t k, size_t batch_size) {
constexpr size_t kEstimatedThreadsPerBlock = 512;
constexpr size_t kMaxKValue = 16;
size_t simultaneous_threads_per_block =
kEstimatedThreadsPerBlock * (kMaxKValue / k);
size_t threads_per_block =
std::min(simultaneous_threads_per_block, kTopKMaxThreadsPerBlock);
size_t min_slice = absl::bit_floor(n / absl::bit_ceil(k));
return std::min(threads_per_block, min_slice);
}
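// Resolves the device kernel specialized for the smallest compile-time K in
// {1, 2, 4, 8, 16} that covers the requested k; larger k values are not
// supported.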
template <typename T>
absl::StatusOr<void*> GetKernel(int n, int k) {
if (k <= 1) return GetTopKKernelForK<T, 1>(n);
if (k <= 2) return GetTopKKernelForK<T, 2>(n);
if (k <= 4) return GetTopKKernelForK<T, 4>(n);
if (k <= 8) return GetTopKKernelForK<T, 8>(n);
if (k <= 16) return GetTopKKernelForK<T, 16>(n);
return absl::UnimplementedError(absl::StrCat("Unsupported K: ", k));
}
template <typename T>
KernelArgsPacking CreateTopKArgsPacking(size_t num_elements, size_t k) {
using Packed = absl::StatusOr<std::unique_ptr<se::KernelArgsPackedArrayBase>>;
return [=](const se::Kernel& kernel, const se::KernelArgs& args) -> Packed {
auto* mem_args = se::Cast<se::KernelArgsDeviceMemoryArray>(&args);
se::DeviceMemory<T> data(mem_args->device_memory_args()[0]);
se::DeviceMemory<T> top_elements(mem_args->device_memory_args()[1]);
se::DeviceMemory<uint32_t> top_indices(mem_args->device_memory_args()[2]);
return se::PackKernelArgs(args.number_of_shared_bytes(), data, num_elements,
top_elements, top_indices, k);
};
}
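// Builds the CustomKernel for one element type: one block per batch row,
// EstimateOptimalNumThreads() threads per block, and a shared-memory scratch
// buffer sized for absl::bit_ceil(k) packed 8-byte key/value pairs per
// wavefront lane.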
template <typename T>
absl::StatusOr<CustomKernel> GetTypedTopK(std::string name, size_t num_elements,
size_t k, size_t batch_size) {
constexpr size_t kMaxKVSize = sizeof(uint64_t);
int shmem_size = absl::bit_ceil(k) * kMaxKVSize * GetTopKWaveFrontSize<T>();
int num_threads = EstimateOptimalNumThreads(num_elements, k, batch_size);
if (num_threads == 0) {
return absl::FailedPreconditionError(
"Invalid kernel parameters. This is likely a bug in the "
"TopkSpecializer.");
}
auto packing = CreateTopKArgsPacking<T>(num_elements, k);
se::MultiKernelLoaderSpec spec(5, std::move(packing));
TF_ASSIGN_OR_RETURN(void* kernel_symbol, GetKernel<T>(num_elements, k));
spec.AddInProcessSymbol(kernel_symbol, name);
return CustomKernel(std::move(name), std::move(spec),
se::BlockDim(batch_size, 1, 1),
se::ThreadDim(num_threads, 1, 1), shmem_size);
}
}
absl::StatusOr<CustomKernel> GetTopKKernel(std::string name,
PrimitiveType dtype,
size_t num_elements, size_t k,
size_t batch_size) {
switch (dtype) {
case PrimitiveType::F32:
return GetTypedTopK<float>(std::move(name), num_elements, k, batch_size);
case PrimitiveType::BF16:
return GetTypedTopK<bfloat16>(std::move(name), num_elements, k,
batch_size);
default:
return absl::InvalidArgumentError(
absl::StrCat("Unsupported GpuTopK data type: ", dtype));
}
}
#else
absl::StatusOr<CustomKernel> GetTopKKernel(std::string name,
PrimitiveType dtype,
size_t num_elements, size_t k,
size_t batch_size) {
return absl::InternalError("XLA compiled without CUDA support");
}
#endif
}
#include "xla/service/gpu/kernels/topk_custom_kernel.h"
#include <algorithm>
#include <cstdint>
#include <cstring>
#include <functional>
#include <tuple>
#include <vector>
#include <gtest/gtest.h>
#include "absl/random/random.h"
#include "absl/strings/ascii.h"
#include "absl/strings/substitute.h"
#include "xla/service/platform_util.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_factory.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu::kernel::topk {
using ::testing::Combine;
using ::testing::Values;
template <typename T>
std::vector<T> RandomVecRange(int num_elements, T start, T end) {
std::vector<T> local;
local.reserve(num_elements);
thread_local absl::BitGen gen;
for (int i = 0; i < num_elements; ++i) {
local.push_back(absl::Uniform<T>(gen, start, end));
}
return local;
}
template <typename T>
std::vector<T> RandomVec(int num_elements) {
return RandomVecRange(num_elements, static_cast<T>(0),
static_cast<T>(num_elements));
}
template <typename T>
std::vector<T> RandomVecNegative(int num_elements) {
return RandomVecRange(num_elements, -static_cast<T>(num_elements),
static_cast<T>(0));
}
PrimitiveType Get(float) { return PrimitiveType::F32; }
PrimitiveType Get(bfloat16) { return PrimitiveType::BF16; }
using TopKKernelTest = ::testing::TestWithParam<std::tuple<int, int, int, int>>;
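// Parameterized over (input length in KiB, k, batch size, extra offset added
// to the input length); see the INSTANTIATE_TEST_SUITE_P at the bottom.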
TEST_P(TopKKernelTest, TopKFloat) {
using T = float;
auto name =
absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value());
se::Platform* platform = se::PlatformManager::PlatformWithName(name).value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
auto stream = executor->CreateStream().value();
const auto [n_kb, k, batch_size, offset] = GetParam();
const size_t n = n_kb * 1024 + offset;
se::DeviceMemory<T> input_buffer =
executor->AllocateArray<T>(n * batch_size, 0);
se::DeviceMemory<T> output_values =
executor->AllocateArray<T>(k * batch_size, 0);
se::DeviceMemory<uint32_t> output_indices =
executor->AllocateArray<uint32_t>(k * batch_size, 0);
auto source = RandomVec<T>(n * batch_size);
TF_ASSERT_OK(
stream->Memcpy(&input_buffer, source.data(), n * batch_size * sizeof(T)));
TF_ASSERT_OK(stream->MemZero(&output_values, k * batch_size * sizeof(T)));
TF_ASSERT_OK(
stream->MemZero(&output_indices, k * batch_size * sizeof(uint32_t)));
auto custom_kernel =
GetTopKKernel("topk", PrimitiveType::F32, n, k, batch_size);
TF_ASSERT_OK_AND_ASSIGN(
auto kernel,
se::KernelFactory::Create(executor, custom_kernel->kernel_spec()));
se::KernelArgsDeviceMemoryArray arr(
std::vector<se::DeviceMemoryBase>(
{input_buffer, output_values, output_indices}),
custom_kernel->shared_memory_bytes());
TF_ASSERT_OK(stream->Launch(custom_kernel->thread_dims(),
custom_kernel->block_dims(), *kernel, arr));
std::vector<T> got(k);
ASSERT_TRUE(stream->BlockHostUntilDone().ok());
for (int i = 0; i < batch_size; i++) {
TF_ASSERT_OK(stream->Memcpy(got.data(), output_values.GetSlice(k * i, k),
k * sizeof(T)));
std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1));
std::sort(slice.begin(), slice.end(), std::greater<T>());
slice.resize(k);
EXPECT_THAT(got, ::testing::ElementsAreArray(slice))
<< " k=" << k << ", batch_size=" << batch_size << " i=" << i;
}
}
TEST_P(TopKKernelTest, TopKPackedNegative) {
using T = float;
auto name =
absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value());
se::Platform* platform = se::PlatformManager::PlatformWithName(name).value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
auto stream = executor->CreateStream().value();
const auto [n_kb, k, batch_size, offset] = GetParam();
const size_t n = n_kb * 1024 + offset;
se::DeviceMemory<T> input_buffer =
executor->AllocateArray<T>(n * batch_size, 0);
se::DeviceMemory<T> output_values =
executor->AllocateArray<T>(k * batch_size, 0);
se::DeviceMemory<uint32_t> output_indices =
executor->AllocateArray<uint32_t>(k * batch_size, 0);
auto source = RandomVecNegative<T>(n * batch_size);
TF_ASSERT_OK(
stream->Memcpy(&input_buffer, source.data(), n * batch_size * sizeof(T)));
TF_ASSERT_OK(stream->MemZero(&output_values, k * batch_size * sizeof(T)));
TF_ASSERT_OK(
stream->MemZero(&output_indices, k * batch_size * sizeof(uint32_t)));
auto custom_kernel =
GetTopKKernel("topk", PrimitiveType::F32, n, k, batch_size);
TF_ASSERT_OK_AND_ASSIGN(
auto kernel,
se::KernelFactory::Create(executor, custom_kernel->kernel_spec()));
se::KernelArgsDeviceMemoryArray arr(
std::vector<se::DeviceMemoryBase>(
{input_buffer, output_values, output_indices}),
custom_kernel->shared_memory_bytes());
TF_ASSERT_OK(stream->Launch(custom_kernel->thread_dims(),
custom_kernel->block_dims(), *kernel, arr));
std::vector<T> got(k);
ASSERT_TRUE(stream->BlockHostUntilDone().ok());
for (int i = 0; i < batch_size; i++) {
TF_ASSERT_OK(stream->Memcpy(got.data(), output_values.GetSlice(k * i, k),
k * sizeof(T)));
std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1));
std::sort(slice.begin(), slice.end(), std::greater<T>());
slice.resize(k);
EXPECT_THAT(got, ::testing::ElementsAreArray(slice))
<< " k=" << k << ", batch_size=" << batch_size << " i=" << i;
}
}
INSTANTIATE_TEST_SUITE_P(TopKTests, TopKKernelTest,
Combine(
Values(1, 8, 12, 64, 128),
Values(1, 2, 8, 16, 7, 12),
Values(1, 16, 64, 128),
Values(0, 7, 4)),
[](const auto& info) {
return absl::Substitute(
"n$0KiB_k$1_batch_size$2_offset$3",
std::get<0>(info.param), std::get<1>(info.param),
std::get<2>(info.param),
std::get<3>(info.param));
});
}
#ifndef XLA_SERVICE_GPU_KERNELS_CUTLASS_GEMM_CUSTOM_KERNEL_H_
#define XLA_SERVICE_GPU_KERNELS_CUTLASS_GEMM_CUSTOM_KERNEL_H_
#include <cstdint>
#include <string>
#include "absl/status/statusor.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/cutlass_gemm.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu::kernel::gemm_universal {
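// Returns a CUTLASS-backed GEMM custom kernel for an m x n x k problem.
// `indices` maps the kernel's lhs/rhs/out buffers (and an optional trailing
// workspace) to positions in the caller's argument list, and `slices`
// optionally names the argument that carries a dynamic-slice offset for the
// output. LoadCutlassGemmKernel does the same, but loads the adaptor and the
// device kernel from a shared library at `library_path`.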
absl::StatusOr<CustomKernel> GetCutlassGemmKernel(
std::string name, PrimitiveType dtype, int32_t m, int32_t n, int32_t k,
const ArgsIndices& indices, const DynamicSliceIndices& slices,
const se::DeviceDescription& device);
absl::StatusOr<CustomKernel> LoadCutlassGemmKernel(
std::string name, const std::string& library_path, PrimitiveType dtype,
int32_t m, int32_t n, int32_t k, const ArgsIndices& indices,
const DynamicSliceIndices& slices, const se::DeviceDescription& device);
}
#endif
#include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "third_party/gpus/cuda/include/cuda.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/kernels/cutlass_gemm.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/xla_data.pb.h"
namespace xla::gpu::kernel::gemm_universal {
static constexpr auto Default = Arch::kDefault;
static constexpr auto Sm80 = Arch::kSm80;
static constexpr auto Sm90 = Arch::kSm90;
extern template struct Adaptor<F32xF32ToF32<Default>>;
extern template struct DeviceKernel<F32xF32ToF32<Default>>;
extern template struct Adaptor<Bf16xBf16ToBf16<Default>>;
extern template struct DeviceKernel<Bf16xBf16ToBf16<Default>>;
extern template struct Adaptor<Bf16xBf16ToBf16<Sm80>>;
extern template struct DeviceKernel<Bf16xBf16ToBf16<Sm80>>;
extern template struct Adaptor<Bf16xBf16ToBf16<Sm90>>;
extern template struct DeviceKernel<Bf16xBf16ToBf16<Sm90>>;
using KernelArgsPacking = se::MultiKernelLoaderSpec::KernelArgsPacking;
template <typename Dim>
static Dim As(Dim3 dim3) {
return Dim(dim3.x, dim3.y, dim3.z);
}
template <typename Dim>
static std::optional<Dim> As(std::optional<Dim3> dim3) {
if (dim3.has_value()) return Dim(dim3->x, dim3->y, dim3->z);
return std::nullopt;
}
static int32_t* SlicePtr(const se::KernelArgsDeviceMemoryArray* args,
int64_t index) {
const void* opaque = args->device_memory_ptr(index);
return static_cast<int32_t*>(const_cast<void*>(opaque));
}
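// Creates the KernelArgsPacking callback used at launch time: it translates
// the StreamExecutor device-memory argument array into CUTLASS `Arguments`,
// rejects problem sizes the adaptor cannot implement, initializes the opaque
// `Params` blob using the device SM count and the kernel occupancy, and packs
// `Params` together with the dynamic-slice pointers.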
template <typename Tag>
KernelArgsPacking ArgsPacking(int32_t m, int32_t n, int32_t k,
const ArgsIndices& indices,
const DynamicSliceIndices& slices,
int32_t device_sms, Adaptor<Tag> adaptor) {
using Packed = absl::StatusOr<std::unique_ptr<se::KernelArgsPackedArrayBase>>;
struct Params {
alignas(128) std::byte storage[1024];
};
return [=](const se::Kernel& kernel, const se::KernelArgs& args) -> Packed {
auto* mem_args = se::Cast<se::KernelArgsDeviceMemoryArray>(&args);
Arguments arguments = {m, n, k};
arguments.lhs = const_cast<void*>(mem_args->device_memory_ptr(indices.lhs));
arguments.rhs = const_cast<void*>(mem_args->device_memory_ptr(indices.rhs));
arguments.out = const_cast<void*>(mem_args->device_memory_ptr(indices.out));
if (indices.has_workspace) {
size_t num_mem_args = mem_args->device_memory_args().size();
arguments.workspace =
const_cast<void*>(mem_args->device_memory_ptr(num_mem_args - 1));
} else {
arguments.workspace = nullptr;
}
if (slices.out.has_value()) {
arguments.slices.out = SlicePtr(mem_args, *slices.out);
}
if (!adaptor.CanImplement(arguments)) {
return absl::InternalError(absl::StrCat(
"CUTLASS kernel can not implement gemm for a given problem size",
": m=", m, ", n=", n, ", k=", k));
}
auto threads = As<se::ThreadDim>(adaptor.ThreadDim());
auto shmem_bytes = adaptor.SharedMemoryBytes();
static int32_t sm_occupancy =
kernel.GetMaxOccupiedBlocksPerCore(threads, shmem_bytes).value_or(1);
if (sm_occupancy == 0) {
LOG_FIRST_N(WARNING, 1)
<< "CUTLASS gemm kernel reported 0 occupancy: threads_per_block="
<< (threads.x * threads.y * threads.z)
<< ", dynamic_shared_memory_bytes=" << shmem_bytes;
}
Params params;
adaptor.Initialize(¶ms, arguments, device_sms, sm_occupancy);
return se::PackKernelArgs<Params, DynamicSliceArguments>(
args.number_of_shared_bytes(), params, arguments.slices);
};
}
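// Assembles a CustomKernel for one kernel tag: block/thread/cluster dimensions
// and the shared-memory size are queried from the adaptor, and the device-side
// entry point is registered as an in-process symbol.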
template <typename Tag>
static absl::StatusOr<CustomKernel> Load(std::string name, int32_t m, int32_t n,
int32_t k, const ArgsIndices& indices,
const DynamicSliceIndices& slices,
const se::DeviceDescription& device,
Adaptor<Tag> adaptor = {},
DeviceKernel<Tag> kernel = {}) {
auto cluster_dim = As<se::ClusterDim>(adaptor.ClusterDim());
auto block_dim = As<se::BlockDim>(adaptor.BlockDim(m, n, k));
auto thread_dim = As<se::ThreadDim>(adaptor.ThreadDim());
auto shared_memory_bytes = adaptor.SharedMemoryBytes();
auto packing =
ArgsPacking<Tag>(m, n, k, indices, slices, device.core_count(), adaptor);
se::MultiKernelLoaderSpec spec(2, std::move(packing));
spec.AddInProcessSymbol(kernel.symbol(), name);
if (cluster_dim.has_value()) {
return CustomKernel(std::move(name), std::move(spec), block_dim, thread_dim,
*cluster_dim, shared_memory_bytes);
} else {
return CustomKernel(std::move(name), std::move(spec), block_dim, thread_dim,
shared_memory_bytes);
}
}
absl::StatusOr<CustomKernel> GetCutlassGemmKernel(
std::string name, PrimitiveType dtype, int32_t m, int32_t n, int32_t k,
const ArgsIndices& indices, const DynamicSliceIndices& slices,
const se::DeviceDescription& device) {
auto& cuda_cc =
std::get<se::CudaComputeCapability>(device.gpu_compute_capability());
switch (dtype) {
case PrimitiveType::F32:
return Load<F32xF32ToF32<Default>>(std::move(name), m, n, k, indices,
slices, device);
case PrimitiveType::BF16:
#if CUDA_VERSION >= 12000
if (cuda_cc.IsAtLeastHopper()) {
return Load<Bf16xBf16ToBf16<Sm90>>(std::move(name), m, n, k, indices,
slices, device);
}
#endif
if (cuda_cc.IsAtLeastAmpere()) {
return Load<Bf16xBf16ToBf16<Sm80>>(std::move(name), m, n, k, indices,
slices, device);
}
return Load<Bf16xBf16ToBf16<Default>>(std::move(name), m, n, k, indices,
slices, device);
default:
return absl::InvalidArgumentError("Unsupported CUTLASS gemm data type");
}
}
absl::StatusOr<CustomKernel> LoadCutlassGemmKernel(
std::string name, const std::string& library_path, PrimitiveType dtype,
int32_t m, int32_t n, int32_t k, const ArgsIndices& indices,
const DynamicSliceIndices& slices, const se::DeviceDescription& device) {
auto adaptor = Adaptor<DlOpenedKernel>::Load(library_path);
if (!adaptor.has_value()) {
return absl::InternalError(
absl::StrCat("Failed to load CUTLASS adaptor from a shared library: ",
library_path));
}
auto kernel = DeviceKernel<DlOpenedKernel>::Load(library_path);
if (!kernel.has_value()) {
return absl::InternalError(absl::StrCat(
"Failed to load CUTLASS kernel from a shared library: ", library_path));
}
return Load<DlOpenedKernel>(std::move(name), m, n, k, indices, slices, device,
*adaptor, *kernel);
}
}
#include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h"
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_factory.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla::gpu::kernel::gemm_universal {
TEST(CutlassGemmKernelTest, SimpleGemm) {
se::Platform* platform =
se::PlatformManager::PlatformWithName("CUDA").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
auto stream = executor->CreateStream().value();
auto custom_kernel = GetCutlassGemmKernel(
"cutlass_gemm", PrimitiveType::F32, 4, 4, 4,
{0, 1, 2}, {}, executor->GetDeviceDescription());
TF_ASSERT_OK_AND_ASSIGN(
auto gemm,
se::KernelFactory::Create(executor, custom_kernel->kernel_spec()));
int64_t length = 4 * 4;
int64_t byte_length = sizeof(float) * length;
se::DeviceMemory<float> a = executor->AllocateArray<float>(length, 0);
se::DeviceMemory<float> b = executor->AllocateArray<float>(length, 0);
se::DeviceMemory<float> c = executor->AllocateArray<float>(length, 0);
float value = 2.0;
uint32_t pattern;
std::memcpy(&pattern, &value, sizeof(pattern));
TF_ASSERT_OK(stream->Memset32(&a, pattern, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, pattern, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
se::KernelArgsDeviceMemoryArray arr(
std::vector<se::DeviceMemoryBase>({a, b, c}),
custom_kernel->shared_memory_bytes());
TF_ASSERT_OK(stream->Launch(custom_kernel->thread_dims(),
custom_kernel->block_dims(), *gemm, arr));
std::vector<float> dst(length, -1.0f);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<float> expected(length, 16.0);
ASSERT_EQ(dst, expected);
}
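// Same GEMM as above, but the adaptor and device kernel are dlopen-ed from the
// prebuilt cutlass_gemm_kernel_f32xf32_to_f32.so instead of being linked in.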
TEST(CutlassGemmKernelTest, LoadFromSharedLibrary) {
std::string kernel_lib_path =
tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu", "kernels",
"cutlass_gemm_kernel_f32xf32_to_f32.so");
se::Platform* platform =
se::PlatformManager::PlatformWithName("CUDA").value();
se::StreamExecutor* executor = platform->ExecutorForDevice(0).value();
auto stream = executor->CreateStream().value();
auto custom_kernel = LoadCutlassGemmKernel(
"cutlass_gemm", kernel_lib_path, PrimitiveType::F32, 4, 4, 4,
{0, 1, 2}, {}, executor->GetDeviceDescription());
TF_ASSERT_OK_AND_ASSIGN(
auto gemm,
se::KernelFactory::Create(executor, custom_kernel->kernel_spec()));
int64_t length = 4 * 4;
int64_t byte_length = sizeof(float) * length;
se::DeviceMemory<float> a = executor->AllocateArray<float>(length, 0);
se::DeviceMemory<float> b = executor->AllocateArray<float>(length, 0);
se::DeviceMemory<float> c = executor->AllocateArray<float>(length, 0);
float value = 2.0;
uint32_t pattern;
std::memcpy(&pattern, &value, sizeof(pattern));
TF_ASSERT_OK(stream->Memset32(&a, pattern, byte_length));
TF_ASSERT_OK(stream->Memset32(&b, pattern, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
se::KernelArgsDeviceMemoryArray arr(
std::vector<se::DeviceMemoryBase>({a, b, c}),
custom_kernel->shared_memory_bytes());
TF_ASSERT_OK(stream->Launch(custom_kernel->thread_dims(),
custom_kernel->block_dims(), *gemm, arr));
std::vector<float> dst(length, -1.0f);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
std::vector<float> expected(length, 16.0);
ASSERT_EQ(dst, expected);
}
}
#ifndef XLA_SERVICE_GPU_FUSIONS_REDUCTION_MLIR_H_
#define XLA_SERVICE_GPU_FUSIONS_REDUCTION_MLIR_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include "xla/service/gpu/fusions/reduction_base.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
using HloValueMap =
absl::flat_hash_map<const HloInstruction*, llvm::SmallVector<mlir::Value>>;
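// Base class for MLIR reduction fusion emitters. Reduction roots are
// partitioned into disjoint groups; each group is emitted as a separate case
// selected by the y component of the block id, and non-reduction (side output)
// roots are written from within the same per-thread tile loop.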
class MlirReductionFusion : public MlirFusionEmitterBase {
public:
explicit MlirReductionFusion(const HloFusionAnalysis& analysis);
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override = 0;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override;
LaunchDimensions launch_dimensions() const override;
const ReductionGroups& GetGroups() const { return groups_; }
protected:
struct EmitterState;
friend struct EmitterState;
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const override;
std::vector<mlir_converter::EpilogueSpecification> GetEpilogues(
const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const override;
llvm::SmallVector<mlir::Value> EvaluateEpilogue(
mlir::ImplicitLocOpBuilder& b, const HloValueMap& results,
llvm::SmallVector<mlir::Value> outputs, EmitterState& state, int group_id,
mlir::MLIRContext* ctx, mlir::Value vector_index = nullptr) const;
virtual llvm::SmallVector<mlir::Value> EmitReduction(
int group_id, EmitterState& state) const = 0;
Shape GetReduceOperandShape() const {
return first_reduce_->operand(0)->shape();
}
virtual IndexingMap ComputeThreadIdToReductionInputIndexing(
mlir::MLIRContext* ctx) const = 0;
std::vector<std::vector<const HloInstruction*>> reduction_heroes_;
std::vector<std::vector<const HloInstruction*>> reduction_roots_;
std::vector<std::vector<const HloInstruction*>> side_output_roots_;
const HloFusionAnalysis& analysis_;
absl::InlinedVector<int64_t, 4> input_shape_;
absl::InlinedVector<int64_t, 4> tile_sizes_per_thread_;
absl::InlinedVector<int64_t, 4> tile_sizes_per_block_;
absl::InlinedVector<int64_t, 4> num_threads_;
absl::InlinedVector<int64_t, 4> num_blocks_;
int64_t total_num_blocks_;
int64_t total_num_threads_per_block_;
int64_t vector_size_ = -1;
ReductionDimensions reduction_dimensions_;
ReductionGroups groups_;
const HloInstruction* first_reduce_;
};
class MlirRowReductionFusion : public MlirReductionFusion {
public:
explicit MlirRowReductionFusion(const HloFusionAnalysis& analysis);
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override;
protected:
int GetRowsPerWarp() const;
llvm::SmallVector<mlir::Value> EmitReduction(
int group_id, EmitterState& state) const override;
IndexingMap ComputeThreadIdToReductionInputIndexing(
mlir::MLIRContext* ctx) const override;
};
class MlirColumnReductionFusion : public MlirReductionFusion {
public:
explicit MlirColumnReductionFusion(const HloFusionAnalysis& analysis);
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override;
protected:
llvm::SmallVector<mlir::Value> EmitReduction(
int group_id, EmitterState& state) const override;
IndexingMap ComputeThreadIdToReductionInputIndexing(
mlir::MLIRContext* ctx) const override;
int64_t num_warps_per_column_;
int64_t num_blocks_per_row_;
};
std::unique_ptr<MlirReductionFusion> CreateMlirReductionFusion(
const HloFusionAnalysis& analysis);
}
}
#endif
#include "xla/service/gpu/fusions/reduction_mlir.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/fusions/mlir/type_util.h"
#include "xla/service/gpu/fusions/reduction_base.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace ma = mlir::arith;
using llvm::SmallVector;
using mlir::AffineExpr;
using mlir::AffineMap;
using mlir::ImplicitLocOpBuilder;
using mlir::MLIRContext;
using mlir::Value;
using mlir::ValueRange;
using mlir_converter::PartitionedComputations;
LaunchDimensions MlirReductionFusion::launch_dimensions() const {
size_t blocks_y = groups_.grouped_roots.size();
return {se::BlockDim(total_num_blocks_,
static_cast<int64_t>(blocks_y), 1),
se::ThreadDim(total_num_threads_per_block_,
1, 1)};
}
MlirReductionFusion::MlirReductionFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis) {
auto* hero_reduction = analysis.FindHeroReduction();
CHECK_NE(hero_reduction, nullptr);
Shape input_shape = hero_reduction->operand(0)->shape();
reduction_dimensions_ =
GetReductionKindAndContiguousComponents(*hero_reduction);
VLOG(10) << reduction_dimensions_;
CHECK(ReductionIsRaceFree(hero_reduction->GetModule()->config(),
reduction_dimensions_))
<< "Non-race-free reductions should have been decomposed. Did "
"tree_reduction_rewriter run?";
groups_ = GroupDisjointReductions(analysis, true);
first_reduce_ = hero_reduction;
const auto& groups = GetGroups();
int num_groups = groups.grouped_roots.size();
side_output_roots_.resize(num_groups);
reduction_heroes_.resize(num_groups);
reduction_roots_.resize(num_groups);
absl::flat_hash_set<const HloInstruction*> seen_heroes;
for (auto [root_adaptor, hero_adaptor, is_reduction, group_id] :
llvm::zip(analysis.fusion_roots(), analysis.fusion_heroes(),
groups.is_reduction_root, groups.group_id_per_root)) {
const HloInstruction* root = &root_adaptor.instruction();
const HloInstruction* hero = &hero_adaptor.instruction();
if (is_reduction) {
if (seen_heroes.insert(hero).second) {
reduction_heroes_[group_id].push_back(hero);
}
reduction_roots_[group_id].push_back(root);
} else {
side_output_roots_[group_id].push_back(root);
}
}
}
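// Scratch state shared by the emission helpers: the builder, the partitioned
// computations and call targets of the fusion, and the starting position of
// every fusion root within the flattened list of entry-function results
// (tuple-shaped roots occupy several consecutive slots).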
struct MlirReductionFusion::EmitterState {
EmitterState(const MlirReductionFusion& owner,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion,
const PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_target)
: owner(owner),
entry_function(entry_function),
fusion(fusion),
computations(computations),
call_target(call_target),
builder(entry_function.getLoc(), entry_function),
computation(computations.FindPartitionedComputation(
fusion.fused_instructions_computation())) {
int index = 0;
for (const auto& root : owner.analysis_.fusion_roots()) {
fusion_result_index_starts[&root.instruction()] = index;
index += root.shape().IsTuple() ? root.shape().tuple_shapes_size() : 1;
}
}
HloValueMap EmitPerThreadReducedElements(int group_id,
const HloValueMap& inits);
mlir::func::FuncOp GetReducer(const HloInstruction* hero) const {
return call_target(hero->called_computations()[0]->root_instruction());
}
SmallVector<Value> AllocateSharedTiles(
absl::Span<const HloInstruction* const> heroes,
absl::Span<const int64_t> shape);
SmallVector<Value> FusionParams() {
return ValueRange(entry_function.getArguments().take_front(
fusion.fused_parameters().size()));
}
int OutputIndex(const HloInstruction* root, int result_index) {
return fusion_result_index_starts[root] + result_index;
}
const MlirReductionFusion& owner;
mlir::func::FuncOp entry_function;
const HloFusionInstruction& fusion;
const PartitionedComputations& computations;
const mlir_converter::CallTargetProvider& call_target;
ImplicitLocOpBuilder builder;
const mlir_converter::PartitionedComputation& computation;
absl::flat_hash_map<const HloInstruction*, int> fusion_result_index_starts;
SmallVector<Value> thread_and_block_ids;
};
std::vector<mlir_converter::EpilogueSpecification>
MlirReductionFusion::GetEpilogues(const HloFusionInstruction& fusion,
MLIRContext* mlir_context) const {
std::vector<mlir_converter::EpilogueSpecification> epilogues;
epilogues.reserve(reduction_heroes_.size());
for (const auto& [heroes, roots] :
llvm::zip(reduction_heroes_, reduction_roots_)) {
epilogues.push_back(
mlir_converter::EpilogueSpecification::FromOutputIndexing(
analysis_, heroes, roots, *this, mlir_context));
}
for (const auto& roots : side_output_roots_) {
for (const auto* root : roots) {
epilogues.push_back(
mlir_converter::EpilogueSpecification::FromIdentityIndexing(
root, root, mlir_context));
}
}
return epilogues;
}
absl::Status MlirReductionFusion::EmitEntryFunction(
const PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
EmitterState state{*this, entry_function, fusion, computations, call_targets};
auto& b = state.builder;
b.setInsertionPointToStart(entry_function.addEntryBlock());
state.thread_and_block_ids = EmitThreadAndBlockIds(b);
if (reduction_heroes_.size() == 1) {
b.create<mlir::func::ReturnOp>(EmitReduction(0, state));
return absl::OkStatus();
}
SmallVector<int64_t> cases(reduction_heroes_.size() - 1);
absl::c_iota(cases, 1);
auto switch_op = b.create<mlir::scf::IndexSwitchOp>(
entry_function.getResultTypes(), EmitBlockId(b, 1), cases, cases.size());
b.create<mlir::func::ReturnOp>(switch_op.getResults());
for (auto [id, region] : llvm::enumerate(switch_op->getRegions())) {
b.setInsertionPointToStart(®ion.emplaceBlock());
b.create<mlir::scf::YieldOp>(EmitReduction(id, state));
}
return absl::OkStatus();
}
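// Maps (thread id, block id) plus the per-thread tile symbols to an element
// index of the (possibly vectorized) reduction input, adding constraints that
// keep every coordinate within the input shape.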
IndexingMap MlirRowReductionFusion::ComputeThreadIdToReductionInputIndexing(
mlir::MLIRContext* ctx) const {
auto rank = input_shape_.size();
auto thread_offsets =
DelinearizeInBoundsIndex(getAffineDimExpr(0, ctx), num_threads_);
auto block_offsets =
DelinearizeInBoundsIndex(getAffineDimExpr(3, ctx), num_blocks_);
SmallVector<AffineExpr> results;
results.resize(rank);
for (int i = 0; i < rank; ++i) {
results[i] =
block_offsets[i] * tile_sizes_per_block_[i] + thread_offsets[i];
if (tile_sizes_per_thread_[i] > 1) {
results[i] = results[i] + getAffineSymbolExpr(i, ctx) * num_threads_[i];
}
}
IndexingMap map{AffineMap::get(6, rank, results, ctx),
DimVarsFromTensorSizes({total_num_threads_per_block_, 1, 1,
total_num_blocks_, 1, 1}),
RangeVarsFromTensorSizes(tile_sizes_per_thread_),
{}};
for (auto [result, input_dim] : llvm::zip(results, input_shape_)) {
map.AddConstraint(result, {0, input_dim - 1});
}
return map;
}
HloValueMap MlirReductionFusion::EmitterState::EmitPerThreadReducedElements(
int group_id, const HloValueMap& inits) {
auto tile_indexing =
owner.ComputeThreadIdToReductionInputIndexing(builder.getContext());
tile_indexing
.GetMutableDimensionBound(
KernelFusionInterface::kIndexingMapBlockIdxDims[1])
.upper = owner.reduction_heroes_.size();
tile_indexing.Simplify();
bool vectorize = owner.vector_size_ > 1;
SmallVector<Value> iter_arg_inits;
const auto& side_outputs = owner.side_output_roots_[group_id];
const auto& reductions = owner.reduction_heroes_[group_id];
absl::flat_hash_map<const HloInstruction*, int> iter_arg_starts;
for (const auto& [hero, init] : inits) {
iter_arg_starts[hero] = iter_arg_inits.size();
iter_arg_inits.append(init);
}
auto body_builder = [&](ValueRange iter_args, ValueRange dim_values,
ValueRange symbol_values) -> SmallVector<Value> {
auto tile_indices = mlir_converter::ApplyIndexing(tile_indexing, dim_values,
symbol_values, builder);
llvm::SmallVector<Value> results(iter_args.size(), nullptr);
for (auto* reduction : reductions) {
int arity = reduction->operand_count() / 2;
int start = iter_arg_starts[reduction];
SmallVector<Value> reduce_args = iter_args.slice(start, arity);
auto indices = mlir_converter::ApplyIndexing(
GetBitcastMap(owner.input_shape_, reduction->operand(0)->shape(),
builder.getContext()),
tile_indices, {}, builder);
reduce_args.append(ProvideParameterRange(computation, reduction, 0, arity,
indices, call_target,
entry_function, builder));
const auto& reducer = GetReducer(reduction);
absl::c_copy(
builder.create<PureCallOp>(reducer, reduce_args).getResults(),
results.begin() + start);
}
struct SideOutput {
llvm::SmallVector<Value> indices;
Value scalar;
};
llvm::SmallVector<SideOutput> side_output_values;
for (auto* side_output : side_outputs) {
auto indices = mlir_converter::ApplyIndexing(
GetBitcastMap(owner.input_shape_, side_output->shape(),
builder.getContext()),
tile_indices, {}, builder);
auto* root_tuple = fusion.fused_expression_root();
Value value = mlir_converter::ProvideParameter(
computation, root_tuple, root_tuple->operand_index(side_output),
indices, call_target, entry_function, builder)[0];
side_output_values.push_back({std::move(indices), value});
}
for (const auto& [side_output, values] :
llvm::zip(side_outputs, side_output_values)) {
int offset = iter_arg_starts[side_output];
results[offset] = builder.create<mlir::tensor::InsertOp>(
values.scalar, iter_args[offset], values.indices);
}
return results;
};
auto results_vector = owner.EmitThreadLoopNest(
builder, iter_arg_inits, tile_indexing, body_builder, vectorize);
mlir::ValueRange results = results_vector;
HloValueMap results_per_hero;
for (const auto& [hero, init] : inits) {
results_per_hero[hero] = results.slice(iter_arg_starts[hero], init.size());
}
return results_per_hero;
}
SmallVector<Value> MlirReductionFusion::EmitterState::AllocateSharedTiles(
absl::Span<const HloInstruction* const> heroes,
absl::Span<const int64_t> shape) {
SmallVector<Value> tiles;
for (auto* hero : heroes) {
for (int i = 0; i < hero->operand_count() / 2; ++i) {
auto tile_shape = ShapeUtil::MakeShapeWithDescendingLayout(
hero->operand(i)->shape().element_type(), shape);
tiles.push_back(builder.create<AllocateSharedOp>(
mlir_converter::TensorShapeToMlirType(tile_shape, builder)));
}
}
return tiles;
}
std::optional<IndexingMap> MlirReductionFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index, MLIRContext* ctx) const {
const auto& hero = analysis_.fusion_hero(root_index).instruction();
if (groups_.is_reduction_root[root_index] &&
hero_operand_index >= hero.operand_count() / 2) {
return std::nullopt;
}
if (!groups_.is_reduction_root[root_index]) {
return ComposeIndexingMaps(
*ComputeThreadIdToOutputIndexing(root_index, ctx),
*ComputeOutputToInputIndexing(
&analysis_.fusion_root(root_index).instruction(), 0, ctx)
.indexing_maps[hero_operand_index]
.begin());
}
auto map = ComputeThreadIdToReductionInputIndexing(ctx);
AddGroupIdConstraint(map, root_index, groups_);
return map * GetBitcastMap(input_shape_,
hero.operand(hero_operand_index)->shape(), ctx);
}
SmallVector<Value> MlirReductionFusion::EvaluateEpilogue(
ImplicitLocOpBuilder& b, const HloValueMap& results,
llvm::SmallVector<Value> outputs, EmitterState& state, int group_id,
MLIRContext* ctx, Value vector_index) const {
Value zero = b.create<ma::ConstantIndexOp>(0);
const auto& epilogue = state.computations.epilogues()[group_id];
if (epilogue.roots.empty()) return outputs;
llvm::SmallVector<Value> epilogue_input_symbols(
epilogue.root_indexing.front().GetAffineMap().getNumSymbols(), zero);
auto epilogue_input_indices = state.thread_and_block_ids;
epilogue_input_indices.append(epilogue_input_symbols);
if (!epilogue_input_symbols.empty() && vector_index) {
epilogue_input_symbols.back() = epilogue_input_indices.back() =
vector_index;
}
auto values = EmitEpilogue(group_id, state.computations, state.entry_function,
results, epilogue_input_indices, b);
int first_root_index = state.OutputIndex(epilogue.roots.front(), 0);
auto thread_has_output = mlir_converter::CheckConstraints(
*ComputeThreadIdToOutputIndexing(first_root_index, ctx),
state.thread_and_block_ids, epilogue_input_symbols, b);
for (auto [index, root] : llvm::enumerate(epilogue.roots)) {
auto output_indices = mlir_converter::ApplyIndexing(
epilogue.root_indexing[index], state.thread_and_block_ids,
epilogue_input_symbols, b);
for (auto [result_index, result] : llvm::enumerate(values.at(root))) {
auto& output = outputs[state.OutputIndex(root, result_index)];
output = b.create<PredicatedInsertOp>(thread_has_output, result, output,
output_indices);
}
}
return outputs;
}
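// Row reduction tiling. As a concrete data point (see the VariadicRowReduce
// test below), reducing f32[2,3,2048] over its last dimension ends up with 256
// threads per block, 3 blocks, a vector size of 2, and 8 per-thread tile steps
// along the minor reduced dimension.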
MlirRowReductionFusion::MlirRowReductionFusion(
const HloFusionAnalysis& analysis)
: MlirReductionFusion(analysis) {
CHECK(reduction_dimensions_.is_row_reduction);
Vector3 shape = reduction_dimensions_.dimensions;
Vector3 reduction_tiling = {
std::min(reduction_dimensions_
.dimensions[ReductionDimensions::kRowMajorReducedDimension],
BatchedReductionRaceFreeBound()),
1, 16};
int64_t num_threads_y = 1;
int64_t rows_per_warp = RowReductionGetRowsPerWarp(
shape[ReductionDimensions::kRowMinorReducedDimension]);
int64_t num_threads_x = [&] {
if (rows_per_warp > 1) {
return shape[ReductionDimensions::kRowMinorReducedDimension];
}
int64_t max_block_size =
MinThreadsXRowReduction(first_reduce_->GetModule()->config());
return std::min(
max_block_size,
RoundUpTo(
CeilOfRatio(shape[ReductionDimensions::kRowMinorReducedDimension],
reduction_tiling
[ReductionDimensions::kRowMinorReducedDimension]),
WarpSize()));
}();
constexpr int64_t kThreadsPerBlockTarget = 256;
if (num_threads_x * 2 <= kThreadsPerBlockTarget) {
int64_t kept_size = reduction_dimensions_
.dimensions[ReductionDimensions::kRowKeptDimension];
if (kept_size * num_threads_x <= kThreadsPerBlockTarget) {
num_threads_y = kept_size;
while ((num_threads_x * num_threads_y) % 32) ++num_threads_y;
} else {
num_threads_y = kThreadsPerBlockTarget / num_threads_x;
}
}
int vector_size =
GetVectorSizeForMlir(analysis, reduction_dimensions_, num_threads_x);
num_threads_ =
absl::InlinedVector<int64_t, 4>{1, num_threads_y, num_threads_x};
input_shape_ = {shape[0], shape[1], shape[2] / vector_size};
tile_sizes_per_thread_ = {
reduction_tiling[0], reduction_tiling[1],
std::max<int64_t>(reduction_tiling[2] / vector_size, 1)};
for (int i = 0; i < num_threads_.size(); ++i) {
tile_sizes_per_thread_[i] =
std::min(tile_sizes_per_thread_[i],
CeilOfRatio(input_shape_[i], num_threads_[i]));
}
if (rows_per_warp > 1) {
tile_sizes_per_thread_[2] = 1;
}
if (vector_size != 1) {
num_threads_.push_back(1);
input_shape_.push_back(vector_size);
tile_sizes_per_thread_.push_back(vector_size);
}
num_threads_.push_back(1);
input_shape_.push_back(1);
tile_sizes_per_thread_.push_back(1);
tile_sizes_per_block_.resize(input_shape_.size());
num_blocks_.resize(input_shape_.size());
for (int64_t i = 0; i < input_shape_.size(); ++i) {
tile_sizes_per_block_[i] = tile_sizes_per_thread_[i] * num_threads_[i];
CHECK_NE(tile_sizes_per_block_[i], 0);
num_blocks_[i] = CeilOfRatio(input_shape_[i], tile_sizes_per_block_[i]);
CHECK_NE(num_blocks_[i], 0);
}
total_num_blocks_ = Product(num_blocks_);
total_num_threads_per_block_ = Product(num_threads_);
vector_size_ = tile_sizes_per_thread_.back();
}
std::optional<IndexingMap>
MlirRowReductionFusion::ComputeThreadIdToOutputIndexing(
int64_t root_index, MLIRContext* ctx) const {
if (!groups_.is_reduction_root[root_index]) {
auto map = ComposeIndexingMaps(
ComputeThreadIdToReductionInputIndexing(ctx),
GetBitcastMap(input_shape_, analysis_.fusion_root(root_index).shape(),
ctx));
AddGroupIdConstraint(map, root_index, groups_);
return map;
}
const auto& hero = analysis_.fusion_hero(root_index).instruction();
auto thread_ids =
DelinearizeInBoundsIndex(mlir::getAffineDimExpr(0, ctx), num_threads_);
auto block_offsets = GetBlockOffsetsForTiling(
num_blocks_, tile_sizes_per_block_, input_shape_.size(), ctx);
auto physical_shape =
ShapeUtil::DeleteDimensions(hero.dimensions(), hero.operand(0)->shape());
std::vector<DimVar> dimension_ranges{
{{0, total_num_threads_per_block_ - 1}},
{},
{},
{{0, total_num_blocks_ - 1}},
{{0, static_cast<int64_t>(groups_.grouped_roots.size() - 1)}},
{},
};
constexpr int kRowKept = ReductionDimensions::kRowKeptDimension;
constexpr int kRowMinorReduced =
ReductionDimensions::kRowMinorReducedDimension;
auto map = [&]() {
IndexingMap linear_index(
mlir::AffineMap::get(
6, 0, block_offsets.getResult(kRowKept) + thread_ids[kRowKept],
ctx),
dimension_ranges, {}, {});
int rows_per_warp = GetRowsPerWarp();
if (rows_per_warp > 1) {
linear_index.AddConstraint(
thread_ids[kRowMinorReduced] % (WarpSize() / rows_per_warp), {0, 0});
} else {
linear_index.AddConstraint(thread_ids[kRowMinorReduced], {0, 0});
}
return ComposeIndexingMaps(
linear_index,
GetBitcastMap({input_shape_[kRowKept]}, physical_shape, ctx));
}();
AddGroupIdConstraint(map, root_index, groups_);
return map;
}
int MlirRowReductionFusion::GetRowsPerWarp() const {
return RowReductionGetRowsPerWarp(
input_shape_[ReductionDimensions::kRowMinorReducedDimension]);
}
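// Emits the per-thread tile loop for one reduction group and combines partial
// results with warp shuffles (ShuffleReduceOp). When several warps cooperate
// on one row, a shared tile of shape
// {num_threads_[kRowKeptDimension], num_warps_row} is used for the cross-warp
// step.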
llvm::SmallVector<mlir::Value> MlirRowReductionFusion::EmitReduction(
int group_id, EmitterState& state) const {
auto& b = state.builder;
auto* ctx = state.entry_function.getContext();
int num_warps_row =
num_threads_[ReductionDimensions::kRowMinorReducedDimension] / WarpSize();
Value zero = b.create<ma::ConstantIndexOp>(0);
Value one = b.create<ma::ConstantIndexOp>(1);
Value thread_id = state.thread_and_block_ids[0];
auto thread_indexing =
GetBitcastMap({total_num_threads_per_block_},
ShapeUtil::MakeShapeWithDescendingLayout(U8, num_threads_),
b.getContext());
auto thread_ids =
mlir_converter::ApplyIndexing(thread_indexing, {thread_id}, {}, b);
Value lane_id = b.create<mlir::gpu::LaneIdOp>(nullptr);
Value warp_id = b.create<ma::DivUIOp>(
thread_ids[ReductionDimensions::kRowMinorReducedDimension],
b.create<ma::ConstantIndexOp>(WarpSize()));
Value is_first_lane =
b.create<ma::CmpIOp>(ma::CmpIPredicate::eq, lane_id, zero);
int64_t vector_size = tile_sizes_per_thread_.back();
Value vector_size_cst = b.create<ma::ConstantIndexOp>(vector_size);
std::vector<int64_t> shared_tile_size;
if (GetRowsPerWarp() == 1 && num_warps_row > 1) {
CHECK_EQ(vector_size, 1);
shared_tile_size = {num_threads_[ReductionDimensions::kRowKeptDimension],
num_warps_row};
}
HloValueMap inits;
const auto& reductions = reduction_heroes_[group_id];
for (auto* hero : reductions) {
int arity = hero->operand_count() / 2;
inits[hero] =
ProvideParameterRange(state.computation, hero, arity, arity, {},
state.call_target, state.entry_function, b);
}
llvm::SmallVector<Value> outputs =
mlir::ValueRange(state.entry_function.getArguments().drop_front(
state.fusion.fused_parameters().size()));
for (auto* side_output : side_output_roots_[group_id]) {
inits[side_output].push_back(outputs[state.OutputIndex(side_output, 0)]);
}
auto accumulated = state.EmitPerThreadReducedElements(group_id, inits);
for (auto root : side_output_roots_[group_id]) {
outputs[state.OutputIndex(root, 0)] = accumulated[root].front();
}
for (auto* reduction : reductions) {
auto reducer = state.GetReducer(reduction);
int max_dist = WarpSize() / 2 / GetRowsPerWarp();
auto& values = accumulated[reduction];
values = b.create<ShuffleReduceOp>(reducer, values, max_dist).getResults();
}
  if (shared
#include "xla/service/gpu/fusions/reduction_mlir.h"
#include <optional>
#include <gtest/gtest.h>
#include "absl/strings/substitute.h"
#include "xla/error_spec.h"
#include "xla/service/gpu/fusions/mlir_emitter_test_base.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::SizeIs;
using MlirRowReductionTest = MlirEmitterTestBase<MlirRowReductionFusion>;
using MlirColumnReductionTest = MlirEmitterTestBase<MlirColumnReductionFusion>;
TEST_F(MlirRowReductionTest, VariadicRowReduce) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
scalar_lhs.0 = f32[] parameter(0)
scalar_rhs.0 = f32[] parameter(1)
scalar_lhs.1 = f32[] parameter(2)
scalar_rhs.1 = f32[] parameter(3)
add.0 = f32[] add(scalar_lhs.0, scalar_lhs.1)
add.1 = f32[] add(scalar_rhs.0, scalar_rhs.1)
ROOT t = (f32[], f32[]) tuple(add.0, add.1)
}
fused_computation {
param_0 = f32[2, 3, 2048] parameter(0)
param_1 = f32[2, 3, 2048] parameter(1)
param_2 = f32[] parameter(2)
ROOT d.1 = (f32[2, 3], f32[2, 3])
reduce(param_0, param_1, param_2, param_2), dimensions={2}, to_apply=Add
}
ENTRY main {
a = f32[2, 3, 2048] parameter(0)
b = f32[2, 3, 2048] parameter(1)
c = f32[] constant(0)
ROOT fusion = (f32[2, 3], f32[2, 3]) fusion(a, b, c),
kind=kInput, calls=fused_computation
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4] -> (
(d3 * 2 + d0 floordiv 128) floordiv 3,
(d3 * 2 + d0 floordiv 128) mod 3,
(d0 mod 128 + s2 * 128) * 2 + s3)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 3)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 8)
s3 in [0, 2)
s4 in [0, 1)
d0 mod 128 + s2 * 128 in [0, 1024)
d3 * 2 + d0 floordiv 128 in [0, 6)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> ((d3 * 2 + d0 floordiv 128) floordiv 3,
(d3 * 2 + d0 floordiv 128) mod 3)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 3)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 128 in [0, 1)
d3 * 2 + d0 floordiv 128 in [0, 6)
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, RowReduceEpilogue) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
fused_computation {
param_0 = f32[8,2048] parameter(0)
param_1 = f32[] parameter(1)
reduce = f32[8] reduce(param_0, param_1), dimensions={1}, to_apply=Add
ROOT log = f32[8] log(reduce)
}
ENTRY main {
a = f32[8,2048] parameter(0)
c = f32[] constant(0)
ROOT fusion = f32[8] fusion(a, c), kind=kInput, calls=fused_computation
})";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, RowReduceMOFEpilogue) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
Mul {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT mul = f32[] multiply(lhs, rhs)
}
fused_computation {
param_0 = f32[8,1024] parameter(0)
param_1 = f32[] parameter(1)
reduce1 = f32[8] reduce(param_0, param_1), dimensions={1}, to_apply=Add
reduce2 = f32[8] reduce(param_0, param_1), dimensions={1}, to_apply=Mul
log = f32[8] log(reduce1)
abs = f32[8] abs(reduce1)
neg = f32[8] negate(reduce2)
ROOT tuple = (f32[8], f32[8], f32[8]) tuple(log, neg, abs)
}
ENTRY main {
a = f32[8,1024] parameter(0)
c = f32[] constant(0)
ROOT fusion = (f32[8], f32[8], f32[8]) fusion(a, c), kind=kInput,
calls=fused_computation
})";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, RowReduceMOFGroups) {
constexpr auto kHloString = R"(
%add_f32 {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
%fused_computation {
%param0 = f32[1024] parameter(0)
%param1 = f32[1024] parameter(1)
%constant0 = f32[] constant(0)
%reduce1 = f32[] reduce(%param0, %constant0), dimensions={0}, to_apply=%add_f32
%reduce2 = f32[] reduce(%param1, %constant0), dimensions={0}, to_apply=%add_f32
ROOT %tuple = (f32[], f32[]) tuple(%reduce1, %reduce2)
}
ENTRY %cluster {
%param0 = f32[1024] parameter(0)
%param1 = f32[1024] parameter(1)
ROOT %fusion = (f32[], f32[])
fusion(%param0, %param1), kind=kInput, calls=%fused_computation
})";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, F64RowReduction) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f64[] parameter(0)
rhs = f64[] parameter(1)
ROOT add = f64[] add(lhs, rhs)
}
fused_computation {
param_0 = f64[100,128] parameter(0)
param_1 = f64[] parameter(1)
ROOT reduce = f64[100] reduce(param_0, param_1), dimensions={1}, to_apply=Add
}
ENTRY main {
a = f64[100,128] parameter(0)
c = f64[] constant(0)
ROOT fusion = f64[100] fusion(a, c), kind=kInput, calls=fused_computation
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> (
d3 * 8 + d0 floordiv 32,
d0 mod 32 + s2 * 32)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 13)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 4)
s3 in [0, 1)
d0 mod 32 + s2 * 32 in [0, 128)
d3 * 8 + d0 floordiv 32 in [0, 100)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (d3 * 8 + d0 floordiv 32)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 13)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 32 in [0, 1)
d3 * 8 + d0 floordiv 32 in [0, 100)
)"));
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, MultiRowReduction) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
fused_computation {
param_0 = f32[1024,4] parameter(0)
param_1 = f32[] parameter(1)
ROOT reduce = f32[1024] reduce(param_0, param_1), dimensions={1}, to_apply=Add
}
ENTRY main {
a = f32[1024,4] parameter(0)
c = f32[] constant(0)
ROOT fusion = f32[1024] fusion(a, c), kind=kInput, calls=fused_computation
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> (
d3 * 64 + d0 floordiv 4, d0 mod 4)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 16)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 1)
s3 in [0, 1)
d0 mod 4 in [0, 4)
d3 * 64 + d0 floordiv 4 in [0, 1024)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (d3 * 64 + d0 floordiv 4)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 16)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 4 in [0, 1)
d3 * 64 + d0 floordiv 4 in [0, 1024)
)"));
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, NonPowerOfTwoRowReduction) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
fused_computation {
param_0 = f32[100,568] parameter(0)
param_1 = f32[] parameter(1)
ROOT reduce = f32[100] reduce(param_0, param_1), dimensions={1}, to_apply=Add
}
ENTRY main {
a = f32[100,568] parameter(0)
c = f32[] constant(0)
ROOT fusion = f32[100] fusion(a, c), kind=kInput, calls=fused_computation
})";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, NonTrivialEpilogue) {
constexpr auto kHloString = R"(
HloModule module
add {
p0 = f64[] parameter(0)
p1 = f64[] parameter(1)
ROOT add = f64[] add(p0, p1)
}
fusion {
%p0 = f64[4] parameter(0)
%p1 = f64[4] parameter(1)
%c0 = f64[] constant(-inf)
%reduce0 = f64[] reduce(p1, c0), dimensions={0}, to_apply=add
%bc0 = f64[4] broadcast(reduce0), dimensions={}
%compare0 = pred[4] compare(p1, bc0), direction=EQ
%c1 = f64[] constant(0)
%bc1 = f64[4] broadcast(c1), dimensions={}
%select.3.1 = f64[4] select(compare0, p0, bc1)
%reduce1 = f64[] reduce(select.3.1, c1), dimensions={0}, to_apply=add
%convert0 = f64[4] convert(compare0)
%reduce2 = f64[] reduce(convert0, c1), dimensions={0}, to_apply=add
ROOT %tuple.1 = (f64[], f64[], f64[]) tuple(%reduce1, reduce0, reduce2)
}
ENTRY main {
%p0 = f64[4] parameter(0)
%p1 = f64[4] parameter(1)
ROOT %fusion = (f64[], f64[], f64[]) fusion(%p0, %p1), kind=kInput,
calls=fusion
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> (
(d0 floordiv 4) * 4 + d0 mod 4)
domain:
d0 in [0, 4)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 1)
s3 in [0, 1)
d0 mod 4 in [0, 4)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> ()
domain:
d0 in [0, 4)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 4 in [0, 1)
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, SideOutput) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
fused_computation {
param_0 = f32[8,2048] parameter(0)
param_1 = f32[] parameter(1)
exp = f32[8,2048] exponential(param_0)
reduce = f32[8] reduce(param_0, param_1), dimensions={1}, to_apply=Add
ROOT t = (f32[8], f32[8,2048]) tuple(reduce, exp)
}
ENTRY main {
a = f32[8,2048] parameter(0)
c = f32[] constant(0)
ROOT fusion = (f32[8], f32[8,2048]) fusion(a, c), kind=kInput,
calls=fused_computation
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4] -> (
d3 * 2 + d0 floordiv 128, (d0 mod 128 + s2 * 128) * 2 + s3)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 4)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 8)
s3 in [0, 2)
s4 in [0, 1)
d0 mod 128 + s2 * 128 in [0, 1024)
d3 * 2 + d0 floordiv 128 in [0, 8)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (d3 * 2 + d0 floordiv 128)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 4)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 128 in [0, 1)
d3 * 2 + d0 floordiv 128 in [0, 8)
)"));
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, UnsignedSideOutput) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = u32[] parameter(0)
rhs = u32[] parameter(1)
ROOT add = u32[] add(lhs, rhs)
}
fused_computation {
param_0 = u32[8,2048] parameter(0)
param_1 = u32[] parameter(1)
add = u32[8,2048] add(param_0, param_0)
reduce = u32[8] reduce(param_0, param_1), dimensions={1}, to_apply=Add
ROOT t = (u32[8], u32[8,2048]) tuple(reduce, add)
}
ENTRY main {
a = u32[8,2048] parameter(0)
c = u32[] constant(0)
ROOT fusion = (u32[8], u32[8,2048]) fusion(a, c), kind=kInput,
calls=fused_computation
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4] -> (
d3 * 2 + d0 floordiv 128, (d0 mod 128 + s2 * 128) * 2 + s3)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 4)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 8)
s3 in [0, 2)
s4 in [0, 1)
d0 mod 128 + s2 * 128 in [0, 1024)
d3 * 2 + d0 floordiv 128 in [0, 8)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (d3 * 2 + d0 floordiv 128)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 4)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 128 in [0, 1)
d3 * 2 + d0 floordiv 128 in [0, 8)
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, BroadcastSideOutput) {
constexpr auto kHloString = R"(
%add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
%fusion {
%p0 = f32[6,6] parameter(0)
%c0 = f32[] constant(0)
%reduce = f32[] reduce(%p0, %c0), dimensions={0,1}, to_apply=%add
%broadcast = f32[6,6] broadcast(%reduce), dimensions={}
ROOT %tuple = (f32[6,6], f32[]) tuple(%broadcast, %reduce)
}
ENTRY main {
%p0 = f32[6,6] parameter(0)
ROOT %fusion = (f32[6,6], f32[]) fusion(%p0), kind=kInput, calls=%fusion
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> ()
domain:
d0 in [0, 32)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 2)
s3 in [0, 1)
(d0 + s2 * 32) mod 6 in [0, 6)
d0 + s2 * 32 in [0, 36)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> (
(d0 + s2 * 32) floordiv 6, (d0 + s2 * 32) mod 6)
domain:
d0 in [0, 32)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 2)
s3 in [0, 1)
d0 + s2 * 32 in [0, 36)
)"));
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, VariadicMOF) {
constexpr auto kHloString = R"(
%reducer1 {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
%reducer2 {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
p2 = f32[] parameter(2)
p3 = f32[] parameter(3)
add0 = f32[] add(p0, p2)
add1 = f32[] add(p1, p3)
ROOT tuple = (f32[], f32[]) tuple(add0, add1)
}
%fusion {
%p0 = f32[6,6] parameter(0)
%c0 = f32[] constant(0)
%neg = f32[6,6] negate(%p0)
%reduce1 = f32[] reduce(%neg, %c0), dimensions={0,1}, to_apply=%reducer1
%reduce2 = (f32[], f32[]) reduce(%p0, %p0, %c0, %c0), dimensions={0,1}, to_apply=%reducer2
ROOT %tuple = (f32[], (f32[], f32[]), f32[6,6]) tuple(%reduce1, %reduce2, %neg)
}
ENTRY main {
%p0 = f32[6,6] parameter(0)
ROOT %fusion = (f32[], (f32[], f32[]), f32[6,6]) fusion(%p0), kind=kInput, calls=%fusion
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> (
(d0 + s2 * 32) floordiv 6, (d0 + s2 * 32) mod 6)
domain:
d0 in [0, 32)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 2)
s3 in [0, 1)
d0 + s2 * 32 in [0, 36)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> ()
domain:
d0 in [0, 1)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1)
d4 in [0, 1)
d5 in [0, 1)
)"));
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirRowReductionTest, ThreadIndexingOutputLayout) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,512] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,64]{0,1} reduce(%input, %c0), dimensions={2}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,512] parameter(0)
ROOT %fusion = f32[100,64]{0,1} fusion(%input), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3, s4] -> (
(d3 * 8 + d0 floordiv 32) floordiv 64,
(d3 * 8 + d0 floordiv 32) mod 64,
(d0 mod 32 + s2 * 32) * 2 + s3)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 800)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 8)
s3 in [0, 2)
s4 in [0, 1)
d0 mod 32 + s2 * 32 in [0, 256)
d3 * 8 + d0 floordiv 32 in [0, 6400)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (
(d3 * 8 + d0 floordiv 32) floordiv 64,
(d3 * 8 + d0 floordiv 32) mod 64
)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 800)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 32 in [0, 1)
d3 * 8 + d0 floordiv 32 in [0, 6400)
)"));
}
TEST_F(MlirRowReductionTest, TwoGroups) {
auto module = ParseAndReturnVerifiedModule(R"(
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[2] parameter(0)
%p1 = f32[2] parameter(1)
%c0 = f32[] constant(-inf)
%r0 = f32[] reduce(%p0, %c0), dimensions={0}, to_apply=add
%c1 = f32[] constant(inf)
%r1 = f32[] reduce(%p1, %c1), dimensions={0}, to_apply=add
ROOT %tuple = (f32[], f32[]) tuple(%r0, %r1)
}
ENTRY entry {
%p0 = f32[2] parameter(0)
%p1 = f32[2] parameter(1)
ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion fusion(analysis);
EXPECT_THAT(fusion.GetGroups().grouped_roots,
ElementsAre(ElementsAre(&analysis.fusion_root(0).instruction()),
ElementsAre(&analysis.fusion_root(1).instruction())));
}
TEST_F(MlirRowReductionTest, OneGroup) {
auto module = ParseAndReturnVerifiedModule(R"(
%add {
%p0 = c128[] parameter(0)
%p1 = c128[] parameter(1)
ROOT %add.35 = c128[] add(c128[] %p0, c128[] %p1)
}
%fusion {
%p0 = c128[1,2] parameter(0)
%c0 = c128[] constant((0, 0))
%reduce = c128[] reduce(%p0, %c0), dimensions={0,1}, to_apply=%add
%real = f64[] real(c128[] %reduce)
%imag = f64[] imag(c128[] %reduce)
%negate = f64[] negate(f64[] %imag)
ROOT %tuple.29 = (f64[], f64[]) tuple(f64[] %real, f64[] %negate)
}
ENTRY entry {
%p0 = c128[1,2] parameter(0)
ROOT %fusion = (f64[], f64[]) fusion(%p0), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirRowReductionFusion mlir_fusion(analysis);
EXPECT_THAT(mlir_fusion.GetGroups().grouped_roots, SizeIs(1));
}
constexpr absl::string_view kColumnVectorizationTemplate = R"(
add {
b = $0[] parameter(1)
a = $0[] parameter(0)
ROOT out = $0[] add(a, b)
}
fusion {
%p0 = $0[192,64,1536] parameter(0)
%p1 = $0[] parameter(1)
ROOT reduce = $0[192,1536] reduce(p0, p1), dimensions={1}, to_apply=add
}
ENTRY entry {
%p0 = $0[192,64,1536] parameter(0)
%p1 = $0[] parameter(1)
ROOT %fusion = $0[192,1536] fusion(p0, p1), kind=kInput, calls=fusion
})";
TEST_F(MlirColumnReductionTest, ColumnReduction) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
fused_computation {
param_0 = f32[13,1051,321] parameter(0)
param_1 = f32[] parameter(1)
ROOT reduce = f32[13,321] reduce(param_0, param_1), dimensions={1}, to_apply=Add
}
ENTRY main {
a = f32[13,1051,321] parameter(0)
c = f32[] constant(0)
ROOT fusion = f32[13,321] fusion(a, c), kind=kInput, calls=fused_computation
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirColumnReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d3 floordiv 11,
d0 floordiv 32 + s0 * 32,
(d3 mod 11) * 32 + d0 mod 32 + s1
)
domain:
d0 in [0, 1024)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 143)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 33)
s1 in [0, 1)
(d3 mod 11) * 32 + d0 mod 32 + s1 in [0, 321)
d0 floordiv 32 + s0 * 32 in [0, 1051)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0] -> (
d3 floordiv 11, (d3 mod 11) * 32 + d0 floordiv 32 + s0
)
domain:
d0 in [0, 1024)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 143)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
(d3 mod 11) * 32 + d0 floordiv 32 + s0 in [0, 321)
d0 mod 32 in [0, 1)
)"));
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirColumnReductionTest, SmallColumnReduction) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
fused_computation {
param_0 = f32[3,128,4] parameter(0)
param_1 = f32[] parameter(1)
ROOT reduce = f32[3,4] reduce(param_0, param_1), dimensions={1}, to_apply=Add
}
ENTRY main {
a = f32[3,128,4] parameter(0)
c = f32[] constant(0)
ROOT fusion = f32[3,4] fusion(a, c), kind=kInput, calls=fused_computation
})";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirColumnReductionTest, MixedIndexing) {
constexpr auto kHloString = R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%param_0 = f32[64,128] parameter(0)
%constant_0 = f32[] constant(0)
%reduce.1 = f32[128] reduce(f32[64,128] %param_0, f32[] %constant_0), dimensions={0}, to_apply=%add
%neg = f32[64,128] negate(f32[64,128] %param_0)
%bitcast = f32[8,8,128]{2,1,0} bitcast(f32[64,128] %neg)
%reduce.2 = f32[128] reduce(f32[8,8,128]{2,1,0} %bitcast, f32[] %constant_0), dimensions={0,1}, to_apply=%add
ROOT %tuple.12 = (f32[128], f32[128]) tuple(f32[128] %reduce.1, f32[128] %reduce.2)
}
ENTRY entry {
%param_0 = f32[64,128] parameter(0)
ROOT %fusion = (f32[128], f32[128]) fusion(%param_0), kind=kInput, calls=fusion
})";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirColumnReductionTest, ColumnReductionVectorization) {
constexpr auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
fused_computation {
param_0 = f32[2048,16384] parameter(0)
param_1 = f32[] parameter(1)
ROOT reduce = f32[16384] reduce(param_0, param_1), dimensions={0}, to_apply=Add
}
ENTRY main {
a = f32[2048,16384] parameter(0)
c = f32[] constant(0)
ROOT fusion = f32[16384] fusion(a, c), kind=kInput, calls=fused_computation
})";
auto module = ParseAndReturnVerifiedModule(kHloString).value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
#ifndef XLA_SERVICE_GPU_FUSIONS_IN_PLACE_DYNAMIC_UPDATE_SLICE_MLIR_H_
#define XLA_SERVICE_GPU_FUSIONS_IN_PLACE_DYNAMIC_UPDATE_SLICE_MLIR_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
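// Emitter for in-place dynamic-update-slice fusions: only the elements
// covered by the update operand are written; the rest of the (aliased)
// output buffer is left untouched.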
class MlirInPlaceDynamicUpdateSliceFusion : public MlirFusionEmitterBase {
public:
explicit MlirInPlaceDynamicUpdateSliceFusion(
const HloFusionAnalysis& analysis)
: analysis_(analysis),
dus_ops_(
GetOutputDefiningDynamicUpdateSlices(analysis.fusion_roots())) {}
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* indexing_context) const override {
return std::nullopt;
}
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* indexing_context) const override;
protected:
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const override;
std::vector<mlir_converter::EpilogueSpecification> GetEpilogues(
const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const override;
private:
const HloFusionAnalysis& analysis_;
std::vector<const HloInstruction*> dus_ops_;
};
}
}
#endif
#include "xla/service/gpu/fusions/in_place_dynamic_update_slice_mlir.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::ImplicitLocOpBuilder;
using mlir::Value;
using mlir::ValueRange;
using mlir::arith::AddIOp;
using mlir::func::ReturnOp;
using mlir::tensor::InsertOp;
using mlir_converter::ApplyIndexing;
using mlir_converter::CallTargetProvider;
using mlir_converter::ClampIndex;
using mlir_converter::PartitionedComputations;
using mlir_converter::ProvideParameter;
using mlir_converter::ProvideParameterRange;
constexpr int kDUSUpdateIndex = 1;
}
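// The launch grid is sized to the update tensor rather than the full output:
// only the updated region needs to be written.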
LaunchDimensions MlirInPlaceDynamicUpdateSliceFusion::launch_dimensions()
const {
const auto& update_shape =
dus_ops_.front()->operand(kDUSUpdateIndex)->shape();
return CalculateLaunchDimensions(update_shape, analysis_.device_info());
}
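// Only the update operand (operand 1) has a per-thread input mapping; for the
// base tensor and the start indices there is no such map, so std::nullopt is
// returned.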
std::optional<IndexingMap>
MlirInPlaceDynamicUpdateSliceFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* indexing_context) const {
if (hero_operand_index != kDUSUpdateIndex) {
return std::nullopt;
}
auto launch_dims = launch_dimensions();
const auto& update_shape =
dus_ops_.front()->operand(kDUSUpdateIndex)->shape();
return GetDefaultThreadIdIndexingMap(launch_dims, 1,
update_shape, indexing_context);
}
std::vector<mlir_converter::EpilogueSpecification>
MlirInPlaceDynamicUpdateSliceFusion::GetEpilogues(
const HloFusionInstruction& fusion, mlir::MLIRContext* mlir_context) const {
std::vector<mlir_converter::EpilogueSpecification> epilogues;
for (const auto& [dus_op, root] :
llvm::zip(dus_ops_, analysis_.fusion_roots())) {
epilogues.push_back(
mlir_converter::EpilogueSpecification::FromIdentityIndexing(
dus_op, &root.instruction(), mlir_context));
}
return epilogues;
}
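// Emits a loop nest over the update elements. For every element the start
// indices are read and clamped so the update stays in bounds (matching HLO
// dynamic-update-slice semantics), the update value is computed, and the
// result is inserted into the output tensor at the offset position.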
absl::Status MlirInPlaceDynamicUpdateSliceFusion::EmitEntryFunction(
const PartitionedComputations& computations,
const CallTargetProvider& call_targets, mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
ImplicitLocOpBuilder b(entry_function.getLoc(), entry_function);
b.setInsertionPointToStart(entry_function.addEntryBlock());
mlir::MLIRContext* mlir_context = entry_function.getContext();
auto indexing = *ComputeThreadIdToInputIndexing(
0,
kDUSUpdateIndex, mlir_context);
indexing.Simplify();
indexing.RemoveUnusedSymbols();
int num_inputs = fusion.fused_instructions_computation()->num_parameters();
auto output_tensor_args =
entry_function.getArguments().drop_front(num_inputs);
const auto& root_computation = computations.FindPartitionedComputation(
fusion.fused_instructions_computation());
auto result_tensors = EmitThreadLoopNest(
b, output_tensor_args, indexing,
[&](ValueRange output_tensors, ValueRange dim_values,
ValueRange symbol_values) -> llvm::SmallVector<Value> {
auto input_indices =
ApplyIndexing(indexing, dim_values, symbol_values, b);
llvm::SmallVector<Value> results;
for (auto [instr, root, output] :
llvm::zip(dus_ops_, analysis_.fusion_roots(), output_tensors)) {
const auto* dus_instr = Cast<HloDynamicUpdateSliceInstruction>(instr);
const auto& update_shape = dus_instr->update()->shape();
SmallVector<Value> update_indices;
auto start_indices = ProvideParameterRange(
root_computation, dus_instr,
dus_instr->first_index_operand_number(), update_shape.rank(), {},
call_targets, entry_function, b);
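        // Clamp each start index to [0, output_dim - update_dim] so the whole
        // update lands in bounds, then add it to the thread's index within the
        // update to obtain the output position.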
for (int i = 0; i < update_shape.rank(); ++i) {
int64_t update_size = update_shape.dimensions(i);
auto start_index = ClampIndex(
start_indices[i],
primitive_util::IsUnsignedIntegralType(
dus_instr
->operand(i + dus_instr->first_index_operand_number())
->shape()
.element_type()),
dus_instr->shape().dimensions(i) - update_size, b);
update_indices.push_back(
b.create<AddIOp>(input_indices[i], start_index));
}
auto updated_value =
ProvideParameter(root_computation, dus_instr, kDUSUpdateIndex,
input_indices, call_targets, entry_function, b);
if (dus_instr->shape() != root.shape()) {
update_indices = ApplyIndexing(
GetBitcastMap(dus_instr->shape(), root.shape(), b.getContext()),
update_indices, {}, b);
}
results.push_back(
b.create<InsertOp>(updated_value[0], output, update_indices));
}
return results;
});
b.create<ReturnOp>(result_tensors);
return absl::OkStatus();
}
}
}
#include "xla/service/gpu/fusions/in_place_dynamic_update_slice_mlir.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/service/gpu/fusions/mlir_emitter_test_base.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using MlirInPlaceDynamicUpdateSliceFusionTest =
MlirEmitterTestBase<MlirInPlaceDynamicUpdateSliceFusion>;
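// ThreadIndexing checks the thread-to-update-operand mapping; the remaining
// tests emit the kernels and compare the results against the reference
// backend, covering out-of-bounds start indices, bitcast roots, and
// multi-output fusions.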
TEST_F(MlirInPlaceDynamicUpdateSliceFusionTest, ThreadIndexing) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] parameter(2)
i1 = s32[] parameter(3)
ROOT updated = f32[20,30] dynamic-update-slice(in, updates, i0, i1)
}
ENTRY entry {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] constant(2)
i1 = s32[] constant(3)
ROOT fusion = f32[20,30] fusion(in, updates, i0, i1), kind=kLoop, calls=fused_computation
}
)"));
thread_id_printer_.SetSymbolName(0, "chunk_id");
thread_id_printer_.SetSymbolName(1, "unroll_id");
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirInPlaceDynamicUpdateSliceFusion fusion(analysis);
auto thread_id_update_indexing = fusion.ComputeThreadIdToInputIndexing(
0, 1, &mlir_context_);
EXPECT_THAT(thread_id_update_indexing->ToString(thread_id_printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
th_x floordiv 6, th_x mod 6)
domain:
th_x in [0, 30)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
)"));
auto thread_id_dst_indexing = fusion.ComputeThreadIdToInputIndexing(
0, 0, &mlir_context_);
EXPECT_THAT(thread_id_dst_indexing, ::testing::Eq(std::nullopt));
}
TEST_F(MlirInPlaceDynamicUpdateSliceFusionTest, SimpleDUS) {
auto kHloString = R"(
HloModule module
fused_computation {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] parameter(2)
i1 = s32[] parameter(3)
ROOT updated = f32[20,30] dynamic-update-slice(in, updates, i0, i1)
}
ENTRY entry {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] constant(2)
i1 = s32[] constant(3)
ROOT fusion = f32[20,30] fusion(in, updates, i0, i1), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirInPlaceDynamicUpdateSliceFusionTest, OutOfBoundDUS) {
auto kHloString = R"(
HloModule module
fused_computation {
in = f32[7,8] parameter(0)
updates = f32[2,3] parameter(1)
i0 = s32[] parameter(2)
i1 = s32[] parameter(3)
ROOT updated = f32[7,8] dynamic-update-slice(in, updates, i0, i1)
}
ENTRY entry {
in = f32[7,8] parameter(0)
updates = f32[2,3] parameter(1)
i0 = s32[] constant(-20)
i1 = s32[] constant(30)
ROOT fusion = f32[7,8] fusion(in, updates, i0, i1), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirInPlaceDynamicUpdateSliceFusionTest, BitcastDus) {
auto kHloString = R"(
HloModule module
fused_computation {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] parameter(2)
i1 = s32[] parameter(3)
updated = f32[20,30] dynamic-update-slice(in, updates, i0, i1)
ROOT bitcast = f32[600] bitcast(updated)
}
ENTRY entry {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] constant(2)
i1 = s32[] constant(3)
ROOT fusion = f32[600] fusion(in, updates, i0, i1), kind=kLoop, calls=fused_computation
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirInPlaceDynamicUpdateSliceFusionTest, MOFDus) {
auto kHloString = R"(
HloModule module
fused_computation {
p0 = f32[10,11,12] parameter(0)
p1 = f32[1,11,12] parameter(1)
p2 = f32[8,11,12] parameter(2)
p3 = f32[1,11,12] parameter(3)
p4 = s32[] parameter(4)
c0 = s32[] constant(0)
cmp = pred[] compare(p4, c0), direction=EQ
broadcast = pred[1,11,12] broadcast(cmp), dimensions={}
select = f32[1,11,12] select(broadcast, p1, p3)
dus0 = f32[10,11,12] dynamic-update-slice(p0, select, c0, c0, c0)
dus1 = f32[8,11,12] dynamic-update-slice(p2, select, c0, c0, c0)
ROOT tuple = (f32[10,11,12], f32[8,11,12]) tuple(dus0, dus1)
}
ENTRY entry {
p0 = f32[10,11,12] parameter(0)
p1 = f32[1,11,12] parameter(1)
p2 = f32[8,11,12] parameter(2)
p3 = f32[1,11,12] parameter(3)
p4 = s32[] parameter(4)
ROOT fusion_root_multiple = (f32[10,11,12], f32[8,11,12])
fusion(p0, p1, p2, p3, p4), kind=kLoop, calls=fused_computation
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirInPlaceDynamicUpdateSliceFusionTest, OperandSubgraphWithTwoRoots) {
auto kHloString = R"(
HloModule in_place_dus
dus_fusion {
param_0.8 = f32[512,512]{1,0} parameter(0)
param_1.10 = f32[128,128]{1,0} parameter(1)
param_3.32 = s32[] parameter(3)
two = s32[] constant(2)
param_3_mod_2 = s32[] remainder(param_3.32, two)
one = s32[] constant(1)
param_3_plus_one = s32[] add(param_3_mod_2, one)
param_2.32 = s32[] parameter(2)
param_2_plus_one = s32[] add(param_2.32, one)
ROOT dynamic-update-slice.5.1 = f32[512,512]{1,0} dynamic-update-slice(
param_0.8, param_1.10, param_2_plus_one, param_3_plus_one)
}
ENTRY entry {
p0 = f32[512,512]{1,0} parameter(0)
p1 = f32[128,128]{1,0} parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT dus = f32[512,512]{1,0} fusion(p0, p1, p2, p3), kind=kLoop, calls=dus_fusion
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-6}));
}
}
}
}
#ifndef XLA_SERVICE_GPU_FUSIONS_CONCATENATE_MLIR_H_
#define XLA_SERVICE_GPU_FUSIONS_CONCATENATE_MLIR_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
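// Emitter for concatenate fusions: the launch grid is sized for the largest
// operand and one loop nest is emitted per operand, each writing that
// operand's slice of the concatenated output.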
class MlirConcatenateFusion : public MlirFusionEmitterBase {
public:
explicit MlirConcatenateFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis) {}
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override;
protected:
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const override;
std::vector<mlir_converter::EpilogueSpecification> GetEpilogues(
const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const override;
private:
const HloFusionAnalysis& analysis_;
};
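// Typical wiring, mirroring the unit tests (fixture members such as
// device_info_ and mlir_context_ are test-side helpers, not part of this
// class), shown here only as a sketch:
//   auto analysis = AnalyzeFusion(*fusion_instr, device_info_);
//   MlirConcatenateFusion emitter(analysis);
//   auto map = emitter.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_);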
}
}
#endif
#include "xla/service/gpu/fusions/concatenate_mlir.h"
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/concatenate.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
using llvm::SmallVector;
using mlir::Value;
using mlir::ValueRange;
LaunchDimensions MlirConcatenateFusion::launch_dimensions() const {
return CalculateLaunchDimensions(GetLargestConcatOperandShape(analysis_),
analysis_.device_info());
}
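// There is no single thread-to-output map for a concatenate fusion: which
// output elements a thread writes depends on the operand handled by the
// current loop nest, so per-operand maps are built in EmitEntryFunction
// instead.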
std::optional<IndexingMap>
MlirConcatenateFusion::ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const {
return std::nullopt;
}
std::optional<IndexingMap>
MlirConcatenateFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const {
return GetDefaultThreadIdIndexingMap(launch_dimensions(), 1,
GetLargestConcatOperandShape(analysis_),
ctx);
}
std::vector<mlir_converter::EpilogueSpecification>
MlirConcatenateFusion::GetEpilogues(const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const {
return {mlir_converter::EpilogueSpecification::FromIdentityIndexing(
&analysis_.fusion_hero(0).instruction(),
&analysis_.fusion_root(0).instruction(), mlir_context)};
}
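// For each operand of the concatenate, compose thread->input indexing with
// the operand's input->output map (its offset along the concatenated
// dimension) and the epilogue indexing, then emit a loop nest that reads the
// operand, applies the epilogue, and inserts the values into the output
// tensors.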
absl::Status MlirConcatenateFusion::EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
const auto& root_computation = computations.FindPartitionedComputation(
fusion.fused_instructions_computation());
mlir::ImplicitLocOpBuilder builder(entry_function.getLoc(), entry_function);
builder.setInsertionPointToStart(entry_function.addEntryBlock());
auto* ctx = entry_function.getContext();
int num_inputs = fusion.fused_instructions_computation()->num_parameters();
SmallVector<Value> input_tensors(
entry_function.getArguments().take_front(num_inputs));
auto output_tensor_args =
entry_function.getArguments().drop_front(num_inputs);
SmallVector<Value> result_tensors{output_tensor_args.begin(),
output_tensor_args.end()};
auto thread_id_to_input_map =
ComputeThreadIdToInputIndexing(
0, 0, ctx)
.value();
auto epilogue_indexing = ComputeEpilogueInputToOutputIndexing(
analysis_.fusion_hero(0), analysis_.fusion_root(0), ctx);
const auto* concat = &analysis_.fusion_hero(0).instruction();
for (auto [operand_index, operand] : llvm::enumerate(concat->operands())) {
auto input_to_output_map =
*ComputeInputToOutputIndexing(concat, operand_index, ctx)
.indexing_maps.front()
.begin();
auto thread_id_to_output_map = ComposeIndexingMaps(
ComposeIndexingMaps(thread_id_to_input_map, input_to_output_map),
epilogue_indexing);
auto loop_nest_body_builder =
[&, operand_index = operand_index](
ValueRange output_tensors, ValueRange dim_values,
ValueRange symbol_values) -> SmallVector<Value> {
auto input_indices = mlir_converter::ApplyIndexing(
thread_id_to_input_map, dim_values, symbol_values, builder);
auto result_scalar = mlir_converter::ProvideParameter(
root_computation, concat, operand_index, input_indices, call_targets,
entry_function, builder);
absl::flat_hash_map<const HloInstruction*, llvm::SmallVector<Value>>
hero_value{{concat, result_scalar}};
auto output_indices = mlir_converter::ApplyIndexing(
thread_id_to_output_map, dim_values, symbol_values, builder);
auto result_scalars = EmitEpilogue(
0, computations, entry_function, hero_value,
output_indices, builder)[&analysis_.fusion_root(0).instruction()];
SmallVector<Value> result_tensors;
result_tensors.reserve(output_tensor_args.size());
for (auto [tensor, value] : llvm::zip(output_tensors, result_scalars)) {
result_tensors.push_back(
builder
.create<mlir::tensor::InsertOp>(value, tensor, output_indices)
.getResult());
}
return result_tensors;
};
result_tensors =
EmitThreadLoopNest(builder, result_tensors, thread_id_to_output_map,
loop_nest_body_builder);
}
builder.create<mlir::func::ReturnOp>(result_tensors);
return absl::OkStatus();
}
}
}
#include "xla/service/gpu/fusions/concatenate_mlir.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/service/gpu/fusions/mlir_emitter_test_base.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using MlirConcatenateFusionTest = MlirEmitterTestBase<MlirConcatenateFusion>;
TEST_F(MlirConcatenateFusionTest, ThreadIdIndexing) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
param0 = f32[200] parameter(0)
param1 = f32[400] parameter(1)
param2 = f32[300] parameter(2)
ROOT concat = f32[900] concatenate(param0, param1, param2), dimensions={0}
}
ENTRY main {
param0 = f32[200] parameter(0)
param1 = f32[400] parameter(1)
param2 = f32[300] parameter(2)
ROOT fusion = f32[900] fusion(param0, param1, param2),
calls=fused_computation, kind=kLoop
}
)"));
thread_id_printer_.SetSymbolName(0, "chunk_id");
thread_id_printer_.SetSymbolName(1, "unroll_id");
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirConcatenateFusion fusion(analysis);
constexpr auto kIndexing = R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
(bl_x * 128 + th_x) mod 400)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 4)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
th_x + bl_x * 128 in [0, 400)
)";
auto thread_id_to_output_indexing_0 = fusion.ComputeThreadIdToInputIndexing(
0, 0, &mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing_0->ToString(thread_id_printer_),
MatchIndexingString(kIndexing));
auto thread_id_to_output_indexing_1 = fusion.ComputeThreadIdToInputIndexing(
0, 1, &mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing_1->ToString(thread_id_printer_),
MatchIndexingString(kIndexing));
auto thread_id_to_output_indexing_2 = fusion.ComputeThreadIdToInputIndexing(
0, 2, &mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing_2->ToString(thread_id_printer_),
MatchIndexingString(kIndexing));
}
TEST_F(MlirConcatenateFusionTest, StandAloneConcatenate) {
auto kHloString = R"(
HloModule module
fused_computation {
param0 = f32[200] parameter(0)
param1 = f32[400] parameter(1)
param2 = f32[300] parameter(2)
ROOT concat = f32[900] concatenate(param0, param1, param2), dimensions={0}
}
ENTRY main {
param0 = f32[200] parameter(0)
param1 = f32[400] parameter(1)
param2 = f32[300] parameter(2)
ROOT fusion = f32[900] fusion(param0, param1, param2),
calls=fused_computation, kind=kLoop
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirConcatenateFusionTest, PrologueEpilogue) {
auto kHloString = R"(
HloModule module
fused_computation {
param0 = f32[64] parameter(0)
param1 = f32[128] parameter(1)
log = f32[64] log(param0)
exp = f32[128] exponential(param1)
concat = f32[192] concatenate(log, exp), dimensions={0}
ROOT neg = f32[192] negate(concat)
}
ENTRY main {
param0 = f32[64] parameter(0)
param1 = f32[128] parameter(1)
ROOT fusion = f32[192] fusion(param0, param1), calls=fused_computation, kind=kLoop
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirConcatenateFusionTest, EpilogueSideParameter) {
auto kHloString = R"(
HloModule module
fused_computation {
param0 = f32[64] parameter(0)
param1 = f32[192] parameter(1)
neg = f32[64] negate(param0)
slice = f32[128] slice(param1), slice={[32:160]}
exp = f32[128] exponential(slice)
concat = f32[192] concatenate(neg, exp), dimensions={0}
ROOT add = f32[192] add(concat, param1)
}
ENTRY main {
param0 = f32[64] parameter(0)
param1 = f32[192] parameter(1)
ROOT fusion = f32[192] fusion(param0, param1), calls=fused_computation, kind=kLoop
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirConcatenateFusionTest, MajorDimension) {
auto kHloString = R"(
HloModule module
fused_computation {
param0 = f32[16,16] parameter(0)
param1 = f32[16,16] parameter(1)
ROOT concat = f32[32,16] concatenate(param0, param1), dimensions={0}
}
ENTRY main {
param0 = f32[16,16] parameter(0)
param1 = f32[16,16] parameter(1)
ROOT %fusion = f32[32,16] fusion(param0, param1), kind=kInput, calls=fused_computation
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirConcatenateFusionTest, EpilogueBitcast) {
auto kHloString = R"(
HloModule Test
fused_computation {
p0 = pred[1] parameter(0)
p1 = pred[1] parameter(1)
p2 = pred[1] parameter(2)
%concatenate.3.3 = pred[3] concatenate(p0, p1, p2), dimensions={0}
%bitcast.57.1 = pred[1,1,3]{2,1,0} bitcast(pred[3]{0} %concatenate.3.3)
ROOT %convert.36.1 = u32[1,1,3] convert(pred[1,1,3]{2,1,0} %bitcast.57.1)
}
ENTRY main {
p0 = pred[1] parameter(0)
p1 = pred[1] parameter(1)
p2 = pred[1] parameter(2)
ROOT fusion = u32[1,1,3] fusion(p0, p1, p2), kind=kInput, calls=fused_computation
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
}
}
}
#ifndef XLA_SERVICE_GPU_FUSIONS_LOOP_MLIR_H_
#define XLA_SERVICE_GPU_FUSIONS_LOOP_MLIR_H_
#include <cstdint>
#include <optional>
#include "absl/status/status.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/loop.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
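// Generic element-wise ("loop") fusion emitter: evaluates the fused
// computation once per output element inside a single loop nest.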
class MlirLoopFusion : public MlirFusionEmitterBase {
public:
explicit MlirLoopFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis), config_(ComputeLoopFusionConfig(analysis)) {}
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override;
protected:
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const override;
private:
const HloFusionAnalysis& analysis_;
LaunchDimensionsConfig config_;
};
}
}
#endif
#include "xla/service/gpu/fusions/loop_mlir.h"
#include <iterator>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::Value;
using mlir::ValueRange;
const Shape& GetIndexShape(const Shape& shape) {
return shape.IsTuple() ? shape.tuple_shapes(0) : shape;
}
}
std::optional<IndexingMap> MlirLoopFusion::ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const {
auto launch_dims = launch_dimensions();
return GetDefaultThreadIdIndexingMap(
launch_dims, config_.unroll_factor,
GetIndexShape(analysis_.fusion_root(root_index).shape()), ctx);
}
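// The input map is the thread-to-output map composed with the root's
// output-to-input indexing for the requested operand.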
std::optional<IndexingMap> MlirLoopFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const {
std::optional<IndexingMap> thread_id_to_output_indexing =
ComputeThreadIdToOutputIndexing(root_index, ctx);
if (!thread_id_to_output_indexing.has_value()) {
return std::nullopt;
}
const HloInstruction* fusion_root =
&analysis_.fusion_root(root_index).instruction();
auto output_to_input_indexing =
ComputeOutputToInputIndexing(fusion_root, 0, ctx);
IndexingMapSet output_to_input_indexing_set =
output_to_input_indexing.indexing_maps[hero_operand_index];
CHECK_EQ(output_to_input_indexing_set.size(), 1);
IndexingMap thread_id_to_input_indexing_map = ComposeIndexingMaps(
*thread_id_to_output_indexing, *output_to_input_indexing_set.begin());
thread_id_to_input_indexing_map.Simplify();
return thread_id_to_input_indexing_map;
}
LaunchDimensions MlirLoopFusion::launch_dimensions() const {
return CalculateLaunchDimensions(
GetIndexShape(analysis_.fusion_root(0).shape()), analysis_.device_info(),
config_);
}
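// Emits one loop nest over the output elements. The fused computation's root
// function is called once per element (PureCallOp); each result is written to
// the corresponding root's tensor, remapping indices with a bitcast map when
// the roots have different but bitcast-compatible shapes.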
absl::Status MlirLoopFusion::EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
mlir::ImplicitLocOpBuilder builder(entry_function.getLoc(), entry_function);
builder.setInsertionPointToStart(entry_function.addEntryBlock());
auto indexing =
ComputeThreadIdToOutputIndexing(0, entry_function.getContext());
TF_RET_CHECK(indexing) << "Indexing is never nullopt";
int num_inputs = fusion.fused_instructions_computation()->num_parameters();
auto output_tensor_args =
entry_function.getArguments().drop_front(num_inputs);
llvm::SmallVector<const Shape*> result_shapes;
for (const HloInstructionAdaptor& root : analysis_.fusion_roots()) {
if (root.shape().IsTuple()) {
for (const auto& shape : root.shape().tuple_shapes()) {
result_shapes.push_back(&shape);
}
} else {
result_shapes.push_back(&root.shape());
}
}
auto body_builder = [&](ValueRange output_tensors, ValueRange dim_values,
ValueRange symbol_values) -> SmallVector<Value> {
llvm::SmallVector<Value> first_output_indices =
mlir_converter::ApplyIndexing(*indexing, dim_values, symbol_values,
builder);
auto root_fn = call_targets(
fusion.fused_instructions_computation()->root_instruction());
SmallVector<Value> operands(
entry_function.getArguments().take_front(num_inputs));
absl::c_copy(first_output_indices, std::back_inserter(operands));
auto result_scalars =
builder.create<PureCallOp>(root_fn, operands).getResults();
SmallVector<Value> result_tensors;
result_tensors.reserve(output_tensor_args.size());
for (auto [root_shape, tensor, value] :
llvm::zip(result_shapes, output_tensors, result_scalars)) {
llvm::SmallVector<Value> output_indices = mlir_converter::ApplyIndexing(
GetBitcastMap(*result_shapes.front(), *root_shape,
builder.getContext()),
first_output_indices, {}, builder);
result_tensors.push_back(builder.create<mlir::tensor::InsertOp>(
value, tensor, output_indices));
}
return result_tensors;
};
builder.create<mlir::func::ReturnOp>(
EmitThreadLoopNest(builder, output_tensor_args, *indexing, body_builder));
return absl::OkStatus();
}
}
}
#include "xla/service/gpu/fusions/loop_mlir.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/service/gpu/fusions/mlir_emitter_test_base.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using MlirLoopFusionTest = MlirEmitterTestBase<MlirLoopFusion>;
TEST_F(MlirLoopFusionTest, ThreadId_IndexingUnrolled) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
neg {
%input = f32[100,200,300] parameter(0)
ROOT neg = f32[100,200,300] negate(%input)
}
ENTRY entry {
%input = f32[100,200,300] parameter(0)
ROOT %fusion = f32[100,200,300] fusion(%input), kind=kLoop, calls=neg
}
)"));
thread_id_printer_.SetSymbolName(0, "chunk_id");
thread_id_printer_.SetSymbolName(1, "unroll_id");
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirLoopFusion fusion(analysis);
auto thread_id_to_output_indexing =
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing->ToString(thread_id_printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
((bl_x * 128 + chunk_id * 129024 + th_x) floordiv 15000) mod 100,
((bl_x * 128 + chunk_id * 129024 + th_x) floordiv 75) mod 200,
(th_x * 4 + bl_x * 512 + chunk_id * 516096) mod 300 + unroll_id
)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1008)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 12)
unroll_id in [0, 4)
(th_x + bl_x * 128) * 4 + chunk_id * 516096 in [0, 5999997)
)"));
}
TEST_F(MlirLoopFusionTest, ThreadId_IndexingNotUnrolled) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
neg {
%input = f32[20] parameter(0)
ROOT neg = f32[20] negate(%input)
}
ENTRY entry {
%input = f32[20] parameter(0)
ROOT %fusion = f32[20] fusion(%input), kind=kLoop, calls=neg
}
)"));
thread_id_printer_.SetSymbolName(0, "chunk_id");
thread_id_printer_.SetSymbolName(1, "unroll_id");
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirLoopFusion fusion(analysis);
auto thread_id_to_output_indexing =
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing->ToString(thread_id_printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (th_x)
domain:
th_x in [0, 20)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
)"));
auto thread_id_to_input_indexing = fusion.ComputeThreadIdToInputIndexing(
0, 0, &mlir_context_);
EXPECT_THAT(thread_id_to_input_indexing->ToString(thread_id_printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (th_x)
domain:
th_x in [0, 20)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
)"));
}
TEST_F(MlirLoopFusionTest, ThreadId_Broadcast) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
bcast {
%input = f32[20] parameter(0)
ROOT bcast = f32[10, 20, 30] broadcast(%input), dimensions={1}
}
ENTRY entry {
%input = f32[20] parameter(0)
ROOT %fusion = f32[10, 20, 30] fusion(%input), kind=kLoop, calls=bcast
}
)"));
thread_id_printer_.SetSymbolName(0, "chunk_id");
thread_id_printer_.SetSymbolName(1, "unroll_id");
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirLoopFusion fusion(analysis);
auto thread_id_to_output_indexing =
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing->ToString(thread_id_printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
((bl_x * 128 + th_x) floordiv 600) mod 10,
((bl_x * 128 + th_x) floordiv 30) mod 20,
(bl_x * 128 + th_x) mod 30
)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 47)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
th_x + bl_x * 128 in [0, 6000)
)"));
auto thread_id_to_input_indexing = fusion.ComputeThreadIdToInputIndexing(
0, 0, &mlir_context_);
EXPECT_THAT(thread_id_to_input_indexing->ToString(thread_id_printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] ->
(((bl_x * 128 + th_x) floordiv 30) mod 20)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 47)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
th_x + bl_x * 128 in [0, 6000)
)"));
}
TEST_F(MlirLoopFusionTest, Constant_Broadcast) {
auto kHloString = R"(
HloModule module
bcast {
zero = bf16[] constant(0)
ROOT broadcast = bf16[2,16,48]{2,1,0} broadcast(zero), dimensions={}
}
ENTRY entry {
ROOT %fusion = bf16[2,16,48]{2,1,0} fusion(), kind=kLoop, calls=bcast
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{0}));
}
TEST_F(MlirLoopFusionTest, NoCodeDuplication) {
auto kHloString = R"(
HloModule test_module
%fused_computation (param: f32[6]) -> f32[2] {
%param = f32[6]{0} parameter(0)
%slice0.1 = f32[5]{0} slice(f32[6]{0} %param), slice={[0:5]}
%slice0.2 = f32[5]{0} slice(f32[6]{0} %param), slice={[1:6]}
%add0 = f32[5]{0} add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
%slice1.1 = f32[4]{0} slice(f32[5]{0} %add0), slice={[0:4]}
%slice1.2 = f32[4]{0} slice(f32[5]{0} %add0), slice={[1:5]}
%add1 = f32[4]{0} add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
%slice2.1 = f32[3]{0} slice(f32[4]{0} %add1), slice={[0:3]}
%slice2.2 = f32[3]{0} slice(f32[4]{0} %add1), slice={[1:4]}
%add2 = f32[3]{0} add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
%slice3.1 = f32[2]{0} slice(f32[3]{0} %add2), slice={[0:2]}
%slice3.2 = f32[2]{0} slice(f32[3]{0} %add2), slice={[1:3]}
ROOT %add3 = f32[2]{0} add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
}
ENTRY entry_computation {
p0 = f32[] parameter(0)
add = f32[] add(p0, p0)
broadcast = f32[6]{0} broadcast(add), dimensions={}
ROOT %fusion = f32[2]{0} fusion(broadcast), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirLoopFusionTest, TwoUsersConsistentIndexing) {
auto kHloString = R"(
HloModule test_module
    %fused_computation (p0: f32[2], p1: f32[2]) -> f32[2] {
%p0 = f32[2]{0} parameter(0)
%p1 = f32[2]{0} parameter(1)
%add = f32[2] add(%p0, %p1)
%sub = f32[2] subtract(%p0, %p1)
%mul = f32[2] multiply(%add, %sub)
%div = f32[2] divide(%add, %sub)
ROOT %atan2 = f32[2] atan2(%mul, %div)
}
ENTRY entry_computation {
p0 = f32[2] parameter(0)
p1 = f32[2] parameter(1)
ROOT %fusion = f32[2] fusion(p0, p1), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirLoopFusionTest, ComplexOps) {
auto kHloString = R"(
HloModule test_module
%fused_computation {
%p0 = f32[2]{0} parameter(0)
%p1 = f32[2]{0} parameter(1)
%p2 = c64[2]{0} parameter(2)
%complex = c64[2] complex(%p0, %p1)
%add = c64[2] add(%complex, %p2)
%cst = c64[2]{0} constant({(2.0, 0.0), (0.0, 2.0)})
ROOT %mul = c64[2] multiply(%add, %cst)
}
ENTRY entry_computation {
p0 = f32[2] parameter(0)
p1 = f32[2] parameter(1)
p2 = c64[2] parameter(2)
ROOT %fusion = c64[2] fusion(p0, p1, p2), kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirLoopFusionTest, IotaCopyBitcastBroadcastReshapeReverseTranspose) {
auto kHloString = R"(
HloModule test_module
%fused_computation {
%iota = f32[10,20,30] iota(), iota_dimension=2
%copy = f32[10,20,30] copy(%iota)
%bitcast = s32[10,20,30] bitcast-convert(%copy)
%broadcast = s32[2,10,3,20,5,30,7] broadcast(%bitcast),
dimensions={1,3,5}
%reshape = s32[20,60,150,7] reshape(%broadcast)
%reverse = s32[20,60,150,7] reverse(%reshape), dimensions={2,3}
ROOT %transpose = s32[60,20,7,150] transpose(%reverse),
dimensions={1,0,3,2}
}
ENTRY entry_computation {
ROOT %fusion = s32[60,20,7,150] fusion(),
kind=kLoop, calls=%fused_computation
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirLoopFusionTest, VariadicReduce) {
auto kHloString = R"(
HloModule Test, is_scheduled=true
Add {
scalar_lhs.0 = f32[] parameter(0)
scalar_lhs.1 = f32[] parameter(1)
scalar_rhs.0 = f32[] parameter(2)
scalar_rhs.1 = f32[] parameter(3)
add = f32[] add(scalar_lhs.0, scalar_rhs.0)
mul = f32[] multiply(scalar_lhs.1, scalar_rhs.1)
ROOT t = (f32[], f32[]) tuple(add, mul)
}
fused_computation {
param_0 = f32[3,4,5]{2,1,0} parameter(0)
param_1 = f32[3,4,5]{2,1,0} parameter(1)
param_2 = f32[] parameter(2)
ROOT d.1 = (f32[4], f32[4]) reduce(f32[3,4,5]{2,1,0} param_0,
f32[3,4,5]{2,1,0} %param_1, f32[] param_2, f32[] param_2),
dimensions={0,2}, to_apply=Add
}
ENTRY main {
a = f32[3,4,5]{2,1,0} parameter(0)
b = f32[3,4,5]{2,1,0} parameter(1)
c = f32[] constant(0)
ROOT fusion = (f32[4]{0}, f32[4]{0}) fusion(a, b, c),
kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirLoopFusionTest, MinimumMaximum) {
auto kHloString = R"(
HloModule Test
fused_computation {
param0 = f64[] parameter(0)
param1 = f64[] parameter(1)
minimum = f64[] minimum(f64[] param0, f64[] param1)
maximum = f64[] maximum(f64[] param0, f64[] param1)
ROOT tuple = (f64[], f64[]) tuple(minimum, maximum)
}
ENTRY main {
param0 = f64[] parameter(0)
param1 = f64[] parameter(1)
ROOT fusion = (f64[], f64[]) fusion(f64[] param0, f64[] param1), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirLoopFusionTest, TupleBitcast) {
auto kHloString = R"(
HloModule Test
fused_computation {
param0 = f64[8] parameter(0)
param1 = f64[8] parameter(1)
minimum = f64[8] minimum(param0, param1)
maximum = f64[8] maximum(param0, param1)
bc = f64[2, 4] bitcast(maximum)
ROOT tuple = (f64[8], f64[2,4]) tuple(minimum, bc)
}
ENTRY main {
param0 = f64[8] parameter(0)
param1 = f64[8] parameter(1)
ROOT fusion = (f64[8], f64[2,4]) fusion(param0, param1),
kind=kLoop, calls=fused_computation
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirLoopFusionTest, NestedTuple) {
auto kHloString = R"(
add {
scalar_lhs.0 = f32[] parameter(0)
scalar_lhs.1 = f32[] parameter(1)
scalar_rhs.0 = f32[] parameter(2)
scalar_rhs.1 = f32[] parameter(3)
add = f32[] add(scalar_lhs.0, scalar_rhs.0)
mul = f32[] multiply(scalar_lhs.1, scalar_rhs.1)
ROOT t = (f32[], f32[]) tuple(add, mul)
}
fused_computation {
param_0 = f32[3,4,5]{2,1,0} parameter(0)
param_1 = f32[3,4,5]{2,1,0} parameter(1)
param_2 = f32[] parameter(2)
param_3 = f32[4] parameter(3)
reduce = (f32[4], f32[4]) reduce(f32[3,4,5]{2,1,0} param_0,
f32[3,4,5]{2,1,0} %param_1, f32[] param_2, f32[] param_2),
dimensions={0,2}, to_apply=add
log = f32[4] log(param_3)
ROOT tuple = ((f32[4], f32[4]), f32[4]) tuple(reduce, log)
}
ENTRY main {
a = f32[3,4,5]{2,1,0} parameter(0)
b = f32[3,4,5]{2,1,0} parameter(1)
c = f32[] constant(0)
d = f32[4] parameter(2)
ROOT fusion = ((f32[4], f32[4]), f32[4]) fusion(a, b, c, d),
kind=kLoop, calls=fused_computation
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirLoopFusionTest, DynamicSliceWith64BitInput) {
constexpr auto kHloString = R"(
%fused_computation {
%p0 = s64[] parameter(0)
%p1 = f64[5] parameter(1)
ROOT slice = f64[4] dynamic-slice(%p1, %p0), dynamic_slice_sizes={4}
}
ENTRY main {
%c = s64[] constant(-1000000000000)
%p0 = f64[5] parameter(0)
ROOT %fusion = f64[4]{0} fusion(%c, %p0), kind=kInput, calls=%fused_computation
})";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
}
}
} | 2,126 |
#ifndef XLA_SERVICE_GPU_FUSIONS_SCATTER_MLIR_H_
#define XLA_SERVICE_GPU_FUSIONS_SCATTER_MLIR_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/loop.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
class MlirScatterFusion : public MlirFusionEmitterBase {
public:
explicit MlirScatterFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis), config_(ComputeLoopFusionConfig(analysis)) {}
LaunchDimensions launch_dimensions() const override;
static bool IsSupported(const HloFusionAnalysis& analysis);
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override;
protected:
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const override;
std::vector<mlir_converter::EpilogueSpecification> GetEpilogues(
const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const override;
private:
const HloFusionAnalysis& analysis_;
LaunchDimensionsConfig config_;
};
}
}
#endif
#include "xla/service/gpu/fusions/scatter_mlir.h"
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/scatter_simplifier.h"
#include "xla/shape.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
namespace ma = ::mlir::arith;
namespace scf = ::mlir::scf;
using llvm::SmallVector;
using mlir::Location;
using mlir::OpBuilder;
using mlir::Value;
using mlir::ValueRange;
using mlir::func::ReturnOp;
using mlir::tensor::InsertOp;
using mlir_converter::CallTargetProvider;
using mlir_converter::PartitionedComputations;
using mlir_converter::ProvideParameter;
}
bool MlirScatterFusion::IsSupported(const HloFusionAnalysis& analysis) {
const auto* scatter =
Cast<HloScatterInstruction>(&analysis.fusion_hero(0).instruction());
if (scatter->scatter_operand_count() != 1) {
LOG(ERROR) << "Variadic scatter is not supported like in the legacy "
"emitter, although it is possible to make it work when the "
"indices are unique.";
return false;
}
return true;
}
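// Scatter writes to data-dependent locations (the destination row comes from
// the runtime values of the indices operand), so there is no static map from
// thread IDs to output elements.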
std::optional<IndexingMap> MlirScatterFusion::ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const {
return std::nullopt;
}
std::optional<IndexingMap> MlirScatterFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const {
const auto* scatter =
DynCast<HloScatterInstruction>(&analysis_.fusion_hero(0).instruction());
CHECK(ScatterSimplifier::IsSimplifiedScatter(scatter))
<< "Non-simplified HLO Scatter is not supported.";
int64_t scatter_operand_count = scatter->scatter_operand_count();
if (hero_operand_index < scatter_operand_count) {
return std::nullopt;
}
Shape scatter_update_shape = scatter->scatter_updates().front()->shape();
IndexingMap scatter_update_map = GetDefaultThreadIdIndexingMap(
launch_dimensions(), config_.unroll_factor, scatter_update_shape, ctx);
if (hero_operand_index == scatter_operand_count) {
Shape scatter_indices_shape = scatter->scatter_indices()->shape();
CHECK_EQ(scatter_indices_shape.rank(), 2) << scatter->ToString();
IndexingMap updates_to_indices_map{
mlir::AffineMap::get(
scatter_update_shape.rank(), 1,
{mlir::getAffineDimExpr(0, ctx), mlir::getAffineSymbolExpr(0, ctx)},
ctx),
DimVarsFromTensorSizes(scatter_update_shape.dimensions()),
RangeVarsFromTensorSizes({scatter_indices_shape.dimensions(1)}),
{}};
auto scatter_indices_map = scatter_update_map * updates_to_indices_map;
scatter_indices_map.Simplify();
return scatter_indices_map;
}
return scatter_update_map;
}
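// The grid is sized from the updates operand (the last scatter operand):
// each thread processes one element of the updates tensor.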
LaunchDimensions MlirScatterFusion::launch_dimensions() const {
const auto& scatter = analysis_.fusion_hero(0).instruction();
auto& shape = scatter.operands().back()->shape();
return CalculateLaunchDimensions(shape, analysis_.device_info());
}
std::vector<mlir_converter::EpilogueSpecification>
MlirScatterFusion::GetEpilogues(const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const {
return {mlir_converter::EpilogueSpecification::FromIdentityIndexing(
&analysis_.fusion_hero(0).instruction(),
&analysis_.fusion_root(0).instruction(), mlir_context)};
}
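// Applies the scatter combiner to a single update element. With unique
// indices the result can be written with a plain tensor.insert; otherwise the
// combiner runs inside an atomic_rmw region so concurrent updates stay safe.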
mlir::Value EmitScatterComputation(
const HloInstruction* scatter, ValueRange indices, Value update_elem,
Value output_tensor,
const mlir_converter::PartitionedComputation& root_computation,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function, mlir::ImplicitLocOpBuilder& b) {
constexpr int kScatterOperandIndex = 0;
auto reducer =
call_targets(scatter->called_computations()[0]->root_instruction());
if (scatter->unique_indices()) {
auto operand_elem =
ProvideParameter(root_computation, scatter, kScatterOperandIndex,
indices, call_targets, entry_function, b)[0];
auto reduced_val = mlir_converter::InlineBlock(
b, reducer.getBody().front(), {operand_elem, update_elem})[0];
return b.create<InsertOp>(reduced_val, output_tensor, indices);
}
auto atomic_rmw = b.create<AtomicRMWOp>(output_tensor, indices);
mlir::OpBuilder body_builder = atomic_rmw.getBodyBuilder();
auto reduced_val = mlir_converter::InlineBlock(
body_builder, reducer.getBody().front(),
{atomic_rmw.getCurrentValue(), update_elem})[0];
body_builder.create<xla::gpu::YieldOp>(reducer->getLoc(), reduced_val);
return atomic_rmw->getResult(0);
}
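// Emits the scatter loop: every thread reads one update element, loads the
// corresponding scatter index, checks it against the operand bounds, and
// performs the combine-and-write only when the index is in bounds.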
absl::Status MlirScatterFusion::EmitEntryFunction(
const PartitionedComputations& computations,
const CallTargetProvider& call_targets, mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
constexpr int kScatterOperandIndex = 0;
constexpr int kScatterIndicesIndex = 1;
constexpr int kScatterUpdateIndex = 2;
const auto* scatter = &analysis_.fusion_hero(0).instruction();
const HloInstruction* scatter_operand =
scatter->operand(kScatterOperandIndex);
const HloInstruction* scatter_indices =
scatter->operand(kScatterIndicesIndex);
const HloInstruction* scatter_update = scatter->operand(kScatterUpdateIndex);
mlir::MLIRContext* mlir_context = entry_function.getContext();
  auto thread_id_to_update_map =
      ComputeThreadIdToInputIndexing(0, kScatterUpdateIndex, mlir_context)
          .value();
thread_id_to_update_map.Simplify();
thread_id_to_update_map.RemoveUnusedSymbols();
const auto& root_computation = computations.FindPartitionedComputation(
fusion.fused_instructions_computation());
mlir::ImplicitLocOpBuilder b(entry_function.getLoc(), entry_function);
b.setInsertionPointToStart(entry_function.addEntryBlock());
SmallVector<Value> result_tensors{entry_function.getArguments().back()};
auto scatter_result = EmitThreadLoopNest(
b, result_tensors, thread_id_to_update_map,
[&](ValueRange output_tensors, ValueRange dim_values,
ValueRange symbol_values) -> SmallVector<Value> {
auto update_tensor_indices = mlir_converter::ApplyIndexing(
thread_id_to_update_map, dim_values, symbol_values, b);
auto update_elem = ProvideParameter(
root_computation, scatter, kScatterUpdateIndex,
update_tensor_indices, call_targets, entry_function, b)[0];
mlir::Value in_bounds = b.create<ma::ConstantIntOp>(1, b.getI1Type());
SmallVector<Value, 4> indices{
llvm::ArrayRef(update_tensor_indices).drop_front()};
for (int i = 0; i < scatter_indices->shape().dimensions(1); ++i) {
SmallVector<Value, 4> indices_tensor_indices = {
update_tensor_indices.front(), b.create<ma::ConstantIndexOp>(i)};
auto index = ProvideParameter(
root_computation, scatter, kScatterIndicesIndex,
indices_tensor_indices, call_targets, entry_function, b)[0];
if (primitive_util::IsUnsignedIntegralType(
scatter->operand(kScatterIndicesIndex)
->shape()
.element_type())) {
index = b.create<ma::IndexCastUIOp>(b.getIndexType(), index);
} else {
index = b.create<ma::IndexCastOp>(b.getIndexType(), index);
}
Value ub = b.create<ma::ConstantIndexOp>(
scatter_operand->shape().dimensions(i) -
scatter_update->shape().dimensions(i + 1));
in_bounds = b.create<ma::AndIOp>(
in_bounds,
b.create<ma::CmpIOp>(ma::CmpIPredicate::ule, index, ub));
indices[i] = b.create<ma::AddIOp>(index, indices[i]);
}
Value output_tensor = output_tensors.front();
Value predicated_update =
b.create<scf::IfOp>(
in_bounds,
[&](OpBuilder& then_builder, Location then_loc) -> void {
Value updated_output = EmitScatterComputation(
scatter, indices, update_elem, output_tensor,
root_computation, call_targets, entry_function, b);
b.create<scf::YieldOp>(updated_output);
},
[&](OpBuilder& else_b, Location else_loc) {
b.create<scf::YieldOp>(output_tensor);
})
.getResult(0);
return {predicated_update};
});
b.create<ReturnOp>(scatter_result);
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/fusions/scatter_mlir.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/service/gpu/fusions/mlir_emitter_test_base.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using MlirScatterFusionTest = MlirEmitterTestBase<MlirScatterFusion>;
TEST_F(MlirScatterFusionTest, ThreadIdIndexing) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
computation {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
%p2 = f32[] parameter(2)
%p3 = f32[] parameter(3)
ROOT %tuple = (f32[], f32[]) tuple(f32[] %p2, f32[] %p3)
}
scatter {
%operand0 = f32[300,200] parameter(0)
%operand1 = f32[300,200] parameter(1)
%indices = s32[42,1] parameter(2)
%update.1 = f32[42,10,20] parameter(3)
      %update.2 = f32[42,10,20] parameter(4)
ROOT %scatter = (f32[300,200], f32[300,200]) scatter(
f32[300,200] %operand0,
f32[300,200] %operand1,
s32[42,1] %indices,
f32[42,10,20] %update.1,
f32[42,10,20] %update.2
),
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
to_apply=computation
}
ENTRY entry {
%operand0 = f32[300,200] parameter(0)
%operand1 = f32[300,200] parameter(1)
%indices = s32[42,1] parameter(2)
%update.1 = f32[42,10,20] parameter(3)
      %update.2 = f32[42,10,20] parameter(4)
ROOT %fusion = (f32[300,200], f32[300,200]) fusion(
%operand0, %operand1, %indices, %update.1, %update.2),
kind=kLoop, calls=scatter
}
)"));
thread_id_printer_.SetSymbolName(0, "chunk_id");
thread_id_printer_.SetSymbolName(1, "unroll_id");
thread_id_printer_.SetSymbolName(2, "index_id");
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirScatterFusion fusion(analysis);
constexpr auto kUpdatesIndexing = R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
((bl_x * 128 + th_x) floordiv 200) mod 42,
((bl_x * 128 + th_x) floordiv 20) mod 10,
(bl_x * 128 + th_x) mod 20
)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 66)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
th_x + bl_x * 128 in [0, 8400)
)";
  EXPECT_THAT(
      fusion.ComputeThreadIdToInputIndexing(0, 3, &mlir_context_)
          ->ToString(thread_id_printer_),
      MatchIndexingString(kUpdatesIndexing));
  EXPECT_THAT(
      fusion.ComputeThreadIdToInputIndexing(0, 4, &mlir_context_)
          ->ToString(thread_id_printer_),
      MatchIndexingString(kUpdatesIndexing));
  EXPECT_THAT(
      fusion.ComputeThreadIdToInputIndexing(1, 3, &mlir_context_)
          ->ToString(thread_id_printer_),
      MatchIndexingString(kUpdatesIndexing));
  EXPECT_THAT(
      fusion.ComputeThreadIdToInputIndexing(1, 4, &mlir_context_)
          ->ToString(thread_id_printer_),
      MatchIndexingString(kUpdatesIndexing));
constexpr auto kIndicesIndexing = R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id, index_id] ->
(((bl_x * 128 + th_x) floordiv 200) mod 42, 0)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 66)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
index_id in [0, 1)
th_x + bl_x * 128 in [0, 8400)
)";
  EXPECT_THAT(
      fusion.ComputeThreadIdToInputIndexing(0, 2, &mlir_context_)
          ->ToString(thread_id_printer_),
      MatchIndexingString(kIndicesIndexing));
  EXPECT_THAT(
      fusion.ComputeThreadIdToInputIndexing(1, 2, &mlir_context_)
          ->ToString(thread_id_printer_),
      MatchIndexingString(kIndicesIndexing));
}
TEST_F(MlirScatterFusionTest, Scatter_UniqueIndices) {
auto kHloString = R"(
HloModule module
add {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %sum = f32[] add(%p0, %p1)
}
scatter {
%operand = f32[10,5] parameter(0)
%indices = s32[8,1] parameter(1)
%update = f32[8,1,2] parameter(2)
ROOT %scatter = f32[10,5] scatter(
f32[10,5] %operand,
s32[8,1] %indices,
f32[8,1,2] %update
),
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
unique_indices=true,
to_apply=add
}
ENTRY entry {
%c1 = f32[] constant(1)
%c1_tensor = f32[10,5] broadcast(%c1), dimensions={}
%indices = s32[8,1] constant({{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}})
%update = f32[8, 1, 2] parameter(0)
ROOT %fusion = f32[10, 5] fusion(
%c1_tensor, %indices, %update), kind=kLoop, calls=scatter
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirScatterFusionTest, Scatter_Unsigned) {
auto kHloString = R"(
HloModule module
add {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %sum = f32[] add(%p0, %p1)
}
scatter {
%operand = f32[10,5] parameter(0)
%indices = u32[24,1] parameter(1)
%update = f32[24,2,3] parameter(2)
ROOT %scatter = f32[10,5] scatter(%operand, %indices, %update),
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
to_apply=add
}
ENTRY entry {
%c1 = f32[] constant(1)
%c1_tensor = f32[10,5] broadcast(%c1), dimensions={}
%indices = u32[24,1] parameter(0)
%update = f32[24, 2, 3] parameter(1)
ROOT %fusion = f32[10, 5] fusion(%c1_tensor, %indices, %update),
kind=kLoop, calls=scatter
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirScatterFusionTest, Scatter_Add) {
auto kHloString = R"(
HloModule module
add {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %sum = f32[] add(%p0, %p1)
}
scatter {
%operand = f32[10,5] parameter(0)
%indices = s32[24,1] parameter(1)
%update = f32[24,2,3] parameter(2)
ROOT %scatter = f32[10,5] scatter(
f32[10,5] %operand,
s32[24,1] %indices,
f32[24,2,3] %update
),
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
unique_indices=false,
to_apply=add
}
ENTRY entry {
%c1 = f32[] constant(1)
%c1_tensor = f32[10,5] broadcast(%c1), dimensions={}
%indices = s32[24,1] parameter(0)
%update = f32[24, 2, 3] parameter(1)
ROOT %fusion = f32[10, 5] fusion(
%c1_tensor, %indices, %update), kind=kLoop, calls=scatter
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirScatterFusionTest, Scatter_Overwrite) {
auto kHloString = R"(
HloModule module
overwrite {
%p0 = f32[] parameter(0)
ROOT %p1 = f32[] parameter(1)
}
scatter {
%operand = f32[10,5] parameter(0)
%indices = s32[3,1] parameter(1)
%update = f32[3,2,3] parameter(2)
ROOT %scatter = f32[10,5] scatter(
f32[10,5] %operand,
s32[3,1] %indices,
f32[3,2,3] %update
),
update_window_dims={1,2},
inserted_window_dims={},
scatter_dims_to_operand_dims={0},
index_vector_dim=1,
unique_indices=false,
to_apply=overwrite
}
ENTRY entry {
%c1 = f32[] constant(1)
%c1_tensor = f32[10,5] broadcast(%c1), dimensions={}
      %indices = s32[3,1] constant({{0}, {3}, {6}})
%update = f32[3, 2, 3] parameter(0)
ROOT %fusion = f32[10, 5] fusion(
%c1_tensor, %indices, %update), kind=kLoop, calls=scatter
}
)";
TF_ASSERT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
}
}
} | 2,127 |
#ifndef XLA_SERVICE_GPU_FUSIONS_COPY_H_
#define XLA_SERVICE_GPU_FUSIONS_COPY_H_
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
namespace xla {
namespace gpu {
class MemcpyFusion : public FusionInterface {
public:
MemcpyFusion(const HloFusionAnalysis& analysis,
const BufferAssignment* buffer_assignment)
: analysis_(analysis), buffer_assignment_(buffer_assignment) {}
absl::StatusOr<FusionEmissionResult> Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const final;
private:
const HloFusionAnalysis& analysis_;
const BufferAssignment* buffer_assignment_;
};
}
}
#endif
#include "xla/service/gpu/fusions/copy.h"
#include <memory>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/runtime/copy_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
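// Memcpy fusions emit no kernel: every root reads a fusion parameter
// directly, so the fusion lowers to one device-to-device copy thunk per root
// whose source and destination buffers differ.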
absl::StatusOr<FusionEmissionResult> MemcpyFusion::Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const {
std::vector<BufferAllocation::Slice> src_buffers;
for (const HloInstructionAdaptor& root_adaptor : analysis_.fusion_roots()) {
const HloInstruction* root = &root_adaptor.instruction();
const HloInstruction* src_instr =
fusion.operand(root->operand(0)->parameter_number());
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
buffer_assignment_->GetUniqueSlice(src_instr, {}));
src_buffers.push_back(slice);
}
std::vector<BufferAllocation::Slice> dst_buffers;
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
fusion.shape(), [&](const Shape& subshape, const ShapeIndex& index) {
if (!subshape.IsArray()) {
return absl::OkStatus();
}
TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice,
buffer_assignment_->GetUniqueSlice(&fusion, index));
dst_buffers.push_back(slice);
return absl::OkStatus();
}));
FusionEmissionResult result;
for (int i = 0; i < src_buffers.size(); ++i) {
if (src_buffers[i] != dst_buffers[i]) {
result.thunks.emplace_back(std::make_unique<DeviceToDeviceCopyThunk>(
Thunk::ThunkInfo::WithProfileAnnotation(&fusion),
src_buffers[i],
dst_buffers[i],
src_buffers[i].size()));
}
}
return result;
}
}
} | #include <memory>
#include <utility>
#include "xla/array2d.h"
#include "xla/client/xla_builder.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/tests/client_library_test_base.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_macros.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
class CopyOpTest : public HloTestBase {
protected:
void TestCopyOp(const Literal& literal) {
auto builder = HloComputation::Builder(TestName());
auto constant =
builder.AddInstruction(HloInstruction::CreateConstant(literal.Clone()));
builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kCopy, constant));
auto computation = builder.Build();
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(std::move(computation));
Literal result = ExecuteAndTransfer(std::move(module), {});
EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
void TestCopyConstantLayout021(size_t n1, size_t n2, size_t n3);
void TestCopyConstantLayoutR4(size_t n1, size_t n2, size_t n3, size_t n4,
absl::Span<const int64_t> permutation);
};
XLA_TEST_F(CopyOpTest, CopyR0Bool) {
TestCopyOp(LiteralUtil::CreateR0<bool>(true));
}
XLA_TEST_F(CopyOpTest, CopyR1S0U32) {
TestCopyOp(LiteralUtil::CreateR1<uint32_t>({}));
}
XLA_TEST_F(CopyOpTest, CopyR1S3U32) {
TestCopyOp(LiteralUtil::CreateR1<uint32_t>({1, 2, 3}));
}
XLA_TEST_F(CopyOpTest, CopyR3F32_2x2x3) {
TestCopyOp(LiteralUtil::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}},
{{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}}));
}
XLA_TEST_F(CopyOpTest, CopyR4S32_2x2x3x2) {
TestCopyOp(LiteralUtil::CreateR4(
{{{{1, -2}, {-4, 5}, {6, 7}}, {{8, 9}, {10, 11}, {12, 13}}},
{{{10, 3}, {7, -2}, {3, 6}}, {{2, 5}, {-11, 5}, {-2, -5}}}}));
}
XLA_TEST_F(CopyOpTest, CopyR4S32_0x2x3x2) {
TestCopyOp(LiteralUtil::CreateR4FromArray4D(Array4D<int32_t>(0, 2, 3, 2)));
}
XLA_TEST_F(CopyOpTest, CopyParameterScalar) {
auto builder = HloComputation::Builder(TestName());
auto literal = LiteralUtil::CreateR0<float>(42.0);
Shape shape = literal.shape();
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param0"));
builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kCopy, param0));
auto computation = builder.Build();
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(std::move(computation));
Literal result = ExecuteAndTransfer(std::move(module), {&literal});
LiteralTestUtil::ExpectR0Near<float>(42.0f, result, error_spec_);
}
XLA_TEST_F(CopyOpTest, CopyConstantR2Twice) {
auto builder = HloComputation::Builder(TestName());
auto literal = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
auto copy = builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kCopy, constant));
builder.AddInstruction(
HloInstruction::CreateUnary(copy->shape(), HloOpcode::kCopy, copy));
auto computation = builder.Build();
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(std::move(computation));
Literal result = ExecuteAndTransfer(std::move(module), {});
LiteralTestUtil::ExpectR2Near<float>({{1.0, 2.0}, {3.0, 4.0}}, result,
error_spec_);
}
XLA_TEST_F(CopyOpTest, CopyConstantR2DifferentLayouts) {
HloComputation::Builder builder(TestName());
Literal literal = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}});
Layout* literal_layout = literal.mutable_shape_do_not_use()->mutable_layout();
ASSERT_EQ(2, literal_layout->minor_to_major_size());
*literal_layout->mutable_minor_to_major() = {
literal_layout->minor_to_major(1), literal_layout->minor_to_major(0)};
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kCopy, constant));
std::unique_ptr<HloComputation> computation = builder.Build();
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(std::move(computation));
Literal result = ExecuteAndTransfer(std::move(module), {});
LiteralTestUtil::ExpectR2Near<float>({{1.0, 3.0}, {2.0, 4.0}}, result,
error_spec_);
}
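// Fills an n1 x n2 x n3 array, copies it as a constant, and forces the result
// layout to {1, 2, 0} to exercise transposing copies on non-default layouts.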
void CopyOpTest::TestCopyConstantLayout021(size_t n1, size_t n2, size_t n3) {
Array3D<int32_t> a(n1, n2, n3);
for (size_t i = 0; i < n1; ++i) {
for (size_t j = 0; j < n2; ++j) {
for (size_t k = 0; k < n3; ++k) {
a(i, j, k) = i * n3 * n2 + j * n3 + k;
}
}
}
HloComputation::Builder builder(TestName());
Literal literal = LiteralUtil::CreateR3FromArray3D(a);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kCopy, constant));
std::unique_ptr<HloComputation> computation = builder.Build();
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(std::move(computation));
ForceResultLayout(module.get(), LayoutUtil::MakeLayout({1, 2, 0}));
Literal result = ExecuteAndTransfer(std::move(module), {});
LiteralTestUtil::ExpectR3EqualArray3D(a, result);
}
void CopyOpTest::TestCopyConstantLayoutR4(
size_t n1, size_t n2, size_t n3, size_t n4,
absl::Span<const int64_t> permutation) {
Array4D<int32_t> a(n1, n2, n3, n4);
for (size_t i = 0; i < n1; ++i) {
for (size_t j = 0; j < n2; ++j) {
for (size_t k = 0; k < n3; ++k) {
for (size_t l = 0; l < n4; ++l) {
a(i, j, k, l) = i * n4 * n3 * n2 + j * n4 * n3 + k * n4 + l;
}
}
}
}
HloComputation::Builder builder(TestName());
Literal literal = LiteralUtil::CreateR4FromArray4D(a);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(std::move(literal)));
builder.AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kCopy, constant));
std::unique_ptr<HloComputation> computation = builder.Build();
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(std::move(computation));
ForceResultLayout(module.get(), LayoutUtil::MakeLayout(permutation));
Literal result = ExecuteAndTransfer(std::move(module), {});
LiteralTestUtil::ExpectR4EqualArray4D(a, result);
}
XLA_TEST_F(CopyOpTest, CopyConstantR3Layout021_SingleIncompleteTilePerLayer) {
TestCopyConstantLayout021(2, 2, 3);
}
XLA_TEST_F(CopyOpTest, CopyConstantR3Layout021_SingleCompleteTilePerLayer) {
TestCopyConstantLayout021(2, 32, 32);
}
XLA_TEST_F(CopyOpTest, CopyConstantR3Layout021_MultipleTilesPerLayer) {
TestCopyConstantLayout021(2, 70, 35);
}
XLA_TEST_F(CopyOpTest, CopyConstantR4Layout0231_MultipleTilesPerLayer) {
TestCopyConstantLayoutR4(2, 70, 7, 5, {0, 2, 3, 1});
}
XLA_TEST_F(CopyOpTest, CopyConstantR4Layout0312_MultipleTilesPerLayer) {
TestCopyConstantLayoutR4(2, 14, 5, 35, {0, 3, 1, 2});
}
using CopyOpClientTest = ClientLibraryTestBase;
XLA_TEST_F(CopyOpClientTest, Copy0x0) {
Shape in_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {0, 0}, {0, 1});
Shape out_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {0, 0}, {1, 0});
auto empty = Literal::CreateFromShape(in_shape);
XlaBuilder builder(TestName());
Parameter(&builder, 0, in_shape, "input");
auto input_data = client_->TransferToServer(empty).value();
auto actual =
ExecuteAndTransfer(&builder, {input_data.get()}, &out_shape).value();
EXPECT_TRUE(LiteralTestUtil::Equal(empty, actual));
}
}
} | 2,128 |
#ifndef XLA_SERVICE_GPU_FUSIONS_INPUT_SLICES_MLIR_H_
#define XLA_SERVICE_GPU_FUSIONS_INPUT_SLICES_MLIR_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
class MlirInputSlicesFusion : public MlirFusionEmitterBase {
public:
explicit MlirInputSlicesFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis),
unroll_factor_(CeilOfRatio(
8, analysis.input_output_info().smallest_output_dtype_bits)) {}
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t output_id, mlir::MLIRContext* ctx) const override;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override {
return std::nullopt;
}
protected:
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const override;
std::vector<mlir_converter::EpilogueSpecification> GetEpilogues(
const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const override;
private:
const HloFusionAnalysis& analysis_;
const int unroll_factor_;
};
}
}
#endif
#include "xla/service/gpu/fusions/input_slices_mlir.h"
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
using llvm::SmallVector;
using mlir::Value;
using mlir::ValueRange;
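// The thread->output map is the default thread->input map over the shared
// sliced operand composed with the slice's input-to-output indexing, so every
// output is addressed consistently with the single input iteration space.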
std::optional<IndexingMap>
MlirInputSlicesFusion::ComputeThreadIdToOutputIndexing(
int64_t output_id, mlir::MLIRContext* ctx) const {
auto launch_dims = launch_dimensions();
auto* slice = &analysis_.fusion_root(output_id).instruction();
const auto& shape = slice->operand(0)->shape();
return GetDefaultThreadIdIndexingMap(launch_dims, unroll_factor_, shape,
ctx) *
*ComputeInputToOutputIndexing(slice, 0, ctx)
.indexing_maps.front()
.begin();
}
std::vector<mlir_converter::EpilogueSpecification>
MlirInputSlicesFusion::GetEpilogues(const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const {
std::vector<const HloInstruction*> roots;
roots.reserve(analysis_.fusion_root_count());
for (const auto& root : analysis_.fusion_roots()) {
roots.push_back(&root.instruction());
}
return {mlir_converter::EpilogueSpecification::FromOutputIndexing(
analysis_, roots, roots, *this, mlir_context)};
}
LaunchDimensions MlirInputSlicesFusion::launch_dimensions() const {
const auto& root = analysis_.fusion_root(0).instruction();
const auto& shape = root.operand(0)->shape();
return CalculateLaunchDimensions(shape, analysis_.device_info(),
{unroll_factor_});
}
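// Iterates over the full input shape once; each slice output is written only
// when the current input element falls inside that slice's bounds, expressed
// as an scf.if guarded by the output indexing's constraints.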
absl::Status MlirInputSlicesFusion::EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
mlir::ImplicitLocOpBuilder builder(entry_function.getLoc(), entry_function);
builder.setInsertionPointToStart(entry_function.addEntryBlock());
auto launch_dims = launch_dimensions();
const auto& shape =
analysis_.fusion_root(0).instruction().operand(0)->shape();
auto input_indexing = GetDefaultThreadIdIndexingMap(
launch_dims, unroll_factor_, shape, builder.getContext());
int num_inputs = fusion.fused_instructions_computation()->num_parameters();
auto output_tensor_args =
entry_function.getArguments().drop_front(num_inputs);
auto result_tensors = EmitThreadLoopNest(
builder, output_tensor_args, input_indexing,
[&](ValueRange output_tensors, ValueRange dim_values,
ValueRange symbol_values) -> SmallVector<Value> {
auto input_indices = mlir_converter::ApplyIndexing(
input_indexing, dim_values, symbol_values, builder);
SmallVector<Value> input_operands(
entry_function.getArguments().take_front(num_inputs));
absl::c_copy(input_indices, std::back_inserter(input_operands));
SmallVector<Value> result_tensors;
result_tensors.reserve(output_tensor_args.size());
absl::flat_hash_map<const HloInstruction*, mlir::Value> input_values;
for (const HloInstructionAdaptor& root : analysis_.fusion_roots()) {
const auto* arg = root.instruction().operand(0);
if (auto& value = input_values[arg]; !value) {
value =
builder.create<PureCallOp>(call_targets(arg), input_operands)
.getResult(0);
}
}
for (auto [output_index, output] : llvm::enumerate(output_tensors)) {
auto output_indexing = ComputeThreadIdToOutputIndexing(
output_index, entry_function.getContext());
mlir::Value in_bounds = mlir_converter::CheckConstraints(
*output_indexing, dim_values, symbol_values, builder);
auto if_op = builder.create<mlir::scf::IfOp>(
in_bounds,
[&, output_index = output_index, output = output](
mlir::OpBuilder b, mlir::Location loc) {
mlir::ImplicitLocOpBuilder then_builder(loc, b);
auto output_indices = mlir_converter::ApplyIndexing(
*output_indexing, dim_values, symbol_values, then_builder);
const auto* arg = analysis_.fusion_root(output_index)
.instruction()
.operand(0);
auto inserted = then_builder.create<mlir::tensor::InsertOp>(
input_values[arg], output, output_indices);
then_builder.create<mlir::scf::YieldOp>(inserted.getResult());
},
[&, output = output](mlir::OpBuilder else_builder,
mlir::Location loc) {
else_builder.create<mlir::scf::YieldOp>(loc, output);
});
result_tensors.push_back(if_op.getResult(0));
}
return result_tensors;
});
builder.create<mlir::func::ReturnOp>(result_tensors);
return absl::OkStatus();
}
}
} | #include "xla/service/gpu/fusions/input_slices_mlir.h"
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/service/gpu/fusions/mlir_emitter_test_base.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
namespace xla {
namespace gpu {
namespace {
using MlirInputSlicesFusionTest = MlirEmitterTestBase<MlirInputSlicesFusion>;
TEST_F(MlirInputSlicesFusionTest, ThreadIndexing) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
%input = f32[4,5] parameter(0)
slice0 = f32[3,3] slice(input), slice={[1:4],[0:3]}
slice1 = f32[2,3] slice(input), slice={[0:2],[0:3]}
ROOT tuple = (f32[3,3], f32[2,3]) tuple(slice0, slice1)
}
ENTRY entry {
%input = f32[4,5] parameter(0)
ROOT %fusion = (f32[3,3], f32[2,3]) fusion(%input), kind=kLoop, calls=fused_computation
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
auto emitter = GetEmitter(analysis);
auto thread_id_to_output_indexing_0 =
emitter->ComputeThreadIdToOutputIndexing(0, &mlir_context_);
thread_id_to_output_indexing_0->Simplify();
EXPECT_THAT(thread_id_to_output_indexing_0->ToString(thread_id_printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[s0, s1] -> (
th_x floordiv 5 - 1,
th_x mod 5
)
domain:
th_x in [5, 20)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1)
bl_y in [0, 1)
bl_z in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
th_x mod 5 in [0, 3)
)"));
auto thread_id_to_output_indexing_1 =
emitter->ComputeThreadIdToOutputIndexing(1, &mlir_context_);
thread_id_to_output_indexing_1->Simplify();
EXPECT_THAT(thread_id_to_output_indexing_1->ToString(thread_id_printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[s0, s1] -> (
th_x floordiv 5,
th_x mod 5
)
domain:
th_x in [0, 10)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1)
bl_y in [0, 1)
bl_z in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
th_x mod 5 in [0, 3)
)"));
}
TEST_F(MlirInputSlicesFusionTest, SimpleInputSlices) {
auto kHloString = R"(
HloModule module
fused_computation {
%input = f32[2,4,5,7]{2,1,0,3} parameter(0)
slice0 = f32[1,3,3,5]{2,1,0,3} slice(input), slice={[0:1],[1:4],[0:3],[2:7]}
slice1 = f32[1,2,3,5]{2,1,0,3} slice(input), slice={[0:1],[0:2],[0:3],[2:7]}
ROOT tuple = (f32[1,3,3,5]{2,1,0,3}, f32[1,2,3,5]{2,1,0,3}) tuple(slice0, slice1)
}
ENTRY entry {
%input = f32[2,4,5,7]{2,1,0,3} parameter(0)
ROOT %fusion = (f32[1,3,3,5]{2,1,0,3}, f32[1,2,3,5]{2,1,0,3}) fusion(%input), kind=kLoop, calls=fused_computation
}
)";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirInputSlicesFusionTest, SliceOfPad) {
auto kHloString = R"(
fusion {
%p0 = f32[6] parameter(0)
%c0 = f32[] constant(0)
%pad0 = f32[12] pad(%p0, %c0), padding=0_1_1
%slice0 = f32[11] slice(%pad0), slice={[1:12]}
%pad1 = f32[12] pad(%p0, %c0), padding=1_0_1
%slice1 = f32[11] slice(%pad1), slice={[1:12]}
ROOT %tuple.9 = (f32[11], f32[11]) tuple(%slice0, %slice1)
}
ENTRY entry {
input = f32[6] parameter(0)
ROOT fusion = (f32[11], f32[11]) fusion(input), kind=kLoop, calls=fusion
})";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirInputSlicesFusionTest, ZeroSlice) {
auto kHloString = R"(
fusion {
%p0 = s32[0] parameter(0)
%p1 = s32[2] parameter(1)
%concatenate = s32[2] concatenate(p0, p1), dimensions={0}
%slice = s32[0] slice(%concatenate), slice={[0:0]}
%slice.1 = s32[2] slice(%concatenate), slice={[0:2]}
ROOT %tuple = (s32[0], s32[2]) tuple(%slice, %slice.1)
}
ENTRY entry {
%p0 = s32[0] parameter(0)
%p1 = s32[2] parameter(1)
ROOT fusion = (s32[0], s32[2]) fusion(%p0, %p1), kind=kLoop, calls=fusion
})";
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
}
}
} | 2,129 |
#ifndef XLA_SERVICE_GPU_FUSIONS_CUDNN_H_
#define XLA_SERVICE_GPU_FUSIONS_CUDNN_H_
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
namespace xla {
namespace gpu {
class CuDnnFusion : public FusionInterface {
public:
explicit CuDnnFusion(const HloFusionAnalysis&) {}
absl::StatusOr<FusionEmissionResult> Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const final;
};
}
}
#endif
#include "xla/service/gpu/fusions/cudnn.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "tsl/platform/statusor.h"
#if GOOGLE_CUDA
#include "xla/service/gpu/runtime/cudnn_thunk.h"
#endif
namespace xla {
namespace gpu {
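// No kernel is generated here: emission only wraps the fusion in a CuDnnThunk
// keyed by the fused computation's fingerprint and its kernel arguments.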
absl::StatusOr<FusionEmissionResult> CuDnnFusion::Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const {
#if GOOGLE_CUDA
VLOG(3) << fusion.ToString();
TF_ASSIGN_OR_RETURN(
auto kernel_arguments,
KernelArguments::Create(ir_emitter_context.buffer_assignment(), &fusion));
FusionEmissionResult result;
result.thunks.emplace_back(std::make_unique<CuDnnThunk>(
GetComputationFingerprint(fusion.fused_instructions_computation(), {}),
Thunk::ThunkInfo::WithProfileAnnotation(&fusion),
kernel_arguments.args()));
return result;
#else
return absl::UnimplementedError("cuDNN support requires CUDA");
#endif
}
}
} | #include <array>
#include <memory>
#include <string>
#include <tuple>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/substitute.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/primitive_util.h"
#include "xla/service/dump.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/cudnn_fusion_compiler.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/stream_executor_util.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/stream_executor/dnn.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/path.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class CuDnnFusionTest : public GpuCodegenTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_autotune_level(0);
debug_options.set_xla_gpu_cudnn_gemm_fusion_level(1);
return debug_options;
}
bool IsAtLeastHopperWithCuDnn9() {
se::StreamExecutor* executor = backend().default_stream_executor();
return executor->GetDeviceDescription()
.cuda_compute_capability()
.IsAtLeastHopper() &&
GetDnnVersionInfoOrDefault(executor).major_version() >= 9;
}
bool IsAtLeastCuDnn91() {
se::StreamExecutor* executor = backend().default_stream_executor();
const se::dnn::VersionInfo version = GetDnnVersionInfoOrDefault(executor);
return (version.major_version() == 9 && version.minor_version() >= 1) ||
version.major_version() > 9;
}
protected:
void SetUp() override {
if (!IsAtLeastHopperWithCuDnn9()) {
GTEST_SKIP()
<< "cuDNN GEMM fusion is not enabled before Hopper / cuDNN 9.";
}
}
};
TEST_F(CuDnnFusionTest, DumpingWorks) {
HloModuleConfig config;
DebugOptions options = GetDebugOptionsForTest();
std::string output_directory;
if (!tsl::io::GetTestUndeclaredOutputsDir(&output_directory)) {
output_directory = tsl::testing::TmpDir();
}
options.set_xla_dump_to(output_directory);
config.set_debug_options(options);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fd0 {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
ROOT d = f32[64,64] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[64,64] parameter(0)
p1 = f32[64,64] parameter(1)
ROOT d0 = f32[64,64] fusion(p0, p1), kind=kCustom, calls=fd0,
backend_config={"fusion_backend_config":{"kind":"__cudnn$fusion","cudnn_fusion_config":{"plan_id":"0"}}}
})",
config));
Thunk::BinaryMap dnn_compiled_graphs;
CuDnnFusionCompiler cudnn_compiler(*backend().default_stream_executor(),
dnn_compiled_graphs);
TF_ASSERT_OK_AND_ASSIGN(bool changed, cudnn_compiler.Run(module.get()));
EXPECT_TRUE(changed);
std::string dump;
TF_EXPECT_OK(tsl::ReadFileToString(
tsl::Env::Default(),
tsl::io::JoinPath(output_directory,
FilenameFor(*module, "",
"cudnn_fusion_d0.json")),
&dump));
EXPECT_TRUE(*RunFileCheck(dump, R"(
CHECK: "nodes": [
CHECK: "inputs": {
CHECK: "A": "p0",
CHECK: "B": "p1"
CHECK: },
CHECK: "outputs": {
CHECK: "C": "d"
CHECK: },
CHECK: "tag": "MATMUL"
CHECK: }
CHECK: ],
CHECK: "tensors": {
CHECK: "d": {
CHECK: "data_type": "FLOAT",
CHECK: "dim": [1,64,64],
CHECK: "stride": [1,64,1],
CHECK: "uid": 3,
CHECK: "uid_assigned": true
CHECK: },
CHECK: "p0": {
CHECK: "data_type": "FLOAT",
CHECK: "dim": [1,64,64],
CHECK: "stride": [1,64,1],
CHECK: "uid": 1,
CHECK: "uid_assigned": true
CHECK: },
CHECK: "p1": {
CHECK: "data_type": "FLOAT",
CHECK: "dim": [1,64,64],
CHECK: "stride": [1,64,1],
CHECK: "uid": 2,
CHECK: "uid_assigned": true
CHECK: }
)"));
}
using CuDnnFusionExecutionTest = CuDnnFusionTest;
namespace m = ::xla::match;
TEST_F(CuDnnFusionExecutionTest, WorkspaceAllocationWorks) {
if (!IsAtLeastCuDnn91()) {
GTEST_SKIP() << "This test case requests a workspace only with cuDNN 9.1+.";
}
const std::string kHloText = R"(
fusion1 {
p0 = f32[32,96] parameter(0)
p1 = f32[96,64] parameter(1)
ROOT r = f32[32,64] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,96] parameter(0)
p1 = f32[96,64] parameter(1)
ROOT _ = f32[32,64] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloText));
Thunk::BinaryMap dnn_compiled_graphs;
CuDnnFusionCompiler cudnn_compiler(*backend().default_stream_executor(),
dnn_compiled_graphs);
TF_ASSERT_OK_AND_ASSIGN(bool changed, cudnn_compiler.Run(module.get()));
EXPECT_TRUE(changed);
EXPECT_THAT(module->entry_computation()->root_instruction(),
GmockMatch(m::GetTupleElement(m::Fusion())));
EXPECT_THAT(module->entry_computation()
->root_instruction()
->operand(0)
->fused_instructions_computation()
->root_instruction(),
GmockMatch(m::Tuple(m::Dot(), m::CustomCall())));
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionExecutionTest,
NoTritonConfigIsAssignedAtZeroAutotuningLevel) {
EXPECT_EQ(GetDebugOptionsForTest().xla_gpu_autotune_level(), 0);
MatchOptimizedHlo(R"(
fusion1 {
p0 = f32[32,96] parameter(0)
p1 = f32[96,64] parameter(1)
ROOT r = f32[32,64] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,96] parameter(0)
p1 = f32[96,64] parameter(1)
ROOT _ = f32[32,64] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
R"(
CHECK-NOT: triton_gemm_config
)");
}
TEST_F(CuDnnFusionExecutionTest, DotF32ExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = f32[32,96] parameter(0)
p1 = f32[96,64] parameter(1)
ROOT r = f32[32,64] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,96] parameter(0)
p1 = f32[96,64] parameter(1)
ROOT _ = f32[32,64] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionExecutionTest, DotBF16WithCopyExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = bf16[96,512,64]{1,2,0} parameter(0)
cp = bf16[96,512,64]{2,1,0} copy(p0)
p1 = bf16[96,64,512]{2,1,0} parameter(1)
ROOT d = bf16[96,512,512]{2,1,0} dot(cp, p1),
lhs_batch_dims={0}, lhs_contracting_dims={2},
rhs_batch_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = bf16[96,512,64]{1,2,0} parameter(0)
p1 = bf16[96,64,512]{2,1,0} parameter(1)
ROOT r = bf16[96,512,512]{2,1,0} fusion(p0, p1), kind=kCustom,
calls=fusion1,
backend_config={"fusion_backend_config": {kind :"__cudnn$fusion"}}
})",
ErrorSpec{1e-2, 1e-3}));
}
TEST_F(CuDnnFusionExecutionTest, DotBF16BF16F32ExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = bf16[16,32,128] parameter(0)
p1 = bf16[16,128,64] parameter(1)
ROOT r = f32[16,32,64] dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = bf16[16,32,128] parameter(0)
p1 = bf16[16,128,64] parameter(1)
ROOT _ = f32[16,32,64] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-6, 1e-6}));
}
TEST_F(CuDnnFusionExecutionTest, DotF32WithOutputSubtractionExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = f32[9,32,96] parameter(0)
p1 = f32[9,96,64] parameter(1)
d = f32[9,32,64] dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
p2 = f32[9,32,64] parameter(2)
ROOT s = f32[9,32,64] subtract(p2, d)
}
ENTRY e {
p0 = f32[9,32,96] parameter(0)
p1 = f32[9,96,64] parameter(1)
p2 = f32[9,32,64] parameter(2)
ROOT _ = f32[9,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionExecutionTest, DotWithNonDefaultLayoutsExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = bf16[32,32]{0,1} parameter(0)
p1 = bf16[32,32]{1,0} parameter(1)
ROOT r = bf16[32,32]{0,1} dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = bf16[32,32]{0,1} parameter(0)
p1 = bf16[32,32]{1,0} parameter(1)
ROOT _ = bf16[32,32]{0,1} fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-4, 1e-4}));
}
TEST_F(CuDnnFusionExecutionTest, RHSFusionExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = bf16[5,32,96] parameter(0)
p1 = s8[5,96,16] parameter(1)
p1c = bf16[5,96,16] convert(p1)
ROOT r = bf16[5,32,16] dot(p0, p1c),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = bf16[5,32,96] parameter(0)
p1 = s8[5,96,16] parameter(1)
ROOT _ = bf16[5,32,16] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionExecutionTest, SkipNonDefaultPrecision) {
EXPECT_FALSE(Run(R"(
t {
p0 = f32[27,23] parameter(0)
p0c = s8[27,23] convert(p0)
p0cc = f32[27,23] convert(p0c)
p1 = f32[23,21] parameter(1)
ROOT r = f32[27,21] dot(p0cc, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0},
operand_precision={HIGH, HIGH}
}
ENTRY e {
p0 = f32[27,23] parameter(0)
p1 = f32[23,21] parameter(1)
ROOT r = f32[27,21] fusion(p0, p1), kind=kCustom, calls=t,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})"));
}
TEST_F(CuDnnFusionExecutionTest,
DotF16NegateNonDefaultDimensionsExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = f16[16,32,96] parameter(0)
p0n = f16[16,32,96] negate(p0)
p1 = f16[16,64,96] parameter(1)
ROOT r = f16[16,32,64] dot(p0n, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={2}
}
ENTRY e {
p0 = f16[16,32,96] parameter(0)
p1 = f16[16,64,96] parameter(1)
ROOT _ = f16[16,32,64] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionExecutionTest, DotS8BF16ExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = s8[5,32,96] parameter(0)
p0c = bf16[5,32,96] convert(p0)
p1 = bf16[5,96,16] parameter(1)
ROOT r = bf16[5,32,16] dot(p0c, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = s8[5,32,96] parameter(0)
p1 = bf16[5,96,16] parameter(1)
ROOT _ = bf16[5,32,16] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-5, 1e-5}));
}
TEST_F(CuDnnFusionExecutionTest, IntegerMathExecutesCorrectly) {
if (!IsAtLeastCuDnn91()) {
GTEST_SKIP() << "Integer math requires cuDNN 9.1+.";
}
const std::string kHloText =
R"(
fusion1 {
p0 = s8[16,16] parameter(0)
p1 = s8[16,16] parameter(1)
d = s32[16,16] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
p2 = s32[16,16] parameter(2)
ROOT a = s32[16,16] add(d, p2)
}
ENTRY e {
p0 = s8[16,16] parameter(0)
p1 = s8[16,16] parameter(1)
p2 = s32[16,16] parameter(2)
ROOT r = s32[16,16] fusion(p0, p1, p2), kind=kCustom,
calls=fusion1,
backend_config={"fusion_backend_config": {"kind":"__cudnn$fusion"}}
})";
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{0, 0}));
}
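// Assumption noted: the comment below describes the intent of lowering
// xla_gpu_graph_min_graph_size; the flag itself is taken from the code as-is.
// Lowering the capture threshold to one operation makes even this tiny graph
// eligible for command buffer (CUDA graph) capture, which the test checks.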
class CuDnnFusionCommandBufferTest : public CuDnnFusionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = CuDnnFusionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_graph_min_graph_size(1);
return debug_options;
}
};
TEST_F(CuDnnFusionCommandBufferTest, CommandBuffersAreSupported) {
const std::string kHloText = R"(
fd0 {
p0 = f32[64,64]{1,0} parameter(0)
p1 = f32[64,64]{1,0} parameter(1)
ROOT d = f32[64,64]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
fd1 {
p0 = f32[64,64]{1,0} parameter(0)
p1 = f32[64,64]{1,0} parameter(1)
ROOT d = f32[64,64]{1,0} dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f32[64,64]{1,0} parameter(0)
p1 = f32[64,64]{1,0} parameter(1)
d0 = f32[64,64]{1,0} fusion(p0, p1), kind=kCustom, calls=fd0,
backend_config={"fusion_backend_config":{"kind":"__cudnn$fusion","cudnn_fusion_config":{"plan_id":"0"}}}
a = f32[64,64]{1,0} add(d0, d0)
ROOT d1 = f32[64,64]{1,0} fusion(a, d0), kind=kCustom, calls=fd1,
backend_config={"fusion_backend_config":{"kind":"__cudnn$fusion","cudnn_fusion_config":{"plan_id":"0"}}}
})";
se::StreamExecutorMemoryAllocator allocator(
backend().default_stream_executor());
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Executable> executable,
backend().compiler()->RunBackend(
GetOptimizedModule(kHloText).value(),
backend().default_stream_executor(), &allocator));
absl::StatusOr<bool> filecheck_result =
RunFileCheck(executable->module().ToString(), R"(
; CHECK: ENTRY
; CHECK-NEXT: parameter
; CHECK-NEXT: parameter
; CHECK: command_buffer
; CHECK-NOT: fusion
)");
TF_ASSERT_OK(filecheck_result.status());
EXPECT_TRUE(filecheck_result.value());
EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3}));
}
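// Runs the tests below with cuDNN GEMM fusion level 2.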
class CuDnnFusionLevel2Test : public CuDnnFusionExecutionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
CuDnnFusionExecutionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_cudnn_gemm_fusion_level(2);
return debug_options;
}
};
TEST_F(CuDnnFusionLevel2Test, BroadcastToDim2ExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = f16[16,32,128] parameter(0)
p1 = f16[16,128,64] parameter(1)
p2 = f16[16,32] parameter(2)
p2b = f16[16,32,128] broadcast(p2), dimensions={0,1}
a = f16[16,32,128] add(p0, p2b)
ROOT r = f16[16,32,64] dot(a, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f16[16,32,128] parameter(0)
p1 = f16[16,128,64] parameter(1)
p2 = f16[16,32] parameter(2)
ROOT _ = f16[16,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionLevel2Test, BroadcastToDim1ExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = f16[16,32,128] parameter(0)
p1 = f16[16,128,64] parameter(1)
p2 = f16[16,128] parameter(2)
p2b = f16[16,32,128] broadcast(p2), dimensions={0,2}
a = f16[16,32,128] add(p0, p2b)
ROOT r = f16[16,32,64] dot(a, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f16[16,32,128] parameter(0)
p1 = f16[16,128,64] parameter(1)
p2 = f16[16,128] parameter(2)
ROOT _ = f16[16,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionLevel2Test, BroadcastToDim0ExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = bf16[32,128] parameter(0)
p0b = bf16[5,32,128] broadcast(p0), dimensions={1,2}
p1 = bf16[5,128,64] parameter(1)
ROOT r = f32[5,32,64] dot(p0b, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = bf16[32,128] parameter(0)
p1 = bf16[5,128,64] parameter(1)
ROOT _ = f32[5,32,64] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionLevel2Test, BroadcastTo2DimsExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = f16[16,32,128] parameter(0)
p1 = f16[16,128,64] parameter(1)
p2 = f16[128] parameter(2)
p2b = f16[16,32,128] broadcast(p2), dimensions={2}
a = f16[16,32,128] add(p0, p2b)
ROOT r = f16[16,32,64] dot(a, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f16[16,32,128] parameter(0)
p1 = f16[16,128,64] parameter(1)
p2 = f16[128] parameter(2)
ROOT _ = f16[16,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionLevel2Test, BroadcastTo3DimsExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = f16[16,32,128] parameter(0)
p1 = f16[16,128,64] parameter(1)
p2 = f16[] parameter(2)
p2b = f16[16,32,128] broadcast(p2), dimensions={}
a = f16[16,32,128] add(p0, p2b)
ROOT r = f16[16,32,64] dot(a, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
ENTRY e {
p0 = f16[16,32,128] parameter(0)
p1 = f16[16,128,64] parameter(1)
p2 = f16[] parameter(2)
ROOT _ = f16[16,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionLevel2Test, ConstantExecutesCorrectly) {
if (!IsAtLeastCuDnn91()) {
GTEST_SKIP() << "Fused scalar constants require cuDNN 9.1+.";
}
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
x = bf16[16,32] parameter(0)
y = bf16[32,16] parameter(1)
x_const = bf16[] constant(-1)
y_const = s32[] constant(-2)
x_const_bcast = bf16[16,32] broadcast(x_const), dimensions={}
y_const_bcast = s32[32,16] broadcast(y_const), dimensions={}
y_const_convert = bf16[32,16] convert(y_const_bcast)
x_add = bf16[16,32] minimum(x, x_const_bcast)
y_add = bf16[32,16] minimum(y, y_const_convert)
dot_a = f32[16,16] dot(x_add, y_add), lhs_contracting_dims={1}, rhs_contracting_dims={0}
c = f32[] constant(0)
c_bcast = f32[16,16] broadcast(c), dimensions={}
ROOT out = f32[16,16] maximum(dot_a, c_bcast)
}
ENTRY e {
p0 = bf16[16,32] parameter(0)
p1 = bf16[32,16] parameter(1)
ROOT _ = f32[16,16] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionLevel2Test, ClampExecutesCorrectly) {
if (!IsAtLeastCuDnn91()) {
GTEST_SKIP() << "Clamp test requires cuDNN 9.1+.";
}
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
x = bf16[16,32] parameter(0)
y = bf16[32,16] parameter(1)
x_const_lower = bf16[] constant(3e-3)
x_const_upper = bf16[] constant(1e-1)
y_const_lower = bf16[] constant(3e-3)
y_const_upper = bf16[] constant(1e-1)
x_const_bcast_lower = bf16[16,32] broadcast(x_const_lower), dimensions={}
x_const_bcast_upper = bf16[16,32] broadcast(x_const_upper), dimensions={}
y_const_bcast_lower = bf16[32,16] broadcast(y_const_lower), dimensions={}
y_const_bcast_upper = bf16[32,16] broadcast(y_const_upper), dimensions={}
x_clamp = bf16[16,32] clamp(x_const_bcast_lower, x, x_const_bcast_upper)
y_clamp = bf16[32,16] clamp(y_const_bcast_lower, y, y_const_bcast_upper)
ROOT dot_a = f32[16,16] dot(x_clamp, y_clamp), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = bf16[16,32] parameter(0)
p1 = bf16[32,16] parameter(1)
ROOT _ = f32[16,16] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
TEST_F(CuDnnFusionLevel2Test, DotF8ExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
x = f8e4m3fn[16,32] parameter(0)
y = f8e4m3fn[32,16] parameter(1)
dot = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
combined_scale = f32[] multiply(x_scale, y_scale)
scale_bcast = f32[16,16] broadcast(combined_scale), dimensions={}
ROOT out = f32[16,16] multiply(dot, scale_bcast)
}
ENTRY e {
p0 = f8e4m3fn[16,32] parameter(0)
p1 = f8e4m3fn[32,16] parameter(1)
x_scale = f32[] parameter(2)
y_scale = f32[] parameter(3)
ROOT _ = f32[16,16] fusion(p0, p1, x_scale, y_scale), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1e-3, 1e-3}));
}
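// Runs the tests below with cuDNN GEMM fusion level 3.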
class CuDnnFusionLevel3Test : public CuDnnFusionExecutionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options =
CuDnnFusionExecutionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_cudnn_gemm_fusion_level(3);
return debug_options;
}
};
TEST_F(CuDnnFusionLevel3Test,
DotWithSplitNonContractingInputExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = s8[4,3,16,400]{2,1,3,0} parameter(0)
cp0 = s8[4,3,16,400]{3,2,1,0} copy(p0)
bc0 = s8[192,400]{1,0} bitcast(cp0)
cvt0 = bf16[192,400]{1,0} convert(bc0)
p1 = bf16[1,128,400]{2,1,0} parameter(1)
bc1 = bf16[128,400]{1,0} reshape(p1)
ROOT d = bf16[192,128]{1,0} dot(cvt0, bc1),
lhs_contracting_dims={1}, rhs_contracting_dims={1}
}
ENTRY r {
p0 = s8[4,3,16,400]{2,1,3,0} parameter(0)
p1 = bf16[1,128,400]{2,1,0} parameter(1)
ROOT r = bf16[192,128]{1,0} fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1, 1e-3}));
}
TEST_F(CuDnnFusionLevel3Test,
DotWithSplitNonContractingInOutExecutesCorrectly) {
EXPECT_TRUE(RunAndCompare(R"(
fusion1 {
p0 = s8[4,3,16,400]{2,1,3,0} parameter(0)
cp0 = s8[4,3,16,400]{3,2,1,0} copy(p0)
bc0 = s8[192,400]{1,0} bitcast(cp0)
cvt0 = bf16[192,400]{1,0} convert(bc0)
p1 = bf16[1,128,400]{2,1,0} parameter(1)
bc1 = bf16[128,400]{1,0} reshape(p1)
d = bf16[192,128]{1,0} dot(cvt0, bc1), lhs_contracting_dims={1}, rhs_contracting_dims={1}
bc = bf16[4,3,16,128]{3,2,1,0} bitcast(d)
ROOT cp = bf16[4,3,16,128]{2,1,3,0} copy(bc)
}
ENTRY r {
p0 = s8[4,3,16,400]{2,1,3,0} parameter(0)
p1 = bf16[1,128,400]{2,1,0} parameter(1)
ROOT r = bf16[4,3,16,128]{2,1,3,0} fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})",
ErrorSpec{1, 1e-3}));
}
class ElementwiseTest : public CuDnnFusionExecutionTest,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, HloOpcode, float>> {};
std::string ElementwiseTestParamsToString(
const ::testing::TestParamInfo<std::tuple<PrimitiveType, HloOpcode, float>>&
data) {
PrimitiveType data_type;
HloOpcode opcode;
float tolerance;
std::tie(data_type, opcode, tolerance) = data.param;
return absl::StrCat(
primitive_util::LowercasePrimitiveTypeName(data_type), "_",
absl::StrReplaceAll(HloOpcodeString(opcode), {{"-", "_"}}));
}
using UnaryElementwiseTest = ElementwiseTest;
TEST_P(UnaryElementwiseTest, ElementwiseFusionExecutesCorrectly) {
PrimitiveType data_type;
HloOpcode opcode;
float tolerance;
std::tie(data_type, opcode, tolerance) = GetParam();
const std::string kHloTemplate = R"(
fusion_computation {
p0 = f32[32,32] parameter(0)
p1 = $0[32,32] parameter(1)
f1.1 = $0[32,32] $1(p1)
c.1 = f32[32,32] convert(f1.1)
ROOT _ = f32[32,32] dot(p0, c.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p1 = $0[32,32] parameter(1)
p0 = f32[32,32] parameter(0)
ROOT r = f32[32,32] fusion(p0, p1), kind=kCustom,
calls=fusion_computation,
backend_config={"fusion_backend_config":{"kind":"__cudnn$$fusion"}}
})";
const std::string hlo_test = absl::Substitute(
kHloTemplate, primitive_util::LowercasePrimitiveTypeName(data_type),
HloOpcodeString(opcode));
EXPECT_TRUE(RunAndCompare(hlo_test,
ErrorSpec{tolerance, tolerance}));
}
INSTANTIATE_TEST_SUITE_P(
ElementwiseTestSuiteF32, UnaryElementwiseTest,
::testing::Combine(::testing::Values(F32),
::testing::ValuesIn(
{HloOpcode::kAbs, HloOpcode::kCeil, HloOpcode::kCos,
HloOpcode::kExp, HloOpcode::kFloor, HloOpcode::kLog,
HloOpcode::kNegate, HloOpcode::kRsqrt,
HloOpcode::kSin, HloOpcode::kSqrt, HloOpcode::kTan,
HloOpcode::kTanh}),
::testing::Values(5e-4)),
ElementwiseTestParamsToString);
using BinaryElementwiseTest = ElementwiseTest;
TEST_P(BinaryElementwiseTest, ElementwiseFusionExecutesCorrectly) {
PrimitiveType data_type;
HloOpcode opcode;
float tolerance;
std::tie(data_type, opcode, tolerance) = GetParam();
const std::string kHloTemplate = R"(
fusion_computation {
p0 = f32[32,32] parameter(0)
p1 = $0[32,32] parameter(1)
p2 = $0[32,32] parameter(2)
f1.1 = $0[32,32] $1(p1, p2)
c.1 = f32[32,32] convert(f1.1)
ROOT _ = f32[32,32] dot(p0, c.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,32] parameter(0)
p1 = $0[32,32] parameter(1)
p2 = $0[32,32] parameter(2)
ROOT r = f32[32,32] fusion(p0, p1, p2), kind=kCustom,
calls=fusion_computation,
backend_config={"fusion_backend_config":{"kind":"__cudnn$$fusion"}}
})";
const std::string hlo_test = absl::Substitute(
kHloTemplate, primitive_util::LowercasePrimitiveTypeName(data_type),
HloOpcodeString(opcode));
EXPECT_TRUE(RunAndCompare(hlo_test,
ErrorSpec{tolerance, tolerance}));
}
INSTANTIATE_TEST_SUITE_P(
ElementwiseTestSuiteF32, BinaryElementwiseTest,
::testing::Combine(
::testing::Values(F32),
::testing::ValuesIn({HloOpcode::kAdd, HloOpcode::kDivide,
HloOpcode::kMaximum, HloOpcode::kMinimum,
HloOpcode::kMultiply, HloOpcode::kPower,
HloOpcode::kSubtract}),
::testing::Values(3e-3)),
ElementwiseTestParamsToString);
class CompareTest : public CuDnnFusionExecutionTest,
public ::testing::WithParamInterface<
std::tuple<PrimitiveType, Comparison::Direction>> {};
std::string CompareTestParamsToString(
const ::testing::TestParamInfo<
std::tuple<PrimitiveType, Comparison::Direction>>& data) {
PrimitiveType data_type;
Comparison::Direction direction;
std::tie(data_type, direction) = data.param;
return absl::StrCat(primitive_util::LowercasePrimitiveTypeName(data_type),
"_", ComparisonDirectionToString(direction));
}
TEST_P(CompareTest, FusedComparisonExecutesCorrectly) {
PrimitiveType data_type;
Comparison::Direction direction;
std::tie(data_type, direction) = GetParam();
const std::string kHloTemplate = R"(
fusion_computation {
p0 = f32[32,32] parameter(0)
p1 = $0[32,32] parameter(1)
p2 = $0[32,32] parameter(2)
f1.1 = pred[32,32] compare(p1, p2), direction=$1
c.1 = f32[32,32] convert(f1.1)
ROOT _ = f32[32,32] dot(p0, c.1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,32] parameter(0)
p1 = $0[32,32] parameter(1)
p2 = $0[32,32] parameter(2)
ROOT r = f32[32,32] fusion(p0, p1, p2), kind=kCustom,
calls=fusion_computation,
backend_config={"fusion_backend_config":{"kind":"__cudnn$$fusion"}}
})";
const std::string hlo_test = absl::Substitute(
kHloTemplate, primitive_util::LowercasePrimitiveTypeName(data_type),
ComparisonDirectionToString(direction));
EXPECT_TRUE(RunAndCompare(hlo_test, ErrorSpec{1e-3, 1e-3}));
}
using cd = Comparison::Direction;
INSTANTIATE_TEST_SUITE_P(
CompareTestSuite, CompareTest,
::testing::Combine(::testing::Values(PRED, S8, S32, F16, F32),
::testing::Values(cd::kEq, cd::kNe, cd::kGe, cd::kGt,
cd::kLe, cd::kLt)),
CompareTestParamsToString);
class SelectTest : public CuDnnFusionExecutionTest,
public ::testing::WithParamInterface<PrimitiveType> {};
TEST_P(SelectTest, SelectFusionExecutesCorrectly) {
if (!IsAtLeastCuDnn91()) {
GTEST_SKIP() << "Select operation requires cuDNN 9.1+.";
}
const std::string kHloTemplate = R"(
fusion_computation {
p0 = f32[32,32] parameter(0)
p1 = $0[32,32] parameter(1)
p2 = $0[32,32] parameter(2)
p3 = pred[32,32] parameter(3)
s = $0[32,32] select(p3, p1, p2)
c = f32[32,32] convert(s)
ROOT r = f32[32,32] dot(p0, c),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,32] parameter(0)
p1 = $0[32,32] parameter(1)
p2 = $0[32,32] parameter(2)
p3 = pred[32,32] parameter(3)
ROOT r = f32[32,32] fusion(p0, p1, p2, p3), kind=kCustom,
calls=fusion_computation,
backend_config={"fusion_backend_config":{"kind":"__cudnn$$fusion"}}
})";
const std::string hlo_test = absl::Substitute(
kHloTemplate, primitive_util::LowercasePrimitiveTypeName(GetParam()));
EXPECT_TRUE(RunAndCompare(hlo_test, ErrorSpec{1e-4, 1e-4}));
}
constexpr std::array<PrimitiveType, 3> kSupportedDataTypes{F16, F32, BF16};
INSTANTIATE_TEST_SUITE_P(SelectTestSuite, SelectTest,
::testing::ValuesIn(kSupportedDataTypes));
class CuDnnFusionRewriteTest : public CuDnnFusionTest {
public:
DebugOptions GetDebugOptionsForTest() override {
DebugOptions debug_options = CuDnnFusionTest::GetDebugOptionsForTest();
debug_options.set_xla_gpu_autotune_level(
GetDebugOptionsFromFlags().xla_gpu_autotune_level());
debu | 2,130 |
#ifndef XLA_SERVICE_GPU_FUSIONS_TRANSPOSE_MLIR_H_
#define XLA_SERVICE_GPU_FUSIONS_TRANSPOSE_MLIR_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
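// Fusion emitter for tiled transposes: each block stages a tile of the
// transposed operand in shared memory and writes it back in permuted order.
// Non-transpose roots are emitted as side outputs; consumers of the transpose
// are emitted as an epilogue after the shared memory roundtrip.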
class MlirTransposeFusion : public MlirFusionEmitterBase {
public:
explicit MlirTransposeFusion(const HloFusionAnalysis& analysis);
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* mlir_context) const override;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* mlir_context) const override;
protected:
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const override;
std::vector<mlir_converter::EpilogueSpecification> GetEpilogues(
const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const override;
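// Result of the write phase: the output tensors with side outputs already
// inserted, plus the shared memory tensors holding the staged tiles.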
struct WriteResult {
mlir::SmallVector<mlir::Value> updated_outputs;
mlir::ValueRange shmem_tensors;
};
WriteResult EmitWriteToShMemMlir(
mlir::ImplicitLocOpBuilder& builder, mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion,
const mlir_converter::PartitionedComputation& root_computation,
const mlir_converter::CallTargetProvider& call_target_provider,
mlir::ValueRange output_args) const;
void EmitReadFromShMemMlir(
mlir::ImplicitLocOpBuilder& builder, mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion,
const mlir_converter::PartitionedComputations& computations,
const WriteResult& written) const;
private:
const HloFusionAnalysis& analysis_;
IndexingMap GetIndexing(bool input, const xla::Shape& shape,
mlir::MLIRContext* ctx) const;
IndexingMap GetSharedMemoryIndexing(bool read, mlir::MLIRContext* ctx) const;
llvm::SmallVector<mlir::AffineExpr, 4> GetThreadOffsets(
mlir::MLIRContext* ctx) const;
TransposeDescription transpose_;
Vector3 permutation_;
std::vector<int64_t> input_shape_;
std::vector<int64_t> block_sizes_;
std::vector<int64_t> block_counts_;
int vector_size_;
int block_size_;
std::vector<const HloInstruction*> shmem_transposes_;
std::vector<const HloInstruction*> shmem_transpose_roots_;
std::vector<int> shmem_transpose_root_indices_;
std::vector<const HloInstruction*> side_output_roots_;
std::vector<int> side_output_root_indices_;
};
}
}
#endif
#include "xla/service/gpu/fusions/transpose_mlir.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/fusions/mlir/type_util.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::AffineExpr;
using mlir::MLIRContext;
using mlir::RankedTensorType;
using mlir::Value;
using mlir::ValueRange;
using mlir::func::FuncOp;
using mlir::func::ReturnOp;
using mlir::tensor::ExtractOp;
using mlir::tensor::InsertOp;
using mlir_converter::ApplyIndexing;
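// Tiling parameters: shared memory tiles have an edge of kBaseBlockSize (one
// warp) elements, optionally widened by a vector factor; kNumThreadsPerBlock
// threads cover a tile as kNumRows rows of one warp each, and vectorized
// accesses move at most kMaxVectorizedBytes bytes per element group.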
constexpr int kNumRows = 4;
constexpr int kBaseBlockSize = WarpSize();
constexpr int kNumThreadsPerBlock = 128;
constexpr int kMaxVectorizedBytes = 4;
}
MlirTransposeFusion::MlirTransposeFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis),
transpose_(analysis.tiled_transpose()),
permutation_(transpose_.permutation),
input_shape_(Permute(transpose_.dimensions, permutation_)) {
ConstHloInstructionSet transposes_to_tile;
int index = 0;
int64_t shmem_usage = 0;
int max_element_bytes = 0;
for (auto [root, hero] :
llvm::zip(analysis_.fusion_roots(), analysis_.fusion_heroes())) {
if (auto transpose = GetDescriptionForTiledTransposeEmitter(
root.instruction(), hero.instruction())) {
transposes_to_tile.insert(&hero.instruction());
shmem_transpose_roots_.push_back(&root.instruction());
int size = primitive_util::ByteWidth(hero.shape().element_type());
max_element_bytes = std::max(max_element_bytes, size);
shmem_usage += kBaseBlockSize * (kBaseBlockSize + 1) * size;
shmem_transpose_root_indices_.push_back(index);
} else {
side_output_roots_.push_back(&root.instruction());
side_output_root_indices_.push_back(index);
}
++index;
}
shmem_transposes_ = {transposes_to_tile.begin(), transposes_to_tile.end()};
auto compute_block_sizes = [this](int vector_size) {
vector_size_ = vector_size;
block_size_ = kBaseBlockSize * vector_size_;
block_sizes_ = {1, 1, block_size_};
block_sizes_[permutation_[2]] = block_size_;
block_counts_ = {CeilOfRatio(input_shape_[0], block_sizes_[0]),
CeilOfRatio(input_shape_[1], block_sizes_[1]),
CeilOfRatio(input_shape_[2], block_sizes_[2])};
};
compute_block_sizes(1);
const auto& device = analysis_.device_info();
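// Try the largest vector size first and shrink it until there is enough work
// to keep the device busy, the padded tiles still fit in shared memory, and
// both minor dimensions are divisible by the vector size.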
for (int vec_size = kMaxVectorizedBytes / max_element_bytes; vec_size > 1;
vec_size /= 2) {
int elems_per_thread = vec_size * vec_size;
bool enough_work = Product(block_counts_) * kNumThreadsPerBlock >=
elems_per_thread * device.core_count() *
device.threads_per_core_limit();
bool enough_shmem =
shmem_usage * elems_per_thread <= device.shared_memory_per_block();
bool aligned_dims = (input_shape_[2] % vec_size == 0) &&
(input_shape_[permutation_[2]] % vec_size == 0);
if (enough_work && enough_shmem && aligned_dims) {
compute_block_sizes(vec_size);
break;
}
}
}
std::optional<IndexingMap> MlirTransposeFusion::ComputeThreadIdToOutputIndexing(
int64_t root_index, MLIRContext* mlir_context) const {
const auto& hero = analysis_.fusion_hero(root_index);
if (hero.opcode() != HloOpcode::kTranspose) {
auto map = ComposeIndexingMaps(
GetIndexing(true, hero.shape(), mlir_context),
GetBitcastMap(hero.shape(), analysis_.fusion_root(root_index).shape(),
mlir_context));
map.Simplify();
return map;
}
return GetIndexing(false, hero.shape(), mlir_context);
}
std::optional<IndexingMap> MlirTransposeFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
MLIRContext* mlir_context) const {
const auto& hero = analysis_.fusion_hero(root_index).instruction();
if (hero.opcode() != HloOpcode::kTranspose) {
auto map = ComposeIndexingMaps(
*ComputeThreadIdToOutputIndexing(root_index, mlir_context),
*ComputeOutputToInputIndexing(
&analysis_.fusion_root(root_index).instruction(), 0, mlir_context)
.indexing_maps[hero_operand_index]
.begin());
map.Simplify();
return map;
}
return GetIndexing(true, hero.operand(hero_operand_index)->shape(),
mlir_context);
}
LaunchDimensions MlirTransposeFusion::launch_dimensions() const {
return LaunchDimensions(Product(block_counts_), kNumThreadsPerBlock);
}
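// Maps (thread id, loop index, vector index) to a position inside the shared
// memory tile. Writes apply the transpose permutation to the thread offsets
// while reads use them unpermuted, so a tile read back comes out transposed.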
IndexingMap MlirTransposeFusion::GetSharedMemoryIndexing(
bool read, mlir::MLIRContext* ctx) const {
auto thread_offsets =
Permute(GetThreadOffsets(ctx), read ? Vector3{0, 1, 2} : permutation_);
return {mlir::AffineMap::get(6, 2, thread_offsets, ctx),
DimVarsFromTensorSizes({kNumThreadsPerBlock, 1, 1, 1, 1, 1}),
RangeVarsFromTensorSizes({block_size_ / kNumRows, vector_size_}),
{}};
}
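// Write phase: threads evaluate the fused producers for their elements of the
// input tile, store the results into padded shared memory tensors, and write
// any side outputs directly to the corresponding output tensors.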
MlirTransposeFusion::WriteResult MlirTransposeFusion::EmitWriteToShMemMlir(
mlir::ImplicitLocOpBuilder& builder, FuncOp entry_function,
const HloFusionInstruction& fusion,
const mlir_converter::PartitionedComputation& root_computation,
const mlir_converter::CallTargetProvider& call_target_provider,
ValueRange output_args) const {
MLIRContext* ctx = builder.getContext();
auto shmem_tensor_size = block_sizes_;
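// Pad the minor dimension by one element to avoid shared memory bank
// conflicts when the tile is read back in transposed order.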
++shmem_tensor_size.back();
SmallVector<Value> inits;
for (auto* transpose : shmem_transposes_) {
auto elem_type = mlir_converter::PrimitiveTypeToMlirType(
transpose->shape().element_type(), builder);
inits.push_back(builder.create<AllocateSharedOp>(
RankedTensorType::get(shmem_tensor_size, elem_type)));
}
int num_inputs = fusion.fused_instructions_computation()->num_parameters();
for (int index : side_output_root_indices_) {
inits.push_back(entry_function.getArgument(num_inputs + index));
}
IndexingMap write_indexing = GetSharedMemoryIndexing(false, ctx);
auto body_builder = [&](ValueRange output_tensors, ValueRange dim_values,
ValueRange symbol_values) -> SmallVector<Value> {
auto input_indices = [&](const HloInstruction* instr) {
return ApplyIndexing(GetIndexing(true, instr->shape(), ctx),
dim_values, symbol_values, builder);
};
SmallVector<Value> result_tensors;
auto shmem_indices =
ApplyIndexing(write_indexing, dim_values, symbol_values, builder);
for (auto [transpose, output] :
llvm::zip(shmem_transposes_, output_tensors)) {
auto result_scalar = mlir_converter::ProvideParameter(
root_computation, transpose,
0, input_indices(transpose->operand(0)),
call_target_provider, entry_function, builder)[0];
result_tensors.push_back(
builder.create<InsertOp>(result_scalar, output, shmem_indices));
}
SmallVector<Value> side_outputs;
SmallVector<SmallVector<Value>> side_output_indices;
auto* root_tuple = fusion.fused_expression_root();
for (auto root : side_output_roots_) {
side_output_indices.push_back(input_indices(root));
side_outputs.append(mlir_converter::ProvideParameter(
root_computation, root_tuple, root_tuple->operand_index(root),
side_output_indices.back(), call_target_provider, entry_function,
builder));
}
for (const auto& [value, indices, output] :
llvm::zip(side_outputs, side_output_indices,
output_tensors.take_back(side_output_roots_.size()))) {
result_tensors.push_back(
builder.create<InsertOp>(value, output, indices));
}
return result_tensors;
};
auto indexing = GetIndexing(
true, shmem_transposes_.front()->operand(0)->shape(), ctx);
auto written_vector =
EmitThreadLoopNest(builder, inits, indexing, body_builder);
ValueRange written = written_vector;
auto shmem_tensors = written.take_front(shmem_transposes_.size());
WriteResult result;
result.shmem_tensors =
builder
.create<SyncThreadsOp>(mlir::TypeRange(shmem_tensors), shmem_tensors)
.getResults();
result.updated_outputs = output_args;
for (auto [index, side_output_result] :
llvm::zip(side_output_root_indices_,
written.take_back(side_output_roots_.size()))) {
result.updated_outputs[index] = side_output_result;
}
return result;
}
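// Read phase: after the thread barrier, threads read the tiles back from
// shared memory in transposed order, run the epilogue, and insert the results
// into the transpose outputs.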
void MlirTransposeFusion::EmitReadFromShMemMlir(
mlir::ImplicitLocOpBuilder& builder, FuncOp entry_function,
const HloFusionInstruction& fusion,
const mlir_converter::PartitionedComputations& computations,
const WriteResult& written) const {
auto* mlir_context = builder.getContext();
auto output_indexing = *ComputeThreadIdToOutputIndexing(
shmem_transpose_root_indices_[0], mlir_context);
auto shmem_read_indexing =
GetSharedMemoryIndexing(true, mlir_context);
auto result_tensors = EmitThreadLoopNest(
builder, written.updated_outputs, output_indexing,
[&](ValueRange output_tensors, ValueRange dim_values,
ValueRange symbol_values) -> SmallVector<Value> {
auto shmem_indices = ApplyIndexing(shmem_read_indexing, dim_values,
symbol_values, builder);
absl::flat_hash_map<const HloInstruction*, llvm::SmallVector<Value>>
transpose_values;
for (auto [transpose, shmem] :
llvm::zip(shmem_transposes_, written.shmem_tensors)) {
transpose_values[transpose].push_back(
builder.create<ExtractOp>(shmem, shmem_indices));
}
llvm::SmallVector<Value> epilogue_indices = dim_values;
absl::c_copy(symbol_values, std::back_inserter(epilogue_indices));
auto result_scalars =
EmitEpilogue(0, computations, entry_function,
transpose_values, epilogue_indices, builder);
SmallVector<Value> results = output_tensors;
for (auto [root, indexing, root_index] :
llvm::zip(shmem_transpose_roots_,
computations.epilogues().front().root_indexing,
shmem_transpose_root_indices_)) {
llvm::SmallVector<Value> indices =
ApplyIndexing(indexing, dim_values, symbol_values, builder);
results[root_index] = builder.create<InsertOp>(
result_scalars.at(root).front(), results[root_index], indices);
}
return results;
});
builder.create<ReturnOp>(result_tensors);
}
std::vector<mlir_converter::EpilogueSpecification>
MlirTransposeFusion::GetEpilogues(const HloFusionInstruction& fusion,
MLIRContext* mlir_context) const {
std::vector<mlir_converter::EpilogueSpecification> epilogues{
mlir_converter::EpilogueSpecification::FromOutputIndexing(
analysis_, shmem_transposes_, shmem_transpose_roots_, *this,
mlir_context)};
for (const auto* root : side_output_roots_) {
epilogues.push_back(
mlir_converter::EpilogueSpecification::FromIdentityIndexing(
root, root, mlir_context));
}
return epilogues;
}
absl::Status MlirTransposeFusion::EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
const auto& root_computation = computations.FindPartitionedComputation(
fusion.fused_instructions_computation());
mlir::ImplicitLocOpBuilder builder(entry_function.getLoc(), entry_function);
builder.setInsertionPointToStart(entry_function.addEntryBlock());
auto written = EmitWriteToShMemMlir(
builder, entry_function, fusion, root_computation, call_targets,
entry_function.getArguments().take_back(analysis_.fusion_roots().size()));
EmitReadFromShMemMlir(builder, entry_function, fusion, computations, written);
return absl::OkStatus();
}
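// Delinearizes the per-thread element index (built from thread id, loop
// iteration and vector lane) into offsets within the block tile.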
llvm::SmallVector<mlir::AffineExpr, 4> MlirTransposeFusion::GetThreadOffsets(
mlir::MLIRContext* ctx) const {
auto thread = mlir::getAffineDimExpr(
KernelFusionInterface::kIndexingMapThreadIdxDims[0], ctx);
auto loop = mlir::getAffineSymbolExpr(0, ctx);
auto vector = mlir::getAffineSymbolExpr(1, ctx);
int loop_stride = block_size_ * kNumRows;
auto linear_index = loop * loop_stride + thread * vector_size_ + vector;
return DelinearizeInBoundsIndex(linear_index, block_sizes_);
}
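// Builds the thread-to-tensor indexing: block ids pick the tile, thread
// offsets pick the element within it, and a final bitcast map converts from
// the normalized transpose shape to the requested shape.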
IndexingMap MlirTransposeFusion::GetIndexing(bool input,
const xla::Shape& shape,
mlir::MLIRContext* ctx) const {
auto raw_id = mlir::getAffineDimExpr(
KernelFusionInterface::kIndexingMapBlockIdxDims[0], ctx);
auto block_ids = Permute(DelinearizeInBoundsIndex(raw_id, block_counts_),
input ? Vector3{0, 1, 2} : permutation_);
auto thread_offsets = GetThreadOffsets(ctx);
llvm::SmallVector<AffineExpr, 3> offsets;
for (auto [block_id, block_size, thread] :
llvm::zip(block_ids, block_sizes_, thread_offsets)) {
offsets.push_back(block_id * block_size + thread);
}
IndexingMap result{
mlir::AffineMap::get(6, 2, offsets, ctx),
DimVarsFromTensorSizes(
{kNumThreadsPerBlock, 1, 1, Product(block_counts_), 1, 1}),
RangeVarsFromTensorSizes({block_size_ / kNumRows, vector_size_}),
{}};
auto normalized_shape =
input ? ShapeUtil::MakeShape(shape.element_type(), input_shape_)
: ShapeUtil::MakeShape(shape.element_type(), transpose_.dimensions);
for (auto [size, dim] : llvm::zip(normalized_shape.dimensions(),
result.GetAffineMap().getResults())) {
result.AddConstraint(dim, {0, size - 1});
}
result =
ComposeIndexingMaps(result, GetBitcastMap(normalized_shape, shape, ctx));
result.Simplify();
return result;
}
}
} | #include "xla/service/gpu/fusions/transpose_mlir.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/error_spec.h"
#include "xla/service/gpu/fusions/mlir_emitter_test_base.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using MlirTransposeFusionTest = MlirEmitterTestBase<MlirTransposeFusion>;
TEST_F(MlirTransposeFusionTest, ThreadIndexing021) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fusion {
%input = f32[100,32,64] parameter(0)
ROOT transpose = f32[100,64,32] transpose(%input), dimensions={0,2,1}
}
ENTRY entry {
%input = f32[100,32,64] parameter(0)
ROOT %fusion = f32[100,64,32] fusion(%input), kind=kInput, calls=fusion
}
)"));
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirTransposeFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d3 floordiv 2,
d0 floordiv 32 + s0 * 4,
(d3 mod 2) * 32 + d0 mod 32
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 200)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 8)
s1 in [0, 1)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d3 floordiv 2,
(d3 mod 2) * 32 + s0 * 4 + d0 floordiv 32,
d0 mod 32
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 200)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 8)
s1 in [0, 1)
)"));
}
TEST_F(MlirTransposeFusionTest, ThreadIndexing201) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fusion {
%input = f32[100,64,32] parameter(0)
ROOT transpose = f32[32,100,64] transpose(%input), dimensions={2,0,1}
}
ENTRY entry {
%input = f32[100,64,32] parameter(0)
ROOT %fusion = f32[32,100,64] fusion(%input), kind=kInput, calls=fusion
})"));
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirTransposeFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d3 floordiv 2,
(d3 * 32 + s0 * 4) mod 64 + d0 floordiv 32,
d0 mod 32
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 200)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 8)
s1 in [0, 1)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d0 floordiv 32 + s0 * 4,
d3 floordiv 2,
(d3 mod 2) * 32 + d0 mod 32
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 200)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 8)
s1 in [0, 1)
)"));
}
TEST_F(MlirTransposeFusionTest, ThreadIndexingVectorized021) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fusion {
%input = f16[8192,64,64] parameter(0)
ROOT transpose = f16[8192,64,64] transpose(%input), dimensions={0,2,1}
}
ENTRY entry {
%input = f16[8192,64,64] parameter(0)
ROOT %fusion = f16[8192,64,64] fusion(%input), kind=kInput, calls=fusion
}
)"));
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirTransposeFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d3,
d0 floordiv 32 + s0 * 4,
(d0 mod 32) * 2 + s1
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 8192)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 16)
s1 in [0, 2)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d3,
d0 floordiv 32 + s0 * 4,
(d0 mod 32) * 2 + s1
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 8192)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 16)
s1 in [0, 2)
)"));
}
TEST_F(MlirTransposeFusionTest, ThreadIndexingVectorized210) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fusion {
%input = f16[64,64,8192] parameter(0)
ROOT transpose = f16[8192,64,64] transpose(%input), dimensions={2,1,0}
}
ENTRY entry {
%input = f16[64,64,8192] parameter(0)
ROOT %fusion = f16[8192,64,64] fusion(%input), kind=kInput, calls=fusion
})"));
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirTransposeFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d0 floordiv 32 + s0 * 4,
d3 floordiv 128,
(d0 mod 32) * 2 + (d3 mod 128) * 64 + s1
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 8192)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 16)
s1 in [0, 2)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
(d3 mod 128) * 64 + s0 * 4 + d0 floordiv 32,
d3 floordiv 128,
(d0 mod 32) * 2 + s1
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 8192)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 16)
s1 in [0, 2)
)"));
}
TEST_F(MlirTransposeFusionTest, FusedTranspose021) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%p0 = f32[20,160,170] parameter(0)
%exp = f32[20,160,170] exponential(%p0)
%transpose = f32[20,170,160] transpose(%exp), dimensions={0,2,1}
ROOT %abs = f32[20,170,160] abs(%transpose)
}
ENTRY main {
%param = f32[20,160,170] parameter(0)
ROOT %fusion = f32[20,170,160] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, FusedTranspose210) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%p0 = f32[20,160,170] parameter(0)
%exp = f32[20,160,170] exponential(%p0)
%transpose = f32[170,160,20] transpose(%exp), dimensions={2,1,0}
ROOT %abs = f32[170,160,20] abs(%transpose)
}
ENTRY main {
%param = f32[20,160,170] parameter(0)
ROOT %fusion = f32[170,160,20] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, Transpose021_Parameter) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%p0 = f32[20,160,170] parameter(0)
%transpose = f32[20,170,160] transpose(%p0), dimensions={0,2,1}
ROOT %abs = f32[20,170,160] abs(%transpose)
}
ENTRY main {
%param = f32[20,160,170] parameter(0)
ROOT %fusion = f32[20,170,160] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, Transpose021_NoEpilogue) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%p0 = f32[20,160,170] parameter(0)
ROOT %transpose = f32[20,170,160] transpose(%p0), dimensions={0,2,1}
}
ENTRY main {
%param = f32[20,160,170] parameter(0)
ROOT %fusion = f32[20,170,160] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, R"(
)"));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, Transpose_4D) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%param_0 = f64[2,24,6,4] parameter(0)
ROOT %transpose= f64[6,4,2,24] transpose(f64[2,24,6,4] %param_0),
dimensions={2,3,0,1}
}
ENTRY main {
%param = f64[2,24,6,4] parameter(0)
ROOT %fusion = f64[6,4,2,24] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, Transpose_2D) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%param_0 = f64[64, 64] parameter(0)
ROOT %transpose= f64[64,64] transpose( %param_0),
dimensions={1,0}
}
ENTRY main {
%param = f64[64,64] parameter(0)
ROOT %fusion = f64[64,64] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, Transpose_2D_2) {
auto kHloString = R"(
HloModule m
%fused_computation {
%p0 = f32[17,2820]{0,1} parameter(0)
%p1 = f32[30,17,94] parameter(1)
%bitcast0 = f32[2,3,5,17,94] bitcast(f32[30,17,94] %p1)
%transpose = f32[2,3,5,94,17] transpose(f32[2,3,5,17,94] %bitcast0), dimensions={0,1,2,4,3}
%bitcast1 = f32[2820,17]{1,0} bitcast(f32[2,3,5,94,17] %transpose)
%bitcast2 = f32[2820,17]{1,0} bitcast(f32[17,2820]{0,1} %p0)
%neg = f32[2820,17]{1,0} negate(f32[2820,17] %bitcast2)
ROOT %add = f32[2820,17]{1,0} add(f32[2820,17] %bitcast1, f32[2820,17]{1,0} %neg)
}
ENTRY main {
%p1 = f32[30,17,94]{2,1,0} parameter(1)
%p0 = f32[17,2820]{0,1} parameter(0)
ROOT %fusion = f32[2820,17]{1,0} fusion(%p0, %p1), kind=kInput, calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, MultipleRootsForTranspose) {
auto kHloString = R"(
HloModule m
%fused_computation {
%iota.0 = s32[200,200] iota(), iota_dimension=1
%iota.1 = s32[200,200] iota(), iota_dimension=0
%compare = pred[200,200] compare(%iota.0, %iota.1), direction=GE
%transpose = pred[200,200] transpose(%compare), dimensions={1,0}
%copy = pred[200,200] copy(%transpose)
%copy.1 = pred[200,200] copy(%transpose)
ROOT %tuple = (pred[200,200], pred[200,200], pred[200,200]{1,0})
tuple(%transpose, %copy, %copy.1)
}
ENTRY main {
ROOT %fusion =
(pred[200,200]{1,0}, pred[200,200]{1,0}, pred[200,200]{1,0})
fusion(), kind=kInput, calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, PartialTile) {
auto kHloString = R"(
HloModule m
fused_computation {
%p0 = f64[24,2,6,4] parameter(0)
ROOT %t = f64[6,4,2,24] transpose(%p0), dimensions={2,3,1,0}
}
ENTRY main {
%p0 = f64[24,2,6,4] parameter(0)
ROOT %fusion = f64[6,4,2,24] fusion(%p0), kind=kInput, calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, MixedIndexing) {
auto kHloString = R"(
HloModule m
fused_computation {
%p0 = f64[24,2,6,4] parameter(0)
%bc = f64[24,2,24] bitcast(%p0)
%t1 = f64[6,4,2,24] transpose(%p0), dimensions={2,3,1,0}
%t2 = f64[24,2,24] transpose(%bc), dimensions={2,1,0}
%p1 = f64[] parameter(1)
%bc1 = f64[6,4,2,24] broadcast(%p1), dimensions={}
%bc2 = f64[24,2,24] broadcast(%p1), dimensions={}
%a1 = f64[6,4,2,24] add(%t1, %bc1)
%a2 = f64[24,2,24] add(%t2, %bc2)
ROOT %t = (f64[6,4,2,24], f64[24,2,24]) tuple(%a1, %a2)
}
ENTRY main {
%p0 = f64[24,2,6,4] parameter(0)
%p1 = f64[] parameter(1)
ROOT %fusion = (f64[6,4,2,24], f64[24,2,24]) fusion(%p0, %p1),
kind=kInput, calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, SideOutputs) {
auto kHloString = R"(
HloModule m
fused_computation {
%p0 = f64[24,2,36] parameter(0)
%p1 = f64[36,2,24] parameter(1)
%tr = f64[36,2,24] transpose(%p0), dimensions={2,1,0}
%neg = f64[36,2,24] negate(%p1)
%log = f64[24,2,36] log(%p0)
ROOT %t = (f64[36,2,24], f64[36,2,24], f64[24,2,36])
tuple(%neg, %tr, %log)
}
ENTRY main {
%p0 = f64[24,2,36] parameter(0)
%p1 = f64[36,2,24] parameter(1)
ROOT %fusion = (f64[36,2,24], f64[36,2,24], f64[24,2,36])
fusion(%p0, %p1), kind=kInput, calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
EXPECT_TRUE(RunAndCompareNoHloPasses(kHloString, ErrorSpec{1e-3}));
}
TEST_F(MlirTransposeFusionTest, SameInputIndexingForRealHeroAndSideOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fusion {
%input = f32[100,32,64] parameter(0)
%transpose = f32[100,64,32] transpose(%input), dimensions={0,2,1}
%bitcast = f32[100,2048] bitcast(%input)
ROOT %tuple = (f32[100,64,32], f32[100,2048]) tuple(%transpose, %bitcast)
}
ENTRY entry {
%input = f32[100,32,64] parameter(0)
ROOT %fusion = (f32[100,64,32], f32[100,2048]) fusion(%input), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirTransposeFusion fusion(analysis);
mlir::MLIRContext mlir_context;
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context)->ToString(),
fusion.ComputeThreadIdToInputIndexing(1, 0, &mlir_context)->ToString());
}
TEST_F(MlirTransposeFusionTest, ThreadIndexingSideOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fusion {
%input0 = f32[100,32,64] parameter(0)
%input1 = f32[100,32] parameter(1)
%transpose = f32[100,64,32] transpose(%input0), dimensions={0,2,1}
%broadcast = f32[100,32,64] broadcast(%input1), dimensions={0,1}
ROOT %tuple = (f32[100,64,32], f32[100,32,64]) tuple(%transpose, %broadcast)
}
ENTRY entry {
%input0 = f32[100,32,64] parameter(0)
%input1 = f32[100,32] parameter(1)
ROOT %fusion = (f32[100,64,32], f32[100,32,64]) fusion(%input0, %input1), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
MlirTransposeFusion fusion(analysis);
mlir::MLIRContext mlir_context;
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(1, 0, &mlir_context)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d3 floordiv 2,
d0 floordiv 32 + s0 * 4
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 200)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 8)
s1 in [0, 1)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(1, &mlir_context)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1] -> (
d3 floordiv 2,
d0 floordiv 32 + s0 * 4,
(d3 mod 2) * 32 + d0 mod 32
)
domain:
d0 in [0, 128)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 200)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 8)
s1 in [0, 1)
)"));
}
TEST_F(MlirTransposeFusionTest, VectorizedTranspose021) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%p0 = bf16[256,128,128] parameter(0)
%transpose = bf16[256,128,128] transpose(%p0), dimensions={0,2,1}
}
ENTRY main {
%param = bf16[256,128,128] parameter(0)
ROOT %fusion = bf16[256,128,128] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
}
TEST_F(MlirTransposeFusionTest, VectorizedTranspose210) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%p0 = bf16[256,128,128] parameter(0)
%transpose = bf16[128,128,256] transpose(%p0), dimensions={2,1,0}
}
ENTRY main {
%param = bf16[256,128,128] parameter(0)
ROOT %fusion = bf16[128,128,256] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
}
TEST_F(MlirTransposeFusionTest, PreferLargeVectorSize021) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%p0 = u8[256,256,256] parameter(0)
%transpose = u8[256,256,256] transpose(%p0), dimensions={0,2,1}
}
ENTRY main {
%param = u8[256,256,256] parameter(0)
ROOT %fusion = u8[256,256,256] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
}
TEST_F(MlirTransposeFusionTest, PreferLargeVectorSize210) {
auto kHloString = R"(
HloModule Transpose
%fused_computation {
%p0 = u8[256,256,256] parameter(0)
%transpose = u8[256,256,256] transpose(%p0), dimensions={2,1,0}
}
ENTRY main {
%param = u8[256,256,256] parameter(0)
ROOT %fusion = u8[256,256,256] fusion(%param), kind=kInput,
calls=%fused_computation
}
)";
TF_EXPECT_OK(EmitAndCheckIR(kHloString, ""));
}
}
}
} | 2,131 |
#ifndef XLA_SERVICE_GPU_FUSIONS_LOOP_H_
#define XLA_SERVICE_GPU_FUSIONS_LOOP_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "llvm/IR/IRBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/ir_array.h"
namespace xla {
namespace gpu {
class LoopFusion : public KernelFusionEmitterBase {
public:
explicit LoopFusion(const HloFusionAnalysis& analysis);
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override;
protected:
absl::Status EmitKernel(IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs,
llvm::IRBuilder<>* builder) const override;
private:
const HloFusionAnalysis& analysis_;
LaunchDimensionsConfig config_;
};
LaunchDimensionsConfig ComputeLoopFusionConfig(
const HloFusionAnalysis& analysis);
}
}
#endif
#include "xla/service/gpu/fusions/loop.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/log/log.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Type.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/gpu/elemental_ir_emitter.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/parallel_loop_emitter.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "tsl/platform/macros.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
const Shape& GetElementShape(const HloFusionAnalysis& analysis) {
const Shape* shape = &analysis.fusion_root(0).shape();
while (shape->IsTuple()) {
shape = &shape->tuple_shapes(0);
}
return *shape;
}
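// Returns the largest power-of-two unroll factor (at most 4) that divides the
// element count evenly, so the unrolled loop needs no remainder handling.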
int ComputeMaxUnrollFactor(int64_t num_elements) {
constexpr int kMaxUnrollFactor = 4;
for (int i = kMaxUnrollFactor; i > 1; i /= 2) {
if (num_elements % i == 0) {
return i;
}
}
return 1;
}
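// Decides whether row vectorization can be used for this fusion. It requires
// a single non-tuple, row-major root; every traversed op must be elementwise,
// a parameter, a constant, or a broadcast of a scalar or of the minor-most
// dimension; all fusion arguments must be row-major; and at least one such
// row broadcast must be present. Also counts how many arguments have the same
// rank as the output ("big" inputs).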
std::pair<bool /*enabled*/, int /*num_big_inputs*/> RowVectorizationEnabled(
const HloFusionAdaptor& fusion, int64_t out_rank) {
auto roots = fusion.GetRoots();
const auto is_row_major = [](auto instr) {
return LayoutUtil::IsMonotonicWithDim0Major(instr.shape().layout());
};
bool row_vectorized = roots.size() == 1 && !roots[0].shape().IsTuple() &&
is_row_major(roots[0]);
if (!row_vectorized) {
return {false, 0};
}
int num_big_inputs = 0;
bool some_row_broadcasting = false;
HloBfsConsumersFirstTraversal(
roots, fusion,
[&](auto node) -> TraversalResult {
if (!row_vectorized) {
return TraversalResult::kInterrupt;
}
if (node.instruction().IsElementwise()) {
return TraversalResult::kAdvance;
}
switch (node.opcode()) {
case HloOpcode::kConstant:
return TraversalResult::kSkip;
case HloOpcode::kParameter:
return TraversalResult::kAdvance;
case HloOpcode::kBroadcast: {
auto dims = node.instruction().dimensions();
if (dims.empty()) {
return TraversalResult::kAdvance;
}
if (dims.size() == 1 && dims.front() == node.shape().rank() - 1) {
some_row_broadcasting = true;
return TraversalResult::kAdvance;
}
TF_FALLTHROUGH_INTENDED;
}
default:
VLOG(2) << "Row vectorization not enabled due to: "
<< node.ToString();
row_vectorized = false;
return TraversalResult::kInterrupt;
}
},
[&](auto argument) {
if (argument.shape().rank() == out_rank) {
++num_big_inputs;
}
if (!is_row_major(argument)) {
row_vectorized = false;
}
});
return std::make_pair(row_vectorized && some_row_broadcasting,
num_big_inputs);
}
}
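// Heuristically picks the launch configuration: unroll when there are enough
// elements to occupy all cores and nothing in the fusion prevents
// vectorization, bump the unroll factor for sub-byte output types, and enable
// the few-waves / row-vectorized code paths when the checks below allow it.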
LaunchDimensionsConfig ComputeLoopFusionConfig(
const HloFusionAnalysis& analysis) {
int unroll_factor = 1;
const auto& element_shape = GetElementShape(analysis);
int64_t num_elements = ShapeUtil::ElementsIn(element_shape);
int64_t n_threads_max = analysis.device_info().threads_per_core_limit() *
analysis.device_info().core_count();
if (num_elements >= n_threads_max &&
!MayPreventVectorization(analysis.fusion())) {
unroll_factor = ComputeMaxUnrollFactor(num_elements);
}
CHECK(absl::has_single_bit(static_cast<uint64_t>(unroll_factor)));
unroll_factor = std::max(
unroll_factor,
CeilOfRatio(8, analysis.input_output_info().smallest_output_dtype_bits));
CHECK(absl::has_single_bit(static_cast<uint64_t>(unroll_factor)));
VLOG(2) << "Unroll factor: " << unroll_factor;
bool row_vectorized;
int num_big_inputs;
std::tie(row_vectorized, num_big_inputs) =
RowVectorizationEnabled(analysis.fusion(), element_shape.rank());
bool few_waves = !HloAnyOf(
analysis.fusion().GetRoots(), analysis.fusion(), [&](auto instr) {
if (instr.opcode() == HloOpcode::kParameter ||
instr.opcode() == HloOpcode::kConstant ||
HloInstruction::IsOpElementwise(instr.opcode())) {
return false;
}
if (auto broadcast =
DynCast<HloBroadcastInstruction>(&instr.instruction())) {
if (broadcast->dimensions().empty() ||
(row_vectorized && num_big_inputs <= 3)) {
return false;
}
}
VLOG(2) << "few_waves not enabled due to: "
<< instr.instruction().ToString();
return true;
});
LaunchDimensionsConfig launch_config{unroll_factor, few_waves,
row_vectorized};
if (launch_config.row_vectorized &&
ThreadsPerBlockRowVectorized(element_shape, analysis.device_info(),
launch_config) <= 0) {
VLOG(2) << "Cancelling row_vectorization as the shape isn't supported.";
launch_config.row_vectorized = false;
launch_config.few_waves = false;
}
return launch_config;
}
LoopFusion::LoopFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis), config_(ComputeLoopFusionConfig(analysis)) {}
std::optional<IndexingMap> LoopFusion::ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const {
auto launch_dims = launch_dimensions();
return GetDefaultThreadIdIndexingMap(launch_dims, config_.unroll_factor,
GetElementShape(analysis_), ctx);
}
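// The thread->input map is the composition of the thread->output map with the
// root's output->input indexing, simplified.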
std::optional<IndexingMap> LoopFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const {
std::optional<IndexingMap> thread_id_to_output_indexing =
ComputeThreadIdToOutputIndexing(root_index, ctx);
if (!thread_id_to_output_indexing.has_value()) {
return std::nullopt;
}
const HloInstruction* fusion_root =
&analysis_.fusion_root(root_index).instruction();
auto output_to_input_indexing =
ComputeOutputToInputIndexing(fusion_root, 0, ctx);
IndexingMapSet output_to_input_indexing_set =
output_to_input_indexing.indexing_maps[hero_operand_index];
CHECK_EQ(output_to_input_indexing_set.size(), 1);
IndexingMap thread_id_to_input_indexing_map = ComposeIndexingMaps(
*thread_id_to_output_indexing, *output_to_input_indexing_set.begin());
thread_id_to_input_indexing_map.Simplify();
return thread_id_to_input_indexing_map;
}
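// Binds every fused parameter to a read of the corresponding kernel argument
// and emits one parallel loop over the root element generator.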
absl::Status LoopFusion::EmitKernel(IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs,
llvm::IRBuilder<>* builder) const {
GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder);
FusedIrEmitter fused_emitter(elemental_emitter);
for (int i = 0; i < fusion.fused_parameters().size(); i++) {
fused_emitter.BindGenerator(
*fusion.fused_parameter(i), [&, i](llvm_ir::IrArray::Index index) {
return inputs[i].EmitReadArrayElement(index, builder);
});
}
TF_ASSIGN_OR_RETURN(
auto element_generator,
fused_emitter.GetGenerator(*fusion.fused_expression_root()));
llvm::Type* index_type =
GetIndexTypeForKernel(&fusion, launch_dims.launch_bound(), builder);
return ParallelLoopEmitter(element_generator, outputs, launch_dims, builder,
config_)
.EmitLoop(fusion.name(), index_type);
}
LaunchDimensions LoopFusion::launch_dimensions() const {
return CalculateLaunchDimensions(GetElementShape(analysis_),
analysis_.device_info(), config_);
}
}
} | #include <memory>
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class LoopTest : public HloTestBase {
public:
void SetUp() override {
HloTestBase::SetUp();
printer_ =
AffineMapPrinter({"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"},
{"chunk_id", "unroll_id"});
}
protected:
stream_executor::DeviceDescription device_info_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
AffineMapPrinter printer_;
mlir::MLIRContext mlir_context_;
};
absl::StatusOr<std::unique_ptr<KernelFusionInterface>> GetFusion(
const HloFusionAnalysis& analysis) {
auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis});
auto fusion = dynamic_cast<KernelFusionInterface*>(emitter.get());
TF_RET_CHECK(fusion != nullptr);
emitter.release();
return std::unique_ptr<KernelFusionInterface>{fusion};
}
TEST_F(LoopTest, ThreadIndexingUnrolled) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
neg {
%input = f32[100,200,300] parameter(0)
ROOT neg = f32[100,200,300] negate(%input)
}
ENTRY entry {
%input = f32[100,200,300] parameter(0)
ROOT %fusion = f32[100,200,300] fusion(%input), kind=kLoop, calls=neg
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
TF_ASSERT_OK_AND_ASSIGN(auto loop_fusion, GetFusion(analysis));
auto thread_id_to_output_indexing =
loop_fusion->ComputeThreadIdToOutputIndexing(0,
&mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing->ToString(printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
((bl_x * 128 + chunk_id * 129024 + th_x) floordiv 15000) mod 100,
((bl_x * 128 + chunk_id * 129024 + th_x) floordiv 75) mod 200,
(th_x * 4 + bl_x * 512 + chunk_id * 516096) mod 300 + unroll_id
)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1008)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 12)
unroll_id in [0, 4)
(th_x + bl_x * 128) * 4 + chunk_id * 516096 in [0, 5999997)
)"));
}
TEST_F(LoopTest, ThreadIndexingNotUnrolled) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
neg {
%input = f32[20] parameter(0)
ROOT neg = f32[20] negate(%input)
}
ENTRY entry {
%input = f32[20] parameter(0)
ROOT %fusion = f32[20] fusion(%input), kind=kLoop, calls=neg
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
TF_ASSERT_OK_AND_ASSIGN(auto loop_fusion, GetFusion(analysis));
auto thread_id_to_output_indexing =
loop_fusion->ComputeThreadIdToOutputIndexing(0,
&mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing->ToString(printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (th_x)
domain:
th_x in [0, 20)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
)"));
auto thread_id_to_input_indexing =
loop_fusion->ComputeThreadIdToInputIndexing(
0, 0, &mlir_context_);
EXPECT_THAT(thread_id_to_input_indexing->ToString(printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (th_x)
domain:
th_x in [0, 20)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
)"));
}
TEST_F(LoopTest, Broadcast) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
bcast {
%input = f32[20] parameter(0)
ROOT bcast = f32[10, 20, 30] broadcast(%input), dimensions={1}
}
ENTRY entry {
%input = f32[20] parameter(0)
ROOT %fusion = f32[10, 20, 30] fusion(%input), kind=kLoop, calls=bcast
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
TF_ASSERT_OK_AND_ASSIGN(auto loop_fusion, GetFusion(analysis));
auto thread_id_to_output_indexing =
loop_fusion->ComputeThreadIdToOutputIndexing(0,
&mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing->ToString(printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
((bl_x * 128 + th_x) floordiv 600) mod 10,
((bl_x * 128 + th_x) floordiv 30) mod 20,
(bl_x * 128 + th_x) mod 30)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 47)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
th_x + bl_x * 128 in [0, 6000)
)"));
auto thread_id_to_input_indexing =
loop_fusion->ComputeThreadIdToInputIndexing(
0, 0, &mlir_context_);
EXPECT_THAT(thread_id_to_input_indexing->ToString(printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] ->
(((bl_x * 128 + th_x) floordiv 30) mod 20)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 47)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
th_x + bl_x * 128 in [0, 6000)
)"));
}
}
}
} | 2,132 |
#ifndef XLA_SERVICE_GPU_FUSIONS_INPUT_SLICES_H_
#define XLA_SERVICE_GPU_FUSIONS_INPUT_SLICES_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "llvm/IR/IRBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
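// Emitter for fusions whose roots are slices of a common input shape: the
// kernel iterates over that input once and each root writes only the elements
// that fall inside its slice bounds. The unroll factor compensates for
// sub-byte output element types.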
class InputSlicesFusion : public KernelFusionEmitterBase {
public:
explicit InputSlicesFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis),
unroll_factor_(CeilOfRatio(
8, analysis.input_output_info().smallest_output_dtype_bits)) {}
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t output_id, mlir::MLIRContext* ctx) const override;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override {
return std::nullopt;
}
protected:
absl::Status EmitKernel(IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs,
llvm::IRBuilder<>* builder) const override;
private:
const HloFusionAnalysis& analysis_;
const int unroll_factor_;
};
}
}
#endif
#include "xla/service/gpu/fusions/input_slices.h"
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Value.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/elemental_ir_emitter.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/parallel_loop_emitter.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/kernel_support_library.h"
#include "xla/service/llvm_ir/llvm_loop.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
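// Emits the per-element body: for each root slice, guard on whether the
// current input index lies inside the slice bounds and, if so, write the
// generated value at the index shifted by the slice start offsets.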
absl::Status EmitElementForInputFusibleSlices(
ElementalIrEmitter& elemental_emitter,
const HloComputation* fused_computation,
const std::vector<llvm_ir::IrArray>& inputs,
const std::vector<llvm_ir::IrArray>& outputs,
const llvm_ir::IrArray::Index& index, llvm::IRBuilder<>* builder) {
VLOG(10) << "Emitting slice input fusion for "
<< fused_computation->ToString();
HloInstruction* slice_or_tuple = fused_computation->root_instruction();
auto slice_instructions = [&]() -> absl::Span<HloInstruction* const> {
if (slice_or_tuple->opcode() == HloOpcode::kSlice) {
return absl::Span<HloInstruction* const>(&slice_or_tuple, 1);
}
CHECK_EQ(slice_or_tuple->opcode(), HloOpcode::kTuple);
return slice_or_tuple->operands();
}();
std::vector<llvm::Value*> input_ir_values;
FusedIrEmitter fused_emitter(elemental_emitter);
for (int i = 0; i < fused_computation->num_parameters(); i++) {
fused_emitter.BindGenerator(
*fused_computation->parameter_instruction(i),
[&inputs, i, builder](llvm_ir::IrArray::Index index) {
return inputs[i].EmitReadArrayElement(index, builder);
});
}
for (const HloInstruction* slice : slice_instructions) {
auto input_generator = *fused_emitter.GetGenerator(*slice->operand(0));
input_ir_values.push_back(input_generator(index).value());
}
KernelSupportLibrary ksl(builder, llvm_ir::UnrollMode::kDefaultUnroll);
for (int64_t i = 0; i < slice_instructions.size(); ++i) {
HloInstruction* slice = slice_instructions[i];
std::vector<llvm::Value*> index_within_ranges;
for (size_t dim = 0; dim < slice->slice_starts().size(); ++dim) {
CHECK_EQ(slice->slice_strides(dim), 1);
auto larger_or_equal_than_start = builder->CreateICmpSGE(
index.multidim()[dim],
index.GetConstantWithIndexType(slice->slice_starts(dim)));
llvm::Value* smaller_than_limit = builder->CreateICmpSLT(
index.multidim()[dim],
index.GetConstantWithIndexType(slice->slice_limits(dim)));
llvm::Value* within_range =
builder->CreateAnd(larger_or_equal_than_start, smaller_than_limit);
index_within_ranges.push_back(within_range);
}
llvm::Value* guarding_cond = builder->CreateAnd(index_within_ranges);
auto emit_slice_elem_func = [&] {
const std::vector<llvm::Value*>& src_multidim = index.multidim();
std::vector<llvm::Value*> dst_multidim(src_multidim.size());
for (size_t dim = 0; dim < src_multidim.size(); ++dim) {
dst_multidim[dim] = builder->CreateSub(
src_multidim[dim],
index.GetConstantWithIndexType(slice->slice_starts(dim)));
}
const llvm_ir::IrArray& src_ir_array = outputs[i];
llvm_ir::IrArray::Index slice_dst_index(dst_multidim, slice->shape(),
index.GetType());
src_ir_array.EmitWriteArrayElement(slice_dst_index, input_ir_values[i],
builder);
};
ksl.If(absl::StrCat("slice", i), guarding_cond, emit_slice_elem_func);
}
return absl::OkStatus();
}
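// All root slices must slice operands of the same shape (element type is
// ignored); returns that common shape or a FailedPrecondition error.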
absl::StatusOr<Shape> GetConsistentInputShapeForRootSlices(
const HloComputation* fused_computation) {
const HloInstruction& root = *fused_computation->root_instruction();
if (root.opcode() == HloOpcode::kSlice) {
return root.operands()[0]->shape();
}
CHECK_EQ(root.opcode(), HloOpcode::kTuple);
const Shape& first_slice_operand_shape =
root.operands()[0]->operands()[0]->shape();
for (size_t i = 1; i < root.operands().size(); ++i) {
const HloInstruction* slice = root.operands()[i];
const Shape& operand_shape = slice->operands()[0]->shape();
if (!ShapeUtil::EqualIgnoringElementType(first_slice_operand_shape,
operand_shape)) {
return FailedPrecondition(
"Fused slices do not have the same input shape, fused computation = "
"%s.",
root.parent()->name());
}
}
return first_slice_operand_shape;
}
}
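// Launch dimensions are derived from the slice input shape (the first root's
// operand), since that is what the emitted loop iterates over.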
LaunchDimensions InputSlicesFusion::launch_dimensions() const {
const auto& root = analysis_.fusion_root(0).instruction();
const auto& shape = root.operand(0)->shape();
return CalculateLaunchDimensions(shape, analysis_.device_info(),
{unroll_factor_});
}
std::optional<IndexingMap> InputSlicesFusion::ComputeThreadIdToOutputIndexing(
int64_t output_id, mlir::MLIRContext* ctx) const {
auto launch_dims = launch_dimensions();
const auto& shape = analysis_.fusion_root(output_id).shape();
return GetDefaultThreadIdIndexingMap(launch_dims, unroll_factor_, shape, ctx);
}
absl::Status InputSlicesFusion::EmitKernel(
IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const {
TF_ASSIGN_OR_RETURN(Shape element_shape,
GetConsistentInputShapeForRootSlices(
fusion.fused_instructions_computation()));
LaunchDimensionsConfig launch_config;
launch_config.unroll_factor = unroll_factor_;
GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder);
return ParallelLoopEmitter(
[&](const llvm_ir::IrArray::Index index) -> absl::Status {
return EmitElementForInputFusibleSlices(
elemental_emitter, fusion.fused_instructions_computation(),
inputs, outputs, index, builder);
},
element_shape, launch_dims, builder, launch_config)
.EmitLoop(
fusion.name(),
GetIndexTypeForKernel(&fusion, launch_dims.launch_bound(), builder));
}
}
} | #include "xla/service/gpu/fusions/input_slices.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
class InputSlicesTest : public HloTestBase {
public:
void SetUp() override {
HloTestBase::SetUp();
printer_ =
AffineMapPrinter({"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"},
{"chunk_id", "unroll_id"});
}
protected:
AffineMapPrinter printer_;
mlir::MLIRContext mlir_context_;
};
TEST_F(InputSlicesTest, ThreadIndexing) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
%input = f32[2,3,5,7]{2,1,0,3} parameter(0)
slice0 = f32[1,2,3,5]{2,1,0,3} slice(input), slice={[0:1],[1:3],[0:3],[2:7]}
slice1 = f32[1,2,3,5]{2,1,0,3} slice(input), slice={[0:1],[0:2],[0:3],[2:7]}
ROOT tuple = (f32[1,2,3,5]{2,1,0,3}, f32[1,2,3,5]{2,1,0,3}) tuple(slice0, slice1)
}
ENTRY entry {
%input = f32[2,3,5,7]{2,1,0,3} parameter(0)
ROOT %fusion = (f32[1,2,3,5]{2,1,0,3}, f32[1,2,3,5]{2,1,0,3}) fusion(%input), kind=kLoop, calls=fused_computation
})")
.value();
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused = AnalyzeFusion(*root, device_info);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto fusion = dynamic_cast<InputSlicesFusion*>(emitter.get());
ASSERT_NE(fusion, nullptr);
auto thread_id_to_output_indexing =
fusion->ComputeThreadIdToOutputIndexing(0, &mlir_context_);
EXPECT_THAT(thread_id_to_output_indexing->ToString(printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (0,
((bl_x * 128 + th_x) floordiv 3) mod 2,
(bl_x * 128 + th_x) mod 3,
((bl_x * 128 + th_x) floordiv 6) mod 5)
domain:
th_x in [0, 128)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 2)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
th_x + bl_x * 128 in [0, 30)
)"));
}
}
}
} | 2,133 |
#ifndef XLA_SERVICE_GPU_FUSIONS_TRITON_H_
#define XLA_SERVICE_GPU_FUSIONS_TRITON_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
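// Emits a fusion through the Triton pipeline. launch_config() is only
// available for block-level ("__triton") fusions that carry a
// BlockLevelFusionConfig; Triton GEMM fusions compute their launch dimensions
// during emission instead.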
class TritonFusion : public FusionInterface {
public:
struct LaunchConfig {
LaunchDimensions launch_dimensions;
BlockLevelParameters block_level_parameters;
};
explicit TritonFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis) {}
absl::StatusOr<FusionEmissionResult> Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const final;
std::optional<LaunchConfig> launch_config() const;
private:
const HloFusionAnalysis& analysis_;
};
}
}
#endif
#include "xla/service/gpu/fusions/triton.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_triton.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/runtime/kernel_thunk.h"
#include "xla/service/gpu/triton_fusion_analysis.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
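// Lowers the fusion with TritonWrapper (generic block-level Triton fusion or
// Triton GEMM), wraps the generated function in a kernel prototype, and
// returns a KernelThunk; generation is de-duplicated via the kernel reuse
// cache.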
absl::StatusOr<FusionEmissionResult> TritonFusion::Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const {
llvm::IRBuilder builder(ir_emitter_context.llvm_module()->getContext());
VLOG(3) << fusion.ToString();
std::string suggested_kernel_name = std::string(fusion.name());
TF_ASSIGN_OR_RETURN(
auto kernel_arguments,
KernelArguments::Create(ir_emitter_context.buffer_assignment(), &fusion));
const HloComputation* hlo_computation =
fusion.fused_instructions_computation();
auto generate = [&]() -> absl::StatusOr<KernelReuseCache::Entry> {
VLOG(3) << "Generating: " << suggested_kernel_name;
const std::string impl_fn_name =
ir_emitter_context.name_uniquer()->GetUniqueName(
llvm_ir::SanitizeFunctionName(
absl::StrCat(suggested_kernel_name, "_impl")));
auto backend_config =
fusion.backend_config<GpuBackendConfig>()->fusion_backend_config();
absl::string_view fusion_kind = backend_config.kind();
TritonWrapperResult triton_wrapper_result;
LaunchDimensions launch_dimensions;
if (fusion_kind == kTritonFusionKind) {
      std::optional<LaunchConfig> launch_config = this->launch_config();
if (!launch_config.has_value()) {
return absl::InvalidArgumentError(absl::StrCat(
"Block level fusion config is required for Triton fusions: ",
fusion.ToString()));
}
launch_dimensions = std::move(launch_config->launch_dimensions);
TF_ASSIGN_OR_RETURN(
triton_wrapper_result,
TritonWrapper(impl_fn_name, &fusion,
ir_emitter_context.gpu_compute_capability(),
ir_emitter_context.gpu_device_info(),
launch_config->block_level_parameters,
ir_emitter_context.llvm_module(),
*ir_emitter_context.mlir_context()));
} else {
CHECK_EQ(fusion_kind, kTritonGemmFusionKind);
BlockLevelParameters block_level_parameters;
if (!backend_config.has_triton_gemm_config()) {
LOG(WARNING) << "Using fallback triton GEMM config for op "
<< fusion.name();
auto& triton_config = *backend_config.mutable_triton_gemm_config();
triton_config.set_block_m(64);
triton_config.set_block_k(64);
triton_config.set_block_n(64);
triton_config.set_split_k(1);
block_level_parameters.num_ctas = 1;
block_level_parameters.num_stages = 1;
block_level_parameters.num_warps = 2;
} else {
const auto& triton_config = backend_config.triton_gemm_config();
block_level_parameters.num_ctas = triton_config.num_ctas();
block_level_parameters.num_stages = triton_config.num_stages();
block_level_parameters.num_warps = triton_config.num_warps();
}
TF_ASSIGN_OR_RETURN(
triton_wrapper_result,
TritonWrapper(impl_fn_name, &fusion,
ir_emitter_context.gpu_compute_capability(),
ir_emitter_context.gpu_device_info(),
block_level_parameters,
ir_emitter_context.llvm_module(),
*ir_emitter_context.mlir_context()));
TF_ASSIGN_OR_RETURN(
TritonGemmConfig config,
TritonGemmConfig::FromProto(backend_config.triton_gemm_config()));
TF_ASSIGN_OR_RETURN(auto analysis, TritonFusionAnalysis::Execute(
*hlo_computation, config.split_k));
TF_ASSIGN_OR_RETURN(
launch_dimensions,
GetMatMulLaunchDimensions(analysis, analysis_.fusion(), config));
}
llvm::Function* impl_fn =
ir_emitter_context.llvm_module()->getFunction(impl_fn_name);
TF_RET_CHECK(impl_fn);
llvm::Function* kernel;
std::vector<llvm_ir::IrArray> inputs;
std::vector<llvm_ir::IrArray> outputs;
TF_ASSIGN_OR_RETURN(
std::tie(kernel, inputs, outputs),
BuildKernelPrototype(ir_emitter_context, suggested_kernel_name,
kernel_arguments.args(), impl_fn->arg_size(),
launch_dimensions, &builder));
llvm::Function* prototype_func = builder.GetInsertBlock()->getParent();
prototype_func->splice(prototype_func->begin(), impl_fn);
for (const auto& [arg, ir_array] : llvm::zip(impl_fn->args(), inputs)) {
arg.replaceAllUsesWith(ir_array.GetBasePointer());
}
impl_fn->eraseFromParent();
return {{kernel->getName().str(), launch_dimensions,
triton_wrapper_result.cluster_dim,
triton_wrapper_result.shmem_bytes}};
};
auto [status_or_entry, was_cached] =
ir_emitter_context.kernel_cache().GetWithStatus(
hlo_computation, kernel_arguments.args(),
"", generate);
TF_ASSIGN_OR_RETURN(const KernelReuseCache::Entry* entry, status_or_entry);
FusionEmissionResult result;
result.thunks.emplace_back(std::make_unique<KernelThunk>(
&fusion, entry->kernel_name, kernel_arguments.args(),
entry->launch_dimensions, entry->cluster_dim, entry->shmem_bytes));
return result;
}
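// For block-level fusions the grid size is the product over output dimensions
// of ceil(dim_size / tile_size), with num_warps * WarpSize() threads per
// block. E.g. the f32[125,127] output tiled 3x127 in the accompanying unit
// test gives ceil(125/3) * ceil(127/127) = 42 blocks of 4 * 32 = 128 threads.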
std::optional<TritonFusion::LaunchConfig> TritonFusion::launch_config() const {
if (analysis_.fusion_backend_config().has_block_level_fusion_config()) {
BlockLevelParameters block_level_parameters =
BlockLevelParameters::FromBlockLevelFusionConfig(
analysis_.fusion_backend_config().block_level_fusion_config());
int64_t num_blocks = 1;
for (auto [dim_size, dim_tile_size] :
llvm::zip(analysis_.fusion_root(0).shape().dimensions(),
block_level_parameters.output_tile_sizes)) {
num_blocks *= (dim_size + dim_tile_size - 1) / dim_tile_size;
}
LaunchConfig launch_config;
launch_config.launch_dimensions = LaunchDimensions{
static_cast<uint64_t>(num_blocks),
static_cast<uint64_t>(block_level_parameters.num_warps * WarpSize())};
launch_config.block_level_parameters = std::move(block_level_parameters);
return launch_config;
}
return std::nullopt;
}
}
} | #include "xla/service/gpu/fusions/triton.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
class TritonFusionTest : public HloTestBase {};
TEST_F(TritonFusionTest,
TritonFusionWithBlockLevelFusionConfig_LaunchConfigIsCorrect) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation {
param_0.2 = f32[125] parameter(0)
ROOT broadcast.1 = f32[125,127] broadcast(param_0.2), dimensions={0}
}
fused_computation.1 {
param_0.3 = f32[125,127] parameter(0)
param_1.3 = f32[125,127] parameter(1)
ROOT multiply.2 = f32[125,127] multiply(param_0.3, param_1.3)
}
ENTRY entry_computation {
param_0.4 = f32[125] parameter(0)
param_1 = f32[125,127] parameter(1)
fusion = f32[125,127] fusion(param_0.4), kind=kLoop, calls=fused_computation
ROOT fusion.1 = f32[125,127] fusion(fusion, param_1), kind=kCustom, calls=fused_computation.1, backend_config={"fusion_backend_config":{"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["3","127"],"num_warps":"4"}}}
})"));
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
auto emitter_fused =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto triton_fusion = dynamic_cast<TritonFusion*>(emitter_fused.get());
ASSERT_NE(triton_fusion, nullptr);
auto launch_config = triton_fusion->launch_config();
ASSERT_NE(launch_config, std::nullopt);
EXPECT_EQ(launch_config->launch_dimensions.num_blocks(),
42);
EXPECT_EQ(launch_config->launch_dimensions.num_threads_per_block(),
128);
EXPECT_THAT(launch_config->block_level_parameters.output_tile_sizes,
ElementsAre(3, 127));
}
TEST_F(TritonFusionTest,
TritonFusionWithoutBlockLevelFusionConfig_LaunchConfigIsNullopt) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation {
param_0.2 = f32[125] parameter(0)
ROOT broadcast.1 = f32[125,127] broadcast(param_0.2), dimensions={0}
}
fused_computation.1 {
param_0.3 = f32[125,127] parameter(0)
param_1.3 = f32[125,127] parameter(1)
ROOT multiply.2 = f32[125,127] multiply(param_0.3, param_1.3)
}
ENTRY entry_computation {
param_0.4 = f32[125] parameter(0)
param_1 = f32[125,127] parameter(1)
fusion = f32[125,127] fusion(param_0.4), kind=kLoop, calls=fused_computation
ROOT fusion.1 = f32[125,127] fusion(fusion, param_1), kind=kCustom, calls=fused_computation.1, backend_config={"fusion_backend_config":{"kind":"__triton"}}
})"));
stream_executor::DeviceDescription device_info =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info);
auto emitter_fused =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto triton_fusion = dynamic_cast<TritonFusion*>(emitter_fused.get());
ASSERT_NE(triton_fusion, nullptr);
EXPECT_EQ(triton_fusion->launch_config(), std::nullopt);
}
}
}
} | 2,134 |
#ifndef XLA_SERVICE_GPU_FUSIONS_IN_PLACE_DYNAMIC_UPDATE_SLICE_H_
#define XLA_SERVICE_GPU_FUSIONS_IN_PLACE_DYNAMIC_UPDATE_SLICE_H_
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/IR/IRBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/ir_array.h"
namespace xla {
namespace gpu {
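// Emitter for fusions whose roots are dynamic-update-slice ops that can be
// applied in place: the output buffer aliases the operand being updated, so
// only the elements of the update windows are written.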
class InPlaceDynamicUpdateSliceFusion : public KernelFusionEmitterBase {
public:
explicit InPlaceDynamicUpdateSliceFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis),
dus_ops_(
GetOutputDefiningDynamicUpdateSlices(analysis.fusion_roots())) {}
LaunchDimensions launch_dimensions() const override;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override {
return std::nullopt;
}
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* indexing_context) const override;
protected:
absl::Status EmitKernel(IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs,
llvm::IRBuilder<>* builder) const override;
const HloFusionAnalysis& analysis_;
std::vector<const HloInstruction*> dus_ops_;
};
}
}
#endif
#include "xla/service/gpu/fusions/in_place_dynamic_update_slice.h"
#include <optional>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/IRBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/elemental_ir_emitter.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/dynamic_update_slice_util.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
namespace xla {
namespace gpu {
namespace {
constexpr int kDUSUpdateIndex = 1;
}
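// Only the update elements need to be touched, so launch dimensions cover the
// update operand's shape rather than the full output shape.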
LaunchDimensions InPlaceDynamicUpdateSliceFusion::launch_dimensions() const {
const auto& update_shape = dus_ops_.front()->operand(1)->shape();
return CalculateLaunchDimensions(update_shape, analysis_.device_info());
}
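// A static thread->input map exists only for the update operand
// (kDUSUpdateIndex); other operands return std::nullopt.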
std::optional<IndexingMap>
InPlaceDynamicUpdateSliceFusion::ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* mlir_context) const {
if (hero_operand_index != kDUSUpdateIndex) {
return std::nullopt;
}
auto launch_dims = launch_dimensions();
const auto& update_shape =
dus_ops_.front()->operand(kDUSUpdateIndex)->shape();
return GetDefaultThreadIdIndexingMap(launch_dims, 1,
update_shape, mlir_context);
}
absl::Status InPlaceDynamicUpdateSliceFusion::EmitKernel(
IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const {
for (auto [op, output] : llvm::zip(dus_ops_, outputs)) {
output = output.CastToShape(op->shape(), builder);
}
auto* fused_computation = fusion.fused_instructions_computation();
GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder);
FusedIrEmitter fused_emitter(elemental_emitter);
for (auto [index, input] : llvm::enumerate(inputs)) {
auto fused_operand = fused_computation->parameter_instruction(index);
fused_emitter.BindGenerator(
*fused_operand, [input = input, builder,
fused_operand](const llvm_ir::IrArray::Index& index) {
return input.EmitReadArrayElement(index, builder,
fused_operand->name());
});
}
std::vector<std::pair<const HloInstruction*, const llvm_ir::IrArray>>
dus_and_output_array;
dus_and_output_array.reserve(dus_ops_.size());
for (auto [op, output] : llvm::zip(dus_ops_, outputs)) {
dus_and_output_array.push_back(std::make_pair(op, output));
}
return llvm_ir::EmitParallelFusedDynamicUpdateSliceInPlace(
fused_computation, dus_and_output_array, &fused_emitter, launch_dims,
builder);
}
}
} | #include "xla/service/gpu/fusions/in_place_dynamic_update_slice.h"
#include <optional>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class InPlaceDynamicUpdateSliceFusionTest : public HloTestBase {
public:
void SetUp() override {
HloTestBase::SetUp();
printer_ =
AffineMapPrinter({"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"},
{"chunk_id", "unroll_id"});
}
protected:
AffineMapPrinter printer_;
mlir::MLIRContext mlir_context_;
stream_executor::DeviceDescription device_info_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
};
TEST_F(InPlaceDynamicUpdateSliceFusionTest, ThreadIndexing) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule module
fused_computation {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] parameter(2)
i1 = s32[] parameter(3)
ROOT updated = f32[20,30] dynamic-update-slice(in, updates, i0, i1)
}
ENTRY entry {
in = f32[20,30] parameter(0)
updates = f32[5,6] parameter(1)
i0 = s32[] constant(2)
i1 = s32[] constant(3)
ROOT fusion = f32[20,30] fusion(in, updates, i0, i1), kind=kLoop, calls=fused_computation
}
)"));
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused = AnalyzeFusion(*root, device_info_);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto fusion = dynamic_cast<InPlaceDynamicUpdateSliceFusion*>(emitter.get());
ASSERT_NE(fusion, nullptr);
auto thread_id_update_indexing = fusion->ComputeThreadIdToInputIndexing(
0, 1, &mlir_context_);
EXPECT_THAT(thread_id_update_indexing->ToString(printer_),
MatchIndexingString(R"(
(th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (
th_x floordiv 6, th_x mod 6)
domain:
th_x in [0, 30)
th_y in [0, 1)
th_z in [0, 1)
bl_x in [0, 1)
bl_y in [0, 1)
bl_z in [0, 1)
chunk_id in [0, 1)
unroll_id in [0, 1)
)"));
auto thread_id_dst_indexing = fusion->ComputeThreadIdToInputIndexing(
0, 0, &mlir_context_);
EXPECT_THAT(thread_id_dst_indexing, ::testing::Eq(std::nullopt));
}
TEST_F(InPlaceDynamicUpdateSliceFusionTest, ProduceConsumerFusion) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
fused_computation.1 {
param_0 = bf16[1,2,5,1,2] parameter(0)
bitcast = bf16[1,5,1,2,2] bitcast(param_0)
param_1 = bf16[1,1,1,2,2] parameter(1)
param_2 = s32[] parameter(2)
param_3 = s32[] parameter(3)
ROOT dynamic-update-slice = bf16[1,5,1,2,2] dynamic-update-slice(bitcast, param_1, param_2, param_3, param_2, param_2, param_2)
}
ENTRY entry_computation {
param_0.2 = bf16[1,2,5,1,2] parameter(3)
param_1.2 = bf16[1,1,1,2,2] parameter(0)
param_2.2 = s32[] parameter(1)
param_3.2 = s32[] parameter(2)
fusion = bf16[1,5,1,2,2] fusion(param_0.2, param_1.2, param_2.2, param_3.2), kind=kLoop, calls=fused_computation.1
ROOT bitcast.1 = bf16[1,2,5,1,2] bitcast(fusion)
}
)"));
auto* root = module->entry_computation()->root_instruction();
auto analysis_fused =
AnalyzeProducerConsumerFusion(*root->operand(0), *root, device_info_);
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused});
auto fusion = dynamic_cast<InPlaceDynamicUpdateSliceFusion*>(emitter.get());
ASSERT_NE(fusion, nullptr);
  EXPECT_EQ(fusion->launch_dimensions().launch_bound(),
            4 /* update shape bf16[1,1,1,2,2] has 4 elements */);
}
}
}
} | 2,135 |
#ifndef XLA_SERVICE_GPU_FUSIONS_REDUCTION_H_
#define XLA_SERVICE_GPU_FUSIONS_REDUCTION_H_
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "llvm/IR/IRBuilder.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/reduction_base.h"
#include "xla/service/gpu/fusions/tiling_util.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
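// ReductionInfo holds the tiling, grouping, and race-freedom analysis for a
// reduction fusion; ReductionFusion uses it for launch dimensions, thread
// indexing, and kernel emission.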
class ReductionInfo {
public:
static ReductionInfo Create(const HloFusionAnalysis& analysis);
const Tiling& GetTiling() const { return tiling_; }
const ReductionGroups& GetGroups() const { return groups_; }
Shape GetReduceOperandShape() const {
return first_reduce_->operand(0)->shape();
}
bool IsRowReduction() const { return is_row_reduction_; }
bool IsRaceFree() const { return is_race_free_; }
int GetRowsPerWarp() const;
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const;
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const;
LaunchDimensions launch_dimensions() const;
private:
ReductionInfo(const HloFusionAnalysis& analysis, Tiling tiling,
bool is_row_reduction, bool is_race_free,
ReductionGroups groups, const HloInstruction* first_reduce)
: analysis_(analysis),
tiling_(tiling),
is_row_reduction_(is_row_reduction),
is_race_free_(is_race_free),
groups_(std::move(groups)),
first_reduce_(first_reduce) {}
const HloFusionAnalysis& analysis_;
Tiling tiling_;
bool is_row_reduction_;
bool is_race_free_;
ReductionGroups groups_;
const HloInstruction* first_reduce_;
};
class ReductionFusion : public KernelFusionEmitterBase {
public:
explicit ReductionFusion(const HloFusionAnalysis& analysis)
: analysis_(analysis), reduction_info_(ReductionInfo::Create(analysis)) {}
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t root_index, mlir::MLIRContext* ctx) const override {
return reduction_info_.ComputeThreadIdToOutputIndexing(root_index, ctx);
}
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t root_index, int64_t hero_operand_index,
mlir::MLIRContext* ctx) const override {
return reduction_info_.ComputeThreadIdToInputIndexing(
root_index, hero_operand_index, ctx);
}
LaunchDimensions launch_dimensions() const override {
return reduction_info_.launch_dimensions();
}
const ReductionInfo& reduction_info() const { return reduction_info_; }
protected:
absl::StatusOr<FusionEmissionResult> EmitInitializers(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const override;
absl::Status EmitKernel(IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs,
llvm::IRBuilder<>* builder) const override;
private:
const HloFusionAnalysis& analysis_;
ReductionInfo reduction_info_;
};
}
}
#endif
#include "xla/service/gpu/fusions/reduction.h"
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/container/node_hash_map.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/elemental_ir_emitter.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/reduction_base.h"
#include "xla/service/gpu/fusions/thunk_util.h"
#include "xla/service/gpu/fusions/tiling_util.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/ir_emitter_nested.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/parallel_loop_emitter.h"
#include "xla/service/gpu/reduction_utils.h"
#include "xla/service/gpu/runtime/kernel_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/llvm_ir/fused_ir_emitter.h"
#include "xla/service/llvm_ir/ir_array.h"
#include "xla/service/llvm_ir/kernel_support_library.h"
#include "xla/service/llvm_ir/llvm_loop.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/service/llvm_ir/loop_emitter.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using TypedPointer = std::pair<llvm::Value* const, llvm::Type* const>;
using ReductionOutputMap =
ConstHloInstructionMap<absl::Span<llvm_ir::IrArray const>>;
using ExtraOutputGensMap = ConstHloInstructionMap<llvm_ir::ElementGenerator>;
int GetNumOutputs(const Shape& shape) {
if (shape.IsTuple()) {
return shape.tuple_shapes_size();
}
return 1;
}
const Shape& OutputShape(const Shape& output_shape, int output_index) {
CHECK(output_index == 0 || output_shape.IsTuple());
return output_shape.IsTuple() ? output_shape.tuple_shapes(output_index)
: output_shape;
}
llvm::Type* GetIndexType(const HloFusionInstruction& fusion,
const Tiling& tiling, llvm::IRBuilder<>* builder) {
return GetIndexTypeForKernel(
&fusion, tiling.GetNumThreadsPerBlock() * tiling.GetNumBlocks(), builder);
}
llvm::Value* CastSharedToGlobal(llvm::IRBuilder<>* builder, llvm::Value* input,
llvm::Type* element_type, llvm::Twine name) {
return builder->CreateAddrSpaceCast(
input,
llvm::PointerType::get(element_type,
0),
name);
}
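// Drives code generation for a reduction fusion: builds initializer thunks
// for the outputs and emits the tiled reduction kernel, delegating per-group
// state handling and output writing to ReductionGroupEmitter.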
class ReductionEmitter {
public:
ReductionEmitter(const HloFusionAnalysis& analysis,
const ReductionInfo& reduction_codegen_info,
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion,
llvm::IRBuilder<>* builder)
: builder_(builder),
elemental_emitter_(ir_emitter_context, builder_),
analysis_(analysis),
reduction_codegen_info_(reduction_codegen_info),
ir_emitter_context_(ir_emitter_context),
fusion_(fusion),
index_ty_(GetIndexType(fusion, reduction_codegen_info.GetTiling(),
elemental_emitter_.builder())) {
for (auto hero : analysis.fusion_heroes()) {
if (hero.opcode() == HloOpcode::kReduce) {
for (int i = 0; i < hero.instruction().operand_count() / 2; ++i) {
CHECK(LayoutUtil::IsMonotonicWithDim0Major(
hero.instruction().operand(i)->shape().layout()))
<< "reduction-layout-normalizer must run before code generation";
}
}
}
}
absl::StatusOr<FusionEmissionResult> EmitInitializers();
absl::Status EmitKernel(const LaunchDimensions& launch_dims,
std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs);
private:
friend class ReductionGroupEmitter;
absl::StatusOr<std::unique_ptr<Thunk>> BuildKernelThunkForFusion(
const LaunchDimensions& launch_dimensions,
absl::string_view discriminator,
std::function<absl::Status(std::vector<llvm_ir::IrArray>,
std::vector<llvm_ir::IrArray>)>
kernel_builder_fn);
absl::StatusOr<std::unique_ptr<Thunk>> BuildFusedInitializerThunk(
const HloInstruction* fusion_root, BufferAllocation::Slice dest_slice,
int output_index);
absl::Status EmitIRForReduction(
absl::Span<const HloInstruction* const> instr_index_group,
FusedIrEmitter& fused_emitter, const ReductionOutputMap& result_ir_arrays,
const Shape& input_shape);
void MaybeEmitFenceForAMDGPU();
void EmitSyncThreads();
int ReducedDimensionSize() const {
return reduction_codegen_info_.GetTiling().GetShape()[2];
}
llvm::IRBuilder<>* builder_;
GpuElementalIrEmitter elemental_emitter_;
const HloFusionAnalysis& analysis_;
const ReductionInfo& reduction_codegen_info_;
IrEmitterContext& ir_emitter_context_;
const HloFusionInstruction& fusion_;
llvm::Type* index_ty_;
};
class ReductionEmitter;
class ReductionGroupEmitter {
public:
struct ReductionCalculationState {
std::optional<llvm_ir::SharedMemoryTile> shared_cache;
llvm::Value* initial_value;
llvm::AllocaInst* partial_result_address;
llvm::AllocaInst* input_address;
llvm_ir::ElementGenerator input_gen;
};
ReductionGroupEmitter(
ReductionEmitter& reduction_emitter,
absl::Span<const HloReduceInstruction* const> reduce_instr_index_group,
const ReductionOutputMap& result_ir_arrays,
FusedIrEmitter& fused_emitter);
const ReductionCalculationState& GetCalculationStateFor(
const HloInstruction* instruction, int operand_idx) const {
const ReductionOpState& op_state = state_.at(instruction);
CHECK_LT(operand_idx, op_state.size());
return op_state[operand_idx];
}
void SetCalculationStateFor(
const ReductionCalculationState& calculation_state,
const HloInstruction* instruction, int operand_idx) {
ReductionOpState& op_state = state_[instruction];
CHECK_EQ(operand_idx, op_state.size());
op_state.push_back(calculation_state);
}
void EmitReductionOutputForRowReduction(
const TilingKernelInfo& tiling_kernel_info,
const HloReduceInstruction* reduction,
const std::vector<const HloInstruction*>& roots) const;
void EmitReductionOutputForColumnReduction(
const TilingKernelInfo& tiling_kernel_info,
const HloReduceInstruction* reduction,
const std::vector<const HloInstruction*>& roots) const;
void EmitFullWarpShuffleDownLoopForReduce(
const HloComputation* reducer,
absl::Span<TypedPointer const> partial_result_addresses,
int threads_per_block, int num_results_per_warp) const;
void WriteReductionOutput(const TilingKernelInfo& tiling_kernel_info,
const HloReduceInstruction* reduction,
const std::vector<const HloInstruction*>& roots,
absl::Span<TypedPointer const> values) const;
llvm_ir::IrArray::Index GetOutputIndexForReduction(
const TilingKernelInfo& tiling_kernel_info,
const HloReduceInstruction* reduction, const HloInstruction* root,
int output_idx) const;
void GenerateElementForReducer(const HloReduceInstruction* reduction,
const llvm_ir::IrArray::Index& index) const;
absl::Status EmitExtraOutputsForReduce(
const Shape& reduction_operand_shape,
const llvm_ir::IrArray::Index& index,
const ExtraOutputGensMap& extra_output_gens);
private:
ReductionEmitter& reduction_emitter_;
const ReductionOutputMap& result_ir_arrays_;
using ReductionOpState = absl::InlinedVector<ReductionCalculationState, 2>;
absl::flat_hash_map<const HloInstruction*, ReductionOpState> state_;
};
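// Sets up the per-reduction state: an alloca for the running partial result
// (seeded with the reduction's init value), an alloca for the current input
// element, and, when cross-thread exchange is needed, a shared-memory tile.
// Row reductions with several rows per warp skip the shared cache entirely;
// column reductions pad the tile to n x (n + 1), a common way to avoid
// shared-memory bank conflicts.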
ReductionGroupEmitter::ReductionGroupEmitter(
ReductionEmitter& reduction_emitter,
absl::Span<const HloReduceInstruction* const> reduce_instr_index_group,
const ReductionOutputMap& result_ir_arrays, FusedIrEmitter& fused_emitter)
: reduction_emitter_(reduction_emitter),
result_ir_arrays_(result_ir_arrays) {
const ReductionInfo& reduction_info =
reduction_emitter_.reduction_codegen_info_;
VLOG(10) << "Emit prologue for reduction: "
<< reduction_emitter_.fusion_.ToString();
auto* builder = reduction_emitter_.builder_;
for (const HloReduceInstruction* reduce_hlo : reduce_instr_index_group) {
for (int op_result_idx = 0;
op_result_idx < GetNumOutputs(reduce_hlo->shape()); op_result_idx++) {
Shape result_shape = OutputShape(reduce_hlo->shape(), op_result_idx);
llvm::Type* element_type = llvm_ir::PrimitiveTypeToIrType(
result_shape.element_type(), builder->GetInsertBlock()->getModule());
llvm::AllocaInst* reduction_input_address =
llvm_ir::EmitAllocaAtFunctionEntry(
element_type, "reduction_input_address", builder);
llvm::AllocaInst* result_address = llvm_ir::EmitAllocaAtFunctionEntry(
element_type, "partial_reduction_result", builder);
const HloInstruction* init_value =
reduce_hlo->init_values()[op_result_idx];
llvm::Value* init_ir_value = (*fused_emitter.GetGenerator(
*init_value))(llvm_ir::IrArray::Index(builder->getInt32Ty()))
.value();
builder->CreateStore(init_ir_value, result_address);
const Tiling& tiling = reduction_info.GetTiling();
auto shared_cache = [&]() -> std::optional<llvm_ir::SharedMemoryTile> {
auto* module = reduction_emitter.ir_emitter_context_.llvm_module();
if (reduction_info.IsRowReduction()) {
if (RowReductionGetRowsPerWarp(
reduction_emitter_.ReducedDimensionSize()) > 1) {
return std::nullopt;
}
auto block_size = tiling.GetThreadsPerBlock();
CHECK_EQ(block_size[ReductionDimensions::kRowMinorReducedDimension] %
WarpSize(),
0);
return llvm_ir::AllocateSharedMemoryTile(
module, element_type,
{block_size[ReductionDimensions::kRowKeptDimension],
block_size[ReductionDimensions::kRowMinorReducedDimension] /
WarpSize()},
"shared_cache");
}
const auto& num_threads = tiling.GetThreadsPerBlock();
int n = num_threads[ReductionDimensions::kColReducedDimension];
CHECK_EQ(n, num_threads[ReductionDimensions::kColMinorKeptDimension]);
return llvm_ir::AllocateSharedMemoryTile(module, element_type,
{n, n + 1}, "shared_cache");
}();
llvm_ir::ElementGenerator input_gen =
*fused_emitter.GetGenerator(*reduce_hlo->inputs()[op_result_idx]);
SetCalculationStateFor({shared_cache, init_ir_value, result_address,
reduction_input_address, input_gen},
reduce_hlo, op_result_idx);
}
}
}
void ReductionEmitter::MaybeEmitFenceForAMDGPU() {
auto* module = builder_->GetInsertBlock()->getModule();
if (IsAMDGPU(module) &&
ir_emitter_context_.rocm_compute_capability().fence_before_barrier()) {
builder_->CreateFence(
llvm::AtomicOrdering::SequentiallyConsistent,
builder_->getContext().getOrInsertSyncScopeID("workgroup"));
}
}
void ReductionEmitter::EmitSyncThreads() {
MaybeEmitFenceForAMDGPU();
EmitCallToTargetIntrinsic(TargetIntrinsicID::kBarrierId, {}, {}, builder_);
}
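// Builds (or reuses from the kernel cache) a kernel thunk whose body is
// produced by `kernel_builder_fn`; `discriminator` distinguishes the several
// kernels generated for one fusion, e.g. the initializers vs. the reduction
// itself.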
absl::StatusOr<std::unique_ptr<Thunk>>
ReductionEmitter::BuildKernelThunkForFusion(
const LaunchDimensions& launch_dimensions, absl::string_view discriminator,
std::function<absl::Status(std::vector<llvm_ir::IrArray>,
std::vector<llvm_ir::IrArray>)>
kernel_builder_fn) {
const HloComputation* fused_computation =
fusion_.fused_instructions_computation();
std::string suggested_kernel_name = std::string(fusion_.name());
TF_ASSIGN_OR_RETURN(auto kernel_arguments,
KernelArguments::Create(
ir_emitter_context_.buffer_assignment(), &fusion_));
auto [status_or_entry, cached] =
ir_emitter_context_.kernel_cache().GetWithStatus(
fused_computation, kernel_arguments.args(), discriminator,
[&]() -> absl::StatusOr<KernelReuseCache::Entry> {
llvm::Function* kernel;
std::vector<llvm_ir::IrArray> input_arrays;
std::vector<llvm_ir::IrArray> output_arrays;
TF_ASSIGN_OR_RETURN(
std::tie(kernel, input_arrays, output_arrays),
BuildKernelPrototype(ir_emitter_context_, suggested_kernel_name,
kernel_arguments.args(),
fusion_.operand_count(), launch_dimensions,
builder_));
TF_RETURN_IF_ERROR(kernel_builder_fn(input_arrays, output_arrays));
return {{kernel->getName().str(), launch_dimensions,
std::nullopt,
0}};
});
TF_ASSIGN_OR_RETURN(const KernelReuseCache::Entry* entry, status_or_entry);
if (cached) {
VLOG(3) << "Reuse: " << suggested_kernel_name << " -> "
<< entry->kernel_name;
}
return std::make_unique<KernelThunk>(
&fusion_, entry->kernel_name, kernel_arguments.args(), launch_dimensions,
entry->cluster_dim, entry->shmem_bytes);
}
absl::Status ReductionGroupEmitter::EmitExtraOutputsForReduce(
const Shape& reduction_operand_shape, const llvm_ir::IrArray::Index& index,
const ExtraOutputGensMap& extra_output_gens) {
if (extra_output_gens.empty()) {
return absl::OkStatus();
}
auto* builder = reduction_emitter_.builder_;
std::vector<std::pair<const HloInstruction*, llvm::Value*>>
extra_output_ir_values;
extra_output_ir_values.reserve(extra_output_gens.size());
auto get_index = [&](const HloInstruction* instr) {
const Shape& s = instr->shape();
return ShapeUtil::EqualIgnoringElementType(reduction_operand_shape, s)
? index
: index.SourceIndexOfBitcast(reduction_operand_shape, s,
builder);
};
for (const auto& [instr, generator] : extra_output_gens) {
TF_ASSIGN_OR_RETURN(llvm::Value* const extra_output_ir_value,
generator(get_index(instr)));
extra_output_ir_values.emplace_back(instr, extra_output_ir_value);
}
for (const auto& [instr, generator] : extra_output_ir_values) {
absl::Span<llvm_ir::IrArray const> result_ir = result_ir_arrays_.at(instr);
CHECK_EQ(result_ir.size(), 1);
result_ir[0].EmitWriteArrayElement(get_index(instr), generator, builder);
}
return absl::OkStatus();
}
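// Builds a thunk that initializes one reduction output buffer with its init
// value. Constant init values are handled by a constant-initializer thunk;
// otherwise a small kernel is emitted that evaluates the init operand of the
// reduction over the destination shape.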
absl::StatusOr<std::unique_ptr<Thunk>>
ReductionEmitter::BuildFusedInitializerThunk(const HloInstruction* fusion_root,
BufferAllocation::Slice dest_slice,
int output_index) {
const HloReduceInstruction* reduce =
DynCast<HloReduceInstruction>(fusion_root);
TF_RET_CHECK(reduce);
const HloInstruction* init_value = reduce->init_values()[0];
TF_ASSIGN_OR_RETURN(
std::optional<std::unique_ptr<Thunk>> constant_init_thunk,
BuildConstantInitializerThunk(ir_emitter_context_, fusion_root,
init_value, dest_slice));
if (constant_init_thunk) {
return *std::move(constant_init_thunk);
}
const Shape& dest_shape = fusion_root->shape();
LaunchDimensions launch_dimensions = CalculateLaunchDimensions(
dest_shape, ir_emitter_context_.gpu_device_info());
const HloComputation* fused_computation =
fusion_.fused_instructions_computation();
auto builder_fn = [&](std::vector<llvm_ir::IrArray> inputs,
std::vector<llvm_ir::IrArray> outputs) -> absl::Status {
FusedIrEmitter fused_emitter(elemental_emitter_);
for (int i = 0; i < fused_computation->num_parameters(); i++) {
fused_emitter.BindGenerator(
*fused_computation->parameter_instruction(i),
[builder = builder_,
input = inputs[i]](llvm_ir::IrArray::Index index) {
return input.EmitReadArrayElement(index, builder);
});
}
HloInstruction* instr = fused_computation->root_instruction();
if (instr->opcode() == HloOpcode::kTuple) {
instr = instr->mutable_operand(output_index);
} else {
CHECK_EQ(0, output_index);
}
TF_RET_CHECK(instr->shape().IsArray());
TF_ASSIGN_OR_RETURN(auto generator,
fused_emitter.GetGenerator(*instr->operand(1)));
TF_RETURN_IF_ERROR(ParallelLoopEmitter(generator, {outputs[output_index]},
launch_dimensions, builder_)
.EmitLoop(fusion_.name()));
return absl::OkStatus();
};
return BuildKernelThunkForFusion(launch_dimensions,
absl::StrCat("init_", output_index),
builder_fn);
}
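// Reduces the per-lane partial results of a warp with a shuffle-down tree:
// each step combines a lane's value with the value `distance` lanes above it.
// For num_results_per_warp == 1 the distances are 16, 8, 4, 2, 1, so the first
// lane of the warp ends up holding the fully reduced value. Struct-typed
// values are shuffled through an integer of the same bit width.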
void ReductionGroupEmitter::EmitFullWarpShuffleDownLoopForReduce(
const HloComputation* reducer,
absl::Span<TypedPointer const> partial_result_addresses,
int threads_per_block, int num_results_per_warp) const {
CHECK_EQ(threads_per_block % 32, 0);
CHECK_EQ(WarpSize() % num_results_per_warp, 0);
auto* builder = reduction_emitter_.builder_;
for (int distance = 16 / num_results_per_warp; distance >= 1; distance /= 2) {
absl::InlinedVector<llvm::Value*, 2> reduction_params;
for (auto acc : partial_result_addresses) {
reduction_params.push_back(acc.first);
}
for (auto [partial_result_address, element_type] :
partial_result_addresses) {
int bit_width = llvm_ir::GetSizeInBits(element_type);
llvm::Value* result_from_other_lane = llvm_ir::EmitAllocaAtFunctionEntry(
element_type, "result_from_other_lane", builder);
reduction_params.push_back(result_from_other_lane);
llvm::Type* shuffled_value_type = element_type->isStructTy()
? builder->getIntNTy(bit_width)
: element_type;
llvm::Value* partial_result =
builder->CreateLoad(shuffled_value_type, partial_result_address,
"partial_reduction_result");
builder->CreateStore(
EmitFullWarpShuffleDown(
partial_result, builder->getInt32(distance), builder,
reduction_emitter_.ir_emitter_context_.gpu_device_info()),
result_from_other_lane);
}
absl::StatusOr<std::vector<llvm::Value*>> returned_scalars =
CallNestedComputationWithScalarAddrs(
builder, reduction_emitter_.ir_emitter_context_, *reducer,
reduction_params);
TF_CHECK_OK(returned_scalars.status());
for (int i = 0; i < returned_scalars->size(); i++) {
builder->CreateStore(returned_scalars->at(i),
partial_result_addresses[i].first);
}
}
}
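// Computes the index into the output of `root` at which this thread's result
// for `reduction` (output `output_idx`) must be written: the kept dimensions
// are reconstructed from the tile origin and thread ids, then translated
// through the physical reduction output shape and the root's shape.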
llvm_ir::IrArray::Index ReductionGroupEmitter::GetOutputIndexForReduction(
const TilingKernelInfo& tiling_kernel_info,
const HloReduceInstruction* reduction, const HloInstruction* root,
int output_idx) const {
auto* builder = reduction_emitter_.builder_;
auto* index_ty = reduction_emitter_.index_ty_;
auto projected_index = [&]() -> llvm_ir::IrArray::Index {
const auto& reduction_info = reduction_emitter_.reduction_codegen_info_;
const auto& offset = tiling_kernel_info.tile_origin;
const auto& shape = reduction_info.GetTiling().GetXlaShape();
const auto& thread_ids = tiling_kernel_info.thread_id_info.thread_ids;
if (reduction_info.IsRowReduction()) {
constexpr int kDim = ReductionDimensions::kRowKeptDimension;
return {{builder->CreateAdd(offset[kDim], thread_ids[kDim])},
{shape.dimensions(kDim)},
index_ty};
}
auto* major_idx = offset[ReductionDimensions::kColMajorKeptDimension];
auto* minor_idx = builder->CreateAdd(
offset[ReductionDimensions::kColMinorKeptDimension],
thread_ids[ReductionDimensions::kColReducedDimension]);
return {{major_idx, minor_idx},
ShapeUtil::DeleteDimension(
ReductionDimensions::kColReducedDimension, shape),
index_ty};
}();
auto physical_shape = ShapeUtil::DeleteDimensions(
reduction->dimensions(), reduction->operand(output_idx)->shape());
auto physical_index =
projected_index.SourceIndexOfBitcast(physical_shape, builder);
return llvm_ir::IrArray::Index(physical_index.multidim(),
OutputShape(reduction->shape(), output_idx),
index_ty)
.SourceIndexOfBitcast(OutputShape(root->shape(), output_idx), builder);
}
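// Writes one value per output of the (possibly variadic) reduction to every
// root that consumes it, at the index computed by GetOutputIndexForReduction.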
void ReductionGroupEmitter::WriteReductionOutput(
const TilingKernelInfo& tiling_kernel_info,
const HloReduceInstruction* reduction,
const std::vector<const HloInstruction*>& roots,
const absl::Span<TypedPointer const> values) const {
auto* builder = reduction_emitter_.builder_;
const auto& reduction_info = reduction_emitter_.reduction_codegen_info_;
const HloComputation* reducer = reduction->to_apply();
for (const auto& [oidx, typed_ptr] : llvm::enumerate(values)) {
auto [output_ptr, type] = typed_ptr;
for (auto root : roots) {
llvm_ir::IrArray::Index output_index =
GetOutputIndexForReduction(tiling_kernel_info, reduction, root, oidx);
llvm::Value* output_address =
            result_ir_arrays_.at(root)[oidx].EmitArrayElementAddress(
#include "xla/service/gpu/fusions/reduction.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::SizeIs;
class ReductionTest : public HloTestBase {
protected:
stream_executor::DeviceDescription device_info_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
mlir::MLIRContext mlir_context_;
};
TEST_F(ReductionTest, ThreadIndexingRowReduction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,512] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,512] parameter(0)
ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> (
(d3 * 8 + d0 floordiv 32) floordiv 64,
(d3 * 8 + d0 floordiv 32) mod 64,
(d0 mod 32 + s2 * 32) * 2 + s3
)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 800)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 8)
s3 in [0, 2)
d0 mod 32 + s2 * 32 in [0, 256)
d3 * 8 + d0 floordiv 32 in [0, 6400)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (
(d3 * 8 + d0 floordiv 32) floordiv 64,
(d3 * 8 + d0 floordiv 32) mod 64
)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 800)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 32 in [0, 1)
d3 * 8 + d0 floordiv 32 in [0, 6400)
)"));
}
TEST_F(ReductionTest, ThreadIndexingMultiRowReduction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,4] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,4] parameter(0)
ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2] -> (
d3 + (d0 floordiv 4) floordiv 64,
(d0 floordiv 4) mod 64,
d0 mod 4
)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 100)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 1)
d0 mod 4 in [0, 4)
d3 * 64 + d0 floordiv 4 in [0, 6400)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (
d3 + (d0 floordiv 4) floordiv 64,
(d0 floordiv 4) mod 64
)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 100)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 4 in [0, 1)
d3 * 64 + d0 floordiv 4 in [0, 6400)
)"));
}
TEST_F(ReductionTest, ThreadIndexingColumnReduction) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,32] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,32] reduce(%input, %c0), dimensions={1}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,32] parameter(0)
ROOT %fusion = f32[100,32] fusion(%input), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2] -> (
d3,
d0 floordiv 32 + s1 * 32,
d0 mod 32
)
domain:
d0 in [0, 1024) d1 in [0, 1) d2 in [0, 1)
d3 in [0, 100) d4 in [0, 1) d5 in [0, 1)
s0 in [0, 1) s1 in [0, 128) s2 in [0, 1)
d0 floordiv 32 + s1 * 32 in [0, 64)
d0 mod 32 in [0, 32)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (
d3,
d0 floordiv 32
)
domain:
d0 in [0, 1024) d1 in [0, 1) d2 in [0, 1)
d3 in [0, 100) d4 in [0, 1) d5 in [0, 1)
d0 mod 32 in [0, 1)
)"));
}
TEST_F(ReductionTest, ThreadIndexingOutputLayout) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,512] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,64]{0,1} reduce(%input, %c0), dimensions={2}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,512] parameter(0)
ROOT %fusion = f32[100,64]{0,1} fusion(%input), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5) -> (
(d3 * 8 + d0 floordiv 32) floordiv 64,
(d3 * 8 + d0 floordiv 32) mod 64
)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 800)
d4 in [0, 1)
d5 in [0, 1)
d0 mod 32 in [0, 1)
d3 * 8 + d0 floordiv 32 in [0, 6400)
)"));
}
TEST_F(ReductionTest, ThreadIndexingSideOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,512] parameter(0)
%c0 = f32[] constant(0)
%log = f32[100,64,512] log(%input)
%reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
ROOT tuple = (f32[100,64], f32[100,64,512]) tuple(%reduce, %log)
}
ENTRY entry {
%input = f32[100,64,512] parameter(0)
ROOT %fusion = (f32[100,64], f32[100,64,512]) fusion(%input), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
constexpr char kExpectedIndexing[] = R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> (
d3 floordiv 8,
(d3 mod 8) * 8 + d0 floordiv 32,
(d0 mod 32) * 2 + s2 * 64 + s3
)
domain:
d0 in [0, 256)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 800)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 8)
s3 in [0, 2)
)";
auto input_indexing =
fusion.ComputeThreadIdToInputIndexing(1, 0, &mlir_context_);
input_indexing->Simplify();
EXPECT_THAT(input_indexing->ToString(),
MatchIndexingString(kExpectedIndexing));
auto output_indexing =
fusion.ComputeThreadIdToOutputIndexing(1, &mlir_context_);
output_indexing->Simplify();
EXPECT_THAT(output_indexing->ToString(),
MatchIndexingString(kExpectedIndexing));
}
TEST_F(ReductionTest, ThreadIndexingVectorized) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[1024, 8192] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[1024]{0} reduce(f32[1024, 8192] %input, f32[] %c0),
dimensions={1}, to_apply=add
}
ENTRY entry {
%input = f32[1024, 8192] parameter(0)
ROOT %fusion = f32[1024] fusion(%input), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2, s3] -> (
d3,
(d0 + s2 * 512) * 2 + s3
)
domain:
d0 in [0, 512)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1024)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 8)
s3 in [0, 2)
d0 + s2 * 512 in [0, 4096)
)"));
}
TEST_F(ReductionTest, ThreadIndexingBroadcastSideOutput) {
auto module = ParseAndReturnVerifiedModule(R"(
%add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
%fusion {
%p0 = f32[6,6] parameter(0)
%c0 = f32[] constant(0)
%reduce = f32[] reduce(%p0, %c0), dimensions={0,1}, to_apply=%add
%broadcast = f32[6,6] broadcast(%reduce), dimensions={}
ROOT %tuple = (f32[6,6], f32[]) tuple(%broadcast, %reduce)
}
ENTRY main {
%p0 = f32[6,6] parameter(0)
ROOT %fusion = (f32[6,6], f32[]) fusion(%p0), kind=kInput, calls=%fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
EXPECT_THAT(
fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2] -> (
(d0 + s2 * 32) floordiv 6,
(d0 + s2 * 32) mod 6
)
domain:
d0 in [0, 32)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 16)
d0 + s2 * 32 in [0, 36)
)"));
EXPECT_THAT(
fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)->ToString(),
MatchIndexingString(R"(
(d0, d1, d2, d3, d4, d5)[s0, s1, s2] -> ()
domain:
d0 in [0, 32)
d1 in [0, 1)
d2 in [0, 1)
d3 in [0, 1)
d4 in [0, 1)
d5 in [0, 1)
s0 in [0, 1)
s1 in [0, 1)
s2 in [0, 16)
(d0 + s2 * 32) mod 6 in [0, 6)
d0 + s2 * 32 in [0, 36)
)"));
}
TEST_F(ReductionTest, TwoGroups) {
auto module = ParseAndReturnVerifiedModule(R"(
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%p0 = f32[2] parameter(0)
%p1 = f32[2] parameter(1)
%c0 = f32[] constant(-inf)
%r0 = f32[] reduce(%p0, %c0), dimensions={0}, to_apply=add
%c1 = f32[] constant(inf)
%r1 = f32[] reduce(%p1, %c1), dimensions={0}, to_apply=add
ROOT %tuple = (f32[], f32[]) tuple(%r0, %r1)
}
ENTRY entry {
%p0 = f32[2] parameter(0)
%p1 = f32[2] parameter(1)
ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
EXPECT_THAT(fusion.reduction_info().GetGroups().grouped_roots,
ElementsAre(ElementsAre(&analysis.fusion_root(0).instruction()),
ElementsAre(&analysis.fusion_root(1).instruction())));
}
TEST_F(ReductionTest, OneGroup) {
auto module = ParseAndReturnVerifiedModule(R"(
%add {
%p0 = c128[] parameter(0)
%p1 = c128[] parameter(1)
ROOT %add.35 = c128[] add(c128[] %p0, c128[] %p1)
}
%fusion {
%p0 = c128[1,2] parameter(0)
%c0 = c128[] constant((0, 0))
%reduce = c128[] reduce(%p0, %c0), dimensions={0,1}, to_apply=%add
%real = f64[] real(c128[] %reduce)
%imag = f64[] imag(c128[] %reduce)
%negate = f64[] negate(f64[] %imag)
ROOT %tuple.29 = (f64[], f64[]) tuple(f64[] %real, f64[] %negate)
}
ENTRY entry {
%p0 = c128[1,2] parameter(0)
ROOT %fusion = (f64[], f64[]) fusion(%p0), kind=kInput, calls=fusion
})")
.value();
auto* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
ReductionFusion fusion(analysis);
EXPECT_THAT(fusion.reduction_info().GetGroups().grouped_roots, SizeIs(2));
}
}
}
}
#ifndef XLA_SERVICE_GPU_FUSIONS_MLIR_ELEMENTAL_HLO_TO_MLIR_H_
#define XLA_SERVICE_GPU_FUSIONS_MLIR_ELEMENTAL_HLO_TO_MLIR_H_
#include <functional>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
namespace mlir_converter {
using OperandProvider =
std::function<absl::StatusOr<llvm::SmallVector<mlir::Value>>(
const HloInstruction* instr, int index, mlir::ValueRange indices)>;
llvm::SmallVector<mlir::Value> ProvideParameter(
const PartitionedComputation& computation, const HloInstruction* instr,
int operand_index, mlir::ValueRange indices,
const CallTargetProvider& call_target_provider, mlir::func::FuncOp this_fn,
mlir::ImplicitLocOpBuilder& builder,
const PartitionedComputation::Subgraph* caller = nullptr);
llvm::SmallVector<mlir::Value> ProvideParameterRange(
const PartitionedComputation& computation, const HloInstruction* instr,
int start, int num, mlir::ValueRange indices,
const CallTargetProvider& call_target_provider, mlir::func::FuncOp this_fn,
mlir::ImplicitLocOpBuilder& builder);
bool IsHloOpSupported(const HloInstruction* instr,
se::CudaComputeCapability compute_capability);
bool IsHloConversionSupported(const HloComputation* computation,
se::GpuComputeCapability compute_capability);
bool IsHloConversionSupported(const HloFusionAdaptor& fusion,
se::GpuComputeCapability compute_capability);
absl::Status SubgraphToMlirFunction(
const PartitionedComputation& computation,
const PartitionedComputation::Subgraph& subgraph, mlir::func::FuncOp& func,
const CallTargetProvider& call_target_provider);
mlir::Value UnrealizedConversionCast(mlir::Type type, mlir::Value value,
mlir::ImplicitLocOpBuilder& b);
mlir::SmallVector<mlir::Value> UnrealizedConversionCast(
mlir::TypeRange types, mlir::ValueRange values,
mlir::ImplicitLocOpBuilder& b);
mlir::Value ApplyAffineExpr(mlir::AffineExpr expr, mlir::ValueRange dims,
mlir::ValueRange symbols,
mlir::ImplicitLocOpBuilder& b);
llvm::SmallVector<mlir::Value> ApplyIndexing(const IndexingMap& map,
mlir::ValueRange dims,
mlir::ValueRange symbols,
mlir::ImplicitLocOpBuilder& b);
mlir::Value CheckConstraints(const IndexingMap& map, mlir::ValueRange dims,
mlir::ValueRange symbols,
mlir::ImplicitLocOpBuilder& b);
llvm::SmallVector<mlir::Value> EmitLoopNest(
mlir::ImplicitLocOpBuilder& b, mlir::ValueRange dim_values,
mlir::ValueRange iter_args_inits, const IndexingMap& indexing_map,
mlir::function_ref<llvm::SmallVector<mlir::Value>(
mlir::ValueRange iter_args, mlir::ValueRange dim_values,
mlir::ValueRange symbol_values)>
create_body,
bool vectorize = false);
absl::StatusOr<llvm::SmallVector<mlir::Value>> EmitLoopNestWithStatus(
mlir::ImplicitLocOpBuilder& b, mlir::ValueRange dim_values,
mlir::ValueRange iter_args_inits, const IndexingMap& indexing_map,
mlir::function_ref<absl::StatusOr<llvm::SmallVector<mlir::Value>>(
mlir::ValueRange iter_args, mlir::ValueRange dim_values,
mlir::ValueRange symbol_values)>
create_body);
mlir::Value ClampIndex(mlir::Value index, bool is_unsigned, int64_t high,
mlir::ImplicitLocOpBuilder& b);
mlir::SmallVector<mlir::Value, 2> InlineBlock(mlir::OpBuilder& builder,
mlir::Block& src_block,
mlir::ValueRange mapped_args);
}
}
}
#endif
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MathExtras.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Affine/LoopUtils.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/BuiltinAttributeInterfaces.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypeInterfaces.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/IRMapping.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "mlir/Support/LLVM.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/mlir/utils/type_util.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/map_mhlo_to_scalar_op.h"
#include "xla/mlir_hlo/mhlo/utils/type_conversion.h"
#include "xla/primitive_util.h"
#include "xla/service/algorithm_util.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/fusions/mlir/type_util.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/translate/hlo_to_mhlo/hlo_utils.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace mlir_converter {
namespace {
using llvm::SmallVector;
using llvm::SmallVectorImpl;
using mlir::Block;
using mlir::FloatType;
using mlir::ImplicitLocOpBuilder;
using mlir::IRMapping;
using mlir::Location;
using mlir::MLIRContext;
using mlir::OpBuilder;
using mlir::Value;
using mlir::ValueRange;
using mlir::arith::AndIOp;
using mlir::arith::CmpIOp;
using mlir::arith::CmpIPredicate;
using mlir::arith::ConstantIndexOp;
using mlir::arith::ConstantOp;
using mlir::scf::IfOp;
using mlir::scf::YieldOp;
namespace arith = ::mlir::arith;
namespace mhlo = ::mlir::mhlo;
namespace scf = ::mlir::scf;
static auto& kUnsupportedOps =
*new absl::flat_hash_set<HloOpcode>{HloOpcode::kAddDependency,
HloOpcode::kAfterAll,
HloOpcode::kAllGather,
HloOpcode::kAllGatherDone,
HloOpcode::kAllGatherStart,
HloOpcode::kAllReduce,
HloOpcode::kAllReduceDone,
HloOpcode::kAllReduceStart,
HloOpcode::kAllToAll,
HloOpcode::kAsyncDone,
HloOpcode::kAsyncStart,
HloOpcode::kAsyncUpdate,
HloOpcode::kBatchNormGrad,
HloOpcode::kBatchNormInference,
HloOpcode::kBatchNormTraining,
HloOpcode::kCholesky,
HloOpcode::kCollectivePermute,
HloOpcode::kCollectivePermuteDone,
HloOpcode::kCollectivePermuteStart,
HloOpcode::kCopyDone,
HloOpcode::kCopyStart,
HloOpcode::kCustomCall,
HloOpcode::kDomain,
HloOpcode::kDynamicReshape,
HloOpcode::kFft,
HloOpcode::kFusion,
HloOpcode::kGetDimensionSize,
HloOpcode::kOptimizationBarrier,
HloOpcode::kInfeed,
HloOpcode::kOutfeed,
HloOpcode::kPartitionId,
HloOpcode::kRecv,
HloOpcode::kRecvDone,
HloOpcode::kReduceScatter,
HloOpcode::kReplicaId,
HloOpcode::kRng,
HloOpcode::kRngBitGenerator,
HloOpcode::kRngGetAndUpdateState,
HloOpcode::kScatter,
HloOpcode::kSelectAndScatter,
HloOpcode::kSend,
HloOpcode::kSendDone,
HloOpcode::kSetDimensionSize,
HloOpcode::kSort,
HloOpcode::kTopK,
HloOpcode::kTriangularSolve,
HloOpcode::kWhile,
HloOpcode::kConditional,
HloOpcode::kStochasticConvert,
HloOpcode::kCall};
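// The elemental MLIR emitter only handles "canonical" gathers: 1D or 2D
// indices with the index vector in dimension 1, no collapsed slice dimensions,
// an identity start_index_map and offset_dims equal to {1, 2, ...}. Everything
// else is reported as unsupported.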
bool IsUnsupportedGather(const HloInstruction* instr) {
if (instr->opcode() != HloOpcode::kGather) return false;
auto* gather = Cast<HloGatherInstruction>(instr);
const auto& dims = gather->gather_dimension_numbers();
int indices_rank = gather->operand(1)->shape().rank();
if (dims.index_vector_dim() != 1 || !dims.collapsed_slice_dims().empty() ||
indices_rank == 0 || indices_rank > 2) {
return true;
}
for (auto [index, val] : llvm::enumerate(dims.start_index_map())) {
if (index != val) return true;
}
for (auto [index, val] : llvm::enumerate(dims.offset_dims())) {
if (index + 1 != val) return true;
}
return false;
}
absl::StatusOr<Value> GetSingleOperandValue(
const OperandProvider& operand_provider, const HloInstruction* instr,
int operand_index, ValueRange indices) {
TF_ASSIGN_OR_RETURN(auto operand,
operand_provider(instr, operand_index, indices));
TF_RET_CHECK(operand.size() == 1) << "Expected operand to be a single value.";
return operand.front();
}
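// Emits a (variadic) reduce as a loop nest over the reduction domain given by
// the output-to-input indexing map. The init values (the second half of the
// operands) seed the loop-carried accumulators, and the reducer computation is
// called with (accumulators..., input elements...) at every step.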
absl::StatusOr<SmallVector<Value>> EmitReduce(
const HloInstruction* instr, ValueRange indices,
const OperandProvider& operand_provider,
const CallTargetProvider& call_target_provider, ImplicitLocOpBuilder& b) {
auto* mlir_context = b.getContext();
HloInstructionIndexing indexing =
ComputeOutputToInputIndexing(instr, 0, mlir_context);
const auto& indexing_map = *indexing.indexing_maps[0].begin();
SmallVector<Value> init_values;
for (int i = instr->operand_count() / 2; i < instr->operand_count(); ++i) {
TF_ASSIGN_OR_RETURN(init_values.emplace_back(),
GetSingleOperandValue(operand_provider, instr, i, {}));
}
auto body =
[&](ValueRange iter_args, ValueRange dim_values,
ValueRange symbol_values) -> absl::StatusOr<SmallVector<Value>> {
auto indices = ApplyIndexing(indexing_map, dim_values, symbol_values, b);
SmallVector<Value> args{iter_args};
for (int i = 0; i < instr->operand_count() / 2; ++i) {
TF_ASSIGN_OR_RETURN(
args.emplace_back(),
GetSingleOperandValue(operand_provider, instr, i, indices));
}
auto reducer = call_target_provider(
instr->called_computations().front()->root_instruction());
return b.create<mlir::func::CallOp>(reducer, args).getResults();
};
return EmitLoopNestWithStatus(b, indices, init_values, indexing_map, body);
}
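// reduce-window follows the same structure as reduce: the loop domain is the
// window, taken from the output-to-input indexing map (with its symbols
// rescaled first), and the reducer is applied to each element of the window.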
absl::StatusOr<SmallVector<Value>> EmitReduceWindow(
const HloInstruction* instr, mlir::Type result_element_type,
ValueRange indices, const OperandProvider& operand_provider,
const CallTargetProvider& call_target_provider, ImplicitLocOpBuilder& b) {
MLIRContext* mlir_context = b.getContext();
HloInstructionIndexing indexing =
ComputeOutputToInputIndexing(instr, 0, mlir_context);
auto indexing_map = *indexing.indexing_maps[0].begin();
indexing_map.RescaleSymbols();
auto reduce_window = DynCast<HloReduceWindowInstruction>(instr);
CHECK(reduce_window != nullptr);
SmallVector<Value> init_values;
for (auto [index, init_value] :
llvm::enumerate(reduce_window->init_values())) {
TF_ASSIGN_OR_RETURN(
init_values.emplace_back(),
GetSingleOperandValue(operand_provider, instr,
reduce_window->input_count() + index, {}));
}
auto body =
[&](ValueRange iter_args, ValueRange dim_values,
ValueRange symbol_values) -> absl::StatusOr<SmallVector<Value>> {
auto indices = ApplyIndexing(indexing_map, dim_values, symbol_values, b);
SmallVector<Value> args{iter_args};
for (auto [index, input] : llvm::enumerate(reduce_window->inputs())) {
TF_ASSIGN_OR_RETURN(
args.emplace_back(),
GetSingleOperandValue(operand_provider, instr, index, indices));
}
auto reducer = call_target_provider(
instr->called_computations().front()->root_instruction());
return b.create<mlir::func::CallOp>(reducer, args).getResults();
};
return EmitLoopNestWithStatus(b, indices, init_values, indexing_map, body);
}
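// Emits a concatenate as a chain of nested scf.if ops. Operand k covers the
// range [offset_k, offset_k + dim_k) along the concatenation dimension and is
// read at `index - offset_k`; the last operand is emitted in the innermost
// else branch without an additional comparison.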
absl::StatusOr<SmallVector<Value>> EmitConcat(
const HloInstruction* instr, mlir::Type result_element_type,
ValueRange indices, const OperandProvider& operand_provider,
ImplicitLocOpBuilder& b) {
int concat_dim =
Cast<HloConcatenateInstruction>(instr)->concatenate_dimension();
int64_t offset = 0;
IfOp outermost_if = nullptr;
SmallVector<Value> operand_indices = indices;
for (auto [index, operand] : llvm::enumerate(instr->operands())) {
int64_t limit = offset + operand->shape().dimensions(concat_dim);
auto ins = b.create<CmpIOp>(CmpIPredicate::ult, indices[concat_dim],
b.create<ConstantIndexOp>(limit));
auto generate_operand = [&, index = index]() {
operand_indices[concat_dim] = b.create<arith::SubIOp>(
indices[concat_dim], b.create<ConstantIndexOp>(offset));
TF_ASSIGN_OR_RETURN(auto operand,
operand_provider(instr, index, operand_indices));
b.create<YieldOp>(operand);
return absl::OkStatus();
};
if (index < instr->operand_count() - 1) {
auto if_op =
b.create<IfOp>(mlir::TypeRange{result_element_type}, ins, true, true);
if (outermost_if == nullptr) {
outermost_if = if_op;
} else {
b.create<YieldOp>(if_op.getResults());
}
b.setInsertionPointToStart(if_op.getBody(0));
TF_RETURN_IF_ERROR(generate_operand());
b.setInsertionPointToStart(if_op.getBody(1));
} else {
TF_RETURN_IF_ERROR(generate_operand());
}
offset = limit;
}
b.setInsertionPointAfter(outermost_if);
return outermost_if.getResults();
}
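// dynamic-slice: the start indices (operands 1..rank) are clamped to
// [0, input_dim - output_dim] and added to the output index before reading
// the input operand.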
absl::StatusOr<llvm::SmallVector<Value>> EmitDynamicSlice(
const HloInstruction* instr, ValueRange indices,
const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) {
llvm::SmallVector<Value> input_indices(indices);
const auto& input_shape = instr->operand(0)->shape();
for (int i = 0; i < input_shape.rank(); ++i) {
TF_ASSIGN_OR_RETURN(
auto offset, GetSingleOperandValue(operand_provider, instr, i + 1, {}));
offset =
ClampIndex(offset,
primitive_util::IsUnsignedIntegralType(
instr->operand(i + 1)->shape().element_type()),
input_shape.dimensions(i) - instr->shape().dimensions(i), b);
input_indices[i] = b.create<arith::AddIOp>(input_indices[i], offset);
}
return operand_provider(instr, 0, input_indices);
}
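// dynamic-update-slice: clamps the start indices, checks whether the output
// index falls inside the update window, and uses an scf.if to select between
// the update element and the original operand element.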
absl::StatusOr<llvm::SmallVector<Value>> EmitDynamicUpdateSlice(
const HloInstruction* instr, mlir::Type result_element_type,
ValueRange indices, const OperandProvider& operand_provider,
ImplicitLocOpBuilder& b) {
mlir::Value is_in_bounds =
b.create<ConstantOp>(b.getIntegerAttr(b.getI1Type(), 1));
mlir::SmallVector<Value> update_indices;
const auto& updates_shape = instr->operand(1)->shape();
for (int i = 0; i < instr->shape().rank(); ++i) {
int64_t update_size = updates_shape.dimensions(i);
TF_ASSIGN_OR_RETURN(
auto start_index,
GetSingleOperandValue(operand_provider, instr, i + 2, {}));
start_index = ClampIndex(start_index,
primitive_util::IsUnsignedIntegralType(
instr->operand(i + 2)->shape().element_type()),
instr->shape().dimensions(i) - update_size, b);
auto end_index = b.create<arith::AddIOp>(
start_index, b.create<ConstantOp>(b.getIndexAttr(update_size)));
is_in_bounds = b.create<AndIOp>(
is_in_bounds,
b.create<CmpIOp>(CmpIPredicate::sge, indices[i], start_index));
is_in_bounds = b.create<AndIOp>(
is_in_bounds,
b.create<CmpIOp>(CmpIPredicate::slt, indices[i], end_index));
update_indices.push_back(b.create<arith::SubIOp>(indices[i], start_index));
}
auto if_op = b.create<IfOp>(mlir::TypeRange{result_element_type},
is_in_bounds, true, true);
b.setInsertionPointToStart(if_op.getBody(0));
TF_ASSIGN_OR_RETURN(
auto updated_value,
GetSingleOperandValue(operand_provider, instr, 1, update_indices));
b.create<YieldOp>(updated_value);
b.setInsertionPointToStart(if_op.getBody(1));
TF_ASSIGN_OR_RETURN(
auto original_value,
GetSingleOperandValue(operand_provider, instr, 0, indices));
b.create<YieldOp>(original_value);
b.setInsertionPointAfter(if_op);
return if_op.getResults();
}
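// Emits the canonical gather accepted by IsUnsupportedGather: output dimension
// 0 selects the row of the indices operand, the gathered start indices are
// clamped so the slice stays in bounds, and the remaining output dimensions
// are added as offsets into the operand.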
absl::StatusOr<llvm::SmallVector<Value>> EmitGather(
const HloInstruction* instr, ValueRange indices,
const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) {
auto row = indices[0];
auto zero = b.create<ConstantIndexOp>(0);
SmallVector<Value> operand_indices(instr->operand(0)->shape().rank(), zero);
const auto& indices_shape = instr->operand(1)->shape();
int num_indices = indices_shape.rank() == 1 ? 1 : indices_shape.dimensions(1);
for (int i = 0; i < num_indices; ++i) {
auto i_val = i == 0 ? zero : b.create<ConstantIndexOp>(i);
int64_t slice_size = instr->gather_slice_sizes()[i];
int64_t input_size = instr->operand(0)->shape().dimensions()[i];
TF_ASSIGN_OR_RETURN(
auto input_index,
operand_provider(instr, 1,
indices_shape.rank() == 1 ? ValueRange{row}
: ValueRange{row, i_val}));
TF_RET_CHECK(input_index.size() == 1)
<< "Expected operand to be a single value.";
operand_indices[i] =
ClampIndex(input_index.front(),
primitive_util::IsUnsignedIntegralType(
instr->operand(1)->shape().element_type()),
input_size - slice_size, b);
}
for (int i = 0; i < operand_indices.size(); ++i) {
operand_indices[i] =
b.createOrFold<arith::AddIOp>(operand_indices[i], indices[i + 1]);
}
return operand_provider(instr, 0, operand_indices);
}
SmallVector<SmallVector<Value>> GetInputIndices(
const HloInstructionIndexing& indexing, ValueRange output_indices,
ImplicitLocOpBuilder& b) {
SmallVector<SmallVector<Value>> indices;
for (auto& maps : indexing.indexing_maps) {
CHECK_EQ(maps.size(), 1);
CHECK(!maps.begin()->IsUndefined());
indices.push_back(ApplyIndexing(*maps.begin(), output_indices, {}, b));
}
return indices;
}
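// pad: the constraints of the output-to-input indexing map hold exactly for
// non-padding elements, so CheckConstraints yields the in-bounds predicate
// that selects between reading the input and the padding value.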
absl::StatusOr<SmallVector<Value>> EmitPad(
const HloInstruction* instr, mlir::Type result_element_type,
ValueRange indices, const OperandProvider& operand_provider,
ImplicitLocOpBuilder& b) {
auto indexing = ComputeOutputToInputIndexing(instr, 0, b.getContext());
const auto& indexing_map = *indexing.indexing_maps[0].begin();
mlir::Value is_in_bounds = CheckConstraints(indexing_map, indices, {}, b);
auto if_op = b.create<IfOp>(mlir::TypeRange{result_element_type},
is_in_bounds, true, true);
b.setInsertionPointToStart(if_op.getBody(0));
TF_ASSIGN_OR_RETURN(auto input_value,
GetSingleOperandValue(
operand_provider, instr, 0,
                          GetInputIndices(indexing, indices, b)[0]));
b.create<YieldOp>(input_value);
b.setInsertionPointToStart(if_op.getBody(1));
TF_ASSIGN_OR_RETURN(auto padding_value,
GetSingleOperandValue(operand_provider, instr, 1, {}));
b.create<YieldOp>(padding_value);
b.setInsertionPointAfter(if_op);
return if_op.getResults();
}
absl::StatusOr<Value> EmitFloatCast(Value value, mlir::Type target_type,
ImplicitLocOpBuilder& b) {
if (value.getType().getIntOrFloatBitWidth() <
target_type.getIntOrFloatBitWidth()) {
return b.create<arith::ExtFOp>(target_type, value);
}
if (value.getType().getIntOrFloatBitWidth() >
target_type.getIntOrFloatBitWidth()) {
return b.create<arith::TruncFOp>(target_type, value);
}
return value;
}
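// Emits one multiply-accumulate step. Floating-point types extend BF16
// operands to F32, cast the product to the accumulator type and add; PRED
// uses accumulator OR (lhs AND rhs); integers use plain mul/add.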
absl::StatusOr<Value> EmitMulAdd(Value lhs, Value rhs, Value accumulator,
mlir::Type result_element_type,
mlir::Type accumulator_type,
ImplicitLocOpBuilder& b) {
if (mlir::isa<FloatType>(result_element_type)) {
if (result_element_type.isBF16()) {
lhs = b.create<arith::ExtFOp>(b.getF32Type(), lhs);
rhs = b.create<arith::ExtFOp>(b.getF32Type(), rhs);
}
TF_ASSIGN_OR_RETURN(
Value casted,
EmitFloatCast(b.create<arith::MulFOp>(lhs, rhs), accumulator_type, b));
return b.create<arith::AddFOp>(accumulator, casted);
}
if (result_element_type.isInteger(1)) {
return b.create<arith::OrIOp>(accumulator,
b.create<arith::AndIOp>(lhs, rhs));
}
return b.create<arith::AddIOp>(accumulator,
b.create<arith::MulIOp>(lhs, rhs));
}
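// Shared accumulation loop used for dot: the contracted dimensions are the
// symbols of the lhs indexing map (the rhs map uses a prefix of them), and
// each iteration multiplies the two operand elements into the accumulator.
// BF16 dots accumulate in F32 and truncate back to BF16 at the end.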
absl::StatusOr<SmallVector<Value>> EmitDotLoop(
const HloInstruction* instr, mlir::Type result_element_type,
ValueRange indices, const OperandProvider& operand_provider,
ImplicitLocOpBuilder& b) {
HloInstructionIndexing indexing =
ComputeOutputToInputIndexing(instr, 0, b.getContext());
const IndexingMap& lhs_indexing_map = *indexing.indexing_maps.at(0).begin();
const IndexingMap& rhs_indexing_map = *indexing.indexing_maps.at(1).begin();
const mlir::Type accumulator_type =
result_element_type.isBF16() ? b.getF32Type() : result_element_type;
Value accum_init_value =
b.create<ConstantOp>(b.getZeroAttr(accumulator_type)).getResult();
size_t rhs_symbol_count = rhs_indexing_map.GetSymbolCount();
auto body =
[&](ValueRange iter_args, ValueRange dim_values,
ValueRange symbol_values) -> absl::StatusOr<SmallVector<Value>> {
auto lhs_indices =
ApplyIndexing(lhs_indexing_map, dim_values, symbol_values, b);
auto rhs_indices =
ApplyIndexing(rhs_indexing_map, dim_values,
symbol_values.take_front(rhs_symbol_count), b);
TF_ASSIGN_OR_RETURN(Value lhs_value, GetSingleOperandValue(
operand_provider, instr,
0, lhs_indices));
TF_ASSIGN_OR_RETURN(Value rhs_value, GetSingleOperandValue(
operand_provider, instr,
1, rhs_indices));
Value accum = iter_args[0];
TF_ASSIGN_OR_RETURN(
accum, EmitMulAdd(lhs_value, rhs_value, accum, result_element_type,
accumulator_type, b));
return {{accum}};
};
TF_ASSIGN_OR_RETURN(SmallVector<Value> results,
EmitLoopNestWithStatus(b, indices, {accum_init_value},
lhs_indexing_map, body));
if (result_element_type.isBF16()) {
results[0] = b.create<arith::TruncFOp>(b.getBF16Type(), results[0]);
}
return results;
}
absl::StatusOr<SmallVector<Value>> EmitDot(
const HloInstruction* instr, mlir::Type result_element_type,
ValueRange indices, const OperandProvider& operand_provider,
ImplicitLocOpBuilder& b) {
VLOG(1) << "EmitDot: " << instr->ToString() << " "
<< llvm_ir::DumpToString(result_element_type);
if (!algorithm_util::IsSupportedByElementalIrEmitter(
instr->precision_config().algorithm())) {
return absl::InvalidArgumentError(
absl::StrFormat("Algorithm not supported by the ElementalIrEmitter: %s",
PrecisionConfig::Algorithm_Name(
instr->precision_config().algorithm())));
}
auto* dot = DynCast<HloDotInstruction>(instr);
TF_RET_CHECK(dot != nullptr);
if (dot->sparse_operands()) {
return absl::UnimplementedError(
"Sparse dot is supported by Triton emitter only.");
}
return EmitDotLoop(instr, result_element_type, indices, operand_provider, b);
}
absl::StatusOr<SmallVector<Value>> EmitConvolution(
const HloInstruction* instr, mlir::Type result_element_type,
ValueRange indices, const OperandProvider& operand_provider,
ImplicitLocOpBuilder& b) {
  VLOG(1) << "EmitConvolution: " << instr->ToString() <<
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include <functional>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/AsmParser/AsmParser.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/launch_dim.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace mlir_converter {
namespace {
class ElementalHloToMlirTest : public HloTestBase {
public:
ElementalHloToMlirTest() {
context_.loadDialect<mlir::tensor::TensorDialect, mlir::func::FuncDialect,
mlir::affine::AffineDialect, mlir::arith::ArithDialect,
mlir::math::MathDialect, mlir::scf::SCFDialect,
mlir::mhlo::MhloDialect, mlir::LLVM::LLVMDialect,
mlir::DLTIDialect, xla::gpu::XlaGpuDialect>();
}
absl::Status Run(const std::string& hlo, const std::string& filecheck_str,
std::function<EpilogueSpecification(HloComputation* entry)>
epilogue_spec_fn = nullptr) {
auto hlo_module = ParseAndReturnVerifiedModule(hlo).value();
mlir::ImplicitLocOpBuilder builder(mlir::UnknownLoc::get(&context_),
&context_);
auto module = llvm_ir::CreateMlirModuleOp(builder.getLoc());
(*module)->setAttr(
mlir::DLTIDialect::kDataLayoutAttrName,
mlir::parseAttribute("#dlti.dl_spec<#dlti.dl_entry<index,32:i32>>",
builder.getContext()));
builder.setInsertionPointToStart(module->getBody());
auto* entry_computation = hlo_module->entry_computation();
std::vector<EpilogueSpecification> epilogue_spec;
if (epilogue_spec_fn) {
epilogue_spec.push_back(epilogue_spec_fn(entry_computation));
}
PartitionedComputations partitioned_computations(entry_computation,
&context_, epilogue_spec);
auto fns = partitioned_computations.DeclareFunctions(module.get());
auto entry_func = fns[&partitioned_computations
.FindPartitionedComputation(entry_computation)
.GetRootSubgraph()];
auto& entry_pc =
partitioned_computations.FindPartitionedComputation(entry_computation);
auto call_targets = partitioned_computations.CreateCallTargetProvider(fns);
TF_RETURN_IF_ERROR(SubgraphToMlirFunction(
entry_pc, entry_pc.GetRootSubgraph(), entry_func, call_targets));
if (!partitioned_computations.epilogues().empty()) {
const auto& epilogue = partitioned_computations.epilogues().front();
TF_RETURN_IF_ERROR(SubgraphToMlirFunction(entry_pc, epilogue,
fns[&epilogue], call_targets));
}
mlir::PassManager pm(&context_);
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::createCSEPass());
TF_RET_CHECK(pm.run(module.get()).succeeded());
std::string out;
llvm::raw_string_ostream stream(out);
stream << module.get();
TF_ASSIGN_OR_RETURN(auto filecheck_result,
RunFileCheck(out, filecheck_str));
TF_RET_CHECK(filecheck_result);
return absl::OkStatus();
}
mlir::MLIRContext context_;
};
TEST_F(ElementalHloToMlirTest, Reduce) {
TF_EXPECT_OK(Run(R"(
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT sum = f32[] add(p0, p1)
}
ENTRY main {
p0 = f32[10,20,30,40] parameter(0)
p1 = f32[] parameter(1)
ROOT r = f32[10,30] reduce(p0, p1), dimensions={1,3},
to_apply=add
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, ReduceUnsigned) {
TF_EXPECT_OK(Run(R"(
add {
p0 = u32[] parameter(0)
p1 = u32[] parameter(1)
ROOT sum = u32[] add(p0, p1)
}
ENTRY main {
p0 = u32[10,20,30,40] parameter(0)
p1 = u32[] parameter(1)
ROOT r = u32[10,30] reduce(p0, p1), dimensions={1,3},
to_apply=add
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, ReduceWindow) {
TF_EXPECT_OK(Run(R"(
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT sum = f32[] add(p0, p1)
}
ENTRY main {
p0 = f32[42,12,8] parameter(0)
p1 = f32[] parameter(1)
ROOT r = f32[42,3,8] reduce-window(p0, p1), window={
size=1x1x7
stride=1x4x1
pad=0_0x0_0x3_3
},
to_apply=add
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, ReduceWindowWithRescaling) {
TF_EXPECT_OK(Run(R"(
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT sum = f32[] add(p0, p1)
}
ENTRY main {
p0 = f32[42,12,8] parameter(0)
p1 = f32[] parameter(1)
ROOT r = f32[19,12,8] reduce-window(p0, p1), window={
size=8x1x1
stride=4x1x1
pad=0_0x0_0x0_0
lhs_dilate=2x1x1
},
to_apply=add
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, Concatenate) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = f32[10,20,30] parameter(0)
p1 = f32[10,15,30] parameter(1)
p2 = f32[10,3,30] parameter(2)
ROOT r = f32[10,38,30] concatenate(p0, p1, p2), dimensions={1}
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, ConcatenateUnsigned) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = u32[10,20,30] parameter(0)
p1 = u32[10,15,30] parameter(1)
ROOT r = u32[10,35,30] concatenate(p0, p1), dimensions={1}
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, Gather) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
operand = f32[33,34] parameter(0)
indices = s32[1806,1] parameter(1)
ROOT r = f32[1806,7,8] gather(operand, indices), offset_dims={1,2},
collapsed_slice_dims={}, start_index_map={0},
index_vector_dim=1, slice_sizes={7,8}
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, GatherWithImplicitVectorDim) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
operand = f32[33,34] parameter(0)
indices = s32[1806] parameter(1)
ROOT r = f32[1806,7,8] gather(operand, indices), offset_dims={1,2},
collapsed_slice_dims={}, start_index_map={0},
index_vector_dim=1, slice_sizes={7,8}
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, Pad) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = f32[4, 4] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[12, 16] pad(p0, p1), padding=1_4_1x4_8_0
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, PadUnsigned) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = u32[4, 4] parameter(0)
p1 = u32[] parameter(1)
ROOT pad = u32[12, 16] pad(p0, p1), padding=1_4_1x4_8_0
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, DotWithF32Type) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = f32[3, 4] parameter(0)
p1 = f32[4, 5] parameter(1)
ROOT dot = f32[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, DotWithBF16Type) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = bf16[3, 4] parameter(0)
p1 = bf16[4, 5] parameter(1)
ROOT dot = bf16[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, DotWithS32Type) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = s32[3, 4] parameter(0)
p1 = s32[4, 5] parameter(1)
ROOT dot = s32[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, DotWithU32Type) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = u32[3, 4] parameter(0)
p1 = u32[4, 5] parameter(1)
ROOT dot = u32[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})",
R"(
)"));
}
TEST_F(ElementalHloToMlirTest, DotWithPredType) {
TF_EXPECT_OK(Run(R"(
ENTRY main {
p0 = pred[3, 4] parameter(0)
p1 = pred[4, 5] parameter(1)
ROOT dot = pred[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})",
R"( | 2,137 |
#ifndef XLA_SERVICE_GPU_FUSIONS_MLIR_COMPUTATION_PARTITIONER_H_
#define XLA_SERVICE_GPU_FUSIONS_MLIR_COMPUTATION_PARTITIONER_H_
#include <functional>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
namespace mlir_converter {
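// Describes the epilogue of a fusion: the hero instructions it follows, the
// roots it produces, the index ranges of the thread/symbol domain, and one
// indexing map per root from that domain into the root's output.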
struct EpilogueSpecification {
static EpilogueSpecification FromIdentityIndexing(
const HloInstruction* hero, const HloInstruction* root,
mlir::MLIRContext* mlir_context);
static EpilogueSpecification FromOutputIndexing(
const HloFusionAnalysis& analysis,
const std::vector<const HloInstruction*>& heroes,
const std::vector<const HloInstruction*>& roots,
const KernelFusionInterface& fusion, mlir::MLIRContext* mlir_context);
std::vector<const HloInstruction*> heroes;
std::vector<const HloInstruction*> roots;
std::vector<int64_t> index_ranges;
std::vector<IndexingMap> root_indexing;
};
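// Partitions an HLO computation into subgraphs that can each be emitted as a
// single MLIR function. An instruction is merged into its users' subgraph when
// all users are elementwise and agree on subgraph and indexing; roots,
// instructions with several users, and instructions that would otherwise be
// evaluated more than once start a new subgraph.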
class PartitionedComputation {
public:
explicit PartitionedComputation(const HloComputation* computation,
mlir::MLIRContext* mlir_context,
std::function<bool(const HloInstruction*)>
is_subgraph_root = HloPredicateFalse);
struct Subgraph {
std::string name;
absl::flat_hash_set<const HloInstruction*> instructions;
std::vector<const HloInstruction*> roots;
std::vector<int64_t> index_ranges;
std::vector<IndexingMap> root_indexing;
absl::flat_hash_map<const HloInstruction*, int> injected_value_starts;
int num_injected_values = 0;
std::string ToString(int indentation = 0) const;
static Subgraph ForEpilogue(const EpilogueSpecification& epilogue);
};
absl::Span<const Subgraph> subgraphs() const { return subgraphs_; }
const HloComputation& computation() const { return *computation_; }
const Subgraph& GetRootSubgraph() const {
return FindSubgraph(computation_->root_instruction());
}
const Subgraph& FindSubgraph(const HloInstruction* instr) const {
return *instructions_to_subgraphs_.at(instr);
}
std::string ToString(int indentation = 0) const;
private:
const HloComputation* computation_;
std::vector<Subgraph> subgraphs_;
absl::flat_hash_map<const HloInstruction*, const Subgraph*>
instructions_to_subgraphs_;
};
using CallTargetProvider =
std::function<mlir::func::FuncOp(const HloInstruction* instr)>;
class PartitionedComputations {
public:
explicit PartitionedComputations(
const HloComputation* fusion, mlir::MLIRContext* mlir_context,
std::vector<EpilogueSpecification> epilogues = {});
const PartitionedComputation& FindPartitionedComputation(
const HloComputation* computation) const {
return *computation_to_partitioning_.at(computation);
}
const PartitionedComputation::Subgraph& FindSubgraph(
const HloInstruction* instr) const;
absl::Span<const PartitionedComputation> partitioned_computations() const {
return partitioned_computations_;
}
const std::vector<PartitionedComputation::Subgraph>& epilogues() const {
return epilogues_;
}
const HloComputation* fusion() const { return fusion_; }
CallTargetProvider CreateCallTargetProvider(
const absl::flat_hash_map<const PartitionedComputation::Subgraph*,
mlir::func::FuncOp>& subgraph_to_func) const;
absl::flat_hash_map<const PartitionedComputation::Subgraph*,
mlir::func::FuncOp>
DeclareFunctions(mlir::ModuleOp module) const;
std::string ToString() const;
private:
std::vector<PartitionedComputation> partitioned_computations_;
absl::flat_hash_map<const HloComputation*, const PartitionedComputation*>
computation_to_partitioning_;
const HloComputation* fusion_;
std::vector<PartitionedComputation::Subgraph> epilogues_;
};
mlir::func::FuncOp CreateSubgraphMlirFunction(
const PartitionedComputation::Subgraph& subgraph,
mlir::ImplicitLocOpBuilder& b);
}
}
}
#endif
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include <cstdint>
#include <functional>
#include <iterator>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/node_hash_map.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Types.h"
#include "mlir/IR/Value.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusions/mlir/type_util.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
namespace mlir_converter {
namespace {
int Arity(const Shape& shape) {
return shape.IsTuple() ? shape.tuple_shapes_size() : 1;
}
const Shape& TupleShape(const Shape& shape, int index) {
return shape.IsTuple() ? shape.tuple_shapes(index) : shape;
}
}
EpilogueSpecification EpilogueSpecification::FromIdentityIndexing(
const HloInstruction* hero, const HloInstruction* root,
mlir::MLIRContext* mlir_context) {
EpilogueSpecification result;
absl::c_copy(root->shape().dimensions(),
std::back_inserter(result.index_ranges));
result.roots.push_back(root);
result.root_indexing.push_back(
CreateIdentityMap(root->shape(), mlir_context));
result.heroes.push_back(hero);
return result;
}
EpilogueSpecification EpilogueSpecification::FromOutputIndexing(
const HloFusionAnalysis& analysis,
const std::vector<const HloInstruction*>& heroes,
const std::vector<const HloInstruction*>& roots,
const KernelFusionInterface& fusion, mlir::MLIRContext* mlir_context) {
EpilogueSpecification result;
absl::flat_hash_map<const HloInstruction*, const HloInstruction*>
root_to_hero;
for (auto [root, hero] :
llvm::zip(analysis.fusion_roots(), analysis.fusion_heroes())) {
root_to_hero[&root.instruction()] = &hero.instruction();
}
absl::flat_hash_map<const HloInstruction*, int> root_to_index;
for (auto [index, root] : llvm::enumerate(analysis.fusion_roots())) {
root_to_index[&root.instruction()] = root_to_index.size();
}
result.root_indexing.reserve(roots.size());
for (auto* root : roots) {
auto indexing = fusion.ComputeThreadIdToOutputIndexing(root_to_index[root],
mlir_context);
if (result.index_ranges.empty()) {
result.index_ranges.reserve(indexing->GetDimensionCount() +
indexing->GetSymbolCount());
for (const auto& dim : indexing->GetDimensionBounds()) {
result.index_ranges.push_back(dim.upper + 1);
}
for (const auto& sym : indexing->GetSymbolBounds()) {
result.index_ranges.push_back(sym.upper + 1);
}
}
auto* hero = root_to_hero[root];
auto epilogue_indexing = ComputeEpilogueInputToOutputIndexing(
{*hero, &analysis.fusion()}, {*root, &analysis.fusion()}, mlir_context);
result.root_indexing.push_back(
ComposeIndexingMaps(*indexing, epilogue_indexing));
}
result.heroes = heroes;
result.roots = roots;
return result;
}
std::string PartitionedComputation::Subgraph::ToString(int indentation) const {
std::string indent(indentation, ' ');
std::ostringstream ss;
ss << indent << "SUBGRAPH " << name << " {\n";
for (auto* instr :
(*instructions.begin())->parent()->MakeInstructionPostOrder()) {
if (!instructions.contains(instr)) continue;
ss << indent << " ";
if (absl::c_linear_search(roots, instr)) {
ss << "ROOT ";
}
ss << instr->ToString() << "\n";
}
ss << indent << "}";
return ss.str();
}
std::string PartitionedComputation::ToString(int indentation) const {
std::ostringstream ss;
ss << "PartitionedComputation " << computation_->name() << ":";
for (const Subgraph& subgraph : subgraphs_) {
ss << "\n" << subgraph.ToString(indentation);
}
return ss.str();
}
std::string PartitionedComputations::ToString() const {
std::ostringstream ss;
ss << "PartitionedComputations:";
for (const auto& partitioned_computation : partitioned_computations_) {
ss << "\n" << partitioned_computation.ToString();
}
return ss.str();
}
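// Returns true if `f` maps every element of `c` to the same value. Trivially
// true for empty and single-element ranges.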
template <typename C, typename F>
bool AllIdentical(const C& c, F&& f) {
auto begin = std::begin(c);
auto end = std::end(c);
if (begin == end || begin + 1 == end) {
return true;
}
auto v = f(*begin);
++begin;
for (; begin != end; ++begin) {
if (f(*begin) != v) {
return false;
}
}
return true;
}
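// Returns true if `instr` would be evaluated more than once if it were
// inlined into its users, e.g. as the indices operand of a gather with index
// vectors longer than one, or as a repeated operand of a concatenate. Such
// instructions get their own subgraph so they are not recomputed per use.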
bool IsEvaluatedMoreThanOnce(const HloInstruction* instr) {
return absl::c_any_of(instr->users(), [&](const HloInstruction* user) {
if (user->opcode() == HloOpcode::kGather &&
absl::c_linear_search(user->OperandIndices(instr), 1) &&
instr->shape().rank() >= 2 && instr->shape().dimensions(1) > 1) {
return true;
}
if (user->opcode() == HloOpcode::kConcatenate &&
user->OperandIndices(instr).size() > 1) {
return true;
}
return false;
});
}
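// Partitions `computation` into subgraphs ("functions"). Instructions are
// visited in reverse post-order: an instruction is merged into its users'
// subgraph when all users share the same subgraph and indexing and are
// elementwise; subgraph roots, multi-user values, and values that would be
// evaluated more than once start a new subgraph; a single-use value feeding a
// non-elementwise user stays in the user's subgraph but gets a fresh
// indexing id.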
PartitionedComputation::PartitionedComputation(
const HloComputation* computation, mlir::MLIRContext* mlir_context,
std::function<bool(const HloInstruction*)> is_subgraph_root)
: computation_(computation) {
CHECK_NE(computation, nullptr);
int next_function_id = 0;
int next_indexing_id = 0;
auto pre_order = computation->MakeInstructionPostOrder();
absl::c_reverse(pre_order);
absl::flat_hash_map<const HloInstruction*, int> instr_indices;
for (auto [i, instr] : llvm::enumerate(pre_order)) {
instr_indices[instr] = i;
}
std::vector<std::pair<int, int>> ids(pre_order.size());
auto allocate_new_function = [&](const HloInstruction* instr) {
ids[instr_indices[instr]] = {next_function_id++, next_indexing_id++};
};
for (auto [instr_index, instr] : llvm::enumerate(pre_order)) {
bool is_root = instr->user_count() == 0 || is_subgraph_root(instr);
bool users_have_consistent_indexing = AllIdentical(
instr->users(),
[&](const HloInstruction* user) { return ids[instr_indices[user]]; });
bool all_users_elementwise =
absl::c_all_of(instr->users(), [&](const HloInstruction* user) {
return HloInstruction::IsOpElementwise(user->opcode());
});
if (!is_root && users_have_consistent_indexing && all_users_elementwise) {
ids[instr_index] = ids[instr_indices[instr->users().front()]];
} else if (is_root || instr->user_count() > 1 ||
IsEvaluatedMoreThanOnce(instr)) {
allocate_new_function(instr);
} else {
ids[instr_index] = ids[instr_indices[instr->users().front()]];
ids[instr_index].second = next_indexing_id++;
}
}
std::vector<std::vector<const HloInstruction*>> functions(next_function_id);
for (auto [id, instr] : llvm::reverse(llvm::zip(ids, pre_order))) {
functions[id.first].push_back(instr);
}
subgraphs_.reserve(functions.size());
for (auto&& [function_id, instructions] : llvm::enumerate(functions)) {
auto is_different_function = [&, function_id = function_id](auto* user) {
return ids[instr_indices[user]].first != function_id;
};
std::vector<const HloInstruction*> roots;
std::vector<IndexingMap> root_indexing;
const xla::Shape* first_root_shape = nullptr;
for (auto* instruction : instructions) {
if (instruction->user_count() == 0 ||
absl::c_any_of(instruction->users(), is_different_function)) {
roots.push_back(instruction);
if (first_root_shape) {
CHECK(!instruction->shape().IsTuple())
<< "Internal tuples are not supported";
if (ShapeUtil::EqualIgnoringElementType(*first_root_shape,
instruction->shape())) {
root_indexing.push_back(root_indexing.front());
} else {
root_indexing.push_back(GetBitcastMap(
*first_root_shape, instruction->shape(), mlir_context));
}
} else {
first_root_shape = &instruction->shape();
while (first_root_shape->IsTuple()) {
first_root_shape = &first_root_shape->tuple_shapes()[0];
}
root_indexing.push_back(
CreateIdentityMap(*first_root_shape, mlir_context));
}
}
}
    CHECK(!roots.empty()) << "No roots found";
    std::vector<int64_t> ranges{first_root_shape->dimensions().begin(),
                                first_root_shape->dimensions().end()};
std::string name = llvm_ir::SanitizeFunctionName(absl::StrCat(
roots.front()->parent()->name(), "_",
absl::StrJoin(roots, "_", [](std::string* out, const auto* root) {
absl::StrAppend(out, root->name());
})));
subgraphs_.push_back(
Subgraph{.name = std::move(name),
.instructions = {instructions.begin(), instructions.end()},
.roots = std::move(roots),
.index_ranges = std::move(ranges),
.root_indexing = std::move(root_indexing)});
}
for (const auto& subgraph : subgraphs_) {
for (const auto* instruction : subgraph.instructions) {
instructions_to_subgraphs_[instruction] = &subgraph;
}
}
}
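// Builds the epilogue subgraph: the hero values are injected as extra
// function arguments (they are computed by the main kernel), and only the
// instructions between the heroes and the computation root are included.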
PartitionedComputation::Subgraph PartitionedComputation::Subgraph::ForEpilogue(
const EpilogueSpecification& epilogue) {
if (epilogue.roots.empty()) return {};
const auto* computation = epilogue.heroes.front()->parent();
PartitionedComputation::Subgraph subgraph;
subgraph.name = llvm_ir::SanitizeFunctionName(
absl::StrCat(computation->name(), "__epilogue__",
absl::StrJoin(epilogue.roots, "_",
[](std::string* out, const auto* root) {
absl::StrAppend(out, root->name());
})));
subgraph.roots = epilogue.roots;
int index = 0;
for (auto* hero : epilogue.heroes) {
if (subgraph.injected_value_starts.insert({hero, index}).second) {
index += Arity(hero->shape());
}
}
subgraph.num_injected_values = index;
absl::flat_hash_set<const HloInstruction*> seen;
std::function<void(const HloInstruction*)> visit;
visit = [&](const HloInstruction* instruction) {
if (subgraph.injected_value_starts.contains(instruction)) return;
if (!seen.insert(instruction).second) return;
for (auto [index, operand] : llvm::enumerate(instruction->operands())) {
visit(operand);
}
};
visit(computation->root_instruction());
subgraph.instructions = std::move(seen);
subgraph.index_ranges = epilogue.index_ranges;
subgraph.root_indexing = epilogue.root_indexing;
return subgraph;
}
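// Partitions the fusion computation and every computation it transitively
// calls. Epilogue heroes and their operands are forced to become subgraph
// roots so that the epilogues can consume their values.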
PartitionedComputations::PartitionedComputations(
const HloComputation* fusion, mlir::MLIRContext* mlir_context,
std::vector<EpilogueSpecification> epilogues)
: fusion_(fusion) {
absl::flat_hash_set<const HloComputation*> seen;
std::vector<const HloComputation*> computations;
std::function<void(const HloComputation*)> visit;
visit = [&](const HloComputation* computation) {
if (!seen.insert(computation).second) return;
computations.push_back(computation);
for (auto* instr : computation->instructions()) {
absl::c_for_each(instr->called_computations(), visit);
}
};
visit(fusion);
absl::flat_hash_set<const HloInstruction*> roots;
epilogues_.reserve(epilogues.size());
for (const auto& epilogue : epilogues) {
epilogues_.push_back(
PartitionedComputation::Subgraph::ForEpilogue(epilogue));
roots.insert(epilogue.heroes.begin(), epilogue.heroes.end());
for (auto* instruction : epilogue.heroes) {
roots.insert(instruction->operands().begin(),
instruction->operands().end());
}
}
auto is_root = [&](const HloInstruction* instruction) {
return roots.contains(instruction);
};
partitioned_computations_.reserve(computations.size());
for (auto* computation : computations) {
computation_to_partitioning_[computation] =
&partitioned_computations_.emplace_back(
PartitionedComputation{computation, mlir_context, is_root});
}
}
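// Declares one private func.func (with internal LLVM linkage) per non-empty
// subgraph, including the epilogue subgraphs, and returns the mapping from
// subgraph to function.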
absl::flat_hash_map<const PartitionedComputation::Subgraph*, mlir::func::FuncOp>
PartitionedComputations::DeclareFunctions(mlir::ModuleOp module) const {
absl::flat_hash_map<const PartitionedComputation::Subgraph*,
mlir::func::FuncOp>
mapping;
mlir::ImplicitLocOpBuilder builder(module.getLoc(), module->getContext());
builder.setInsertionPointToEnd(module.getBody());
auto create_funcs =
[&](absl::Span<const PartitionedComputation::Subgraph> subgraphs) {
for (const auto& subgraph : subgraphs) {
if (subgraph.roots.empty()) continue;
auto func_op = CreateSubgraphMlirFunction(subgraph, builder);
func_op->setAttr("llvm.linkage", mlir::LLVM::LinkageAttr::get(
module->getContext(),
mlir::LLVM::Linkage::Internal));
mapping[&subgraph] = func_op;
}
};
for (const auto& computation : partitioned_computations_) {
create_funcs(computation.subgraphs());
}
create_funcs(epilogues_);
return mapping;
}
const PartitionedComputation::Subgraph& PartitionedComputations::FindSubgraph(
const HloInstruction* instr) const {
return FindPartitionedComputation(instr->parent()).FindSubgraph(instr);
}
CallTargetProvider PartitionedComputations::CreateCallTargetProvider(
const absl::flat_hash_map<const PartitionedComputation::Subgraph*,
mlir::func::FuncOp>& subgraph_to_func) const {
return [&, this](const HloInstruction* instr) {
const auto& subgraph = FindSubgraph(instr);
CHECK(subgraph_to_func.contains(&subgraph))
<< "No function found for subgraph with instruction "
<< instr->ToString();
return subgraph_to_func.at(&subgraph);
};
}
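// Builds the MLIR function signature for a subgraph. For fusion and entry
// computations the parameters are the tensor-typed computation parameters,
// one index per entry of `index_ranges` (annotated with its xla.range), and
// one scalar per injected value; for nested computations the parameters are
// scalar element types. The results are the element types of the roots.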
mlir::func::FuncOp CreateSubgraphMlirFunction(
const PartitionedComputation::Subgraph& subgraph,
mlir::ImplicitLocOpBuilder& b) {
auto* computation = subgraph.roots.front()->parent();
llvm::SmallVector<mlir::Type> parameter_types;
llvm::SmallVector<mlir::Type> result_types;
auto element_type = [&](const auto& shape) {
return PrimitiveTypeToMlirType(shape.element_type(), b);
};
for (auto* root : subgraph.roots) {
for (auto ty : ShapeToMlirTypes(root->shape(), b)) {
result_types.push_back(
mlir::cast<mlir::RankedTensorType>(ty).getElementType());
}
}
llvm::SmallVector<mlir::DictionaryAttr> arg_attrs;
if (computation->IsFusionComputation() || computation->IsEntryComputation()) {
for (auto* param : computation->parameter_instructions()) {
parameter_types.push_back(TensorShapeToMlirType(param->shape(), b));
arg_attrs.emplace_back();
}
for (int64_t size : subgraph.index_ranges) {
parameter_types.push_back(b.getIndexType());
arg_attrs.emplace_back(mlir::DictionaryAttr::get(
b.getContext(),
{b.getNamedAttr("xla.range", b.getIndexArrayAttr({0, size - 1}))}));
}
int operand_offset = parameter_types.size();
parameter_types.resize(operand_offset + subgraph.num_injected_values);
arg_attrs.resize(parameter_types.size());
for (auto [value, start] : subgraph.injected_value_starts) {
for (int index = 0; index < Arity(value->shape()); ++index) {
parameter_types[operand_offset + start + index] =
element_type(TupleShape(value->shape(), index));
}
}
} else {
for (auto* param : computation->parameter_instructions()) {
parameter_types.push_back(element_type(param->shape()));
}
}
auto ty = b.getFunctionType(parameter_types, result_types);
auto func_op = b.create<mlir::func::FuncOp>(
subgraph.name, ty,
llvm::ArrayRef<mlir::NamedAttribute>{}, arg_attrs);
func_op.setPrivate();
return func_op;
}
}
}
} | #include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include <string>
#include <utility>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace mlir_converter {
namespace {
using ::testing::ElementsAre;
using ::testing::SizeIs;
using ::testing::UnorderedElementsAre;
class ComputationPartitionerTest : public HloTestBase {
protected:
ComputationPartitionerTest() {
mlir_context_.loadDialect<mlir::func::FuncDialect>();
}
mlir::MLIRContext mlir_context_;
};
std::string PrintAndErase(mlir::func::FuncOp func) {
std::string out;
llvm::raw_string_ostream os(out);
os << func;
func.erase();
return out;
}
TEST_F(ComputationPartitionerTest, PartitionDiamonds) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%param = f32[6] parameter(0)
%slice0.1 = f32[5] slice(f32[6]{0} %param), slice={[0:5]}
%slice0.2 = f32[5] slice(f32[6]{0} %param), slice={[1:6]}
%add0 = f32[5] add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
%slice1.1 = f32[4] slice(f32[5]{0} %add0), slice={[0:4]}
%slice1.2 = f32[4] slice(f32[5]{0} %add0), slice={[1:5]}
%add1 = f32[4] add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
%slice2.1 = f32[3] slice(f32[4]{0} %add1), slice={[0:3]}
%slice2.2 = f32[3] slice(f32[4]{0} %add1), slice={[1:4]}
%add2 = f32[3] add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
%slice3.1 = f32[2] slice(f32[3]{0} %add2), slice={[0:2]}
%slice3.2 = f32[2] slice(f32[3]{0} %add2), slice={[1:3]}
ROOT %add3 = f32[2] add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
constexpr auto kExpected = R"(PartitionedComputation fused_computation:
SUBGRAPH fused_computation_add3 {
%slice3.1 = f32[2]{0} slice(f32[3]{0} %add2), slice={[0:2]}
%slice3.2 = f32[2]{0} slice(f32[3]{0} %add2), slice={[1:3]}
ROOT %add3 = f32[2]{0} add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2)
}
SUBGRAPH fused_computation_add2 {
%slice2.1 = f32[3]{0} slice(f32[4]{0} %add1), slice={[0:3]}
%slice2.2 = f32[3]{0} slice(f32[4]{0} %add1), slice={[1:4]}
ROOT %add2 = f32[3]{0} add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2)
}
SUBGRAPH fused_computation_add1 {
%slice1.1 = f32[4]{0} slice(f32[5]{0} %add0), slice={[0:4]}
%slice1.2 = f32[4]{0} slice(f32[5]{0} %add0), slice={[1:5]}
ROOT %add1 = f32[4]{0} add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2)
}
SUBGRAPH fused_computation_add0 {
%slice0.1 = f32[5]{0} slice(f32[6]{0} %param), slice={[0:5]}
%slice0.2 = f32[5]{0} slice(f32[6]{0} %param), slice={[1:6]}
ROOT %add0 = f32[5]{0} add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2)
}
SUBGRAPH fused_computation_param {
ROOT %param = f32[6]{0} parameter(0)
})";
EXPECT_EQ(computation.ToString(6), kExpected);
}
TEST_F(ComputationPartitionerTest, SimpleConcatenate) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%param1 = f32[6] parameter(0)
%param2 = f32[3] parameter(1)
%neg = f32[6] negate(%param1)
%exp = f32[3] exponential(%param2)
ROOT %concat = f32[9] concatenate(%neg, %exp), dimensions={0}
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
EXPECT_THAT(computation.subgraphs(), SizeIs(1));
}
TEST_F(ComputationPartitionerTest, DiamondConcatenate) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%param1 = f32[6] parameter(0)
%param2 = f32[6] parameter(1)
%log = f32[6] log(%param1)
%add = f32[6] add(%log, %param2)
%neg = f32[6] negate(%log)
%exp = f32[6] exponential(%add)
ROOT %concat = f32[12] concatenate(%neg, %exp), dimensions={0}
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
constexpr auto kExpected = R"(PartitionedComputation fused_computation:
SUBGRAPH fused_computation_concat {
%neg = f32[6]{0} negate(f32[6]{0} %log)
%param2 = f32[6]{0} parameter(1)
%add = f32[6]{0} add(f32[6]{0} %log, f32[6]{0} %param2)
%exp = f32[6]{0} exponential(f32[6]{0} %add)
ROOT %concat = f32[12]{0} concatenate(f32[6]{0} %neg, f32[6]{0} %exp), dimensions={0}
}
SUBGRAPH fused_computation_log {
%param1 = f32[6]{0} parameter(0)
ROOT %log = f32[6]{0} log(f32[6]{0} %param1)
})";
EXPECT_EQ(computation.ToString(6), kExpected);
}
TEST_F(ComputationPartitionerTest, TupleRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[6] parameter(0)
%p1 = f32[6] parameter(1)
%add = f32[6] add(p0, p1)
%sub = f32[6] subtract(p0, p1)
ROOT %root = (f32[6], f32[6]) tuple(%add, %sub)
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
constexpr auto kExpected = R"(PartitionedComputation fused_computation:
SUBGRAPH fused_computation_root {
%add = f32[6]{0} add(f32[6]{0} %p0, f32[6]{0} %p1)
%sub = f32[6]{0} subtract(f32[6]{0} %p0, f32[6]{0} %p1)
ROOT %root = (f32[6]{0}, f32[6]{0}) tuple(f32[6]{0} %add, f32[6]{0} %sub)
}
SUBGRAPH fused_computation_p1 {
ROOT %p1 = f32[6]{0} parameter(1)
}
SUBGRAPH fused_computation_p0 {
ROOT %p0 = f32[6]{0} parameter(0)
})";
EXPECT_EQ(computation.ToString(6), kExpected);
}
TEST_F(ComputationPartitionerTest, Epilogue) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation {
p0 = f32[4] parameter(0)
c0 = f32[] constant(0)
reduce = f32[] reduce(p0, c0), dimensions={0}, to_apply=add
bitcast = f32[1] bitcast(reduce)
abs = f32[1] abs(bitcast)
log = f32[1] log(abs)
sign = f32[1] sign(bitcast)
ROOT tuple = (f32[1], f32[1]) tuple(log, sign)
})")
.value();
auto* fused_computation = module->GetComputationWithName("fused_computation");
EpilogueSpecification epilogue{
{fused_computation->GetInstructionWithName("reduce")},
{fused_computation->GetInstructionWithName("log"),
fused_computation->GetInstructionWithName("sign")},
{1, 42},
{CreateIdentityMap(
fused_computation->root_instruction()->shape().tuple_shapes(0),
&mlir_context_)}};
PartitionedComputations fusion(fused_computation, &mlir_context_, {epilogue});
mlir::ImplicitLocOpBuilder builder(mlir::UnknownLoc::get(&mlir_context_),
&mlir_context_);
EXPECT_EQ(
PrintAndErase(
CreateSubgraphMlirFunction(fusion.epilogues().front(), builder)),
"func.func private @fused_computation__epilogue__log_sign(tensor<4xf32>, "
"index {xla.range = [0 : index, 0 : index]}, "
"index {xla.range = [0 : index, 41 : index]}, "
"f32) -> (f32, f32)");
}
TEST_F(ComputationPartitionerTest, TransposeAsRoot) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[64, 32] parameter(0)
%p1 = f32[64, 32] parameter(1)
%add = f32[64, 32] add(p0, p1)
%transpose = f32[32, 64] transpose(%add), dimensions={1, 0}
%exp = f32[32, 64] exponential(%transpose)
ROOT %root = f32[32, 64] tanh(%exp)
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(
fusion, &mlir_context_, [](const HloInstruction* instr) {
return instr->opcode() == HloOpcode::kTranspose;
});
ASSERT_THAT(computation.subgraphs(), SizeIs(2));
EXPECT_THAT(computation.GetRootSubgraph().roots, SizeIs(1));
EXPECT_THAT(computation.GetRootSubgraph().instructions, SizeIs(2));
}
TEST_F(ComputationPartitionerTest, PartiallyMergable) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
fused_computation {
%p0 = f32[10,10] parameter(0)
%p1 = f32[10,10] parameter(1)
%add = f32[10,10] add(%p0, %p1)
%transpose = f32[10,10] transpose(%add), dimensions={1,0}
ROOT %sub = f32[10,10] subtract(%add, %transpose)
})")
.value();
auto* fusion = module->GetComputationWithName("fused_computation");
ASSERT_NE(fusion, nullptr);
PartitionedComputation computation(fusion, &mlir_context_);
auto transpose = fusion->GetInstructionWithName("transpose");
auto sub = fusion->GetInstructionWithName("sub");
ASSERT_THAT(computation.subgraphs(), SizeIs(2));
EXPECT_THAT(computation.GetRootSubgraph().instructions,
UnorderedElementsAre(transpose, sub));
}
TEST_F(ComputationPartitionerTest, SubgraphSignatures) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test_module
add {
%p0 = f32[] parameter(0)
%p1 = f32[] parameter(1)
ROOT %add = f32[] add(%p0, %p1)
}
fusion {
%p0 = f32[10,10]{0,1} parameter(0)
%p1 = f32[10,10]{1,0} parameter(1)
%c0 = f32[] constant(2)
%bc = f32[10,10]{0,1} bitcast(%p1)
%add = f32[10,10] add(%p0, %bc)
ROOT %reduce = f32[10] reduce(%add, %c0), dimensions={1}, to_apply=add
}
ENTRY main {
%p0 = f32[10,10] parameter(0)
%p1 = f32[10,10] parameter(1)
ROOT %fusion = f32[10] fusion(%p0, %p1), kind=kLoop, calls=fusion
})")
.value();
mlir::MLIRContext context;
context.loadDialect<mlir::func::FuncDialect>();
mlir::ImplicitLocOpBuilder builder(mlir::UnknownLoc::get(&context), &context);
PartitionedComputation fusion(module->GetComputationWithName("fusion"),
&mlir_context_);
EXPECT_EQ(
PrintAndErase(
CreateSubgraphMlirFunction(fusion.GetRootSubgraph(), builder)),
"func.func private @fusion_reduce(tensor<10x10xf32, dense<[0, 1]> : "
"tensor<2xi64>>, tensor<10x10xf32>, index {xla.range = [0 : index, 9 : "
"index]}) -> f32");
PartitionedComputation add(module->GetComputationWithName("add"),
&mlir_context_);
EXPECT_EQ(
PrintAndErase(CreateSubgraphMlirFunction(add.GetRootSubgraph(), builder)),
"func.func private @add_add(f32, f32) -> f32");
}
}
}
}
} | 2,138 |
#ifndef XLA_SERVICE_GPU_FUSIONS_MLIR_MLIR_FUSION_EMITTER_H_
#define XLA_SERVICE_GPU_FUSIONS_MLIR_MLIR_FUSION_EMITTER_H_
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Value.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Pass/PassManager.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/mlir/tools/mlir_replay/public/compiler_trace.pb.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
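// Base class for fusion emitters that build the kernel body in MLIR and lower
// it to LLVM IR. Subclasses implement EmitEntryFunction to emit the kernel
// body and may override GetEpilogues to attach per-root epilogues.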
class MlirFusionEmitterBase : public KernelFusionInterface {
public:
absl::StatusOr<FusionEmissionResult> Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const final;
absl::StatusOr<std::unique_ptr<llvm::Module>> CreateLLVMModule(
mlir::MLIRContext& mlir_context, llvm::LLVMContext& llvm_context,
const se::DeviceDescription& device, const HloFusionInstruction& fusion,
const std::string& entry_function_name,
const BufferAssignment* buffer_assignment) const;
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> CreateMLIRModule(
mlir::MLIRContext& context, const HloFusionInstruction& fusion,
const std::string& entry_function_name,
const BufferAssignment* buffer_assignment,
mlir::interpreter::MlirCompilationTrace* trace = nullptr) const;
protected:
virtual std::vector<mlir_converter::EpilogueSpecification> GetEpilogues(
const HloFusionInstruction& fusion,
mlir::MLIRContext* mlir_context) const {
return {};
}
virtual absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const = 0;
absl::flat_hash_map<const HloInstruction*, mlir::ValueRange> EmitEpilogue(
int epilogue_index,
const mlir_converter::PartitionedComputations& computations,
mlir::func::FuncOp entry_fn,
const absl::flat_hash_map<const HloInstruction*,
llvm::SmallVector<mlir::Value>>& injected,
mlir::ValueRange output_indices,
mlir::ImplicitLocOpBuilder& builder) const;
llvm::SmallVector<mlir::Value> EmitThreadLoopNest(
mlir::ImplicitLocOpBuilder& b, mlir::ValueRange outputs,
const IndexingMap& indexing_map,
const std::function<llvm::SmallVector<mlir::Value>(
mlir::ValueRange outputs, mlir::ValueRange dim_values,
mlir::ValueRange symbol_values)>& create_body,
bool vectorize = false) const;
mlir::Value EmitBlockId(mlir::ImplicitLocOpBuilder& builder, int dim) const;
mlir::Value EmitThreadId(mlir::ImplicitLocOpBuilder& builder, int dim) const;
llvm::SmallVector<mlir::Value> EmitThreadAndBlockIds(
mlir::ImplicitLocOpBuilder& builder) const;
private:
absl::Status EmitMlir(mlir::ModuleOp module,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const;
absl::Status RunPassPipeline(
mlir::ModuleOp module, mlir::PassManager& pm,
mlir::interpreter::MlirCompilationTrace* trace) const;
};
}
}
#endif
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include <cstdint>
#include <functional>
#include <iterator>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/Casting.h"
#include "mlir/Conversion/AffineToStandard/AffineToStandard.h"
#include "mlir/Conversion/ComplexToStandard/ComplexToStandard.h"
#include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlow.h"
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Dialect/Func/Extensions/InlinerExtension.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/Location.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/OwningOpRef.h"
#include "mlir/IR/Types.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Export.h"
#include "mlir/Transforms/Passes.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/mlir/tools/mlir_replay/public/compiler_trace.pb.h"
#include "xla/mlir/tools/mlir_replay/public/compiler_trace_instrumentation.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/mlir_hlo/mhlo/transforms/passes.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/dump.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h"
#include "xla/service/gpu/fusions/mlir/ir/xla_gpu_ops.h"
#include "xla/service/gpu/fusions/mlir/passes.h"
#include "xla/service/gpu/fusions/mlir/type_util.h"
#include "xla/service/gpu/ir_emitter_context.h"
#include "xla/service/gpu/kernel_arguments.h"
#include "xla/service/gpu/kernel_reuse_cache.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/runtime/kernel_thunk.h"
#include "xla/service/gpu/target_util.h"
#include "xla/service/llvm_ir/llvm_util.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tsl/framework/mlir/status_scoped_diagnostic_handler.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::Value;
using mlir::ValueRange;
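// Attaches range metadata to NVVM thread/block id intrinsic calls based on
// the launch dimensions, so LLVM can reason about their bounds.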
void AddRanges(llvm::Function* func, const LaunchDimensions& launch_dims,
llvm::Module* module) {
for (auto& block : *func) {
for (auto& instr : block) {
if (auto* call = llvm::dyn_cast<llvm::CallInst>(&instr)) {
if (auto* callee = call->getCalledFunction()) {
switch (callee->getIntrinsicID()) {
case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x:
llvm_ir::AddRangeMetadata(
0, launch_dims.thread_counts_per_block().x, call, module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_y:
llvm_ir::AddRangeMetadata(
0, launch_dims.thread_counts_per_block().y, call, module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_z:
llvm_ir::AddRangeMetadata(
0, launch_dims.thread_counts_per_block().z, call, module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x:
llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().x, call,
module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_y:
llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().y, call,
module);
break;
case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_z:
llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().z, call,
module);
break;
}
}
}
}
}
}
bool Needs64Bits(const Shape& shape) {
return shape.IsArray() ? !IsInt32(ShapeUtil::ElementsIn(shape))
: absl::c_any_of(shape.tuple_shapes(), Needs64Bits);
}
bool Is64BitIndex(const HloInstruction* instr, int operand) {
const auto& shape = instr->operand(operand)->shape();
return shape.element_type() == PrimitiveType::S64 ||
shape.element_type() == PrimitiveType::U64;
}
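// Returns true if the computation (or any computation it calls) requires
// 64-bit indexing: some shape's element count does not fit in an int32, or a
// dynamic-slice/dynamic-update-slice/gather/scatter uses 64-bit index
// operands.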
bool Needs64BitIndices(const HloComputation* computation) {
for (auto* instr : computation->instructions()) {
switch (instr->opcode()) {
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
for (int i = 1; i < instr->operand_count(); ++i) {
if (Is64BitIndex(instr, i)) return true;
}
break;
case HloOpcode::kGather:
case HloOpcode::kScatter:
CHECK(instr->shape().IsArray()) << "Variadic scatter is unsupported.";
if (Is64BitIndex(instr, 1)) return true;
break;
default:
break;
}
if (Needs64Bits(instr->shape()) ||
absl::c_any_of(instr->called_computations(), Needs64BitIndices)) {
return true;
}
}
return false;
}
}
Value MlirFusionEmitterBase::EmitBlockId(mlir::ImplicitLocOpBuilder& builder,
int dim) const {
const auto& counts = launch_dimensions().block_counts();
int64_t count = dim == 0 ? counts.x : dim == 1 ? counts.y : counts.z;
auto block_id = builder.create<mlir::gpu::BlockIdOp>(
static_cast<mlir::gpu::Dimension>(dim));
block_id->setAttr("xla.range", builder.getIndexArrayAttr({0, count - 1}));
return block_id;
}
Value MlirFusionEmitterBase::EmitThreadId(mlir::ImplicitLocOpBuilder& builder,
int dim) const {
const auto& counts = launch_dimensions().thread_counts_per_block();
int64_t count = dim == 0 ? counts.x : dim == 1 ? counts.y : counts.z;
auto thread_id = builder.create<mlir::gpu::ThreadIdOp>(
static_cast<mlir::gpu::Dimension>(dim));
thread_id->setAttr("xla.range", builder.getIndexArrayAttr({0, count - 1}));
return thread_id;
}
llvm::SmallVector<Value> MlirFusionEmitterBase::EmitThreadAndBlockIds(
mlir::ImplicitLocOpBuilder& builder) const {
auto& b = builder;
return {EmitThreadId(b, 0), EmitThreadId(b, 1), EmitThreadId(b, 2),
EmitBlockId(b, 0), EmitBlockId(b, 1), EmitBlockId(b, 2)};
}
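// Emits the kernel for the fusion (reusing a cached kernel when possible),
// links it into the target LLVM module, and returns a KernelThunk that
// launches it.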
absl::StatusOr<FusionEmissionResult> MlirFusionEmitterBase::Emit(
IrEmitterContext& ir_emitter_context,
const HloFusionInstruction& fusion) const {
VLOG(5) << "Fusion: " << fusion.fused_instructions_computation()->ToString();
TF_ASSIGN_OR_RETURN(
auto args,
KernelArguments::Create(ir_emitter_context.buffer_assignment(), &fusion));
auto launch_dims = launch_dimensions();
auto [status_or_entry, cached] =
ir_emitter_context.kernel_cache().GetWithStatus(
fusion.fused_instructions_computation(), args.args(),
"",
[&]() -> absl::StatusOr<KernelReuseCache::Entry> {
std::string kernel_name =
ir_emitter_context.name_uniquer()->GetUniqueName(
llvm_ir::SanitizeFunctionName(std::string(fusion.name())));
if (ir_emitter_context.emit_kernels()) {
TF_ASSIGN_OR_RETURN(
auto module,
CreateLLVMModule(
*ir_emitter_context.mlir_context(),
ir_emitter_context.llvm_module()->getContext(),
ir_emitter_context.gpu_device_info(), fusion, kernel_name,
&ir_emitter_context.buffer_assignment()));
auto* kernel_func = module->getFunction(kernel_name);
AddRanges(kernel_func, launch_dims, module.get());
auto* target = ir_emitter_context.llvm_module();
module->setDataLayout(target->getDataLayout());
module->setTargetTriple(target->getTargetTriple());
llvm::IRBuilder<> builder(module->getContext());
AnnotateFunctionAsGpuKernel(module.get(), kernel_func, &builder);
TF_RETURN_IF_ERROR(AnnotateKernelLaunchDimensions(
ir_emitter_context.gpu_device_info(), launch_dims,
kernel_name, module.get()));
CHECK(!llvm::Linker::linkModules(
*target, std::move(module),
llvm::Linker::Flags::OverrideFromSrc));
} else {
VLOG(3) << "Skipped kernel compilation.";
}
return KernelReuseCache::Entry{kernel_name, launch_dims,
std::nullopt,
0};
});
TF_ASSIGN_OR_RETURN(const KernelReuseCache::Entry* entry, status_or_entry);
if (cached) {
VLOG(3) << "Reuse: " << fusion.name() << " -> " << entry->kernel_name;
}
FusionEmissionResult result;
result.thunks.emplace_back(std::make_unique<KernelThunk>(
&fusion, entry->kernel_name, args.args(), launch_dims, entry->cluster_dim,
entry->shmem_bytes));
return result;
}
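// Lowers the fusion to an MLIR module, runs the optimization/lowering
// pipeline down to the LLVM dialect, and translates the result into an LLVM
// IR module. Only CUDA devices are supported by this path.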
absl::StatusOr<std::unique_ptr<llvm::Module>>
MlirFusionEmitterBase::CreateLLVMModule(
mlir::MLIRContext& mlir_context, llvm::LLVMContext& llvm_context,
const se::DeviceDescription& device, const HloFusionInstruction& fusion,
const std::string& entry_function_name,
const BufferAssignment* buffer_assignment) const {
bool is_amd = std::holds_alternative<se::RocmComputeCapability>(
device.gpu_compute_capability());
HloModule* hlo_module = fusion.GetModule();
std::unique_ptr<mlir::interpreter::MlirCompilationTrace> trace = nullptr;
if (DumpingEnabledForHloModule(*hlo_module) &&
DumpingEnabledForHloPass("mlir-fusion-emitter",
hlo_module->config().debug_options())) {
trace = std::make_unique<mlir::interpreter::MlirCompilationTrace>();
}
TF_RET_CHECK(!is_amd) << "Unsupported device type: " << device.name();
TF_ASSIGN_OR_RETURN(
auto module, CreateMLIRModule(mlir_context, fusion, entry_function_name,
buffer_assignment));
mlir::PassManager pm(&mlir_context);
pm.addPass(CreateEraseDeadFunctionsPass());
pm.addPass(mlir::createCSEPass());
pm.addPass(CreateLowerXlaGpuToScfPass());
pm.addPass(mlir::createInlinerPass({}, [&](mlir::OpPassManager& pm) {
pm.addPass(mlir::createCSEPass());
}));
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::createCSEPass());
pm.addPass(mlir::mhlo::createConvertToSignlessPass());
pm.addPass(CreatePropagateSliceIndicesPass());
pm.addPass(mlir::createLoopInvariantCodeMotionPass());
pm.addNestedPass<mlir::func::FuncOp>(CreateUnswitchLoopsPass());
pm.addPass(mlir::createLoopInvariantCodeMotionPass());
pm.addNestedPass<mlir::func::FuncOp>(CreateVectorizeLoadsAndStoresPass());
pm.addNestedPass<mlir::func::FuncOp>(CreateOptimizeLoopsPass());
pm.addNestedPass<mlir::func::FuncOp>(CreateConvertPureCallOpsPass());
pm.addPass(CreateLowerTensorsPass(
is_amd, is_amd ? device.rocm_compute_capability().gcn_arch_name()
: device.cuda_compute_capability().ToString()));
pm.addPass(mlir::createConvertComplexToStandardPass());
pm.addPass(CreateMergePointersToSameSlicePass());
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::createCSEPass());
pm.addNestedPass<mlir::func::FuncOp>(CreateSimplifyArithPass());
pm.addPass(CreateSimplifyAffinePass());
pm.addPass(mlir::createLowerAffinePass());
pm.addPass(mlir::createLoopInvariantCodeMotionPass());
pm.addPass(mlir::createSymbolDCEPass());
pm.addPass(mlir::createCSEPass());
pm.addPass(CreateExpandFloatOpsPass(
!device.cuda_compute_capability().IsAtLeastAmpere()));
pm.addPass(CreateLowerToLLVMPass());
pm.addPass(mlir::createReconcileUnrealizedCastsPass());
auto pipeline_status = RunPassPipeline(module.get(), pm, trace.get());
if (trace) {
DumpPerModuleProtobufToFile(
*hlo_module, *trace, hlo_module->config().debug_options(),
absl::StrCat(entry_function_name, ".mlir-trace"));
}
TF_RETURN_IF_ERROR(pipeline_status);
auto llvm_module = mlir::translateModuleToLLVMIR(module.get(), llvm_context);
TF_RET_CHECK(llvm_module != nullptr)
<< "Failed to translate module to LLVM IR.";
return llvm_module;
}
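// Builds the MLIR module with the kernel entry function: one tensor argument
// per fusion operand and per output (annotated with slice index and, when a
// buffer assignment is available, alignment, dereferenceable size and
// invariance), then emits the body via EmitMlir and runs a small cleanup
// pipeline.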
absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>>
MlirFusionEmitterBase::CreateMLIRModule(
mlir::MLIRContext& context, const HloFusionInstruction& fusion,
const std::string& entry_function_name,
const BufferAssignment* buffer_assignment,
mlir::interpreter::MlirCompilationTrace* trace) const {
context.loadDialect<mlir::DLTIDialect, mlir::tensor::TensorDialect,
mlir::func::FuncDialect, mlir::affine::AffineDialect,
mlir::arith::ArithDialect, mlir::cf::ControlFlowDialect,
mlir::math::MathDialect, mlir::scf::SCFDialect,
mlir::mhlo::MhloDialect, mlir::gpu::GPUDialect,
mlir::vector::VectorDialect, mlir::NVVM::NVVMDialect,
xla::gpu::XlaGpuDialect>();
mlir::DialectRegistry registry;
mlir::func::registerInlinerExtension(registry);
mlir::registerBuiltinDialectTranslation(registry);
mlir::registerLLVMDialectTranslation(registry);
mlir::registerNVVMDialectTranslation(registry);
context.appendDialectRegistry(registry);
mlir::OpBuilder builder(&context);
auto loc = mlir::NameLoc::get(builder.getStringAttr(fusion.name()));
mlir::OwningOpRef<mlir::ModuleOp> module = llvm_ir::CreateMlirModuleOp(loc);
SmallVector<mlir::Type> param_types;
std::optional<KernelArguments> args;
if (buffer_assignment != nullptr) {
TF_ASSIGN_OR_RETURN(args,
KernelArguments::Create(*buffer_assignment, &fusion));
}
int next_slice_index = 0;
absl::flat_hash_map<BufferAllocation::Slice, std::optional<int>>
slice_indices;
auto get_arg_attrs = [&](int index) -> absl::StatusOr<mlir::Attribute> {
if (!args) {
return builder.getDictionaryAttr({builder.getNamedAttr(
"xla.slice_index", builder.getIndexAttr(next_slice_index++))});
}
const auto& arg = args->args()[index];
SmallVector<mlir::NamedAttribute> attrs;
attrs.push_back(builder.getNamedAttr(
"xla.slice_index", builder.getIndexAttr(arg.llvm_arg_index())));
attrs.push_back(
builder.getNamedAttr(mlir::LLVM::LLVMDialect::getAlignAttrName(),
builder.getIndexAttr(arg.alignment())));
attrs.push_back(builder.getNamedAttr(
mlir::LLVM::LLVMDialect::getDereferenceableAttrName(),
builder.getIndexAttr(arg.slice().size())));
if (!arg.written()) {
attrs.push_back(
builder.getNamedAttr("xla.invariant", builder.getUnitAttr()));
}
return builder.getDictionaryAttr(attrs);
};
SmallVector<mlir::Attribute> arg_attrs;
int arg_index = 0;
for (auto* param : fusion.operands()) {
param_types.push_back(
mlir_converter::TensorShapeToMlirType(param->shape(), builder));
TF_ASSIGN_OR_RETURN(arg_attrs.emplace_back(), get_arg_attrs(arg_index++));
}
auto result_types = mlir_converter::ShapeToMlirTypes(fusion.shape(), builder);
param_types.append(result_types.begin(), result_types.end());
TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus(
fusion.shape(), [&](const auto& shape, const ShapeIndex& index) {
if (shape.IsArray()) {
TF_ASSIGN_OR_RETURN(arg_attrs.emplace_back(),
get_arg_attrs(arg_index++));
}
return absl::OkStatus();
}));
builder.setInsertionPointToStart(module->getBody());
auto entry_func = builder.create<mlir::func::FuncOp>(
loc, entry_function_name,
mlir::FunctionType::get(&context, param_types, result_types),
mlir::StringAttr{},
mlir::ArrayAttr::get(&context, arg_attrs),
mlir::ArrayAttr{});
entry_func->setAttr("xla.entry", mlir::UnitAttr::get(&context));
TF_RETURN_IF_ERROR(EmitMlir(module.get(), entry_func, fusion));
mlir::PassManager pm(&context);
pm.addNestedPass<mlir::func::FuncOp>(CreateSimplifyArithPass());
pm.addPass(mlir::createCanonicalizerPass());
pm.addPass(mlir::createCSEPass());
TF_RETURN_IF_ERROR(RunPassPipeline(module.get(), pm, trace));
return module;
}
SmallVector<Value> MlirFusionEmitterBase::EmitThreadLoopNest(
mlir::ImplicitLocOpBuilder& b, ValueRange outputs,
const IndexingMap& indexing_map,
const std::function<
SmallVector<Value>(ValueRange outputs_tensors, ValueRange dim_values,
ValueRange symbol_values)>& create_body,
bool vectorize) const {
return mlir_converter::EmitLoopNest(b, EmitThreadAndBlockIds(b), outputs,
indexing_map, create_body, vectorize);
}
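// Partitions the fusion computation, emits one MLIR function per subgraph
// (dropping subgraphs for epilogue heroes without users and for a tuple root
// that is covered by epilogues), sets the index bitwidth in the module's data
// layout, and finally delegates to EmitEntryFunction.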
absl::Status MlirFusionEmitterBase::EmitMlir(
mlir::ModuleOp module, mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
std::vector<mlir_converter::EpilogueSpecification> epilogues =
GetEpilogues(fusion, module->getContext());
mlir_converter::PartitionedComputations computations(
fusion.fused_instructions_computation(), module->getContext(), epilogues);
auto subgraph_to_mlir_fn = computations.DeclareFunctions(module);
for (const auto& epilogue : epilogues) {
for (auto* custom : epilogue.heroes) {
if (custom->user_count() == 0) {
subgraph_to_mlir_fn.extract(&computations.FindSubgraph(custom))
.mapped()
.erase();
}
}
}
auto* root = fusion.fused_instructions_computation()->root_instruction();
if (root->opcode() == HloOpcode::kTuple && !epilogues.empty()) {
subgraph_to_mlir_fn.extract(&computations.FindSubgraph(root))
.mapped()
.erase();
}
auto call_targets =
computations.CreateCallTargetProvider(subgraph_to_mlir_fn);
for (const auto& comp : computations.partitioned_computations()) {
for (const auto& subgraph : comp.subgraphs()) {
if (subgraph_to_mlir_fn.contains(&subgraph)) {
TF_RETURN_IF_ERROR(mlir_converter::SubgraphToMlirFunction(
comp, subgraph, subgraph_to_mlir_fn[&subgraph], call_targets));
}
}
}
for (const auto& epilogue : computations.epilogues()) {
if (epilogue.roots.empty()) continue;
TF_RETURN_IF_ERROR(mlir_converter::SubgraphToMlirFunction(
computations.FindPartitionedComputation(
fusion.fused_instructions_computation()),
epilogue, subgraph_to_mlir_fn[&epilogue], call_targets));
}
int index_bitwidth =
Needs64BitIndices(fusion.fused_instructions_computation()) ? 64 : 32;
mlir::OpBuilder b(module->getContext());
auto index_layout = mlir::DataLayoutEntryAttr::get(
b.getIndexType(), b.getI32IntegerAttr(index_bitwidth));
module->setAttr(
mlir::DLTIDialect::kDataLayoutAttrName,
mlir::DataLayoutSpecAttr::get(module->getContext(), {index_layout}));
return EmitEntryFunction(computations, call_targets, entry_function, fusion);
}
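// Calls the epilogue function for `epilogue_index` with the fusion
// parameters, the output indices and the injected hero values, and returns
// the produced values grouped per epilogue root.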
absl::flat_hash_map<const HloInstruction*, ValueRange>
MlirFusionEmitterBase::EmitEpilogue(
int epilogue_index,
const mlir_converter::PartitionedComputations& computations,
mlir::func::FuncOp entry_fn,
const absl::flat_hash_map<const HloInstruction*, llvm::SmallVector<Value>>&
injected,
ValueRange output_indices, mlir::ImplicitLocOpBuilder& builder) const {
const auto& epilogue = computations.epilogues().at(epilogue_index);
if (epilogue.roots.empty()) {
return {};
}
auto epilogue_fn = mlir::cast<mlir::func::FuncOp>(
entry_fn->getParentOfType<mlir::ModuleOp>().lookupSymbol(epilogue.name));
SmallVector<Value> operands = ValueRange(entry_fn.getArguments().take_front(
computations.fusion()->num_parameters()));
absl::c_copy(output_indices, std::back_inserter(operands));
int injected_offset = operands.size();
operands.resize(injected_offset + epilogue.num_injected_values);
for (auto [injected_instruction, start] : epilogue.injected_value_starts) {
absl::c_copy(injected.at(injected_instruction),
operands.begin() + injected_offset + start);
}
ValueRange results =
builder.create<PureCallOp>(epilogue_fn, operands).getResults();
absl::flat_hash_map<const HloInstruction*, ValueRange> results_per_root;
for (auto* root : epilogue.roots) {
int arity =
root->shape().IsTuple() ? root->shape().tuple_shapes().size() : 1;
results_per_root[root] = results.take_front(arity);
results = results.drop_front(arity);
}
CHECK_EQ(results.size(), 0);
return results_per_root;
}
absl::Status MlirFusionEmitterBase::RunPassPipeline(
mlir::ModuleOp module, mlir::PassManager& pm,
mlir::interpreter::MlirCompilationTrace* trace) const {
if (VLOG_IS_ON(5)) {
module.getContext()->disableMultithreading();
pm.enableIRPrinting();
}
if (trace) {
module.getContext()->disableMultithreading();
pm.addInstrumentation(
std::make_unique<mlir::interpreter::MlirCompilerTraceInstrumentation>(
*trace));
}
tsl::StatusScopedDiagnosticHandler diagnostic_handler(module.getContext());
(void)pm.run(module);
return diagnostic_handler.consumeStatus();
}
}
} | #include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h"
#include <cstdint>
#include <optional>
#include <string>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h"
#include "mlir/Dialect/Complex/IR/Complex.h"
#include "mlir/Dialect/Func/Extensions/InlinerExtension.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/ImplicitLocOpBuilder.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/mlir_hlo/mhlo/IR/hlo_ops.h"
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/filecheck.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class DummyCopyFusionEmitter : public MlirFusionEmitterBase {
public:
LaunchDimensions launch_dimensions() const final { return {1, 100}; }
std::optional<IndexingMap> ComputeThreadIdToOutputIndexing(
int64_t, mlir::MLIRContext*) const final {
return std::nullopt;
}
std::optional<IndexingMap> ComputeThreadIdToInputIndexing(
int64_t, int64_t, mlir::MLIRContext*) const final {
return std::nullopt;
}
protected:
absl::Status EmitEntryFunction(
const mlir_converter::PartitionedComputations& computations,
const mlir_converter::CallTargetProvider& call_targets,
mlir::func::FuncOp entry_function,
const HloFusionInstruction& fusion) const {
mlir::ImplicitLocOpBuilder b(entry_function.getLoc(), entry_function);
b.setInsertionPointToStart(entry_function.addEntryBlock());
auto thread_id = EmitThreadId(b, 0);
auto value = b.create<mlir::tensor::ExtractOp>(
entry_function.getArgument(0), mlir::ValueRange{thread_id});
auto result = b.create<mlir::tensor::InsertOp>(
value, entry_function.getArgument(1), mlir::ValueRange{thread_id});
b.create<mlir::func::ReturnOp>(result->getResults());
return absl::OkStatus();
}
};
class MlirFusionEmitterTest : public HloTestBase {
protected:
MlirFusionEmitterTest() {
context_.loadDialect<mlir::tensor::TensorDialect, mlir::func::FuncDialect,
mlir::affine::AffineDialect, mlir::arith::ArithDialect,
mlir::complex::ComplexDialect, mlir::math::MathDialect,
mlir::scf::SCFDialect, mlir::mhlo::MhloDialect,
mlir::gpu::GPUDialect, mlir::NVVM::NVVMDialect>();
mlir::DialectRegistry registry;
mlir::func::registerInlinerExtension(registry);
mlir::registerBuiltinDialectTranslation(registry);
mlir::registerLLVMDialectTranslation(registry);
mlir::registerNVVMDialectTranslation(registry);
context_.appendDialectRegistry(registry);
}
mlir::MLIRContext context_;
stream_executor::DeviceDescription device_info_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
};
constexpr absl::string_view kModule = R"(
fused_computation {
ROOT %p0 = f32[100] parameter(0)
}
ENTRY main {
%p0 = f32[100] parameter(0)
ROOT fusion = f32[100] fusion(%p0), kind=kLoop, calls=fused_computation
})";
TEST_F(MlirFusionEmitterTest, CreateMlirModule) {
auto module = ParseAndReturnVerifiedModule(kModule).value();
DummyCopyFusionEmitter emitter;
TF_ASSERT_OK_AND_ASSIGN(
auto mlir_module,
emitter.CreateMLIRModule(
context_,
*Cast<HloFusionInstruction>(
module->entry_computation()->root_instruction()),
"fusion",
nullptr));
std::string out;
llvm::raw_string_ostream stream(out);
stream << *mlir_module;
TF_ASSERT_OK_AND_ASSIGN(auto filecheck_result, RunFileCheck(out, R"(
)"));
EXPECT_TRUE(filecheck_result);
}
TEST_F(MlirFusionEmitterTest, CreateLLVMModule) {
llvm::LLVMContext llvm_context;
auto module = ParseAndReturnVerifiedModule(kModule).value();
DummyCopyFusionEmitter emitter;
TF_ASSERT_OK_AND_ASSIGN(
auto llvm_module,
emitter.CreateLLVMModule(
context_, llvm_context, device_info_,
*Cast<HloFusionInstruction>(
module->entry_computation()->root_instruction()),
"fusion",
nullptr));
std::string out;
llvm::raw_string_ostream stream(out);
stream << *llvm_module;
TF_ASSERT_OK_AND_ASSIGN(auto filecheck_result, RunFileCheck(out, R"(
)"));
EXPECT_TRUE(filecheck_result);
}
}
}
} | 2,139 |
#ifndef XLA_SERVICE_GPU_MODEL_AFFINE_MAP_PRINTER_H_
#define XLA_SERVICE_GPU_MODEL_AFFINE_MAP_PRINTER_H_
#include <cstdint>
#include <ostream>
#include <string>
#include <string_view>
#include "absl/types/span.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
namespace xla {
namespace gpu {
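// Pretty-printer for mlir::AffineMap and mlir::AffineExpr that substitutes
// user-provided names for dimensions and symbols (defaulting to d0, d1, ...
// and s0, s1, ...). For example:
//   AffineMapPrinter printer;
//   printer.SetDimensionName(0, "th_x");
//   printer.ToString(map);  // "(th_x, d1)[s0] -> (...)"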
class AffineMapPrinter {
public:
AffineMapPrinter() = default;
AffineMapPrinter(AffineMapPrinter&& other) = default;
AffineMapPrinter& operator=(AffineMapPrinter&& other) = default;
AffineMapPrinter(absl::Span<const std::string_view> dim_names,
absl::Span<const std::string_view> symbol_names);
void SetSymbolName(int64_t symbol_id, llvm::StringRef name);
void SetDimensionName(int64_t dim_id, llvm::StringRef name);
std::string GetSymbolName(int64_t symbol_id) const;
std::string GetDimensionName(int64_t dim_id) const;
void Print(std::ostream& out, mlir::AffineMap affine_map) const;
std::string ToString(mlir::AffineMap affine_map) const;
void Print(std::ostream& out, mlir::AffineExpr affine_expr) const;
std::string ToString(mlir::AffineExpr affine_expr) const;
private:
void PrintExprImpl(mlir::AffineExpr affine_expr, bool add_parentheses,
llvm::raw_ostream& os) const;
llvm::DenseMap<unsigned, std::string> dim_id_to_name_;
llvm::DenseMap<unsigned, std::string> symbol_id_to_name_;
};
}
}
#endif
#include "xla/service/gpu/model/affine_map_printer.h"
#include <cstdint>
#include <ostream>
#include <string>
#include <string_view>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Support/LLVM.h"
namespace xla {
namespace gpu {
namespace {
using mlir::AffineBinaryOpExpr;
using mlir::AffineConstantExpr;
using mlir::AffineDimExpr;
using mlir::AffineExpr;
using mlir::AffineExprKind;
using mlir::AffineMap;
using mlir::AffineSymbolExpr;
}
AffineMapPrinter::AffineMapPrinter(
absl::Span<const std::string_view> dim_names,
absl::Span<const std::string_view> symbol_names) {
dim_id_to_name_.reserve(dim_names.size());
for (const auto& [index, name] : llvm::enumerate(dim_names)) {
dim_id_to_name_[index] = name;
}
symbol_id_to_name_.reserve(symbol_names.size());
for (const auto& [index, name] : llvm::enumerate(symbol_names)) {
symbol_id_to_name_[index] = name;
}
}
void AffineMapPrinter::Print(std::ostream& out, AffineMap affine_map) const {
out << ToString(affine_map);
}
std::string AffineMapPrinter::ToString(AffineMap affine_map) const {
std::string s;
llvm::raw_string_ostream ss(s);
if (dim_id_to_name_.empty() && symbol_id_to_name_.empty()) {
affine_map.print(ss);
return s;
}
int dim_count = affine_map.getNumDims();
ss << '(';
for (int i = 0; i < dim_count - 1; ++i) {
ss << GetDimensionName(i) << ", ";
}
if (dim_count >= 1) {
ss << GetDimensionName(dim_count - 1);
}
ss << ')';
int symbol_count = affine_map.getNumSymbols();
if (symbol_count != 0) {
ss << '[';
    for (int i = 0; i < symbol_count - 1; ++i) {
      ss << GetSymbolName(i) << ", ";
    }
    ss << GetSymbolName(symbol_count - 1);
ss << ']';
}
ss << " -> (";
llvm::interleaveComma(affine_map.getResults(), ss, [&](AffineExpr expr) {
PrintExprImpl(expr, false, ss);
});
ss << ')';
return s;
}
void AffineMapPrinter::Print(std::ostream& out,
mlir::AffineExpr affine_expr) const {
out << ToString(affine_expr);
}
std::string AffineMapPrinter::ToString(mlir::AffineExpr affine_expr) const {
std::string s;
llvm::raw_string_ostream ss(s);
PrintExprImpl(affine_expr, false, ss);
return s;
}
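// Recursively prints `affine_expr` using the configured dimension and symbol
// names, adding parentheses where needed and rewriting additions of negated
// terms as subtractions (e.g. "d0 + s0 * -1" is printed as "d0 - s0").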
void AffineMapPrinter::PrintExprImpl(const mlir::AffineExpr affine_expr,
bool add_parentheses,
llvm::raw_ostream& os) const {
const char* binopSpelling = nullptr;
switch (affine_expr.getKind()) {
case AffineExprKind::SymbolId: {
unsigned symbol_id =
mlir::cast<AffineSymbolExpr>(affine_expr).getPosition();
os << GetSymbolName(symbol_id);
return;
}
case AffineExprKind::DimId: {
unsigned dim_id = mlir::cast<AffineDimExpr>(affine_expr).getPosition();
os << GetDimensionName(dim_id);
return;
}
case AffineExprKind::Constant:
os << mlir::cast<AffineConstantExpr>(affine_expr).getValue();
return;
case AffineExprKind::Add:
binopSpelling = " + ";
break;
case AffineExprKind::Mul:
binopSpelling = " * ";
break;
case AffineExprKind::FloorDiv:
binopSpelling = " floordiv ";
break;
case AffineExprKind::CeilDiv:
binopSpelling = " ceildiv ";
break;
case AffineExprKind::Mod:
binopSpelling = " mod ";
break;
}
auto binOp = mlir::cast<AffineBinaryOpExpr>(affine_expr);
AffineExpr lhsExpr = binOp.getLHS();
AffineExpr rhsExpr = binOp.getRHS();
if (binOp.getKind() != AffineExprKind::Add) {
if (add_parentheses) {
os << '(';
}
auto rhsConst = mlir::dyn_cast<AffineConstantExpr>(rhsExpr);
if (rhsConst && binOp.getKind() == AffineExprKind::Mul &&
rhsConst.getValue() == -1) {
os << "-";
PrintExprImpl(lhsExpr, true, os);
if (add_parentheses) {
os << ')';
}
return;
}
PrintExprImpl(lhsExpr, true, os);
os << binopSpelling;
PrintExprImpl(rhsExpr, true, os);
if (add_parentheses) {
os << ')';
}
return;
}
if (add_parentheses) {
os << '(';
}
if (auto rhs = mlir::dyn_cast<AffineBinaryOpExpr>(rhsExpr)) {
if (rhs.getKind() == AffineExprKind::Mul) {
AffineExpr rrhsExpr = rhs.getRHS();
if (auto rrhs = mlir::dyn_cast<AffineConstantExpr>(rrhsExpr)) {
if (rrhs.getValue() == -1) {
PrintExprImpl(lhsExpr, false, os);
os << " - ";
if (rhs.getLHS().getKind() == AffineExprKind::Add) {
PrintExprImpl(rhs.getLHS(), true, os);
} else {
PrintExprImpl(rhs.getLHS(), false, os);
}
if (add_parentheses) {
os << ')';
}
return;
}
if (rrhs.getValue() < -1) {
PrintExprImpl(lhsExpr, false, os);
os << " - ";
PrintExprImpl(rhs.getLHS(), true, os);
os << " * " << -rrhs.getValue();
if (add_parentheses) {
os << ')';
}
return;
}
}
}
}
if (auto rhsConst = mlir::dyn_cast<AffineConstantExpr>(rhsExpr)) {
if (rhsConst.getValue() < 0) {
PrintExprImpl(lhsExpr, false, os);
os << " - " << -rhsConst.getValue();
if (add_parentheses) {
os << ')';
}
return;
}
}
PrintExprImpl(lhsExpr, false, os);
os << " + ";
PrintExprImpl(rhsExpr, false, os);
if (add_parentheses) {
os << ')';
}
}
void AffineMapPrinter::SetSymbolName(int64_t symbol_id, llvm::StringRef name) {
symbol_id_to_name_[symbol_id] = name;
}
void AffineMapPrinter::SetDimensionName(int64_t dim_id, llvm::StringRef name) {
dim_id_to_name_[dim_id] = name;
}
std::string AffineMapPrinter::GetSymbolName(int64_t symbol_id) const {
auto it = symbol_id_to_name_.find(symbol_id);
if (it == symbol_id_to_name_.end()) {
return absl::StrCat("s", symbol_id);
}
return it->second;
}
std::string AffineMapPrinter::GetDimensionName(int64_t dim_id) const {
auto it = dim_id_to_name_.find(dim_id);
if (it == dim_id_to_name_.end()) {
return absl::StrCat("d", dim_id);
}
return it->second;
}
}
} | #include "xla/service/gpu/model/affine_map_printer.h"
#include <gmock/gmock.h>
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineExpr;
using ::mlir::AffineMap;
using ::mlir::bindDims;
using ::mlir::bindSymbols;
using ::testing::HasSubstr;
class IndexingMapTest : public HloTestBase {
public:
mlir::MLIRContext mlir_context_;
AffineMapPrinter printer_;
};
TEST_F(IndexingMapTest, AffineMapPrinterTest) {
AffineExpr d0, d1, s0, s1;
bindDims(&mlir_context_, d0, d1);
bindSymbols(&mlir_context_, s0, s1);
auto map =
AffineMap::get(2, 2, {d0 + d1.floorDiv(8), s0 + s1 % 16}, &mlir_context_);
printer_.SetDimensionName(0, "offset");
printer_.SetSymbolName(1, "linear_index");
EXPECT_THAT(printer_.ToString(map),
HasSubstr("(offset, d1)[s0, linear_index] -> "
"(offset + d1 floordiv 8, s0 + linear_index mod 16)"));
}
}
}
} | 2,140 |
#ifndef XLA_SERVICE_GPU_MODEL_INDEXING_MAP_H_
#define XLA_SERVICE_GPU_MODEL_INDEXING_MAP_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/affine_map_printer.h"
namespace xla {
namespace gpu {
struct Interval {
std::string ToString() const;
void Print(std::ostream& out) const;
bool IsPoint() const { return lower == upper; }
bool IsFeasible() const { return lower <= upper; }
int64_t GetLoopTripCount() const;
bool Contains(int64_t value) const {
return value >= lower && value <= upper;
}
struct ComparisonResult {
std::optional<bool> result;
ComparisonResult operator!() const {
if (result) return {!*result};
return {result};
}
bool operator==(const ComparisonResult& other) const {
return result == other.result;
}
bool operator==(bool other) const { return result && *result == other; }
bool operator==(std::nullopt_t) const { return !result; }
bool operator!=(std::nullopt_t) const { return result.has_value(); }
bool operator*() const { return *result; }
};
ComparisonResult Gt(const Interval& b) const;
ComparisonResult Lt(const Interval& b) const { return b.Gt(*this); }
ComparisonResult Ge(const Interval& b) const { return !b.Gt(*this); }
ComparisonResult Le(const Interval& b) const { return !this->Gt(b); }
ComparisonResult Eq(const Interval& b) const;
ComparisonResult Ne(const Interval& b) const { return !this->Eq(b); }
Interval Intersect(const Interval& rhs) const {
Interval result{std::max(lower, rhs.lower), std::min(upper, rhs.upper)};
if (result.upper < result.lower) {
result.upper = result.lower - 1;
}
return result;
}
Interval Union(const Interval& rhs) const {
return {std::min(lower, rhs.lower), std::max(upper, rhs.upper)};
}
Interval operator+(const Interval& rhs) const;
Interval operator*(const Interval& rhs) const;
Interval min(const Interval& rhs) const {
return {std::min(lower, rhs.lower), std::min(upper, rhs.upper)};
}
Interval max(const Interval& rhs) const {
return {std::max(lower, rhs.lower), std::max(upper, rhs.upper)};
}
bool operator==(const Interval& rhs) const {
return lower == rhs.lower && upper == rhs.upper;
}
bool operator!=(const Interval& rhs) const { return !(*this == rhs); }
int64_t lower = 0;
int64_t upper = 0;
};
std::ostream& operator<<(std::ostream& out, const Interval& range);
template <typename H>
H AbslHashValue(H h, const Interval& range) {
return H::combine(std::move(h), range.lower, range.upper);
}
inline size_t hash_value(const Interval& range) {
return llvm::hash_combine(range.lower, range.upper);
}
class RangeEvaluator {
public:
RangeEvaluator(absl::Span<const Interval> dim_ranges,
absl::Span<const Interval> symbol_ranges,
mlir::MLIRContext* mlir_context);
bool IsAlwaysPositiveOrZero(mlir::AffineExpr expr);
bool IsAlwaysNegativeOrZero(mlir::AffineExpr expr);
Interval ComputeExpressionRange(mlir::AffineExpr expr);
mlir::MLIRContext* GetMLIRContext() const { return mlir_context_; }
private:
mlir::MLIRContext* mlir_context_;
llvm::DenseMap<mlir::AffineExpr, Interval> expression_ranges_cache_;
};
struct DimVar {
Interval bounds;
};
bool operator==(const DimVar& lhs, const DimVar& rhs);
inline bool operator!=(const DimVar& lhs, const DimVar& rhs) {
return !(lhs == rhs);
}
template <typename H>
H AbslHashValue(H h, const DimVar& dimension) {
return H::combine(std::move(h), dimension.bounds);
}
struct RangeVar {
Interval range;
};
bool operator==(const RangeVar& lhs, const RangeVar& rhs);
inline bool operator!=(const RangeVar& lhs, const RangeVar& rhs) {
return !(lhs == rhs);
}
template <typename H>
H AbslHashValue(H h, const RangeVar& range_var) {
return H::combine(std::move(h), range_var.range);
}
struct RTVar {
Interval feasible_values;
const HloInstruction* hlo;
mlir::AffineMap map;
};
bool operator==(const RTVar& lhs, const RTVar& rhs);
inline bool operator!=(const RTVar& lhs, const RTVar& rhs) {
return !(lhs == rhs);
}
template <typename H>
H AbslHashValue(H h, const RTVar& rt_var) {
llvm::hash_code map_hash = llvm::hash_combine(rt_var.map);
return H::combine(std::move(h), rt_var.feasible_values, rt_var.hlo,
static_cast<size_t>(map_hash));
}
std::vector<DimVar> DimVarsFromTensorSizes(
absl::Span<const int64_t> tensor_sizes);
std::vector<RangeVar> RangeVarsFromTensorSizes(
absl::Span<const int64_t> tensor_sizes);
class IndexingMap {
public:
IndexingMap(
mlir::AffineMap affine_map, std::vector<DimVar> dimensions,
std::vector<RangeVar> range_vars, std::vector<RTVar> rt_vars,
absl::Span<std::pair<mlir::AffineExpr, Interval>> constraints = {});
IndexingMap(mlir::AffineMap affine_map, std::vector<DimVar> dimensions,
std::vector<RangeVar> range_vars, std::vector<RTVar> rt_vars,
const llvm::DenseMap<mlir::AffineExpr, Interval>& constraints);
static IndexingMap GetUndefined() { return IndexingMap(); }
static IndexingMap FromTensorSizes(
mlir::AffineMap affine_map, absl::Span<const int64_t> dim_upper_bounds,
absl::Span<const int64_t> symbol_upper_bounds);
std::string ToString(
const AffineMapPrinter& printer = AffineMapPrinter()) const;
void Print(std::ostream& out, const AffineMapPrinter& printer) const;
bool Simplify();
mlir::MLIRContext* GetMLIRContext() const;
mlir::AffineMap GetAffineMap() const { return affine_map_; }
mlir::AffineMap& GetMutableAffineMap() { return affine_map_; }
RangeEvaluator GetRangeEvaluator() const;
const DimVar& GetDimVars(int64_t id) const { return dim_vars_[id]; }
const std::vector<DimVar>& GetDimVars() const { return dim_vars_; }
int64_t GetDimVarsCount() const { return dim_vars_.size(); }
const RangeVar& GetRangeVar(int64_t id) const { return range_vars_[id]; }
const std::vector<RangeVar>& GetRangeVars() const { return range_vars_; }
int64_t GetRangeVarsCount() const { return range_vars_.size(); }
const RTVar& GetRTVar(int64_t id) const { return rt_vars_[id]; }
const std::vector<RTVar>& GetRTVars() const { return rt_vars_; }
int64_t GetRTVarsCount() const { return rt_vars_.size(); }
const Interval& GetDimensionBound(int64_t dim_id) const;
Interval& GetMutableDimensionBound(int64_t dim_id);
std::vector<Interval> GetDimensionBounds() const;
int64_t GetDimensionCount() const { return affine_map_.getNumDims(); }
const Interval& GetSymbolBound(int64_t symbol_id) const;
Interval& GetMutableSymbolBound(int64_t symbol_id);
std::vector<Interval> GetSymbolBounds() const;
int64_t GetSymbolCount() const { return affine_map_.getNumSymbols(); }
const llvm::DenseMap<mlir::AffineExpr, Interval>& GetConstraints() const {
return constraints_;
}
int64_t GetConstraintsCount() const { return constraints_.size(); }
void AddConstraint(mlir::AffineExpr expr, Interval range);
bool ConstraintsSatisfied(
llvm::ArrayRef<mlir::AffineExpr> dim_const_exprs,
llvm::ArrayRef<mlir::AffineExpr> symbol_const_exprs) const;
llvm::SmallVector<int64_t, 4> Evaluate(
llvm::ArrayRef<mlir::AffineExpr> dim_const_exprs,
llvm::ArrayRef<mlir::AffineExpr> symbol_const_exprs) const;
bool IsKnownEmpty() const { return is_known_empty_; }
bool IsUndefined() const { return affine_map_ == mlir::AffineMap(); }
llvm::SmallBitVector RemoveUnusedDimensions();
llvm::SmallBitVector RemoveUnusedSymbols();
llvm::SmallBitVector RemoveUnusedVars();
bool RescaleSymbols();
bool IsRangeVarSymbol(mlir::AffineSymbolExpr symbol) const;
bool IsRTVarSymbol(mlir::AffineSymbolExpr symbol) const;
IndexingMap GetSubMap(unsigned int result_index) const {
return {affine_map_.getSubMap({result_index}), dim_vars_, range_vars_,
rt_vars_, constraints_};
}
private:
IndexingMap() = default;
bool SimplifyConstraintExprs();
bool SimplifyConstraintRanges();
bool MergeModConstraints();
bool ReplaceConstantRTVars();
bool CompressVars(const llvm::SmallBitVector& unused_dims,
const llvm::SmallBitVector& unused_symbols);
void ResetToKnownEmpty();
bool VerifyVariableIntervals();
bool VerifyConstraintIntervals();
mlir::AffineMap affine_map_;
std::vector<DimVar> dim_vars_;
std::vector<RangeVar> range_vars_;
std::vector<RTVar> rt_vars_;
llvm::DenseMap<mlir::AffineExpr, Interval> constraints_;
bool is_known_empty_ = false;
};
std::ostream& operator<<(std::ostream& out, const IndexingMap& indexing_map);
bool operator==(const IndexingMap& lhs, const IndexingMap& rhs);
inline bool operator!=(const IndexingMap& lhs, const IndexingMap& rhs) {
return !(lhs == rhs);
}
IndexingMap operator*(const IndexingMap& lhs, const IndexingMap& rhs);
IndexingMap ComposeIndexingMaps(const IndexingMap& first,
const IndexingMap& second);
void PrintRTVars(const std::vector<RTVar>& rt_vars,
int first_rt_var_symbol_index, std::ostream& out,
const AffineMapPrinter& printer);
template <typename H>
H AbslHashValue(H h, const IndexingMap& indexing_map) {
llvm::hash_code affine_map_hash =
llvm::hash_combine(indexing_map.GetAffineMap());
llvm::SmallVector<size_t> constraint_hashes;
constraint_hashes.reserve(indexing_map.GetConstraintsCount());
for (const auto& [expr, interval] : indexing_map.GetConstraints()) {
constraint_hashes.push_back(llvm::hash_combine(expr, interval));
}
h = H::combine(std::move(h), static_cast<size_t>(affine_map_hash),
indexing_map.GetDimVars(), indexing_map.GetRangeVars(),
indexing_map.GetRTVars());
h = H::combine_unordered(std::move(h), constraint_hashes.begin(),
constraint_hashes.end());
return h;
}
int64_t FloorDiv(int64_t dividend, int64_t divisor);
int64_t CeilDiv(int64_t dividend, int64_t divisor);
}
}
#endif
#include "xla/service/gpu/model/indexing_map.h"
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <numeric>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/base/optimization.h"
#include "absl/types/span.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "tsl/platform/logging.h"
#ifdef __has_builtin
#define XLA_GPU_MODEL_HAS_BUILTIN(x) __has_builtin(x)
#else
#define XLA_GPU_MODEL_HAS_BUILTIN(x) 0
#endif
#if !XLA_GPU_MODEL_HAS_BUILTIN(__builtin_add_overflow) || \
!XLA_GPU_MODEL_HAS_BUILTIN(__builtin_mul_overflow)
#include "absl/numeric/int128.h"
#endif
namespace xla {
namespace gpu {
namespace {
using llvm::ArrayRef;
using llvm::SmallBitVector;
using llvm::SmallVector;
using mlir::AffineBinaryOpExpr;
using mlir::AffineConstantExpr;
using mlir::AffineDimExpr;
using mlir::AffineExpr;
using mlir::AffineExprKind;
using mlir::AffineMap;
using mlir::AffineSymbolExpr;
using mlir::getAffineBinaryOpExpr;
using mlir::getAffineConstantExpr;
using mlir::getAffineDimExpr;
using mlir::MLIRContext;
AffineExpr GetLhs(AffineExpr e) {
return mlir::cast<AffineBinaryOpExpr>(e).getLHS();
}
AffineExpr GetRhs(AffineExpr e) {
return mlir::cast<AffineBinaryOpExpr>(e).getRHS();
}
AffineExpr MapSummands(AffineExpr expr,
const std::function<AffineExpr(AffineExpr)>& fn) {
if (expr.getKind() == AffineExprKind::Add) {
auto add = mlir::dyn_cast<AffineBinaryOpExpr>(expr);
auto lhs = MapSummands(add.getLHS(), fn);
auto rhs = MapSummands(add.getRHS(), fn);
if (lhs == add.getLHS() && rhs == add.getRHS()) {
return add;
}
return lhs + rhs;
}
return fn(expr);
}
void VisitSummands(mlir::AffineExpr expr,
const std::function<void(mlir::AffineExpr)>& visit) {
if (expr.getKind() == AffineExprKind::Add) {
VisitSummands(GetLhs(expr), visit);
VisitSummands(GetRhs(expr), visit);
} else {
visit(expr);
}
}
class AffineExprSimplifier {
public:
explicit AffineExprSimplifier(RangeEvaluator* range_evaluator)
: range_evaluator_(range_evaluator) {}
mlir::AffineMap Simplify(mlir::AffineMap affine_map);
mlir::AffineExpr Simplify(mlir::AffineExpr expr);
private:
std::optional<int64_t> GetConstantRhs(mlir::AffineExpr expr,
AffineExprKind kind);
std::pair<mlir::AffineExpr, int64_t> ExtractMultiplier(
mlir::AffineExpr expr) {
if (auto mul = GetConstantRhs(expr, AffineExprKind::Mul)) {
return {GetLhs(expr), *mul};
}
return {expr, 1};
}
mlir::AffineExpr RewriteMod(mlir::AffineBinaryOpExpr mod);
mlir::AffineExpr RewriteFloorDiv(mlir::AffineBinaryOpExpr div);
AffineExpr SimplifyModDiv(AffineExpr dividend, int64_t divisor);
AffineExpr SimplifyDivDiv(AffineExpr dividend, int64_t divisor);
AffineExpr SimplifySumDiv(AffineExpr dividend, int64_t divisor);
mlir::AffineExpr SimplifyOnce(mlir::AffineExpr expr);
mlir::AffineExpr SimplifyWithMlir(mlir::AffineExpr expr, int num_dims,
int num_symbols);
mlir::AffineMap SimplifyWithMlir(mlir::AffineMap map) {
llvm::SmallVector<mlir::AffineExpr, 8> exprs;
for (auto e : map.getResults()) {
exprs.push_back(
SimplifyWithMlir(e, map.getNumDims(), map.getNumSymbols()));
}
return mlir::AffineMap::get(map.getNumDims(), map.getNumSymbols(), exprs,
map.getContext());
}
RangeEvaluator* range_evaluator_;
};
AffineExpr AffineExprSimplifier::RewriteMod(AffineBinaryOpExpr mod) {
auto rhs = range_evaluator_->ComputeExpressionRange(mod.getRHS());
if (!rhs.IsPoint()) {
return mod;
}
int64_t m = rhs.lower;
if (m == 0) {
return mlir::getAffineConstantExpr(0, mod.getContext());
}
auto lhs_simplified = SimplifyOnce(mod.getLHS());
auto lhs = range_evaluator_->ComputeExpressionRange(lhs_simplified);
if (0 <= lhs.lower && lhs.upper < rhs.lower) {
return lhs_simplified;
}
if (auto mul = GetConstantRhs(lhs_simplified, AffineExprKind::Mul);
mul && *mul > 0 && (m % *mul == 0)) {
return (GetLhs(lhs_simplified) % (m / *mul)) * *mul;
}
auto zero = getAffineConstantExpr(0, mod.getContext());
int64_t extracted_constant = 0;
auto new_lhs = MapSummands(lhs_simplified, [&](AffineExpr expr) {
if (auto cst = mlir::dyn_cast<AffineConstantExpr>(expr)) {
extracted_constant += cst.getValue();
return zero;
}
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul);
multiplier && (*multiplier % m == 0)) {
return zero;
}
return expr;
});
new_lhs = new_lhs + (extracted_constant % m);
Interval no_multiplier_range{0, 0};
std::optional<int64_t> multiplier_gcd = std::nullopt;
VisitSummands(new_lhs, [&](AffineExpr expr) {
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul)) {
if (multiplier_gcd.has_value()) {
multiplier_gcd = std::gcd(*multiplier_gcd, *multiplier);
} else {
multiplier_gcd = *multiplier;
}
} else {
auto range = range_evaluator_->ComputeExpressionRange(expr);
no_multiplier_range.lower += range.lower;
no_multiplier_range.upper += range.upper;
}
});
mlir::AffineExpr extracted = getAffineConstantExpr(0, mod.getContext());
if (multiplier_gcd.has_value()) {
if (m % *multiplier_gcd == 0 && no_multiplier_range.lower >= 0 &&
no_multiplier_range.upper < *multiplier_gcd) {
new_lhs = MapSummands(new_lhs, [&](AffineExpr expr) {
if (GetConstantRhs(expr, AffineExprKind::Mul)) {
return expr;
}
extracted = extracted + expr;
return zero;
});
}
}
return new_lhs % mod.getRHS() + extracted;
}
AffineExpr AffineExprSimplifier::SimplifyModDiv(AffineExpr dividend,
int64_t divisor) {
if (auto mod = GetConstantRhs(dividend, AffineExprKind::Mod);
mod && (*mod % divisor == 0)) {
return GetLhs(dividend).floorDiv(divisor) % (*mod / divisor);
}
return nullptr;
}
AffineExpr AffineExprSimplifier::SimplifyDivDiv(AffineExpr dividend,
int64_t divisor) {
if (auto inner_divisor = GetConstantRhs(dividend, AffineExprKind::FloorDiv)) {
return GetLhs(dividend).floorDiv(divisor * *inner_divisor);
}
return nullptr;
}
AffineExpr AffineExprSimplifier::SimplifySumDiv(AffineExpr dividend,
int64_t divisor) {
AffineExpr zero = getAffineConstantExpr(0, dividend.getContext());
AffineExpr extracted = zero;
auto new_dividend = MapSummands(dividend, [&](AffineExpr expr) {
if (auto multiplier = GetConstantRhs(expr, AffineExprKind::Mul)) {
if (*multiplier % divisor == 0) {
int64_t factor = *multiplier / divisor;
extracted = extracted + GetLhs(expr) * factor;
return zero;
}
}
return expr;
});
int64_t multiplier_divisor_gcd = divisor;
Interval no_multiplier_range{0, 0};
std::optional<int64_t> inner_divisor = std::nullopt;
int num_inner_divisors = 0;
VisitSummands(new_dividend, [&](AffineExpr summand) {
if (auto multiplier = GetConstantRhs(summand, AffineExprKind::Mul)) {
multiplier_divisor_gcd = std::gcd(multiplier_divisor_gcd, *multiplier);
} else {
no_multiplier_range = no_multiplier_range +
range_evaluator_->ComputeExpressionRange(summand);
}
if (auto divisor = GetConstantRhs(summand, AffineExprKind::FloorDiv)) {
inner_divisor = divisor;
++num_inner_divisors;
}
});
if (no_multiplier_range.lower >= 0 &&
no_multiplier_range.upper < multiplier_divisor_gcd) {
new_dividend = MapSummands(new_dividend, [&](AffineExpr summand) {
if (auto mult = GetConstantRhs(summand, AffineExprKind::Mul)) {
return GetLhs(summand) * (*mult / multiplier_divisor_gcd);
}
return zero;
});
divisor /= multiplier_divisor_gcd;
}
if (num_inner_divisors == 1) {
new_dividend = MapSummands(new_dividend, [&](AffineExpr summand) {
if (auto inner_divisor =
GetConstantRhs(summand, AffineExprKind::FloorDiv)) {
return GetLhs(summand).floorDiv(*inner_divisor / *inner_divisor);
}
return summand * *inner_divisor;
});
divisor *= *inner_divisor;
}
return new_dividend.floorDiv(divisor) + extracted;
}
AffineExpr AffineExprSimplifier::RewriteFloorDiv(AffineBinaryOpExpr div) {
auto rhs_range = range_evaluator_->ComputeExpressionRange(div.getRHS());
if (!rhs_range.IsPoint() || rhs_range.lower <= 0) {
return div;
}
int64_t d = rhs_range.lower;
auto lhs_simplified = SimplifyOnce(div.getLHS());
if (auto result = SimplifyModDiv(lhs_simplified, d)) {
return result;
}
if (auto result = SimplifyDivDiv(lhs_simplified, d)) {
return result;
}
if (auto result = SimplifySumDiv(lhs_simplified, d)) {
return result;
}
return div;
}
std::optional<int64_t> AffineExprSimplifier::GetConstantRhs(
AffineExpr expr, AffineExprKind kind) {
if (expr.getKind() != kind) {
return std::nullopt;
}
auto bound = range_evaluator_->ComputeExpressionRange(
mlir::cast<AffineBinaryOpExpr>(expr).getRHS());
if (!bound.IsPoint()) {
return std::nullopt;
}
return bound.lower;
}
int CompareExprs(AffineExpr a, AffineExpr b) {
if ((b.getKind() == AffineExprKind::Constant) !=
(a.getKind() == AffineExprKind::Constant)) {
return a.getKind() == AffineExprKind::Constant ? 1 : -1;
}
if (a.getKin | #include "xla/service/gpu/model/indexing_map.h"
#include <cstdint>
#include <limits>
#include <memory>
#include <optional>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/hash/hash_testing.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineMap;
using ::testing::AnyOf;
using ::testing::ElementsAre;
class IndexingMapTest : public HloTestBase {
public:
mlir::MLIRContext mlir_context_;
AffineMapPrinter printer_;
};
std::vector<bool> ConvertToSTL(const llvm::SmallBitVector& bit_vector) {
std::vector<bool> result;
result.reserve(bit_vector.size());
for (int i = 0; i < bit_vector.size(); ++i) {
result.push_back(bit_vector[i]);
}
return result;
}
TEST_F(IndexingMapTest, RTVar) {
auto zero_dim_map = AffineMap::get(&mlir_context_);
std::vector<RTVar> rt_vars{RTVar{Interval{0, 2},
nullptr, zero_dim_map},
RTVar({Interval{0, 7},
nullptr, zero_dim_map})};
IndexingMap indexing_map(
ParseAffineMap("(d0, d1)[s0, s1, s2] -> (d1, d0, s0 + s1, s1)",
&mlir_context_),
{DimVar{{0, 99}}, DimVar{{0, 43}}}, {RangeVar{{-99, 99}}},
std::move(rt_vars));
printer_.SetSymbolName(0, "range");
printer_.SetSymbolName(1, "rt_0");
printer_.SetSymbolName(2, "rt_1");
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0, d1)[range, rt_0, rt_1] -> (d1, d0, range + rt_0, rt_0)
domain:
d0 in [0, 100)
d1 in [0, 44)
range in [-99, 100)
rt_0 in [0, 3)
hlo: NULL
() -> ()
rt_1 in [0, 8)
hlo: NULL
() -> ()
)"));
}
TEST_F(IndexingMapTest, Evaluation) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (d1, d0, s1, s0)", &mlir_context_),
{4, 4}, {2, 2});
auto results = indexing_map.Evaluate(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({3, 4}, &mlir_context_));
EXPECT_THAT(results, ElementsAre(2, 1, 4, 3));
auto feasible = indexing_map.ConstraintsSatisfied(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({3, 4}, &mlir_context_));
EXPECT_TRUE(feasible);
indexing_map.AddConstraint(ParseAffineExpr("s0 mod 4", &mlir_context_),
Interval{0, 0});
auto infeasible = indexing_map.ConstraintsSatisfied(
mlir::getAffineConstantExprs({1, 2}, &mlir_context_),
mlir::getAffineConstantExprs({5, 4}, &mlir_context_));
EXPECT_FALSE(infeasible);
}
TEST_F(IndexingMapTest, Composition_Permutation) {
IndexingMap producer = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (d1, d0, s1, s0)", &mlir_context_),
{4, 4}, {2, 2});
IndexingMap consumer = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0)[s0] -> (d0, s0)", &mlir_context_), {4}, {4});
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0)
domain:
d0 in [0, 4)
s0 in [0, 2)
s1 in [0, 2)
s2 in [0, 4)
)"));
}
TEST_F(IndexingMapTest, Composition_RestrictedInterval) {
IndexingMap producer = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (d1, d0, s1, s0)", &mlir_context_),
{5, 6}, {7, 2});
IndexingMap consumer = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0)[s0] -> (d0, s0)", &mlir_context_), {10}, {8});
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0)
domain:
d0 in [0, 5)
s0 in [0, 7)
s1 in [0, 2)
s2 in [0, 6)
)"));
}
TEST_F(IndexingMapTest, Composition_ProducerAndConsumerHaveConstraints) {
IndexingMap producer = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (d1, d0, s1, s0)", &mlir_context_),
{50, 60}, {70, 20});
producer.AddConstraint(ParseAffineExpr("d0 mod 8", &mlir_context_),
Interval{0, 0});
producer.AddConstraint(ParseAffineExpr("s0 mod 3", &mlir_context_),
Interval{1, 1});
IndexingMap consumer = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0)[s0] -> (d0, s0)", &mlir_context_), {10}, {8});
consumer.AddConstraint(ParseAffineExpr("d0 + s0", &mlir_context_),
Interval{0, 20});
consumer.AddConstraint(ParseAffineExpr("s0 mod 4", &mlir_context_),
Interval{0, 0});
auto composed = ComposeIndexingMaps(consumer, producer);
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0)
domain:
d0 in [0, 10)
s0 in [0, 70)
s1 in [0, 20)
s2 in [0, 8)
d0 + s2 in [0, 21)
d0 mod 8 in [0, 1)
s0 mod 3 in [1, 2)
s2 mod 4 in [0, 1)
)"));
EXPECT_TRUE(composed.Simplify());
EXPECT_THAT(composed, MatchIndexingMap(R"(
(d0)[s0, s1, s2] -> (s2, d0, s1, s0)
domain:
d0 in [0, 9)
s0 in [1, 68)
s1 in [0, 20)
s2 in [0, 5)
d0 mod 8 in [0, 1)
s0 mod 3 in [1, 2)
s2 mod 4 in [0, 1)
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedDimensions_ConstraintUsesDim) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (d1, s0, s1)", &mlir_context_),
{50, 60}, {70, 20});
indexing_map.AddConstraint(ParseAffineExpr("s0 + d0", &mlir_context_),
Interval{1, 100});
indexing_map.AddConstraint(ParseAffineExpr("s0 mod 3", &mlir_context_),
Interval{0, 0});
indexing_map.RemoveUnusedDimensions();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d1, s0, s1)
domain:
d0 in [0, 50)
d1 in [0, 60)
s0 in [0, 70)
s1 in [0, 20)
d0 + s0 in [1, 101)
s0 mod 3 in [0, 1)
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedDimensions_ConstraintUsesOnlyUnusedDim) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (s0, d1, s1)", &mlir_context_),
{50, 60}, {70, 20});
indexing_map.AddConstraint(ParseAffineExpr("d0 mod 3", &mlir_context_),
Interval{0, 0});
indexing_map.RemoveUnusedDimensions();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, s1] -> (s0, d0, s1)
domain:
d0 in [0, 60)
s0 in [0, 70)
s1 in [0, 20)
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedDimensions_ConstraintsWithManyDims) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1, d2, d3, d4)[s0, s1] -> (s0 * 4 + d1 + d3 - 42)",
&mlir_context_),
{1, 2, 3, 4, 5}, {32, 64});
indexing_map.AddConstraint(
ParseAffineExpr("s0 * 4 + d1 + d3", &mlir_context_), Interval{24, 459});
indexing_map.RemoveUnusedDimensions();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d0 + s0 * 4 + d1 - 42)
domain:
d0 in [0, 2)
d1 in [0, 4)
s0 in [0, 32)
s1 in [0, 64)
d0 + s0 * 4 + d1 in [24, 460)
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedVars_ConstraintsWithManyDims) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(
"(d0, d1, d2, d3, d4)[s0, s1, s2] -> (s0 * 4 + d1 + d3 - 42)",
&mlir_context_),
{1, 2, 3, 4, 5}, {32, 64, 96});
indexing_map.AddConstraint(
ParseAffineExpr("s0 * 4 + d1 + d3", &mlir_context_), Interval{24, 459});
indexing_map.AddConstraint(ParseAffineExpr("s0 + s2", &mlir_context_),
Interval{0, 512});
auto unused_vars = indexing_map.RemoveUnusedVars();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d0 + s0 * 4 + d1 - 42)
domain:
d0 in [0, 2)
d1 in [0, 4)
s0 in [0, 32)
s1 in [0, 96)
d0 + s0 * 4 + d1 in [24, 460)
s0 + s1 in [0, 513)
)"));
EXPECT_THAT(ConvertToSTL(unused_vars),
::testing::ElementsAreArray(
{true, false, true, false, true, false, true, false}));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintUsesSymbol) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (d1, d0, s1)", &mlir_context_),
{50, 60}, {70, 20});
indexing_map.AddConstraint(ParseAffineExpr("s0 + s1", &mlir_context_),
Interval{1, 100});
indexing_map.AddConstraint(ParseAffineExpr("s0 mod 3", &mlir_context_),
Interval{0, 0});
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d1, d0, s1)
domain:
d0 in [0, 50)
d1 in [0, 60)
s0 in [0, 70)
s1 in [0, 20)
s0 + s1 in [1, 101)
s0 mod 3 in [0, 1)
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintUsesOnlyUnusedSymbols) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (d1, d0, s1)", &mlir_context_),
{50, 60}, {70, 20});
indexing_map.AddConstraint(ParseAffineExpr("s0 mod 3", &mlir_context_),
Interval{0, 0});
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0, d1)[s0] -> (d1, d0, s0)
domain:
d0 in [0, 50)
d1 in [0, 60)
s0 in [0, 20)
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintIsAConstantWithinRange) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_), {50}, {});
indexing_map.AddConstraint(ParseAffineExpr("0", &mlir_context_),
Interval{-10, 5});
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0) -> (d0)
domain:
d0 in [0, 50)
)"));
}
TEST_F(IndexingMapTest, KnownEmpty_CreatingIndexingMapWithInfeasibleRange) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_), {-1}, {});
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, KnownEmpty_AddingConstraintOutOfRange) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_), {50}, {});
indexing_map.AddConstraint(ParseAffineExpr("0", &mlir_context_),
Interval{10, 15});
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, KnownEmpty_Composition) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_), {50}, {});
IndexingMap known_empty = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (0)", &mlir_context_), {0}, {});
EXPECT_THAT(known_empty, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_THAT(indexing_map * known_empty, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_THAT(known_empty * indexing_map, MatchIndexingMap("KNOWN EMPTY"));
EXPECT_EQ((indexing_map * known_empty).GetAffineMap().getNumResults(), 1);
EXPECT_EQ((known_empty * indexing_map).GetAffineMap().getNumResults(), 1);
}
TEST_F(IndexingMapTest,
KnownEmpty_AddingConstraintOutOfRangeAfterSimplification) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (d1, d0, s1)", &mlir_context_),
{50, 60}, {70, 20});
indexing_map.AddConstraint(ParseAffineExpr("s1 floordiv 20", &mlir_context_),
Interval{2, 2});
EXPECT_THAT(indexing_map, MatchIndexingMap("KNOWN EMPTY"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintsWithManySymbols) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42)",
&mlir_context_),
{32}, {1, 2, 3, 4, 5});
indexing_map.AddConstraint(
ParseAffineExpr("d0 * 4 + s1 + s3", &mlir_context_), Interval{24, 459});
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, s1] -> (d0 * 4 + s0 + s1 - 42)
domain:
d0 in [0, 32)
s0 in [0, 2)
s1 in [0, 4)
d0 * 4 + s0 + s1 in [24, 460)
)"));
}
TEST_F(IndexingMapTest, RemoveUnusedSymbols_ConstraintsWithRTVars) {
auto zero_dim_map = AffineMap::get(&mlir_context_);
IndexingMap indexing_map(
ParseAffineMap("(d0)[s0, s1, s2, s3, s4] -> (d0 * 4 + s1 + s3 - 42)",
&mlir_context_),
{DimVar{{0, 31}}}, {RangeVar{{0, 0}}, RangeVar{{0, 1}}, RangeVar{{0, 2}}},
{RTVar{Interval{0, 3},
nullptr, zero_dim_map},
RTVar{Interval{0, 4},
nullptr, zero_dim_map}});
indexing_map.AddConstraint(
ParseAffineExpr("d0 * 4 + s1 + s3", &mlir_context_), Interval{24, 459});
indexing_map.RemoveUnusedSymbols();
EXPECT_THAT(indexing_map, MatchIndexingMap(R"(
(d0)[s0, s1] -> (d0 * 4 + s0 + s1 - 42)
domain:
d0 in [0, 32)
s0 in [0, 2)
s1 in [0, 4)
hlo: NULL
() -> ()
d0 * 4 + s0 + s1 in [24, 460)
)"));
}
TEST_F(IndexingMapTest, ConstraintIntervalSimplification_Sum) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_), {100}, {});
indexing_map.AddConstraint(ParseAffineExpr("(d0 mod 8) + 5", &mlir_context_),
Interval{50, 54});
EXPECT_THAT(indexing_map.ToString(), MatchIndexingString(R"(
(d0) -> (d0)
domain:
d0 in [0, 100)
d0 mod 8 in [45, 50)
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivPositiveDivisorPositiveBounds) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_), {100}, {});
indexing_map.AddConstraint(ParseAffineExpr("d0 floordiv 8", &mlir_context_),
Interval{5, 11});
EXPECT_THAT(indexing_map.ToString(), MatchIndexingString(R"(
(d0) -> (d0)
domain:
d0 in [40, 96)
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivPositiveDivisorNegativeBounds) {
IndexingMap indexing_map =
IndexingMap(ParseAffineMap("(d0)[s0] -> (d0)", &mlir_context_),
{DimVar{{0, 99}}}, {RangeVar{{-99, 99}}}, {});
indexing_map.AddConstraint(ParseAffineExpr("s0 floordiv 3", &mlir_context_),
Interval{-11, -5});
EXPECT_THAT(indexing_map.ToString(), MatchIndexingString(R"(
(d0)[s0] -> (d0)
domain:
d0 in [0, 100)
s0 in [-33, -12)
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_FloorDivNegativeDivisorNegativeBounds) {
IndexingMap indexing_map =
IndexingMap(ParseAffineMap("(d0)[s0] -> (d0)", &mlir_context_),
{DimVar{{0, 99}}}, {RangeVar{{-99, 99}}}, {});
indexing_map.AddConstraint(ParseAffineExpr("s0 floordiv -3", &mlir_context_),
Interval{-11, -5});
EXPECT_THAT(indexing_map.ToString(), MatchIndexingString(R"(
(d0)[s0] -> (d0)
domain:
d0 in [0, 100)
s0 in [15, 36)
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulPositiveMultiplierPositiveBounds) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0)", &mlir_context_), {100}, {});
indexing_map.AddConstraint(ParseAffineExpr("d0 * 8", &mlir_context_),
Interval{14, 33});
EXPECT_THAT(indexing_map.ToString(), MatchIndexingString(R"(
(d0) -> (d0)
domain:
d0 in [2, 5)
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulPositiveMultiplierNegativeBounds) {
IndexingMap indexing_map =
IndexingMap(ParseAffineMap("(d0)[s0] -> (d0)", &mlir_context_),
{DimVar{{0, 99}}}, {RangeVar{{-99, 99}}}, {});
indexing_map.AddConstraint(ParseAffineExpr("s0 * 3", &mlir_context_),
Interval{-11, -5});
EXPECT_THAT(indexing_map.ToString(), MatchIndexingString(R"(
(d0)[s0] -> (d0)
domain:
d0 in [0, 100)
s0 in [-3, -1)
)"));
}
TEST_F(IndexingMapTest,
ConstraintIntervalSimplification_MulNegativeMultiplierNegativeBounds) {
IndexingMap indexing_map =
IndexingMap(ParseAffineMap("(d0)[s0] -> (d0)", &mlir_context_),
{DimVar{{0, 99}}}, {RangeVar{{-99, 99}}}, {});
indexing_map.AddConstraint(ParseAffineExpr("s0 * -3", &mlir_context_),
Interval{-11, -5});
EXPECT_THAT(indexing_map.ToString(), MatchIndexingString(R"(
(d0)[s0] -> (d0)
domain:
d0 in [0, 100)
s0 in [2, 4)
)"));
}
TEST_F(IndexingMapTest, ConstraintMerge_Mod) {
IndexingMap indexing_map(
ParseAffineMap("(d0)[s0, s1] -> (d0, s1, s0)", &mlir_context_),
{DimVar{{0, 4}}}, {RangeVar{{-21, -1}}, RangeVar{{0, 10}}},
{});
indexing_map.AddConstraint(ParseAffineExpr("d0 mod 3", &mlir_context_),
Interval{0, 0});
indexing_map.AddConstraint(ParseAffineExpr("s0 mod 2", &mlir_context_),
Interval{0, 0});
indexing_map.AddConstraint(ParseAffineExpr("s0 mod 3", &mlir_context_),
Interval{0, 0});
indexing_map.AddConstraint(ParseAffineExpr("s1 mod 5", &mlir_context_),
Interval{1, 1});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(), MatchIndexingString(R"(
(d0)[s0, s1] -> (d0, s1, s0)
domain:
d0 in [0, 4)
s0 in [-18, -5)
s1 in [1, 7)
d0 mod 3 in [0, 1)
s0 mod 6 in [0, 1)
s1 mod 5 in [1, 2)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_ConstantDims) {
IndexingMap indexing_map =
IndexingMap(ParseAffineMap("(d0) -> (d0)", &mlir_context_),
{DimVar{{5, 5}}}, {}, {});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0) -> (5)
domain:
d0 in [5, 6)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SumOrderRegression) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1)[s0, s1] -> (((((d0 + (d0 mod 3)) floordiv 3) + "
"(s0 + ((s0 + s0) mod 3))) + (((d0 + s0) mod 3) + 0)))",
&mlir_context_),
{10, 20}, {30, 40});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest,
AffineMapSimplification_DivsAndModsIfSmallerThanDivisor) {
auto serialized_map = "(d0, d1) -> (d0 + d1 floordiv 16, d1 mod 16)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {8, 16}, {});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 8)
d1 in [0, 16)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsAndModsWithMultipliers) {
auto serialized_map =
"(d0, d1, d2) -> ((d0 * 100 + d1 * 10 + d2) floordiv 100, "
"((d0 * 100 + d1 * 10 + d2) mod 100) floordiv 10, "
"d2 mod 10)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {9, 9, 9}, {});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0, d1, d2) -> (d0, d1, d2)
domain:
d0 in [0, 9)
d1 in [0, 9)
d2 in [0, 9)
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_DivsAndModsWithDivisibleMultipliers) {
auto serialized_map =
"(d0, d1, d2) -> ((d0 * 16 + d1 * 4 + d2) floordiv 8, "
" (d0 * 16 + d1 * 4 + d2) mod 8)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {10, 10, 10}, {});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0, d1, d2) -> (d0 * 2 + (d1 * 4 + d2) floordiv 8,
(d1 * 4 + d2) mod 8)
domain:
d0 in [0, 10)
d1 in [0, 10)
d2 in [0, 10)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsAndModsWithReverse) {
auto serialized_map =
"(d0, d1) -> (-((d0 * -11 - d1 + 109) floordiv 11) + 9, "
"d0 * 11 + d1 + ((d0 * -11 - d1 + 109) floordiv 11) * 11 - 99)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {8, 9}, {});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 8)
d1 in [0, 9)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape) {
auto serialized_map =
"()[s0] -> ((s0 * 128) mod 715 + ((s0 * 128) floordiv 715) * 715)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {}, {128});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
()[s0] -> (s0 * 128)
domain: s0 in [0, 128)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape2) {
auto serialized_map =
"(d0, d1) -> ((d0 mod 8) * 128 + d1 + (d0 floordiv 8) * 1024)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {1024, 128}, {});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0, d1) -> (d0 * 128 + d1)
domain:
d0 in [0, 1024)
d1 in [0, 128)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape3) {
auto serialized_map =
"(d0, d1) -> (((d1 * 2 + d0 floordiv 64) mod 3) * 256 + (d0 mod 64) * 4 "
"+ ((d1 * 128 + d0) floordiv 192) * 768)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {128, 3072}, {});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0, d1) -> (d0 * 4 + d1 * 512)
domain:
d0 in [0, 128)
d1 in [0, 3072)
)"));
}
TEST_F(IndexingMapTest,
AffineMapSimplification_ModWithNegativeMultiplerDoesNotGetSimplified) {
auto serialized_map = "(d0) -> ((-d0) mod 2)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {128}, {});
EXPECT_FALSE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0) -> ((-d0) mod 2)
domain:
d0 in [0, 128)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyBitcastAndBack) {
auto serialized_map =
"(d0, d1) -> ((d0 floordiv 1536) * 786432 + (((d0 * 2 + d1 floordiv "
"64) floordiv 3) mod 1024) * 768 + ((d0 * 2 + d1 floordiv 64) mod 3) * "
"256 + (d1 mod 64) * 4)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {3072, 128}, {});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
(d0, d1) -> (d0 * 512 + d1 * 4)
domain:
d0 in [0, 3072)
d1 in [0, 128)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_SimplifyReshape_Regression) {
auto serialized_map =
"()[s0] -> ((s0 * 128) mod 715 + ((s0 * 64) floordiv 715) * 715)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {}, {128});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
()[s0] -> (((s0 * 64) floordiv 715) * 715 + (s0 * 128) mod 715)
domain: s0 in [0, 128)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivsInSequence) {
auto serialized_map =
"()[s0] -> (s0 - ((s0 floordiv 2) floordiv 7) * 14 + (s0 floordiv 14) * "
"14)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {}, {1234});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
()[s0] -> (s0)
domain:
s0 in [0, 1234)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivDiv) {
auto serialized_map = "()[s0, s1] -> ((s0 * 2 + s1 floordiv 64) floordiv 3)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {}, {1234, 128});
EXPECT_TRUE(indexing_map.Simplify());
EXPECT_THAT(indexing_map.ToString(printer_), MatchIndexingString(R"(
()[s0, s1] -> ((s0 * 128 + s1) floordiv 192)
domain:
s0 in [0, 1234)
s1 in [0, 128)
)"));
}
TEST_F(IndexingMapTest, AffineMapSimplification_DivSumDiv) {
auto serialized_map =
"()[s0, s1] -> ((s0 floordiv 3 + s1 floordiv 3) floordiv 6)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {}, {1234, 128});
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_NegativeDiv) {
auto serialized_map = "()[s0] -> ((s0 floordiv 2) floordiv -7)";
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap(serialized_map, &mlir_context_), {}, {1234});
EXPECT_FALSE(indexing_map.Simplify());
}
TEST_F(IndexingMapTest, AffineMapSimplification_ExtractFromMod) {
auto serialized_map =
"()[s0, s1, s2, s3] -> ((s0 * | 2,141 |
#ifndef XLA_SERVICE_GPU_MODEL_GPU_PERFORMANCE_MODEL_BASE_H_
#define XLA_SERVICE_GPU_MODEL_GPU_PERFORMANCE_MODEL_BASE_H_
#include <cstdint>
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/str_format.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
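// Estimated resource usage and runtime breakdown for a single (possibly
// fused) instruction: FLOPs, bytes moved, and the derived read, write,
// compute and overall execution times.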
struct EstimateRunTimeData {
int64_t flops;
int64_t bytes_read;
int64_t bytes_written;
absl::Duration read_time;
absl::Duration write_time;
absl::Duration compute_time;
absl::Duration exec_time;
std::string ToString() const {
return absl::StrFormat(
"EstimateRunTimeData{\n"
" flops: %d\n"
" bytes_read: %d\n"
" bytes_written: %d\n"
" read_time: %s\n"
" write_time: %s\n"
" compute_time: %s\n"
" exec_time: %s\n"
"}",
flops, bytes_read, bytes_written, absl::FormatDuration(read_time),
absl::FormatDuration(write_time), absl::FormatDuration(compute_time),
absl::FormatDuration(exec_time));
}
};
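// Thread-safe cache of runtime estimates. Stores per-instruction estimates as
// well as estimates for (producer, consumer) pairs that would result from
// fusing them.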
class GpuPerformanceModelCache {
public:
std::optional<EstimateRunTimeData> Get(const HloInstruction& instruction);
std::optional<absl::Duration> Get(const HloInstruction& producer,
const HloInstruction& consumer);
void Set(const HloInstruction& instruction,
const EstimateRunTimeData& runtime_data);
void Set(const HloInstruction& producer, const HloInstruction& consumer,
absl::Duration runtime);
void Invalidate(const HloInstruction& instruction);
private:
absl::Mutex mutex_;
absl::flat_hash_map<const HloInstruction*, EstimateRunTimeData>
instruction_runtime_data_;
absl::flat_hash_map<
const HloInstruction*,
absl::flat_hash_map<const HloInstruction*, absl::Duration>>
fusion_runtime_data_;
};
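// Tuning knobs for the performance model. `memory_compute_parallelism` is the
// fraction of the shorter of compute time and memory time that is assumed to
// overlap with the other (1.0 = perfect overlap, 0.0 = fully serialized); see
// CombineComputeAndMemoryAccessTime. The optional caches avoid recomputing
// fusion analyses and runtime estimates across queries.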
struct GpuPerformanceModelOptions {
double memory_compute_parallelism = 1.0;
HloFusionAnalysisCache* fusion_analysis_cache = nullptr;
GpuPerformanceModelCache* gpu_performance_model_cache = nullptr;
static GpuPerformanceModelOptions Default() {
return GpuPerformanceModelOptions();
}
static GpuPerformanceModelOptions PriorityFusion(
HloFusionAnalysisCache* fusion_analysis_cache = nullptr,
GpuPerformanceModelCache* gpu_performance_model_cache = nullptr) {
GpuPerformanceModelOptions config;
config.fusion_analysis_cache = fusion_analysis_cache;
config.gpu_performance_model_cache = gpu_performance_model_cache;
config.memory_compute_parallelism = 0.95;
return config;
}
static GpuPerformanceModelOptions ForModule(const HloModule* module) {
return module->config().debug_options().xla_gpu_enable_priority_fusion()
? PriorityFusion()
: Default();
}
};
class GpuPerformanceModelBase {
public:
struct RunTimes {
absl::Duration time_unfused;
absl::Duration time_fused;
};
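  // Estimated per-kernel launch overheads and the bandwidth speedup factors
  // applied when the working set fits into the L2 or L1 caches (used by the
  // read-time heuristics).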
static constexpr absl::Duration kKernelLaunchOverhead = absl::Microseconds(1);
static constexpr absl::Duration kNcclKernelLaunchOverhead =
absl::Microseconds(5);
static constexpr float kL2CacheSpeedup = 2.5;
static constexpr float kL1CacheSpeedup = 8;
static LaunchDimensions EstimateFusionLaunchDimensions(
const HloFusionAnalysis& fusion_analysis);
static int64_t GetOperandBytesAccessed(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* instr,
const HloInstruction* operand);
static float GetOperandUtilization(const GpuHloCostAnalysis* cost_analysis,
const HloInstruction* instr,
const HloInstruction* operand);
static float GetCommonUtilization(const GpuHloCostAnalysis* cost_analysis,
const HloInstruction* producer,
int64_t producer_idx_of_operand,
const HloInstruction* consumer);
static int64_t GetSharedOperandBytesAccessed(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* producer,
const HloInstruction* consumer, const HloInstruction* operand);
static absl::Duration ReadTime(const se::DeviceDescription& gpu_device_info,
int64_t num_blocks, int64_t n_bytes_net,
int64_t n_bytes_total);
static absl::Duration ReadTimeWithDRAMHeuristic(
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
int64_t n_bytes_net, int64_t n_bytes_total, PrimitiveType element_type,
bool coalesced);
static absl::Duration ProducerInputAccessTime(
const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
const HloInstruction* producer, const HloFusionAnalysis& fusion_analysis,
const GpuPerformanceModelOptions& config,
const HloInstruction* fused_consumer = nullptr);
static absl::Duration WriteTime(const se::DeviceDescription& gpu_device_info,
int64_t bytes_written);
static absl::Duration ComputeTime(
const se::DeviceDescription& gpu_device_info, int64_t flops,
int64_t num_blocks, int64_t num_threads_per_block);
static absl::Duration CombineComputeAndMemoryAccessTime(
absl::Duration compute_time, absl::Duration memory_access_time,
const GpuPerformanceModelOptions& config);
static void VLogOperandRead(const HloInstruction* operand,
int64_t n_bytes_total, int64_t n_bytes_net,
bool coalesced);
};
}
}
#endif
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/synchronization/mutex.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/fusions/triton.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
namespace {
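// Returns true if, according to the cost analysis, `fusion` uses the given
// parameter purely elementwise from its root.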
bool FusionUsesParameterElementwiseFromRoot(
const HloInstruction* fusion, int parameter_index,
const GpuHloCostAnalysis* cost_analysis) {
return cost_analysis->CommonElementwiseUtilization(
fusion->fused_parameter(parameter_index),
fusion->fused_expression_root()) == 1.f;
}
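// Estimates how many times more bytes than necessary are transferred from
// DRAM for uncoalesced reads: the DRAM-to-L2 transaction size divided by the
// element size. TUPLE and TOKEN use a placeholder element size of 4 bytes.
// E.g. with a 64-byte transaction size, an f32 read wastes a factor of 16.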
int GetCoalescingWasteFactor(PrimitiveType element_type,
const se::DeviceDescription& gpu_device_info) {
  int64_t element_size_bytes =
      element_type == PrimitiveType::TUPLE ||
              element_type == PrimitiveType::TOKEN
          ? 4
          : ShapeUtil::ByteSizeOfPrimitiveType(element_type);
return gpu_device_info.dram_to_l2_transaction_size_bytes() /
element_size_bytes;
}
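// Caps the given bandwidth by the maximum rate at which `num_blocks` blocks
// can issue memory transactions (clock rate times memory transactions per
// clock, per block).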
float AdjustBandwidth(const se::DeviceDescription& gpu_device_info,
float bandwidth, int64_t num_blocks) {
float per_block_bandwidth = gpu_device_info.clock_rate_ghz() * 1.0e9f *
gpu_device_info.memory_transactions_per_clock();
float max_bandwidth = num_blocks * per_block_bandwidth;
return std::min(bandwidth, max_bandwidth);
}
}
std::optional<EstimateRunTimeData> GpuPerformanceModelCache::Get(
const HloInstruction& instruction) {
absl::MutexLock lock(&mutex_);
auto it = instruction_runtime_data_.find(&instruction);
if (it != instruction_runtime_data_.end()) {
return it->second;
}
return std::nullopt;
}
std::optional<absl::Duration> GpuPerformanceModelCache::Get(
const HloInstruction& producer, const HloInstruction& consumer) {
absl::MutexLock lock(&mutex_);
auto it = fusion_runtime_data_.find(&producer);
if (it != fusion_runtime_data_.end()) {
auto jt = it->second.find(&consumer);
if (jt != it->second.end()) {
return jt->second;
}
}
return std::nullopt;
}
void GpuPerformanceModelCache::Set(const HloInstruction& instruction,
const EstimateRunTimeData& runtime_data) {
absl::MutexLock lock(&mutex_);
instruction_runtime_data_[&instruction] = runtime_data;
}
void GpuPerformanceModelCache::Set(const HloInstruction& producer,
const HloInstruction& consumer,
absl::Duration runtime) {
absl::MutexLock lock(&mutex_);
fusion_runtime_data_[&producer][&consumer] = runtime;
}
void GpuPerformanceModelCache::Invalidate(const HloInstruction& instruction) {
  absl::MutexLock lock(&mutex_);
  // Drop the standalone estimate and all fusion estimates where this
  // instruction is the producer.
  instruction_runtime_data_.erase(&instruction);
  fusion_runtime_data_.erase(&instruction);
  // Also drop fusion estimates where this instruction is the consumer. For
  // multi-output fusion producers the value is reached through a
  // get-tuple-element, so look through it.
  for (auto* operand : instruction.operands()) {
    if (operand->opcode() == HloOpcode::kGetTupleElement) {
      operand = operand->mutable_operand(0);
    }
    auto it = fusion_runtime_data_.find(operand);
    if (it != fusion_runtime_data_.end()) {
      it->second.erase(&instruction);
    }
  }
}
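// Uses the matching fusion emitter to get the exact launch dimensions when
// possible; otherwise falls back to a rough estimate of one thread per output
// element with 128 threads per block.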
LaunchDimensions GpuPerformanceModelBase::EstimateFusionLaunchDimensions(
const HloFusionAnalysis& fusion_analysis) {
auto emitter =
GetFusionEmitter(PreBufferAssignmentFusionInfo{fusion_analysis});
if (const auto* kernel_emitter =
dynamic_cast<const KernelFusionInterface*>(emitter.get())) {
return kernel_emitter->launch_dimensions();
}
if (const auto* triton_emitter =
dynamic_cast<const TritonFusion*>(emitter.get())) {
if (auto launch_config = triton_emitter->launch_config()) {
return launch_config->launch_dimensions;
}
}
VLOG(5) << "Using fallback launch dimensions estimate for "
<< fusion_analysis.fusion().ToString();
int64_t num_threads_per_block = 128;
int64_t estimated_num_threads =
ShapeUtil::ElementsInRecursive(fusion_analysis.fusion_root(0).shape());
int64_t num_blocks =
CeilOfRatio(estimated_num_threads, num_threads_per_block);
return LaunchDimensions(num_blocks, num_threads_per_block);
}
int64_t GpuPerformanceModelBase::GetOperandBytesAccessed(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* instr,
const HloInstruction* operand) {
if (!instr->IsUserOf(operand)) {
return 0;
}
return cost_analysis->operand_bytes_accessed(*instr,
instr->operand_index(operand));
}
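// Returns the utilization of `operand` by `instr` as reported by the cost
// analysis. For multi-output fusion producers the utilizations of all
// get-tuple-element users are summed.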
float GpuPerformanceModelBase::GetOperandUtilization(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* instr,
const HloInstruction* operand) {
if (operand->IsMultiOutputFusion()) {
float res = 0.f;
for (int64_t i = 0; i < instr->operand_count(); ++i) {
if (instr->operand(i)->opcode() == HloOpcode::kGetTupleElement &&
instr->operand(i)->operand(0) == operand) {
res += cost_analysis->operand_utilization(*instr, i);
}
}
return res;
}
if (!instr->IsUserOf(operand)) {
return 0.f;
}
return cost_analysis->operand_utilization(*instr,
instr->operand_index(operand));
}
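// Returns the portion of the operand that `producer` and `consumer` read with
// the same elementwise access pattern, i.e. the portion that would be read
// only once if the two were fused.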
float GpuPerformanceModelBase::GetCommonUtilization(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* producer,
int64_t producer_idx_of_operand, const HloInstruction* consumer) {
const auto* operand = producer->operand(producer_idx_of_operand);
if (!consumer || !consumer->IsUserOf(operand)) {
return 0.f;
}
if (producer->IsElementwise() ||
(producer->opcode() == HloOpcode::kFusion &&
FusionUsesParameterElementwiseFromRoot(producer, producer_idx_of_operand,
cost_analysis))) {
if (consumer->opcode() == HloOpcode::kFusion) {
int64_t consumer_idx_of_common_operand = consumer->operand_index(operand);
float res = 0.f;
std::vector<int64_t> consumer_indices_of_producer;
if (producer->IsMultiOutputFusion()) {
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i)->opcode() == HloOpcode::kGetTupleElement &&
consumer->operand(i)->operand(0) == producer) {
consumer_indices_of_producer.push_back(i);
}
}
} else {
consumer_indices_of_producer.push_back(
consumer->operand_index(producer));
}
for (int64_t consumer_idx_of_producer : consumer_indices_of_producer) {
res += cost_analysis->CommonElementwiseUtilization(
consumer->fused_parameter(consumer_idx_of_common_operand),
consumer->fused_parameter(consumer_idx_of_producer));
}
return res;
} else if (consumer->IsElementwise()) {
return 1.f;
}
}
return 0.f;
}
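// Estimates the bytes of `operand` accessed if `producer` and `consumer` were
// fused: the producer's accesses scaled by how much of its output the
// consumer uses, plus the consumer's own accesses, minus the bytes that would
// only be read once because of common elementwise use.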
int64_t GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
const GpuHloCostAnalysis* cost_analysis, const HloInstruction* producer,
const HloInstruction* consumer, const HloInstruction* operand) {
float producer_utilization_by_consumer =
GetOperandUtilization(cost_analysis, consumer, producer);
int64_t bytes_accessed_by_producer =
GetOperandBytesAccessed(cost_analysis, producer, operand);
int64_t bytes_accessed_by_consumer =
GetOperandBytesAccessed(cost_analysis, consumer, operand);
float common_utilization =
producer->IsUserOf(operand)
? GetCommonUtilization(cost_analysis, producer,
producer->operand_index(operand), consumer)
: 0.f;
int64_t operand_size = cost_analysis->GetShapeSize(operand->shape());
int64_t common_bytes_accessed =
std::llround(operand_size * common_utilization);
return std::llround(bytes_accessed_by_producer *
producer_utilization_by_consumer) +
bytes_accessed_by_consumer - common_bytes_accessed;
}
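// Estimates the time to read `n_bytes_total` bytes when only `n_bytes_net` of
// them are distinct: bandwidth is boosted when the distinct working set fits
// in L2 (and further when it fits in the combined L1 caches), then capped by
// the per-block transaction limit.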
absl::Duration GpuPerformanceModelBase::ReadTime(
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
int64_t n_bytes_net, int64_t n_bytes_total) {
float bandwidth = gpu_device_info.memory_bandwidth();
if (n_bytes_net < gpu_device_info.l2_cache_size()) {
bandwidth *= kL2CacheSpeedup;
if (n_bytes_net <
gpu_device_info.l1_cache_size_per_SM() * gpu_device_info.core_count()) {
bandwidth *= kL1CacheSpeedup;
}
}
bandwidth = AdjustBandwidth(gpu_device_info, bandwidth, num_blocks);
return absl::Seconds(n_bytes_total / bandwidth);
}
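// Like ReadTime, but accounts for uncoalesced accesses: the distinct
// `n_bytes_net` bytes are charged at DRAM bandwidth reduced by the coalescing
// waste factor, and the remaining re-reads at a rate that is boosted only if
// the working set fits in L2/L1.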
absl::Duration GpuPerformanceModelBase::ReadTimeWithDRAMHeuristic(
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
int64_t n_bytes_net, int64_t n_bytes_total, PrimitiveType element_type,
bool coalesced) {
int waste_factor =
coalesced ? 1 : GetCoalescingWasteFactor(element_type, gpu_device_info);
float dram_bandwidth = gpu_device_info.memory_bandwidth() / waste_factor;
float rest_bandwidth = gpu_device_info.memory_bandwidth();
if (n_bytes_net < gpu_device_info.l2_cache_size()) {
rest_bandwidth *= kL2CacheSpeedup;
if (n_bytes_net <
gpu_device_info.l1_cache_size_per_SM() * gpu_device_info.core_count()) {
rest_bandwidth *= kL1CacheSpeedup;
}
} else {
rest_bandwidth /= waste_factor;
}
dram_bandwidth = AdjustBandwidth(gpu_device_info, dram_bandwidth, num_blocks);
rest_bandwidth = AdjustBandwidth(gpu_device_info, rest_bandwidth, num_blocks);
int64_t n_bytes_read_dram = std::min(n_bytes_net, n_bytes_total);
int64_t n_bytes_read_cache = n_bytes_total - n_bytes_read_dram;
return absl::Seconds(n_bytes_read_dram / dram_bandwidth) +
absl::Seconds(n_bytes_read_cache / rest_bandwidth);
}
absl::Duration GpuPerformanceModelBase::ProducerInputAccessTime(
const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info, int64_t num_blocks,
const HloInstruction* producer, const HloFusionAnalysis& fusion_analysis,
const GpuPerformanceModelOptions& config,
const HloInstruction* fused_consumer) {
absl::Duration ret = absl::ZeroDuration();
float producer_output_utilization =
fused_consumer
? GetOperandUtilization(cost_analysis, fused_consumer, producer)
: 1.f;
  for (int i = 0; i < producer->operand_count(); ++i) {
    int64_t operand_bytes_accessed =
        cost_analysis->operand_bytes_accessed(*producer, i);
    float operand_utilization =
        cost_analysis->operand_utilization(*producer, i);
    // Approximate number of distinct operand bytes touched: total accesses
    // divided by how often each element is read on average.
    int64_t n_bytes_net = std::llround(operand_bytes_accessed /
                                       std::max(operand_utilization, 1.0f));
    float common_utilization = GetCommonUtilization(
        cost_analysis, producer, i, fused_consumer);
    CHECK_LE(common_utilization, producer_output_utilization);
    // Bytes the fused kernel still has to read: scaled by how much of the
    // producer's output the consumer uses, minus the part that is shared with
    // the consumer's own reads of the same operand.
    float n_bytes_total = operand_bytes_accessed *
                          (producer_output_utilization - common_utilization);
    ret += ReadTime(gpu_device_info, num_blocks, n_bytes_net, n_bytes_total);
  }
return ret;
}
absl::Duration GpuPerformanceModelBase::WriteTime(
const se::DeviceDescription& gpu_device_info, int64_t bytes_written) {
return absl::Seconds(1.0f * bytes_written /
gpu_device_info.memory_bandwidth());
}
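// Estimates compute time as flops divided by the effective flop rate: active
// cores times active FPUs per core times clock_rate_ghz * 2 (the factor of
// two presumably counts a fused multiply-add as two flops per cycle).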
absl::Duration GpuPerformanceModelBase::ComputeTime(
const se::DeviceDescription& gpu_device_info, int64_t flops,
int64_t num_blocks, int64_t num_threads_per_block) {
int64_t n_active_fpus_per_core =
std::min<int64_t>(num_threads_per_block, gpu_device_info.fpus_per_core());
int64_t n_active_core =
std::min<int64_t>(num_blocks, gpu_device_info.core_count());
int64_t fpu_count = n_active_core * n_active_fpus_per_core;
int64_t flop_per_ns_per_fpu = gpu_device_info.clock_rate_ghz() * 2;
int64_t flop_per_ns_effective = flop_per_ns_per_fpu * fpu_count;
return absl::Nanoseconds(1.0f * flops / flop_per_ns_effective);
}
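// Combines compute and memory time assuming partial overlap: the smaller of
// the two is discounted by config.memory_compute_parallelism, so a value of
// 1.0 yields max(compute, memory) and 0.0 yields their sum.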
absl::Duration GpuPerformanceModelBase::CombineComputeAndMemoryAccessTime(
absl::Duration compute_time, absl::Duration memory_access_time,
const GpuPerformanceModelOptions& config) {
return compute_time + memory_access_time -
std::min(compute_time, memory_access_time) *
config.memory_compute_parallelism;
}
void GpuPerformanceModelBase::VLogOperandRead(const HloInstruction* operand,
int64_t n_bytes_total,
int64_t n_bytes_net,
bool coalesced) {
VLOG(8) << "operand " << operand->name()
<< ", n_bytes_total: " << n_bytes_total
<< ", n_bytes_net: " << n_bytes_net << ", coalesced: " << coalesced;
}
}
} | #include "xla/service/gpu/model/gpu_performance_model_base.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class GpuPerformanceModelBaseTest : public HloTestBase {
public:
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
GpuHloCostAnalysis::Options options_{ShapeSizeBytesFunction(),
{},
true};
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
GpuHloCostAnalysis analysis_{options_, &device_info_};
GpuPerformanceModelBaseTest() : HloTestBase() {}
};
TEST_F(GpuPerformanceModelBaseTest, SharedOperandBytesAccessed_InPlaceDUS) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[8,16] parameter(0)
param_1 = f32[4,4] parameter(1)
c_0 = s32[] constant(0)
log = f32[4,4] log(param_1)
ROOT dynamic-update-slice = f32[8,16] dynamic-update-slice(param_0, log, c_0, c_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto dus_consumer = computation->root_instruction();
auto log_producer = dus_consumer->mutable_operand(1);
auto get_shared_operand_bytes_accessed = [&](const HloInstruction* operand) {
return GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
&analysis_, log_producer, dus_consumer, operand);
};
EXPECT_EQ(get_shared_operand_bytes_accessed(dus_consumer->operand(0)), 0);
EXPECT_EQ(get_shared_operand_bytes_accessed(log_producer->operand(0)), 64);
}
TEST_F(GpuPerformanceModelBaseTest, SharedOperandBytesAccessed_DUS) {
absl::string_view hlo_string = R"(
HloModule m
ENTRY entry_computation {
param_0 = f32[8,16] parameter(0)
param_1 = f32[4,4] parameter(1)
c_0 = s32[] constant(0)
log = f32[8,16] log(param_0)
ROOT dynamic-update-slice = f32[8,16] dynamic-update-slice(log, param_1, c_0, c_0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto dus_consumer = computation->root_instruction();
auto log_producer = dus_consumer->mutable_operand(0);
auto get_shared_operand_bytes_accessed = [&](const HloInstruction* operand) {
return GpuPerformanceModelBase::GetSharedOperandBytesAccessed(
&analysis_, log_producer, dus_consumer, operand);
};
EXPECT_EQ(get_shared_operand_bytes_accessed(dus_consumer->operand(1)), 64);
EXPECT_EQ(get_shared_operand_bytes_accessed(log_producer->operand(0)), 448);
}
TEST_F(GpuPerformanceModelBaseTest,
ReduceBroadcastedDim_IncorrectBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
f1 {
p0 = f32[128] parameter(0)
c0 = f32[] constant(0)
broadcast = f32[128,256] broadcast(p0), dimensions={0}
ROOT reduce = f32[128] reduce(broadcast, c0), dimensions={1}, to_apply=add
}
ENTRY entry_computation {
param_0 = f32[128] parameter(0)
param_1 = f32[4,4] parameter(1)
ROOT fusion = f32[128] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto root = computation->root_instruction();
EXPECT_EQ(GpuPerformanceModelBase::GetOperandBytesAccessed(&analysis_, root,
root->operand(0)),
131072);
}
TEST_F(GpuPerformanceModelBaseTest, ElementwiseBitcast_IncorrectBytesAccessed) {
absl::string_view hlo_string = R"(
HloModule m
f1 {
p0 = f32[128] parameter(0)
bitcast.1 = f32[8,16] bitcast(p0)
log = f32[128] log(p0)
bitcast.2 = f32[8,16] bitcast(log)
ROOT add = f32[8,16] add(bitcast.1, bitcast.2)
}
ENTRY entry_computation {
param_0 = f32[128] parameter(0)
ROOT fusion = f32[8,16] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto computation = module->entry_computation();
ASSERT_IS_OK(computation->Accept(&analysis_));
auto root = computation->root_instruction();
EXPECT_EQ(GpuPerformanceModelBase::GetOperandBytesAccessed(&analysis_, root,
root->operand(0)),
1024);
}
TEST_F(GpuPerformanceModelBaseTest, EstimateFusionLaunchDimensions_LoopFusion) {
absl::string_view hlo_string = R"(
HloModule m
f1 {
p0 = f32[8,16,128] parameter(0)
log = f32[8,16,128] log(p0)
ROOT add = f32[8,16,128] add(p0, log)
}
ENTRY entry_computation {
param_0 = f32[8,16,128] parameter(0)
ROOT fusion = f32[8,16,128] fusion(param_0), kind=kLoop, calls=f1
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = AnalyzeFusion(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 16);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 1024);
}
TEST_F(GpuPerformanceModelBaseTest,
EstimateFusionLaunchDimensions_TritonSoftMaxFusion) {
absl::string_view hlo_string = R"(
max {
p1 = f32[] parameter(1)
p0 = f32[] parameter(0)
ROOT m = f32[] maximum(p0, p1)
}
triton_softmax_computation {
p0 = f32[16,970] parameter(0)
constant = f32[] constant(-inf)
reduce = f32[16] reduce(p0, constant), dimensions={1}, to_apply=max
broadcast = f32[16,970] broadcast(reduce), dimensions={0}
ROOT subtract = f32[16,970] subtract(p0, broadcast)
}
ENTRY e {
p0 = f32[16,970]{1,0} parameter(0)
ROOT r = f32[16,970]{1,0} fusion(p0), kind=kCustom,
calls=triton_softmax_computation,
backend_config={"fusion_backend_config": {kind: "__triton","block_level_fusion_config":{"output_tile_sizes":["1","970"],"num_warps":"2"}}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = AnalyzeFusion(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 16);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 64);
}
TEST_F(GpuPerformanceModelBaseTest,
EstimateFusionLaunchDimensions_CudnnFusion) {
absl::string_view hlo_string = R"(
fusion1 {
p0 = f32[32,96] parameter(0)
p1 = f32[96,256] parameter(1)
ROOT r = f32[32,256] dot(p0, p1),
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY e {
p0 = f32[32,96] parameter(0)
p1 = f32[96,256] parameter(1)
ROOT _ = f32[32,256] fusion(p0, p1), kind=kCustom, calls=fusion1,
backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
auto fusion_analysis = AnalyzeFusion(
*module->entry_computation()->root_instruction(), device_info_);
auto launch_dimensions =
GpuPerformanceModelBase::EstimateFusionLaunchDimensions(fusion_analysis);
EXPECT_EQ(launch_dimensions.num_blocks(), 64);
EXPECT_EQ(launch_dimensions.num_threads_per_block(), 128);
}
}
}
} | 2,142 |
#ifndef XLA_SERVICE_GPU_MODEL_GPU_COLLECTIVE_PERFORMANCE_MODEL_H_
#define XLA_SERVICE_GPU_MODEL_GPU_COLLECTIVE_PERFORMANCE_MODEL_H_
#include <array>
#include <cstdint>
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/stream_executor/device_description.h"
#if GOOGLE_CUDA
#include <dlfcn.h>
#include "third_party/gpus/cuda/nvml/include/nvml.h"
#define NVML_FUNCTOR(name, rettype, args) \
inline rettype(*xla_##name) args = nullptr;
NVML_FUNCTOR(nvmlInit, nvmlReturn_t, ())
NVML_FUNCTOR(nvmlShutdown, nvmlReturn_t, ())
NVML_FUNCTOR(nvmlDeviceGetHandleByIndex, nvmlReturn_t,
(unsigned int index, nvmlDevice_t* device))
NVML_FUNCTOR(nvmlDeviceGetNvLinkCapability, nvmlReturn_t,
(nvmlDevice_t device, unsigned int link,
nvmlNvLinkCapability_t capability, unsigned int* capResult))
#endif
namespace xla {
namespace gpu {
class GpuPerformanceWithCollectiveModel : public GpuPerformanceModelBase {
public:
enum CollectiveAlgo {
RING = 0,
TREE,
};
  static constexpr std::array<double, 3> kLowLatencyMaxBandwidths = {
      39.0 /*Volta*/, 87.7 /*Ampere*/, 87.7 /*Hopper*/
  };
  static constexpr std::array<double, 3> kPerChannelMaxRingLL128Bandwidths = {
      20.0 /*Volta*/,
      20.0 /*Ampere*/,
      36.7 /*Hopper*/,
  };
static constexpr double kSm60NvlinkBandwidth = 18.0;
static constexpr double kSm70NvlinkBandwidth = 20.0;
static constexpr double kSm80NvlinkBandwidth = 20.0;
static constexpr double kSm90NvlinkBandwidth = 20.0;
static constexpr double kPciBandwidth = 12.0;
static constexpr double kRingAlgorithmDiscountFactor = 0.92;
static constexpr std::array<double, 13> kIntraNodeSpeeds = {
40.0, 30.0, 20.0, 18.0, 15.0, 12.0, 10.0, 9.0, 7.0, 6.0, 5.0, 4.0, 3.0};
static constexpr std::array<double, 9> kIntraNodeSpeedsSm90 = {
60.0, 40.0, 30.0, 24.0, 20.0, 15.0, 12.0, 6.0, 3.0};
static constexpr int64_t kMaxNumChannelsRing = 16;
static constexpr int64_t kLL128NumThreads = 640;
static constexpr absl::Duration kNcclKernelLaunchOverhead =
absl::Microseconds(5);
static absl::Duration ComputeCollectiveTime(
const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info);
static float GetNvlinkBw(se::CudaComputeCapability compute_capability);
static bool InitNvml();
static bool ShutdownNvml();
static uint32_t CheckIfNvlinkSupportsP2P();
private:
static absl::Duration ComputeAllreduceTime(
const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info);
};
}
}
#endif
#include "xla/service/gpu/model/gpu_collective_performance_model.h"
#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/numbers.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_dataflow_analysis.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#if GOOGLE_CUDA
#include "third_party/gpus/cuda/nvml/include/nvml.h"
#endif
namespace xla {
namespace gpu {
namespace {
int64_t GetNcclMaxNumChannels(
GpuPerformanceWithCollectiveModel::CollectiveAlgo algorithm) {
int64_t max_nchannels = 0;
switch (algorithm) {
case GpuPerformanceWithCollectiveModel::RING:
case GpuPerformanceWithCollectiveModel::TREE:
max_nchannels = GpuPerformanceWithCollectiveModel::kMaxNumChannelsRing;
break;
}
const char* env = std::getenv("NCCL_MAX_NCHANNELS");
if (env != nullptr) {
int64_t max_nchannels_from_env;
if (absl::SimpleAtoi(env, &max_nchannels_from_env)) {
max_nchannels = std::min(max_nchannels_from_env, max_nchannels);
}
}
return max_nchannels;
}
int64_t GetMinNumberOfChannels(
GpuPerformanceWithCollectiveModel::CollectiveAlgo algorithm) {
int64_t min_nchannels = 0;
switch (algorithm) {
case GpuPerformanceWithCollectiveModel::RING:
case GpuPerformanceWithCollectiveModel::TREE:
min_nchannels = 1;
break;
}
const char* env = std::getenv("NCCL_MIN_NCHANNELS");
if (env != nullptr) {
int64_t min_nchannels_from_env;
if (absl::SimpleAtoi(env, &min_nchannels_from_env)) {
min_nchannels = std::min(min_nchannels_from_env, min_nchannels);
}
}
return min_nchannels;
}
int GetNumThreads(int warp_size, int min_num_threads, int max_num_threads,
int default_num_threads) {
int threads_from_env = default_num_threads;
const char* env = std::getenv("NCCL_NTHREADS");
if (env != nullptr) {
CHECK(absl::SimpleAtoi(env, &threads_from_env));
}
int num_threads = threads_from_env;
if (num_threads > 0) {
if (num_threads % warp_size != 0) {
num_threads = max_num_threads;
} else if (num_threads > max_num_threads) {
num_threads = max_num_threads;
} else if (num_threads < min_num_threads) {
num_threads = min_num_threads;
}
} else {
num_threads = default_num_threads;
}
return num_threads;
}
float GetMaxSysBwFromGpu(const se::CudaComputeCapability cc,
const double* bandwidths_table) {
switch (cc.major) {
case se::CudaComputeCapability::VOLTA:
return bandwidths_table[0];
case se::CudaComputeCapability::AMPERE:
return bandwidths_table[1];
case se::CudaComputeCapability::HOPPER:
return bandwidths_table[2];
}
return -1;
}
}
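// Maps a CUDA compute capability to a per-link NVLink bandwidth constant;
// anything below Pascal falls back to the SM80 value.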
float GpuPerformanceWithCollectiveModel::GetNvlinkBw(
se::CudaComputeCapability compute_capability) {
return compute_capability.IsAtLeast(se::CudaComputeCapability::HOPPER)
? kSm90NvlinkBandwidth
: compute_capability.IsAtLeast(se::CudaComputeCapability::AMPERE)
? kSm80NvlinkBandwidth
: compute_capability.IsAtLeast(se::CudaComputeCapability::VOLTA)
? kSm70NvlinkBandwidth
: compute_capability.IsAtLeast(se::CudaComputeCapability::PASCAL_)
? kSm60NvlinkBandwidth
: kSm80NvlinkBandwidth;
}
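// Loads libnvidia-ml.so.1 at runtime with dlopen and binds the required NVML
// entry points via dlsym, so no build-time dependency on NVML is needed.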
bool GpuPerformanceWithCollectiveModel::InitNvml() {
#if GOOGLE_CUDA
void* libhandle = dlopen("libnvidia-ml.so.1", RTLD_NOW);
CHECK(libhandle != nullptr) << "Failed to open libnvidia-ml.so.1";
struct SymbolEntry {
void** functor;
char const* name;
};
std::vector<SymbolEntry> symbols = {
{(void**)&xla_nvmlInit, "nvmlInit_v2"},
{(void**)&xla_nvmlShutdown, "nvmlShutdown"},
{(void**)&xla_nvmlDeviceGetHandleByIndex, "nvmlDeviceGetHandleByIndex"},
{(void**)&xla_nvmlDeviceGetNvLinkCapability,
"nvmlDeviceGetNvLinkCapability"},
};
for (SymbolEntry se : symbols) {
*se.functor = dlsym(libhandle, se.name);
}
nvmlReturn_t init_result = xla_nvmlInit();
return init_result == NVML_SUCCESS;
#else
return false;
#endif
}
bool GpuPerformanceWithCollectiveModel::ShutdownNvml() {
#if GOOGLE_CUDA
nvmlReturn_t shutdown_result = xla_nvmlShutdown();
return shutdown_result == NVML_SUCCESS;
#else
return false;
#endif
}
uint32_t
GpuPerformanceWithCollectiveModel::CheckIfNvlinkSupportsP2P() {
#if GOOGLE_CUDA
CHECK(InitNvml()) << "NVML init failed.";
nvmlDevice_t nvml_device;
nvmlReturn_t get_device_result =
xla_nvmlDeviceGetHandleByIndex(0, &nvml_device);
CHECK(get_device_result == NVML_SUCCESS);
uint32_t supported_p2p = 0;
nvmlReturn_t nvlink_cap_result = xla_nvmlDeviceGetNvLinkCapability(
nvml_device, 0, NVML_NVLINK_CAP_P2P_SUPPORTED,
&supported_p2p);
CHECK(nvlink_cap_result == NVML_SUCCESS);
CHECK(ShutdownNvml()) << "NVML shutdown failed.";
return supported_p2p;
#else
return 0;
#endif
}
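// Models an all-reduce as NCCL kernel launch overhead plus one channel's
// share of the compute time plus transfer time. The bus bandwidth starts from
// the intra-node speed table (upgraded to the NVLink bandwidth when NVML
// reports P2P support), is discounted for the ring algorithm, capped by the
// per-channel LL128 limit, and finally scaled by the cost analysis' scaling
// ratio for this instruction.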
absl::Duration
GpuPerformanceWithCollectiveModel::ComputeAllreduceTime(
const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info) {
absl::Duration total_time = kNcclKernelLaunchOverhead;
stream_executor::CudaComputeCapability compute_cap =
gpu_device_info.cuda_compute_capability();
int64_t size_of_speed_array = kIntraNodeSpeeds.size();
int64_t size_of_sm90_speed_array = kIntraNodeSpeedsSm90.size();
int num_speeds = compute_cap.major >= se::CudaComputeCapability::HOPPER
? size_of_sm90_speed_array
: size_of_speed_array;
const double* speeds = compute_cap.major >= se::CudaComputeCapability::HOPPER
? kIntraNodeSpeedsSm90.data()
: kIntraNodeSpeeds.data();
int speed_index = 0;
float max_sys_bw =
GetMaxSysBwFromGpu(compute_cap, kLowLatencyMaxBandwidths.data());
CHECK_GT(max_sys_bw, 0);
while ((speed_index < num_speeds - 1) && speeds[speed_index] > max_sys_bw) {
speed_index++;
}
float bw_intra_node = speeds[speed_index];
int64_t num_devices = cost_analysis->NumOfDevices(instr);
int64_t min_nchannels =
std::max(num_devices, GetMinNumberOfChannels(CollectiveAlgo::RING));
int64_t num_channels =
std::max(min_nchannels, GetNcclMaxNumChannels(CollectiveAlgo::RING));
int default_threads =
(bw_intra_node * num_channels <= kPciBandwidth) ? 256 : kLL128NumThreads;
int warp_size = gpu_device_info.threads_per_warp();
int num_threads = GetNumThreads(warp_size, kLL128NumThreads / 4,
kLL128NumThreads, default_threads);
absl::Duration compute_time_per_channel = ComputeTime(
gpu_device_info, cost_analysis->flop_count(instr) / num_channels,
num_channels, num_threads);
total_time += compute_time_per_channel;
uint32_t supported_p2p = CheckIfNvlinkSupportsP2P();
if (supported_p2p == 0) {
VLOG(8) << "Nvlink doesn't support p2p communication. Model will "
"continue using default system bandwidth.";
} else {
VLOG(8) << "Nvlink supports p2p communication, setting intra node "
"bandwidth to nvlink bw.";
bw_intra_node = GetNvlinkBw(compute_cap);
}
double bus_bandwidth = bw_intra_node * num_channels;
double per_channel_ring_ll128_Bw =
GetMaxSysBwFromGpu(compute_cap, kPerChannelMaxRingLL128Bandwidths.data());
bus_bandwidth = std::min(bus_bandwidth * kRingAlgorithmDiscountFactor,
num_channels * per_channel_ring_ll128_Bw);
double actual_bandwidth = bus_bandwidth * cost_analysis->ScalingRatio(instr);
absl::Duration communication_time = absl::Milliseconds(
cost_analysis->bytes_accessed(instr) / (1e6 * actual_bandwidth));
total_time += communication_time;
return total_time;
}
absl::Duration
GpuPerformanceWithCollectiveModel::ComputeCollectiveTime(
const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis,
const se::DeviceDescription& gpu_device_info) {
if (cost_analysis->NumOfDevices(instr) == 1) {
VLOG(8) << "Returning only kernel launch overhead for a single partition.";
return kNcclKernelLaunchOverhead;
}
if (HloDataflowAnalysis::IsAsynchronousOperationDone(instr.opcode())) {
VLOG(8) << "Returning 0 cost for async done op " << instr.name();
return absl::ZeroDuration();
}
switch (instr.opcode()) {
case HloOpcode::kAllReduce:
case HloOpcode::kAllReduceStart:
return ComputeAllreduceTime(instr, cost_analysis, gpu_device_info);
default: {
LOG(WARNING)
<< "Runtime estimate for " << instr.name()
<< " not implemented. Returning only the kernel launch time.";
return kNcclKernelLaunchOverhead;
}
}
}
}
} | #include "xla/service/gpu/model/gpu_collective_performance_model.h"
#include <gtest/gtest.h>
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using GpuPerformanceWithCollectiveModelTest = HloTestBase;
TEST_F(GpuPerformanceWithCollectiveModelTest, TestNvmlLibraryLoading) {
#if GOOGLE_CUDA
EXPECT_TRUE(GpuPerformanceWithCollectiveModel::InitNvml());
nvmlDevice_t nvml_device;
nvmlReturn_t get_device_result =
xla_nvmlDeviceGetHandleByIndex(0, &nvml_device);
EXPECT_TRUE(get_device_result == NVML_SUCCESS);
EXPECT_TRUE(GpuPerformanceWithCollectiveModel::InitNvml());
#endif
}
}
}
} | 2,143 |
#ifndef XLA_SERVICE_GPU_MODEL_FUSION_ANALYSIS_CACHE_H_
#define XLA_SERVICE_GPU_MODEL_FUSION_ANALYSIS_CACHE_H_
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/node_hash_map.h"
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/stream_executor/device_description.h"
namespace xla::gpu {
class HloFusionAnalysisCache {
public:
explicit HloFusionAnalysisCache(
const stream_executor::DeviceDescription& device_info)
: device_info_(device_info) {}
const HloFusionAnalysis& Get(const HloInstruction& instruction);
const HloFusionAnalysis& Get(const HloInstruction& producer,
const HloInstruction& consumer);
void Invalidate(const HloInstruction& instruction);
void Clear();
private:
const stream_executor::DeviceDescription& device_info_;
absl::Mutex mutex_;
absl::node_hash_map<int, HloFusionAnalysis> analyses_;
absl::node_hash_map<std::pair<int, int>, HloFusionAnalysis>
producer_consumer_analyses_;
absl::flat_hash_map<int, std::vector<int>> consumers_for_producers_;
absl::flat_hash_map<int, std::vector<int>> producers_for_consumers_;
};
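// Example usage (sketch; `device_info`, `producer` and `consumer` are assumed
// to be valid objects owned by the caller):
//
//   HloFusionAnalysisCache cache(device_info);
//   const HloFusionAnalysis& fused = cache.Get(*producer, *consumer);
//   // ... use `fused`; after rewriting `producer`, drop stale entries:
//   cache.Invalidate(*producer);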
}
#endif
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include <utility>
#include "absl/synchronization/mutex.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
namespace xla::gpu {
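// Double-checked lookup: probe the cache under the lock, run the analysis
// without holding the lock on a miss, then re-check before inserting, so
// concurrent callers may recompute the same analysis but only one result is
// ever stored.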
const HloFusionAnalysis& HloFusionAnalysisCache::Get(
const HloInstruction& instruction) {
{
absl::MutexLock lock(&mutex_);
auto it = analyses_.find(instruction.unique_id());
if (it != analyses_.end()) {
return it->second;
}
}
HloFusionAnalysis analysis = AnalyzeFusion(instruction, device_info_);
absl::MutexLock lock(&mutex_);
auto it = analyses_.find(instruction.unique_id());
if (it != analyses_.end()) {
return it->second;
}
return analyses_.emplace(instruction.unique_id(), std::move(analysis))
.first->second;
}
const HloFusionAnalysis& HloFusionAnalysisCache::Get(
const HloInstruction& producer, const HloInstruction& consumer) {
std::pair<int, int> key{producer.unique_id(), consumer.unique_id()};
{
absl::MutexLock lock(&mutex_);
auto it = producer_consumer_analyses_.find(key);
if (it != producer_consumer_analyses_.end()) {
return it->second;
}
}
HloFusionAnalysis analysis =
AnalyzeProducerConsumerFusion(producer, consumer, device_info_);
absl::MutexLock lock(&mutex_);
auto it = producer_consumer_analyses_.find(key);
if (it != producer_consumer_analyses_.end()) {
return it->second;
}
producers_for_consumers_[consumer.unique_id()].push_back(
producer.unique_id());
consumers_for_producers_[producer.unique_id()].push_back(
consumer.unique_id());
return producer_consumer_analyses_.emplace(key, std::move(analysis))
.first->second;
}
void HloFusionAnalysisCache::Invalidate(const HloInstruction& instruction) {
absl::MutexLock lock(&mutex_);
analyses_.erase(instruction.unique_id());
if (auto consumers =
consumers_for_producers_.extract(instruction.unique_id())) {
for (const auto consumer : consumers.mapped()) {
producer_consumer_analyses_.erase({instruction.unique_id(), consumer});
}
}
if (auto producers =
producers_for_consumers_.extract(instruction.unique_id())) {
for (const auto producer : producers.mapped()) {
producer_consumer_analyses_.erase({producer, instruction.unique_id()});
}
}
}
void HloFusionAnalysisCache::Clear() {
absl::MutexLock lock(&mutex_);
analyses_.clear();
producer_consumer_analyses_.clear();
consumers_for_producers_.clear();
producers_for_consumers_.clear();
}
} | #include "xla/service/gpu/model/fusion_analysis_cache.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/hlo_parser.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla::gpu {
namespace {
class FusionAnalysisCacheTest : public HloTestBase {
public:
stream_executor::DeviceDescription device_{
TestGpuDeviceInfo::RTXA6000DeviceInfo()};
HloFusionAnalysisCache cache_{device_};
};
TEST_F(FusionAnalysisCacheTest, CachesAndInvalidates) {
absl::string_view hlo_string = R"(
HloModule m
f {
c0 = f32[] constant(0)
b0 = f32[1000] broadcast(c0)
ROOT n0 = f32[1000] negate(b0)
}
ENTRY e {
ROOT r.1 = f32[1000] fusion(), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto* computation = module->GetComputationWithName("f");
auto* broadcast = computation->GetInstructionWithName("b0");
auto* negate = computation->GetInstructionWithName("n0");
auto* fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), negate);
computation->set_root_instruction(broadcast);
EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), negate)
<< "Analysis should be cached.";
cache_.Invalidate(*fusion);
EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), broadcast)
<< "Analysis should have been recomputed";
}
TEST_F(FusionAnalysisCacheTest, CachesAndInvalidatesProducerConsumerFusions) {
absl::string_view hlo_string = R"(
HloModule m
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
f {
c0 = f32[] constant(0)
b0 = f32[1000] broadcast(c0)
ROOT r0 = f32[] reduce(b0, c0), dimensions={0}, to_apply=add
}
ENTRY e {
f0 = f32[] fusion(), kind=kInput, calls=f
ROOT n0 = f32[] negate(f0)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(hlo_string));
auto* fusion = module->entry_computation()->GetInstructionWithName("f0");
auto* neg = module->entry_computation()->GetInstructionWithName("n0");
auto* computation = module->GetComputationWithName("f");
auto* constant = computation->GetInstructionWithName("c0");
EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction);
computation->set_root_instruction(constant);
EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kReduction)
<< "Analysis should be cached.";
cache_.Invalidate(*fusion);
EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(),
HloFusionAnalysis::EmitterFusionKind::kLoop)
<< "Analysis should have been recomputed";
}
}
} | 2,144 |
#ifndef XLA_SERVICE_GPU_MODEL_SYMBOLIC_TILED_HLO_INSTRUCTION_H_
#define XLA_SERVICE_GPU_MODEL_SYMBOLIC_TILED_HLO_INSTRUCTION_H_
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile.h"
namespace xla {
namespace gpu {
class SymbolicTiledHloInstruction {
public:
SymbolicTiledHloInstruction(const HloInstruction* hlo,
IndexingMap indexing_map,
SymbolicTile symbolic_tile)
: hlo_(hlo),
indexing_map_(std::move(indexing_map)),
symbolic_tile_(std::move(symbolic_tile)) {}
std::vector<int64_t> TileOffsets(
absl::Span<int64_t const> tile_parameters) const;
std::vector<int64_t> TileSizes(
absl::Span<int64_t const> tile_parameters) const;
std::vector<int64_t> TileStrides(
absl::Span<int64_t const> tile_parameters) const;
const HloInstruction* hlo() const { return hlo_; }
const IndexingMap& indexing_map() const { return indexing_map_; }
const SymbolicTile& symbolic_tile() const { return symbolic_tile_; }
const SymbolicTiledHloInstruction* operand(int64_t operand_id) const {
return operands_[operand_id];
}
SymbolicTiledHloInstruction* operand(int64_t operand_id) {
return operands_[operand_id];
}
const std::vector<SymbolicTiledHloInstruction*>& operands() const {
return operands_;
}
void AppendOperand(SymbolicTiledHloInstruction* operand) {
operands_.push_back(operand);
}
std::string ToString() const;
private:
const HloInstruction* hlo_;
IndexingMap indexing_map_;
SymbolicTile symbolic_tile_;
std::vector<SymbolicTiledHloInstruction*> operands_;
};
}
}
#endif
#include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "xla/service/gpu/model/symbolic_tile.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineExpr;
using ::mlir::AffineMap;
using ::mlir::SmallVector;
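// Substitutes concrete tile parameters for the symbols of `affine_map` (which
// must have no dimensions), simplifies the map, and returns the resulting
// constant values.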
std::vector<int64_t> EvaluateTileMap(AffineMap affine_map,
absl::Span<int64_t const> parameters) {
CHECK_EQ(affine_map.getNumSymbols(), parameters.size());
CHECK_EQ(affine_map.getNumDims(), 0);
SmallVector<AffineExpr> symbol_replacements = llvm::to_vector(
llvm::map_range(parameters, [affine_map](const int64_t v) -> AffineExpr {
return mlir::getAffineConstantExpr(v, affine_map.getContext());
}));
AffineMap simplified_affine_map =
mlir::simplifyAffineMap(affine_map.replaceDimsAndSymbols(
{}, symbol_replacements, 0,
0));
SmallVector<int64_t> results = llvm::to_vector(llvm::map_range(
simplified_affine_map.getResults(), [](AffineExpr result) -> int64_t {
return llvm::cast<mlir::AffineConstantExpr>(result).getValue();
}));
return std::vector<int64_t>(results.begin(), results.end());
}
}
std::vector<int64_t> SymbolicTiledHloInstruction::TileOffsets(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateTileMap(symbolic_tile_.offset_map(), tile_parameters);
}
std::vector<int64_t> SymbolicTiledHloInstruction::TileSizes(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateTileMap(symbolic_tile_.size_map(), tile_parameters);
}
std::vector<int64_t> SymbolicTiledHloInstruction::TileStrides(
absl::Span<int64_t const> tile_parameters) const {
return EvaluateTileMap(symbolic_tile_.stride_map(), tile_parameters);
}
std::string SymbolicTiledHloInstruction::ToString() const {
std::stringstream ss;
ss << "\thlo: " << hlo_->ToString() << "\n";
ss << "\t" << symbolic_tile_.ToString() << "\n";
ss << "\tindexing map: " << indexing_map_.ToString() << "\n";
return ss.str();
}
}
} | #include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include <cstdint>
#include <optional>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using SymbolicTiledHloInstructionTest = HloTestBase;
TEST_F(SymbolicTiledHloInstructionTest, TransposeTileSizesAreSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
fused_computation {
p0 = f32[16,32] parameter(0)
p1 = f32[32,16] parameter(1)
transpose = f32[32,16] transpose(p0), dimensions={1,0}
ROOT subtract = f32[32,16] subtract(transpose, p1)
}
ENTRY main {
p0 = f32[16,32] parameter(0)
p1 = f32[32,16] parameter(1)
ROOT root = f32[32,16] fusion(p0, p1), kind=kLoop, calls=fused_computation
}
)"));
mlir::MLIRContext mlir_ctx;
auto fusion = module->entry_computation()->root_instruction();
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(fusion);
auto output_to_input_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_ctx);
HloInstruction* subtract = fusion->fused_expression_root();
HloInstruction* p0 = subtract->mutable_operand(0)->mutable_operand(0);
HloInstruction* p1 = subtract->mutable_operand(1);
IndexingMap p0_indexing =
*output_to_input_indexing[fusion->operand(0)].begin();
std::optional<SymbolicTile> p0_symbolic_tile =
SymbolicTile::FromIndexingMap(p0_indexing);
ASSERT_TRUE(p0_symbolic_tile.has_value());
SymbolicTiledHloInstruction tiled_p0(p0, p0_indexing, *p0_symbolic_tile);
ASSERT_TRUE(p0_symbolic_tile.has_value());
IndexingMap p1_indexing =
*output_to_input_indexing[fusion->operand(1)].begin();
std::optional<SymbolicTile> p1_symbolic_tile =
SymbolicTile::FromIndexingMap(p1_indexing);
ASSERT_TRUE(p1_symbolic_tile.has_value());
SymbolicTiledHloInstruction tiled_p1(p1, p1_indexing, *p1_symbolic_tile);
std::vector<int64_t> output_tile_sizes = {8, 4};
auto p0_tile_sizes = tiled_p0.TileSizes(output_tile_sizes);
EXPECT_THAT(tiled_p0.TileSizes(output_tile_sizes), ElementsAre(4, 8));
EXPECT_THAT(tiled_p1.TileSizes(output_tile_sizes), ElementsAre(8, 4));
}
}
}
} | 2,145 |
#ifndef XLA_SERVICE_GPU_MODEL_HLO_OP_PROFILES_H_
#define XLA_SERVICE_GPU_MODEL_HLO_OP_PROFILES_H_
#include <cstdint>
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/hlo.pb.h"
#include "xla/stream_executor/device_description.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
class HloOpProfiles {
public:
using HloOpProfile =
absl::flat_hash_map<std::pair<HloOpcode, PrimitiveType>, int64_t>;
using ProfilesNestedMap =
absl::flat_hash_map<std::string,
HloOpProfile>;
static const HloOpProfiles& Singleton();
static std::string GetProfileName(const se::DeviceDescription* device_info);
static std::unique_ptr<HloOpProfiles> Load(
std::string_view profiles_text_proto,
std::string_view default_profile_name);
const HloOpProfile& GetProfile(
const se::DeviceDescription* device_info) const;
private:
HloOpProfiles(ProfilesNestedMap profiles,
std::string_view default_profile_name)
: profiles_(std::move(profiles)),
default_profile_(profiles_.at(default_profile_name)) {}
ProfilesNestedMap profiles_;
const HloOpProfile& default_profile_;
};
}
}
#endif
#include "xla/service/gpu/model/hlo_op_profiles.h"
#include <memory>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include "absl/memory/memory.h"
#include "absl/strings/str_cat.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/hlo_op_profiles_data.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace gpu {
const HloOpProfiles& HloOpProfiles::Singleton() {
static const auto* hlo_op_profiles =
HloOpProfiles::Load(kDeviceHloOpProfiles,
"sm_86")
.release();
return *hlo_op_profiles;
}
std::string HloOpProfiles::GetProfileName(
const se::DeviceDescription* device_info) {
if (device_info != nullptr) {
if (auto* ptr = std::get_if<stream_executor::CudaComputeCapability>(
&device_info->gpu_compute_capability()))
return absl::StrCat("sm_", ptr->major, ptr->minor);
}
return "<unknown>";
}
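// Parses the DeviceHloInstructionProfiles text proto into a nested map keyed
// first by profile name (e.g. "sm_80") and then by (opcode, element type),
// storing the measured clock cycles per instruction.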
std::unique_ptr<HloOpProfiles> HloOpProfiles::Load(
std::string_view profiles_text_proto,
std::string_view default_profile_name) {
ProfilesNestedMap profiles_map;
DeviceHloInstructionProfiles all_device_profiles;
CHECK(tsl::protobuf::TextFormat::ParseFromString(
std::string(profiles_text_proto), &all_device_profiles));
for (const auto& device_profile : all_device_profiles.entries()) {
for (const auto& entry : device_profile.second.entries()) {
auto op_code = StringToHloOpcode(entry.instruction().opcode()).value();
auto element_type = entry.instruction().shape().element_type();
profiles_map[device_profile.first][std::make_pair(
op_code, element_type)] = entry.clock_cycles();
}
}
return absl::WrapUnique(
new HloOpProfiles(std::move(profiles_map), default_profile_name));
}
const HloOpProfiles::HloOpProfile& HloOpProfiles::GetProfile(
const se::DeviceDescription* device_info) const {
auto it = profiles_.find(GetProfileName(device_info));
if (it != profiles_.end()) return it->second;
return default_profile_;
}
}
} | #include "xla/service/gpu/model/hlo_op_profiles.h"
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
constexpr char kDeviceHloOpProfiles[] = R"pb(
entries {
key: "sm_90"
value {
entries {
instruction {
opcode: "divide"
shape { element_type: F32 }
}
clock_cycles: 32
}
}
}
entries {
key: "sm_80"
value {
entries {
instruction {
opcode: "multiply"
shape { element_type: F32 }
}
clock_cycles: 64
}
}
}
)pb";
using HloOpProfilesTest = ::testing::Test;
TEST_F(HloOpProfilesTest, GetProfile) {
auto hlo_op_profiles = HloOpProfiles::Load(kDeviceHloOpProfiles,
"sm_80");
auto device_info_sm_90 = TestGpuDeviceInfo::RTXA6000DeviceInfo(
stream_executor::CudaComputeCapability(9, 0));
const auto& op_profile = hlo_op_profiles->GetProfile(&device_info_sm_90);
ASSERT_TRUE(op_profile.contains(
std::make_pair(HloOpcode::kDivide, PrimitiveType::F32)));
EXPECT_EQ(
op_profile.at(std::make_pair(HloOpcode::kDivide, PrimitiveType::F32)),
32);
}
TEST_F(HloOpProfilesTest, GetProfileDefault) {
auto hlo_op_profiles = HloOpProfiles::Load(kDeviceHloOpProfiles,
"sm_80");
auto device_info_sm_85 = TestGpuDeviceInfo::RTXA6000DeviceInfo(
stream_executor::CudaComputeCapability(8, 5));
const auto& op_profile = hlo_op_profiles->GetProfile(&device_info_sm_85);
ASSERT_TRUE(op_profile.contains(
std::make_pair(HloOpcode::kMultiply, PrimitiveType::F32)));
EXPECT_EQ(
op_profile.at(std::make_pair(HloOpcode::kMultiply, PrimitiveType::F32)),
64);
}
}
}
} | 2,146 |
#ifndef XLA_SERVICE_GPU_MODEL_HLO_OP_PROFILER_H_
#define XLA_SERVICE_GPU_MODEL_HLO_OP_PROFILER_H_
#include <memory>
#include "absl/status/statusor.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/hlo_runner.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
class HloOpProfiler {
static std::unique_ptr<HloModule> MakeModuleForMeasurements(
HloOpcode op, PrimitiveType data_type, int chain_length);
absl::StatusOr<absl::Duration> MeasureOpChainDuration(HloOpcode op,
PrimitiveType data_type,
int chain_length);
public:
explicit HloOpProfiler(HloRunner& runner);
absl::StatusOr<HloInstructionProfile> MeasureClockCyclesPerOp(
HloOpcode op, PrimitiveType data_type);
private:
HloRunner& runner_;
const se::DeviceDescription& dev_info_;
absl::Duration min_duration_;
};
}
}
#endif
#include "xla/service/gpu/model/hlo_op_profiler.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <random>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/debug_options_flags.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/primitive_util.h"
#include "xla/service/executable.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/hlo_runner.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/test_utils.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#ifdef GOOGLE_CUDA
#include "xla/backends/profiler/gpu/cupti_collector.h"
#include "xla/backends/profiler/gpu/cupti_tracer.h"
#endif
namespace xla {
namespace gpu {
#ifdef GOOGLE_CUDA
class CuptiKernelTracer : public profiler::CuptiTraceCollector {
public:
CuptiKernelTracer()
: profiler::CuptiTraceCollector({}),
cupti_tracer_(profiler::CuptiTracer::GetCuptiTracerSingleton()) {
CHECK(cupti_tracer_->IsAvailable());
profiler::CuptiTracerOptions options;
options.cbids_selected.push_back(
CUPTI_DRIVER_TRACE_CBID_cu64GLMapBufferObject);
options.activities_selected.push_back(CUPTI_ACTIVITY_KIND_KERNEL);
cupti_tracer_->Enable(options, this);
}
uint64_t getMedianKernelTimeNs() && {
cupti_tracer_->Disable();
if (kernel_times_ns_.empty()) {
LOG(ERROR) << "No kernel events";
return 0;
}
std::sort(kernel_times_ns_.begin(), kernel_times_ns_.end());
size_t i = kernel_times_ns_.size() / 2;
if (kernel_times_ns_.size() % 2 != 0) {
return kernel_times_ns_[i];
}
return (kernel_times_ns_[i - 1] + kernel_times_ns_[i] + 1) / 2;
}
private:
void AddEvent(profiler::CuptiTracerEvent&& event) override {
if (event.type == profiler::CuptiTracerEventType::Kernel) {
kernel_times_ns_.push_back(event.end_time_ns - event.start_time_ns);
}
VLOG(5) << "CuptiTracerEvent: " << event.name << ", "
<< event.end_time_ns - event.start_time_ns << "ns";
}
void OnEventsDropped(const std::string& reason,
uint32_t num_events) override {
LOG(WARNING) << "Dropped " << num_events << " events: " << reason;
}
void Flush() override {}
profiler::CuptiTracer* cupti_tracer_;
std::vector<uint64_t> kernel_times_ns_;
};
#else
class CuptiKernelTracer {
public:
uint64_t getMedianKernelTimeNs() && {
LOG(FATAL) << "Not built with --config=cuda";
}
};
#endif
std::unique_ptr<HloModule> HloOpProfiler::MakeModuleForMeasurements(
HloOpcode op, PrimitiveType data_type, int chain_length) {
constexpr int64_t kInputSize = 1;
const Shape shape = ShapeUtil::MakeShape(data_type, {kInputSize});
HloModuleConfig config;
config.set_debug_options(GetDebugOptionsFromFlags());
auto module = std::make_unique<HloModule>("module", config);
HloComputation::Builder entry_builder("entry");
HloComputation::Builder fusion_builder("fusion");
HloInstruction* pf = fusion_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "pf"));
HloInstruction* last = pf;
for (int i = 0; i < chain_length; ++i) {
switch (HloOpcodeArity(op).value_or(0)) {
case 1:
last = fusion_builder.AddInstruction(
HloInstruction::CreateUnary(shape, op, last));
break;
case 2:
last = fusion_builder.AddInstruction(
HloInstruction::CreateBinary(shape, op, last, pf));
break;
default:
LOG(FATAL) << "Unsupported opcode: " << HloOpcodeString(op);
}
}
HloComputation* subcomp =
module->AddEmbeddedComputation(fusion_builder.Build());
HloInstruction* p0 = entry_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0"));
entry_builder.AddInstruction(HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {p0}, subcomp));
module->AddEntryComputation(entry_builder.Build());
VLOG(9) << module->ToString();
return module;
}
absl::StatusOr<absl::Duration> HloOpProfiler::MeasureOpChainDuration(
HloOpcode op, PrimitiveType data_type, int chain_length) {
#ifndef GOOGLE_CUDA
return FailedPrecondition("Not built with --config=cuda");
#endif
std::unique_ptr<HloModule> module =
MakeModuleForMeasurements(op, data_type, chain_length);
std::minstd_rand0 engine;
std::vector<Literal> args_small = MakeFakeArguments(module.get(), &engine,
false)
.value();
std::vector<Literal> args_large = MakeFakeArguments(module.get(), &engine,
true)
.value();
const absl::Time t_compile_start = absl::Now();
TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> ex,
runner_.CreateExecutable(std::move(module),
false));
if (absl::Now() - t_compile_start > absl::Seconds(10)) {
return ResourceExhausted("Too slow compilation");
}
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_small).status());
CuptiKernelTracer cupti_tracer;
for (int i = 0; i < 10; ++i) {
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_small).status());
TF_RETURN_IF_ERROR(
runner_.ExecuteWithExecutable(ex.get(), args_large).status());
}
return absl::Nanoseconds(std::move(cupti_tracer).getMedianKernelTimeNs());
}
HloOpProfiler::HloOpProfiler(HloRunner& runner)
: runner_(runner),
dev_info_(runner.backend().stream_executors()[0]->GetDeviceDescription()),
min_duration_(2 * MeasureOpChainDuration(HloOpcode::kNegate, F32, 0)
.value_or(absl::ZeroDuration())) {
VLOG(3) << "Minimum kernel duration: " << min_duration_;
CHECK_GT(min_duration_, absl::ZeroDuration())
<< "Failed to measure kernel runtime";
}
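// Doubles the op chain length until the fused kernel runs for at least
// min_duration_, then measures a chain of twice that length and attributes
// the extra runtime to the extra ops, converting the per-op time to clock
// cycles with the device clock rate (times two, mirroring ComputeTime).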
absl::StatusOr<HloInstructionProfile> HloOpProfiler::MeasureClockCyclesPerOp(
HloOpcode op, PrimitiveType data_type) {
VLOG(2) << "Measuring " << HloOpcodeString(op) << " "
<< primitive_util::LowercasePrimitiveTypeName(data_type);
constexpr int kMinOpChainLength = 16;
constexpr int kMaxOpChainLength = 8192;
absl::Duration duration = absl::ZeroDuration();
int chain_length = kMinOpChainLength;
do {
if (chain_length * 2 > kMaxOpChainLength) {
return FailedPrecondition("%s is too fast to measure",
HloOpcodeString(op));
}
TF_ASSIGN_OR_RETURN(duration,
MeasureOpChainDuration(op, data_type, chain_length));
VLOG(3) << chain_length << "\t" << duration;
chain_length *= 2;
} while (duration < min_duration_);
TF_ASSIGN_OR_RETURN(absl::Duration double_duration,
MeasureOpChainDuration(op, data_type, chain_length));
VLOG(3) << chain_length << "\t" << double_duration;
const absl::Duration time_per_op =
(double_duration - duration) * 2.0 / chain_length;
const float clocks_per_nanosecond =
dev_info_.clock_rate_ghz() * 2;
const int64_t n_clocks =
absl::ToInt64Nanoseconds(time_per_op) * clocks_per_nanosecond;
VLOG(3) << time_per_op << " = " << n_clocks << " clock cycles";
HloInstructionProfile profile;
profile.mutable_instruction()->mutable_opcode()->assign(HloOpcodeString(op));
profile.mutable_instruction()->mutable_shape()->set_element_type(data_type);
profile.set_clock_cycles(n_clocks);
return profile;
}
}
} | #include "xla/service/gpu/model/hlo_op_profiler.h"
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/tests/hlo_test_base.h"
namespace xla {
namespace gpu {
namespace {
using HloOpProfilerTest = HloTestBase;
TEST_F(HloOpProfilerTest, BasicMeasurementsAreCorrect) {
#ifndef GOOGLE_CUDA
GTEST_SKIP() << "Not built with --config=cuda";
#endif
HloOpProfiler profiler(test_runner_);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kAdd, F32)
.value()
.clock_cycles(),
0);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kDivide, F64)
.value()
.clock_cycles(),
400);
EXPECT_GT(profiler.MeasureClockCyclesPerOp(HloOpcode::kSqrt, C128)
.value()
.clock_cycles(),
1000);
}
}
}
} | 2,147 |
#ifndef XLA_SERVICE_GPU_MODEL_COALESCING_ANALYSIS_H_
#define XLA_SERVICE_GPU_MODEL_COALESCING_ANALYSIS_H_
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
class CoalescingAnalysis {
public:
CoalescingAnalysis(const HloInstruction* instr,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface = nullptr,
mlir::MLIRContext* mlir_context = nullptr,
bool use_heuristic = true);
CoalescingAnalysis(const HloInstruction* producer,
const HloInstruction* consumer,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface = nullptr,
mlir::MLIRContext* mlir_context = nullptr,
bool use_heuristic = true);
bool IsReadCoalesced(const HloInstruction* operand) const;
private:
bool ComputeCoalescingForAllOperands(
const HloFusionAdaptor& fusion_adaptor,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, mlir::MLIRContext* mlir_context);
absl::flat_hash_map<const HloInstruction*, bool> coalescing_per_operand_;
bool is_coalesced_computed_by_heuristic_ = false;
};
bool IsReadCoalescedHeuristic(HloFusionAnalysis::EmitterFusionKind fusion_kind,
const HloInstruction* producer,
const HloInstruction* consumer = nullptr);
}
}
#endif
#include "xla/service/gpu/model/coalescing_analysis.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <stack>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/gpu_fusible.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
namespace xla {
namespace gpu {
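// Cheap heuristic that avoids building indexing maps: for non-transpose
// emitters, reads are treated as uncoalesced if the producer or consumer
// transposes the minor-most dimension of an operand that is not (derived
// from) a broadcast or iota; fusing two input-fusible reductions is also
// treated as uncoalesced.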
bool IsReadCoalescedHeuristic(HloFusionAnalysis::EmitterFusionKind fusion_kind,
const HloInstruction* producer,
const HloInstruction* consumer) {
if (fusion_kind != HloFusionAnalysis::EmitterFusionKind::kTranspose) {
auto is_broadcast = [&](const HloInstruction* instr) {
while (true) {
if (instr->opcode() == HloOpcode::kBroadcast ||
instr->opcode() == HloOpcode::kIota) {
return true;
}
if (instr->operand_count() != 1) return false;
if (instr->opcode() != HloOpcode::kBitcast && !instr->IsElementwise()) {
return false;
}
instr = instr->operand(0);
}
};
auto is_bad_transpose = [&](const HloInstruction* instr) {
if (instr->opcode() == HloOpcode::kFusion) {
for (auto* instr : instr->fused_instructions()) {
if (TransposesMinorDimension(instr) &&
!is_broadcast(instr->operand(0))) {
return true;
}
}
return false;
}
return TransposesMinorDimension(instr) &&
!is_broadcast(instr->operand(0));
};
if (is_bad_transpose(producer)) return false;
if (consumer && is_bad_transpose(consumer)) return false;
}
if (fusion_kind == HloFusionAnalysis::EmitterFusionKind::kReduction &&
IsInputFusibleReduction(*producer) && consumer &&
IsInputFusibleReduction(*consumer)) {
return false;
}
return true;
}
namespace {
using ::mlir::AffineBinaryOpExpr;
using ::mlir::AffineConstantExpr;
using ::mlir::AffineDimExpr;
using ::mlir::AffineExpr;
using ::mlir::AffineExprKind;
using ::mlir::AffineMap;
using ::mlir::AffineSymbolExpr;
using ::mlir::getAffineConstantExpr;
using ::mlir::MLIRContext;
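// Counts the 128-byte memory transactions needed to cover the accessed
// intervals and compares that with the lower bound for the same number of
// elements laid out contiguously. The access counts as coalesced when the
// actual transaction count exceeds that lower bound by no more than ~11%
// (the 0.9 threshold below).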
bool EstimateCoalescingViaMemoryTransactionsCount(
absl::Span<const Interval> intervals, PrimitiveType element_type) {
constexpr int64_t kBytesPerMemoryTransaction = 128;
int64_t type_size = ShapeUtil::ByteSizeOfPrimitiveType(element_type);
int memory_transactions = 0;
int total_num_elements = 0;
for (const auto& range : intervals) {
int64_t num_elements = range.upper - range.lower + 1;
memory_transactions +=
CeilDiv(num_elements * type_size, kBytesPerMemoryTransaction);
total_num_elements += num_elements;
}
if (memory_transactions == 0) {
return true;
}
int memory_transactions_lower_bound =
CeilDiv(total_num_elements * type_size, kBytesPerMemoryTransaction);
constexpr float kIsCoalescedThreshold = 0.9;
return memory_transactions_lower_bound >
memory_transactions * kIsCoalescedThreshold;
}
Shape GetLinearizedShape(const Shape& shape) {
if (shape.rank() == 0) {
return shape;
}
std::vector<int64_t> dims{ShapeUtil::ElementsIn(shape)};
auto result = Shape(shape.element_type(), dims,
absl::InlinedVector<bool, 4>(dims.size(), false), {});
*result.mutable_layout() = xla::Layout({0});
return result;
}
std::optional<GroupedByOpIndexingMap> GetThreadIdToInputMemoryLayoutsMaps(
const HloFusionAdaptor& fusion_adaptor,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, MLIRContext* mlir_context) {
GroupedByOpIndexingMap result;
for (const auto& [root_index, hero] :
llvm::enumerate(fusion_analysis.fusion_heroes())) {
for (const auto& [hero_operand_index, hero_operand] :
llvm::enumerate(hero.GetOperands())) {
if (hero_operand.shape().rank() == 0) {
continue;
}
std::optional<IndexingMap> thread_id_to_hero_operand_map =
fusion_interface->ComputeThreadIdToInputIndexing(
root_index, hero_operand_index, mlir_context);
if (!thread_id_to_hero_operand_map.has_value()) {
return std::nullopt;
}
GroupedByOpIndexingMap instr_indexing_keyed_by_operands =
ComputeGroupedOutputToInputIndexing(fusion_adaptor, hero_operand,
mlir_context);
for (const HloInstruction* operand : operands) {
auto operand_indexing_maps_it =
instr_indexing_keyed_by_operands.find(operand);
if (operand_indexing_maps_it ==
instr_indexing_keyed_by_operands.end()) {
continue;
}
const Shape& operand_shape = operand->shape();
IndexingMap operand_logical_to_physical_map =
GetIndexingMapFromLogicalToPhysicalLayout(operand_shape,
mlir_context);
IndexingMap operand_physical_to_linearized_shape = GetBitcastMap(
ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(
operand_shape),
GetLinearizedShape(operand_shape), mlir_context);
IndexingMap operand_logical_to_linearized_physical_shape =
operand_logical_to_physical_map *
operand_physical_to_linearized_shape;
operand_logical_to_linearized_physical_shape.Simplify();
for (const IndexingMap& operand_indexing_map :
operand_indexing_maps_it->second) {
if (operand_indexing_map.IsUndefined()) {
result[operand] = {operand_indexing_map};
break;
}
IndexingMap logical_output_to_linearized_physical_input_map =
operand_indexing_map *
operand_logical_to_linearized_physical_shape;
IndexingMap thread_id_to_linearized_physical_input_map =
*thread_id_to_hero_operand_map *
logical_output_to_linearized_physical_input_map;
thread_id_to_linearized_physical_input_map.Simplify();
result[operand].insert(thread_id_to_linearized_physical_input_map);
}
}
}
}
return result;
}
void AssignValuesToRTVars(IndexingMap* indexing_map) {
if (indexing_map->GetRTVarsCount() == 0) {
return;
}
MLIRContext* mlir_context = indexing_map->GetMLIRContext();
llvm::SmallVector<AffineExpr, 2> symbol_replacements;
for (int64_t symbol_id = 0; symbol_id < indexing_map->GetRangeVarsCount();
++symbol_id) {
symbol_replacements.push_back(
mlir::getAffineSymbolExpr(symbol_id, mlir_context));
}
for (const RTVar& rt_var : indexing_map->GetRTVars()) {
symbol_replacements.push_back(getAffineConstantExpr(
(rt_var.feasible_values.lower + rt_var.feasible_values.upper) / 2,
mlir_context));
}
AffineMap thread_x_to_input_no_dim_symbols =
indexing_map->GetAffineMap().replaceDimsAndSymbols(
{}, symbol_replacements, indexing_map->GetDimVarsCount(),
indexing_map->GetRangeVarsCount());
*indexing_map = IndexingMap{thread_x_to_input_no_dim_symbols,
indexing_map->GetDimVars(),
indexing_map->GetRangeVars(),
{}};
indexing_map->Simplify();
indexing_map->RemoveUnusedSymbols();
}
void AssignValuesToOuterLoopIVs(IndexingMap* indexing_map) {
if (indexing_map->GetRangeVarsCount() <= 1) {
return;
}
MLIRContext* mlir_context = indexing_map->GetMLIRContext();
llvm::SmallVector<AffineExpr, 2> symbol_replacements;
for (int64_t symbol_id = 0; symbol_id < indexing_map->GetRangeVarsCount() - 1;
++symbol_id) {
symbol_replacements.push_back(getAffineConstantExpr(
indexing_map->GetRangeVar(symbol_id).range.lower, mlir_context));
}
symbol_replacements.push_back(mlir::getAffineSymbolExpr(0, mlir_context));
AffineMap thread_x_to_input_no_dim_symbols =
indexing_map->GetAffineMap().replaceDimsAndSymbols(
{}, symbol_replacements, indexing_map->GetDimVarsCount(), 1);
*indexing_map = IndexingMap{thread_x_to_input_no_dim_symbols,
indexing_map->GetDimVars(),
{indexing_map->GetRangeVars().back()},
{}};
indexing_map->Simplify();
indexing_map->RemoveUnusedSymbols();
}
struct PartitionedExpr {
explicit PartitionedExpr(MLIRContext* mlir_context) {
AffineExpr zero = getAffineConstantExpr(0, mlir_context);
func_of_d0 = zero;
func_of_s0 = zero;
}
AffineExpr func_of_d0;
AffineExpr func_of_s0;
};
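// Splits an affine expression that is a sum of terms into a part that depends
// only on the thread id (d0) and a part that depends only on the innermost
// symbol (s0); returns nullopt if any single term mixes the two.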
std::optional<PartitionedExpr> Partition(AffineExpr expr) {
PartitionedExpr result(expr.getContext());
std::vector<AffineExpr> summands;
std::stack<AffineExpr> dfs;
dfs.push(expr);
while (!dfs.empty()) {
auto top = dfs.top();
dfs.pop();
auto sum = mlir::dyn_cast<AffineBinaryOpExpr>(top);
if (sum && sum.getKind() == AffineExprKind::Add) {
dfs.push(sum.getLHS());
dfs.push(sum.getRHS());
continue;
}
bool depends_on_thread_x = top.isFunctionOfDim(0);
bool depends_on_range = top.isFunctionOfSymbol(0);
if (depends_on_thread_x && depends_on_range) {
return std::nullopt;
}
if (depends_on_thread_x) {
result.func_of_d0 = top + result.func_of_d0;
}
if (depends_on_range) {
result.func_of_s0 = top + result.func_of_s0;
}
}
return result;
}
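// Evaluates an affine expression for concrete dimension and symbol values.
// Only add, mul, floordiv and mod are supported.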
int64_t EvaluateAffineExpr(AffineExpr expr,
const std::vector<int64_t>& dim_values,
const std::vector<int64_t>& symbol_values = {}) {
if (auto const_expr = mlir::dyn_cast<AffineConstantExpr>(expr)) {
return const_expr.getValue();
}
if (auto dim_expr = mlir::dyn_cast<AffineDimExpr>(expr)) {
return dim_values[dim_expr.getPosition()];
}
if (auto symbol_expr = mlir::dyn_cast<AffineSymbolExpr>(expr)) {
return symbol_values[symbol_expr.getPosition()];
}
auto binary_expr = mlir::cast<AffineBinaryOpExpr>(expr);
int64_t lhs =
EvaluateAffineExpr(binary_expr.getLHS(), dim_values, symbol_values);
int64_t rhs =
EvaluateAffineExpr(binary_expr.getRHS(), dim_values, symbol_values);
switch (binary_expr.getKind()) {
case AffineExprKind::Add:
return lhs + rhs;
case AffineExprKind::Mul:
return lhs * rhs;
case AffineExprKind::FloorDiv:
return FloorDiv(lhs, rhs);
case AffineExprKind::Mod:
return lhs % rhs;
default:
LOG(FATAL) << "Unsupported expression";
}
}
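// Recursively enumerates every combination of dimension and symbol values in
// the given ranges and appends the evaluated expression to `indices`.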
void FindAllIndices(AffineExpr expr, int dim_id, int symbol_id,
const std::vector<Interval>& dimension_ranges,
const std::vector<Interval>& symbol_ranges,
std::vector<int64_t>* dimensions,
std::vector<int64_t>* symbols,
std::vector<int64_t>* indices) {
if (dim_id < dimension_ranges.size()) {
Interval dim_range = dimension_ranges[dim_id];
for (int64_t dim_value = dim_range.lower; dim_value <= dim_range.upper;
++dim_value) {
dimensions->push_back(dim_value);
FindAllIndices(expr, dim_id + 1, symbol_id, dimension_ranges,
symbol_ranges, dimensions, symbols, indices);
dimensions->pop_back();
}
return;
}
if (symbol_id < symbol_ranges.size()) {
Interval symbol_range = symbol_ranges[symbol_id];
for (int64_t symbol_value = symbol_range.lower;
symbol_value <= symbol_range.upper; ++symbol_value) {
symbols->push_back(symbol_value);
FindAllIndices(expr, dim_id, symbol_id + 1, dimension_ranges,
symbol_ranges, dimensions, symbols, indices);
symbols->pop_back();
}
return;
}
indices->push_back(EvaluateAffineExpr(expr, *dimensions, *symbols));
}
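// Brute-forces every value `expr` can take over the given ranges and merges
// consecutive values into maximal contiguous intervals,
// e.g. {0, 1, 2, 10, 11} -> [0, 2], [10, 11].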
std::vector<Interval> FindIntervals(
AffineExpr expr, const std::vector<Interval>& dimension_ranges,
const std::vector<Interval>& symbol_ranges = {}) {
std::vector<int64_t> dimensions, symbols;
std::vector<int64_t> linear_indices;
FindAllIndices(expr, 0, 0, dimension_ranges, symbol_ranges, &dimensions,
&symbols, &linear_indices);
std::sort(linear_indices.begin(), linear_indices.end());
linear_indices.erase(
std::unique(linear_indices.begin(), linear_indices.end()),
linear_indices.end());
std::vector<Interval> intervals;
for (int i = 0, start, end; i < linear_indices.size();) {
start = linear_indices[i++];
end = start;
while (i < linear_indices.size() && linear_indices[i] == end + 1) {
++end;
++i;
}
intervals.push_back(Interval{start, end});
}
return intervals;
}
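// Extends the upper bound of every interval by `length` and merges intervals
// that then touch or overlap.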
std::vector<Interval> ExtendIntervals(const std::vector<Interval>& intervals,
int64_t length) {
std::vector<Interval> overlapped_intervals;
for (int i = 0; i < intervals.size();) {
int64_t lower = intervals[i].lower;
int64_t upper = intervals[i].upper + length;
++i;
while (i < intervals.size() && upper >= intervals[i].lower - 1) {
upper = std::max(upper, intervals[i].upper + length);
++i;
}
overlapped_intervals.push_back(Interval{lower, upper});
}
return overlapped_intervals;
}
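// Computes the intervals of linearized addresses accessed by the 32 threads of
// one warp. The patterns `d0` and `d0 * constant` are handled analytically;
// everything else falls back to brute-force enumeration over the thread
// dimension, extended by the loop trip count when the symbol part is exactly s0.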
std::vector<Interval> FindContiguousIntervals(
const PartitionedExpr& partitioned_expr, const IndexingMap& indexing_map) {
constexpr int64_t kNumThreadsPerWarp = 32;
MLIRContext* mlir_context = indexing_map.GetMLIRContext();
AffineExpr thread_x = mlir::getAffineDimExpr(0, mlir_context);
AffineExpr range = mlir::getAffineSymbolExpr(0, mlir_context);
if (partitioned_expr.func_of_d0 == thread_x) {
return {Interval{0, kNumThreadsPerWarp - 1}};
}
if (auto mul =
mlir::dyn_cast<AffineBinaryOpExpr>(partitioned_expr.func_of_d0);
mul && mul.getKind() == AffineExprKind::Mul) {
if (auto multiplier = mlir::dyn_cast<AffineConstantExpr>(mul.getRHS());
multiplier) {
if (multiplier.getValue() == -1) {
return {Interval{0, kNumThreadsPerWarp - 1}};
}
if (partitioned_expr.func_of_s0 == range) {
Interval range_interval = indexing_map.GetSymbolBound(0);
int64_t num_elems = range_interval.GetLoopTripCount();
if (num_elems >= std::abs(multiplier.getValue())) {
return {Interval{0, multiplier.getValue() * (kNumThreadsPerWarp - 1) +
num_elems - 1}};
}
std::vector<Interval> intervals;
for (int i = 0, dm = 0; i < kNumThreadsPerWarp;
++i, dm += multiplier.getValue()) {
intervals.push_back(
{range_interval.lower + dm, range_interval.upper + dm});
}
return intervals;
}
std::vector<Interval> intervals;
for (int i = 0, dm = 0; i < kNumThreadsPerWarp;
++i, dm += multiplier.getValue()) {
intervals.push_back({dm, dm});
}
return intervals;
}
}
auto intervals = FindIntervals(partitioned_expr.func_of_d0,
{indexing_map.GetDimVars(0).bounds});
if (partitioned_expr.func_of_s0 != range) {
return intervals;
}
Interval range_interval = indexing_map.GetSymbolBound(0);
return ExtendIntervals(intervals, range_interval.GetLoopTripCount() - 1);
}
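// Heuristically decides whether reads through `thread_x_to_linearized_input`
// are coalesced for `element_type`: RTVars and outer loop induction variables
// are pinned to representative values, the map is restricted to the first warp
// (threads 0..31), the address expression is partitioned into thread- and
// loop-dependent parts, and coalescing is estimated from the number of memory
// transactions the warp would issue.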
bool IsIndexingCoalesced(IndexingMap& thread_x_to_linearized_input,
PrimitiveType element_type) {
if (thread_x_to_linearized_input.IsUndefined()) {
return false;
}
if (thread_x_to_linearized_input.GetAffineMap().getNumResults() == 0) {
return true;
}
AssignValuesToRTVars(&thread_x_to_linearized_input);
MLIRContext* mlir_context = thread_x_to_linearized_input.GetMLIRContext();
AffineExpr thread_x_dim = mlir::getAffineDimExpr(
KernelFusionInterface::kIndexingMapThreadIdxDims[0], mlir_context);
AffineExpr c0 = getAffineConstantExpr(0, mlir_context);
IndexingMap thread_x_first_32_elements{
AffineMap::get(1, 0, {thread_x_dim, c0, c0, c0, c0, c0}, mlir_context),
{DimVar{{0, 31}}},
{},
{}};
IndexingMap thread_x_to_input_sample =
thread_x_first_32_elements * thread_x_to_linearized_input;
thread_x_to_input_sample.Simplify();
thread_x_to_input_sample.RescaleSymbols();
thread_x_to_input_sample.RemoveUnusedSymbols();
if (thread_x_to_input_sample.IsKnownEmpty()) {
return true;
}
AssignValuesToOuterLoopIVs(&thread_x_to_input_sample);
auto partitioned_expr =
Partition(thread_x_to_input_sample.GetAffineMap().getResult(0));
if (!partitioned_expr.has_value()) {
return false;
}
if (thread_x_to_input_sample.GetConstraintsCount() > 1 ||
(thread_x_to_input_sample.GetConstraintsCount() == 1 &&
thread_x_to_input_sample.GetConstraints().begin()->first !=
partitioned_expr->func_of_d0 + partitioned_expr->func_of_s0)) {
return false;
}
return EstimateCoalescingViaMemoryTransactionsCount(
FindContiguousIntervals(*partitioned_expr, thread_x_to_input_sample),
element_type);
}
}
CoalescingAnalysis::CoalescingAnalysis(
const HloInstruction* instr,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, MLIRContext* mlir_context,
bool use_heuristic) {
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(instr);
if (!use_heuristic && ComputeCoalescingForAllOperands(
*fusion_adaptor, operands, fusion_analysis,
fusion_interface, mlir_context)) {
return;
}
is_coalesced_computed_by_heuristic_ =
IsReadCoalescedHeuristic(fusion_analysis.GetEmitterFusionKind(), instr);
}
CoalescingAnalysis::CoalescingAnalysis(
const HloInstruction* producer, const HloInstruction* consumer,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, MLIRContext* mlir_context,
bool use_heuristic) {
auto fusion_adaptor =
HloFusionAdaptor::ForProducerConsumer(producer, consumer);
if (!use_heuristic && ComputeCoalescingForAllOperands(
*fusion_adaptor, operands, fusion_analysis,
fusion_interface, mlir_context)) {
return;
}
is_coalesced_computed_by_heuristic_ = IsReadCoalescedHeuristic(
fusion_analysis.GetEmitterFusionKind(), producer, consumer);
}
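// Returns false if the thread-id-to-input-memory-layout maps cannot be
// computed; the constructors then fall back to the heuristic. Rank-0 operands
// and operands without an indexing map are treated as coalesced; otherwise an
// operand is coalesced only if every one of its indexing maps is.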
bool CoalescingAnalysis::ComputeCoalescingForAllOperands(
const HloFusionAdaptor& fusion_adaptor,
absl::Span<const HloInstruction* const> operands,
const HloFusionAnalysis& fusion_analysis,
KernelFusionInterface* fusion_interface, MLIRContext* mlir_context) {
std::optional<GroupedByOpIndexingMap> thread_id_to_input_memory_layouts =
GetThreadIdToInputMemoryLayoutsMaps(fusion_adaptor, operands,
fusion_analysis, fusion_interface,
mlir_context);
if (!thread_id_to_input_memory_layouts.has_value()) {
return false;
}
for (const HloInstruction* operand : operands) {
if (operand->shape().rank() == 0) {
coalescing_per_operand_.insert({operand, true});
continue;
}
auto operand_indexing_maps =
thread_id_to_input_memory_layouts->find(operand);
if (operand_indexing_maps == thread_id_to_input_memory_layouts->end()) {
coalescing_per_operand_.insert({operand, true});
continue;
}
for (IndexingMap operand_indexing_map : operand_indexing_maps->second) {
bool is_coalesced = IsIndexingCoalesced(operand_indexing_map,
operand->shape().element_type());
auto [it, inserted] =
coalescing_per_operand_.insert({operand, is_coalesced});
if (!inserted) {
it->second &= is_coalesced;
}
if (!is_coalesced) break;
}
}
return true;
}
bool CoalescingAnalysis::IsReadCoalesced(const HloInstruction* operand) const {
auto it = coalescing_per_operand_.find(operand);
if (it == coalescing_per_operand_.end()) {
return is_coalesced_computed_by_heuristic_;
}
return it->second;
}
}
} | #include "xla/service/gpu/model/coalescing_analysis.h"
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/fusions/fusion_emitter.h"
#include "xla/service/gpu/fusions/fusions.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
class CoalescingTest : public HloTestBase {
public:
std::vector<bool> IsReadCoalescedPerOperand(absl::string_view hlo_string) {
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* root = module->entry_computation()->root_instruction();
return IsReadCoalescedPerOperand(root);
}
std::vector<bool> IsReadCoalescedPerOperand(const HloInstruction* root) {
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
auto analysis = AnalyzeFusion(*root, device_info_);
auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis});
auto fusion = dynamic_cast<KernelFusionInterface*>(emitter.get());
EXPECT_NE(fusion, nullptr);
CoalescingAnalysis coalescing_analysis(root, root->operands(), analysis,
fusion, &mlir_context_,
false);
std::vector<bool> results;
for (const HloInstruction* operand : root->operands()) {
results.push_back(coalescing_analysis.IsReadCoalesced(operand));
}
return results;
}
bool IsReadCoalescedHeuristic(absl::string_view hlo_string) {
auto module = ParseAndReturnVerifiedModule(hlo_string).value();
HloInstruction* root = module->entry_computation()->root_instruction();
auto analysis = AnalyzeFusion(*root, device_info_);
return xla::gpu::IsReadCoalescedHeuristic(analysis.GetEmitterFusionKind(),
root->operand(0), root);
}
protected:
stream_executor::DeviceDescription device_info_ =
TestGpuDeviceInfo::RTXA6000DeviceInfo();
mlir::MLIRContext mlir_context_;
};
TEST_F(CoalescingTest, IdentityLayout) {
absl::string_view ir = R"(
HloModule m
fusion {
p0 = f32[100, 200] parameter(0)
p1 = f32[100, 200] parameter(1)
      ROOT add0 = f32[100, 200] add(p0, p1)
}
ENTRY e {
p0 = f32[100, 200] parameter(0)
p1 = f32[100, 200] parameter(1)
ROOT fusion = f32[100, 200] fusion(p0, p1), kind=kInput, calls=fusion
}
)";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true));
}
TEST_F(CoalescingTest, RhsTransposedLayout) {
absl::string_view ir = R"(
HloModule m
fusion {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{0, 1} parameter(1)
ROOT exp = f32[100, 200]{1, 0} add(p0, p1)
}
ENTRY e {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{0, 1} parameter(1)
ROOT fusion = f32[100, 200]{1, 0} fusion(p0, p1), kind=kInput, calls=fusion
}
)";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, false));
}
TEST_F(CoalescingTest, OutputTransposedLayout) {
absl::string_view ir = R"(
HloModule m
fusion {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{1, 0} parameter(1)
ROOT exp = f32[100, 200]{0, 1} add(p0, p1)
}
ENTRY e {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{1, 0} parameter(1)
ROOT fusion = f32[100, 200]{0, 1} fusion(p0, p1), kind=kInput, calls=fusion
}
)";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(false, false));
}
TEST_F(CoalescingTest, OutputAndLhsTransposedLayout) {
absl::string_view ir = R"(
HloModule m
fusion {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{0, 1} parameter(1)
ROOT exp = f32[100, 200]{1, 0} add(p0, p1)
}
ENTRY e {
p0 = f32[100, 200]{1, 0} parameter(0)
p1 = f32[100, 200]{0, 1} parameter(1)
ROOT fusion = f32[100, 200]{1, 0} fusion(p0, p1), kind=kInput, calls=fusion
}
)";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, false));
}
TEST_F(CoalescingTest, Transpose) {
absl::string_view ir = R"(
HloModule module
fusion {
%input = f32[100, 64, 32] parameter(0)
ROOT transpose = f32[32, 100, 64] transpose(%input), dimensions={2, 0, 1}
}
ENTRY entry {
%input = f32[100, 64, 32] parameter(0)
ROOT %fusion = f32[32, 100, 64] fusion(%input), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, TransposeOfBroadcastHeuristic) {
absl::string_view ir = R"(
HloModule module
fusion {
input = f32[32, 100, 64] parameter(0)
ROOT slice = f32[32, 100, 1] slice(input), slice={[0:32:1], [0:100:1], [0:1:1]}
}
ENTRY entry {
p0 = f32[32] parameter(0)
broadcast = f32[100, 64, 32] broadcast(p0), dimensions={2}
transpose = f32[32, 100, 64] transpose(broadcast), dimensions={2, 0, 1}
ROOT %fusion = f32[32, 100, 1] fusion(transpose), kind=kLoop, calls=fusion
})";
EXPECT_TRUE(IsReadCoalescedHeuristic(ir));
}
TEST_F(CoalescingTest, TransposeOfIotaHeuristic) {
absl::string_view ir = R"(
HloModule module
fusion {
p0 = f32[32, 100, 64] parameter(0)
ROOT slice = f32[32, 100, 1] slice(p0), slice={[0:32:1], [0:100:1], [0:1:1]}
}
ENTRY entry {
iota = f32[100, 64, 32] iota(), iota_dimension=1
transpose = f32[32, 100, 64] transpose(iota), dimensions={2, 0, 1}
ROOT %fusion = f32[32, 100, 1] fusion(transpose), kind=kLoop, calls=fusion
})";
EXPECT_TRUE(IsReadCoalescedHeuristic(ir));
}
TEST_F(CoalescingTest, TransposeOfAddHeuristic) {
absl::string_view ir = R"(
HloModule module
fusion {
p0 = f32[32, 100, 64] parameter(0)
ROOT slice = f32[32, 100, 1] slice(p0), slice={[0:32:1], [0:100:1], [0:1:1]}
}
ENTRY entry {
input = f32[100, 64, 32] parameter(0)
add = f32[100, 64, 32] add(input, input)
transpose = f32[32, 100, 64] transpose(add), dimensions={2, 0, 1}
ROOT %fusion = f32[32, 100, 1] fusion(transpose), kind=kLoop, calls=fusion
})";
EXPECT_FALSE(IsReadCoalescedHeuristic(ir));
}
TEST_F(CoalescingTest, TransposeOnlyOuterDims) {
absl::string_view ir = R"(
HloModule module
fusion {
%input = f32[100, 32, 64] parameter(0)
ROOT transpose = f32[32, 100, 64] transpose(%input), dimensions={1, 0, 2}
}
ENTRY entry {
%input = f32[100, 32, 64] parameter(0)
ROOT %fusion = f32[32, 100, 64] fusion(%input), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, PadOp) {
absl::string_view ir = R"(
HloModule module
fusion {
p0 = f32[997, 436] parameter(0)
p1 = f32[] parameter(1)
ROOT pad = f32[1024, 512] pad(p0, p1), padding=10_17x24_52
}
ENTRY entry {
p0 = f32[997, 436] parameter(0)
p1 = f32[] parameter(1)
ROOT %fusion = f32[1024, 512] fusion(p0, p1), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true));
}
TEST_F(CoalescingTest, RowReduction) {
absl::string_view ir = R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,512] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,512] parameter(0)
ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, MultiRowReduction) {
absl::string_view ir = R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,4] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,4] parameter(0)
ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, ColumnReduction) {
absl::string_view ir = R"(
HloModule module
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fusion {
%input = f32[100,64,32] parameter(0)
%c0 = f32[] constant(0)
ROOT reduce = f32[100,32] reduce(%input, %c0),
dimensions={1}, to_apply=add
}
ENTRY entry {
%input = f32[100,64,32] parameter(0)
ROOT %fusion = f32[100,32] fusion(%input), kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true));
}
TEST_F(CoalescingTest, VariadicReduceViaLoopEmitter) {
absl::string_view ir = R"(
HloModule module
max {
p0 = s32[] parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
max01 = s32[] maximum(p0, p1)
max23 = s32[] maximum(p2, p3)
ROOT max = (s32[], s32[]) tuple(max01, max23)
}
fusion {
  p0 = s32[5696,10,4] parameter(0)
  p1 = s32[5696,10,4] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT reduce = (s32[5696,4], s32[5696,4]) reduce(s32[5696,10,4] p0,
s32[5696,10,4] p1, s32[] p2, s32[] p3), dimensions={1}, to_apply=max
}
ENTRY entry {
  p0 = s32[5696,10,4] parameter(0)
  p1 = s32[5696,10,4] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT f = (s32[5696,4], s32[5696,4]) fusion(p0, p1, p2, p3),
kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir),
ElementsAre(false, false, true, true));
}
TEST_F(CoalescingTest, VariadicReduceViaReductionEmitter) {
absl::string_view ir = R"(
HloModule module
max {
p0 = s32[] parameter(0)
p1 = s32[] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
max01 = s32[] maximum(p0, p1)
max23 = s32[] maximum(p2, p3)
ROOT max = (s32[], s32[]) tuple(max01, max23)
}
fusion {
p0 = s32[32,40] parameter(0)
p1 = s32[32,40] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT reduce = (s32[32], s32[32])
reduce(s32[32,40] p0, s32[32,40] p1, s32[] p2, s32[] p3),
dimensions={1}, to_apply=max
}
ENTRY entry {
p0 = s32[32,40] parameter(0)
p1 = s32[32,40] parameter(1)
p2 = s32[] parameter(2)
p3 = s32[] parameter(3)
ROOT f = (s32[32], s32[32]) fusion(p0, p1, p2, p3),
kind=kInput, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir),
ElementsAre(true, true, true, true));
}
TEST_F(CoalescingTest, Gather) {
absl::string_view ir = R"(
HloModule module
fusion {
operand = f32[33, 76, 70] parameter(0)
indices = s32[1806, 2] parameter(1)
ROOT gather = f32[1806, 7, 8, 4] gather(operand, indices),
offset_dims={1,2,3}, collapsed_slice_dims={}, start_index_map={0,1},
index_vector_dim=1, slice_sizes={7,8,4}
}
ENTRY entry {
p0 = f32[33, 76, 70] parameter(0)
p1 = s32[1806, 2] parameter(1)
ROOT %fusion = f32[1806, 7, 8, 4] fusion(p0, p1), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(false, true));
}
TEST_F(CoalescingTest, DynamicSlice) {
absl::string_view ir = R"(
HloModule module
fusion {
%src = s32[2,2,258] parameter(0)
%of1 = s32[] parameter(1)
%of2 = s32[] parameter(2)
%of3 = s32[] parameter(3)
ROOT %ds = s32[1,2,32] dynamic-slice(s32[2,2,258] %src,
s32[] %of1, s32[] %of2, s32[] %of3),
dynamic_slice_sizes={1, 2, 32}
}
ENTRY entry {
%p0 = s32[2,2,258] parameter(0)
%p1 = s32[] parameter(1)
%p2 = s32[] parameter(2)
%p3 = s32[] parameter(3)
ROOT %fusion = s32[1,2,32] fusion(p0, p1, p2, p3), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir),
ElementsAre(true, true, true, true));
}
TEST_F(CoalescingTest, UnusedParameter) {
Shape shape = ShapeUtil::MakeShape(F32, {100000});
auto module = std::make_unique<HloModule>("m", HloModuleConfig{});
HloComputation::Builder b("b");
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto p1 = b.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloComputation::Builder sub_builder("subcomp");
HloInstruction* p0f = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0f"));
HloInstruction* p1f = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1f"));
ASSERT_NE(p1f, nullptr);
sub_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0f));
HloComputation* subcomp = module->AddEmbeddedComputation(sub_builder.Build());
auto fusion = HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {p0, p1}, subcomp);
b.AddInstruction(std::move(fusion));
module->AddEntryComputation(b.Build());
EXPECT_THAT(IsReadCoalescedPerOperand(
module->entry_computation()->root_instruction()),
ElementsAre(true, true));
}
TEST_F(CoalescingTest, Param) {
absl::string_view ir = R"(
HloModule module
fusion {
%p0 = u32[48,2,1280] parameter(0)
%p1 = u32[48,1,1280] parameter(1)
%p2 = u32[48,1,1280] parameter(2)
%concat = u32[48,2,1280] concatenate(u32[48,1,1280] %p1,
u32[48,1,1280] %p2), dimensions={1}
ROOT %shift = u32[48,2,1280] shift-right-logical(
u32[48,2,1280] %concat, u32[48,2,1280] %p0)
}
ENTRY entry {
%p0 = u32[48,2,1280] parameter(0)
%p1 = u32[48,1,1280] parameter(1)
%p2 = u32[48,1,1280] parameter(2)
ROOT %fusion = u32[48,2,1280] fusion(p0, p1, p2), kind=kLoop, calls=fusion
})";
EXPECT_THAT(IsReadCoalescedPerOperand(ir), ElementsAre(true, true, true));
}
}
}
} | 2,148 |
#ifndef XLA_SERVICE_GPU_MODEL_GPU_COST_MODEL_STATS_COLLECTION_H_
#define XLA_SERVICE_GPU_MODEL_GPU_COST_MODEL_STATS_COLLECTION_H_
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class GpuCostModelStatsCollection : public HloModulePass {
public:
explicit GpuCostModelStatsCollection(
const se::DeviceDescription& d,
const GpuHloCostAnalysis::Options& cost_analysis_options)
: device_info_(d), cost_analysis_(cost_analysis_options, &device_info_) {}
absl::string_view name() const override {
return "gpu_cost_model_stats_collection";
}
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
se::DeviceDescription device_info_;
GpuHloCostAnalysis cost_analysis_;
};
}
}
#endif
#include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
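// Runs the GPU cost analysis over every computation and records an estimated
// run time on each fusion instruction via RecordEstimatedRunTime. Always
// returns false: no instructions are added or removed.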
absl::StatusOr<bool> GpuCostModelStatsCollection::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
for (auto* computation : module->MakeComputationPostOrder()) {
TF_CHECK_OK(computation->Accept(&cost_analysis_));
for (auto* fusion_instr : computation->instructions()) {
if (fusion_instr->opcode() != HloOpcode::kFusion) continue;
GpuPerformanceModel::RecordEstimatedRunTime(
fusion_instr, &cost_analysis_,
GpuPerformanceModelOptions::ForModule(module));
}
}
return false;
}
}
} | #include "xla/service/gpu/model/gpu_cost_model_stats_collection.h"
#include <stdint.h>
#include <memory>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class GpuCostModelStatsCollectionTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
GpuCostModelStatsCollection cost_model_stats_{
TestGpuDeviceInfo::RTXA6000DeviceInfo(),
GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(),
{},
true}};
};
TEST_F(GpuCostModelStatsCollectionTest, FusionInEntryComputation) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
ENTRY main {
%p0 = f32[16384] parameter(0)
ROOT %res = f32[16384]{0} fusion(p0), kind=kInput, calls=log
}
)"));
EXPECT_FALSE(cost_model_stats_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
root->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
EXPECT_TRUE(backend_config.has_reification_cost());
EXPECT_GT(backend_config.reification_cost().end_to_end_cycles(), 0);
}
TEST_F(GpuCostModelStatsCollectionTest, FusionInWhileComputation) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule test_module
cond {
p = f32[16384]{0} parameter(0)
ROOT %constant.2 = pred[] constant(true)
}
log {
p = f32[16384]{0} parameter(0)
ROOT l = f32[16384]{0} log(p)
}
loop {
%p0 = f32[16384] parameter(0)
ROOT %res = f32[16384]{0} fusion(p0), kind=kInput, calls=log
}
ENTRY main {
%p0 = f32[16384] parameter(0)
ROOT %while = f32[16384] while(%p0), body=%loop, condition=%cond
})"));
EXPECT_FALSE(cost_model_stats_.Run(module.get()).value());
HloInstruction* root = module->entry_computation()
->root_instruction()
->while_body()
->root_instruction();
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config,
root->backend_config<GpuBackendConfig>());
const FusionBackendConfig& backend_config =
gpu_config.fusion_backend_config();
EXPECT_TRUE(backend_config.has_reification_cost());
EXPECT_GT(backend_config.reification_cost().end_to_end_cycles(), 0);
}
}
} | 2,149 |
#ifndef XLA_SERVICE_GPU_MODEL_GPU_HLO_COST_ANALYSIS_H_
#define XLA_SERVICE_GPU_MODEL_GPU_HLO_COST_ANALYSIS_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
class GpuHloCostAnalysis : public HloCostAnalysis {
static constexpr int64_t kMaxBasicBlockSplitsPerFusion = 10;
static constexpr int64_t kMaxIRSize = 10000;
public:
explicit GpuHloCostAnalysis(
const Options& options,
const se::DeviceDescription* device_info = nullptr)
: HloCostAnalysis(options), device_info_(device_info) {}
absl::Status Preprocess(const HloInstruction* hlo) override;
float ScalingRatio(const HloInstruction& hlo) const;
int64_t NumOfDevices(const HloInstruction& hlo) const;
absl::Status HandleCustomCall(const HloInstruction* call) override;
int64_t GetConvolutionFlops(const HloInstruction* convolution) override;
absl::Status HandleElementwiseOp(const HloInstruction* hlo);
absl::Status HandleElementwiseUnary(const HloInstruction* hlo) override;
absl::Status HandleElementwiseBinary(const HloInstruction* hlo) override;
absl::Status HandleConcatenate(const HloInstruction* hlo) override;
absl::Status HandleAllReduce(const HloInstruction* allreduce) override;
absl::Status HandleReduce(const HloInstruction* hlo) override;
bool ProducerConsumerMergedTooLarge(const HloInstruction& producer,
const HloInstruction& consumer);
float IrSize(const HloInstruction& hlo) const;
float CommonElementwiseUtilization(const HloInstruction* a,
const HloInstruction* b) const;
const se::DeviceDescription* device_info_;
protected:
std::unique_ptr<HloCostAnalysis> CreateNestedCostAnalysis() override;
int64_t FusionParameterReadBytes(const HloInstruction* hlo) const override;
absl::Status FusionCalculateUtilizations(
const HloInstruction* fusion) override;
size_t immediate_constant_max_elements() const override { return 8; }
bool KeyToCopyFromSubcomputation(absl::string_view key) const override;
float IrBasicBlockSplitCount(const HloInstruction& hlo) const;
absl::flat_hash_map<const HloInstruction*,
absl::flat_hash_set<const HloInstruction*>>
elementwise_use_roots_;
absl::flat_hash_map<const HloInstruction*, float> root_utilizations_;
};
}
}
#endif
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/match.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/map_util.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/elemental_ir_emitter.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/cublas_cudnn.h"
#include "xla/service/gpu/model/hlo_op_profile.pb.h"
#include "xla/service/gpu/model/hlo_op_profiles.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
static constexpr absl::string_view kIRSizeKey = HloCostAnalysis::kReserved0Key;
static constexpr absl::string_view kBasicBlockSplitCountKey =
HloCostAnalysis::kReserved1Key;
static constexpr absl::string_view kCollAlgoScaleRatioKey =
"Collective algorithm's scaling ratio";
static constexpr absl::string_view kCollNumDevicesKey =
"Number of devices of a collective group";
absl::Status GpuHloCostAnalysis::Preprocess(const HloInstruction* hlo) {
TF_RETURN_IF_ERROR(HloCostAnalysis::Preprocess(hlo));
current_properties_[kIRSizeKey] = 1;
current_properties_[kBasicBlockSplitCountKey] =
ElementalIrEmitter::OpInvalidatesCache(hlo);
return absl::OkStatus();
}
float GpuHloCostAnalysis::ScalingRatio(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kCollAlgoScaleRatioKey, hlo_properties_);
}
int64_t GpuHloCostAnalysis::NumOfDevices(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kCollNumDevicesKey, hlo_properties_);
}
int64_t GpuHloCostAnalysis::FusionParameterReadBytes(
const HloInstruction* hlo) const {
CHECK(hlo->IsFused() && (hlo->opcode() == HloOpcode::kParameter ||
hlo->opcode() == HloOpcode::kGetTupleElement));
float utilization = hlo_properties_.at(hlo)[kUtilizationKey];
if (!options_.count_multiple_input_accesses) {
utilization = fmin(utilization, 1.0);
}
return std::llround(GetShapeSize(hlo->shape()) * utilization);
}
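// Walks the fused computation from the root towards the parameters,
// accumulating per-instruction utilization and IR size. Elementwise, tuple and
// get-tuple-element users forward their "roots" to their operands; any other
// user turns the operand into a new root whose utilization is rounded up to a
// whole number of elements.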
absl::Status GpuHloCostAnalysis::FusionCalculateUtilizations(
const HloInstruction* fusion) {
const HloInstruction* root = fusion->fused_expression_root();
std::vector<HloInstruction*> instructions =
fusion->fused_instructions_computation()->MakeInstructionPostOrder();
absl::c_reverse(instructions);
absl::flat_hash_map<const HloInstruction*, int64_t> root_ir_sizes;
for (const HloInstruction* instr : instructions) {
hlo_properties_[instr][kUtilizationKey] = 0;
hlo_properties_[instr][kIRSizeKey] = 0;
elementwise_use_roots_[instr].clear();
root_utilizations_[instr] = 0;
}
root_utilizations_[root] = 1.0;
root_ir_sizes[root] = 1;
elementwise_use_roots_[root].insert(root);
current_properties_[kFlopsKey] = 0;
current_properties_[kBasicBlockSplitCountKey] = 0;
current_properties_[kIRSizeKey] = 0;
for (const HloInstruction* instr : instructions) {
VLOG(8) << instr->name() << ":";
VLOG(9) << "Elementwise use roots:";
Properties& instr_props = hlo_properties_[instr];
for (const HloInstruction* r : elementwise_use_roots_[instr]) {
VLOG(9) << "\t" << r->name() << ": " << root_utilizations_[r];
instr_props[kUtilizationKey] += root_utilizations_[r];
instr_props[kIRSizeKey] += root_ir_sizes[r];
}
float cur_instr_utilization = instr_props[kUtilizationKey];
VLOG(8) << "Total utilization: " << cur_instr_utilization;
float cur_instr_times_emitted = instr_props[kIRSizeKey];
VLOG(8) << "Times emitted: " << cur_instr_times_emitted;
current_properties_[kFlopsKey] +=
cur_instr_utilization * instr_props[kFlopsKey];
current_properties_[kIRSizeKey] += cur_instr_times_emitted;
current_properties_[kBasicBlockSplitCountKey] +=
cur_instr_times_emitted * ElementalIrEmitter::OpInvalidatesCache(instr);
for (int operand_idx = 0; operand_idx < instr->operand_count();
++operand_idx) {
const HloInstruction* operand = instr->operand(operand_idx);
if ((instr->IsElementwise()) || instr->opcode() == HloOpcode::kTuple ||
instr->opcode() == HloOpcode::kGetTupleElement) {
for (const HloInstruction* r : elementwise_use_roots_[instr]) {
elementwise_use_roots_[operand].insert(r);
}
} else {
elementwise_use_roots_[operand].insert(operand);
float cur_operand_utilization =
cur_instr_utilization * operand_utilization(*instr, operand_idx);
int64_t operand_elements =
ShapeUtil::ElementsInRecursive(operand->shape());
if (operand_elements == 0) {
cur_operand_utilization = 0;
} else {
cur_operand_utilization =
ceil(cur_operand_utilization * operand_elements) /
operand_elements;
}
root_utilizations_[operand] += cur_operand_utilization;
root_ir_sizes[operand] += cur_instr_times_emitted;
}
}
}
return absl::OkStatus();
}
float GpuHloCostAnalysis::CommonElementwiseUtilization(
const HloInstruction* a, const HloInstruction* b) const {
float ret = 0;
for (auto r : elementwise_use_roots_.at(a)) {
if (elementwise_use_roots_.at(b).count(r)) {
ret += root_utilizations_.at(r);
}
}
return ret;
}
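// Estimates the IR size of fusing `producer` into `consumer`: the producer is
// replicated once per emission of the matching fusion parameter, and for
// pre-MLIR emitters (emitter level < 4) the size is additionally scaled by
// 2^(basic block splits), with fusions exceeding kMaxBasicBlockSplitsPerFusion
// rejected outright. Returns true if the merged size exceeds kMaxIRSize.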
bool GpuHloCostAnalysis::ProducerConsumerMergedTooLarge(
const HloInstruction& producer, const HloInstruction& consumer) {
int64_t producer_replication = 1;
if (consumer.opcode() == HloOpcode::kFusion) {
producer_replication =
IrSize(*consumer.fused_parameter(consumer.operand_index(&producer)));
}
VLOG(5) << producer.name() << " would be emitted by " << consumer.name()
<< " x" << producer_replication;
int64_t n_splits = producer_replication * IrBasicBlockSplitCount(producer) +
IrBasicBlockSplitCount(consumer);
VLOG(5) << "Basic block split counts: " << IrBasicBlockSplitCount(producer)
<< ", " << IrBasicBlockSplitCount(consumer) << " -> " << n_splits;
int64_t merged_ir_size =
(IrSize(producer) * producer_replication + IrSize(consumer));
if (producer.GetModule()
->config()
.debug_options()
.xla_gpu_mlir_emitter_level() < 4) {
if (n_splits > kMaxBasicBlockSplitsPerFusion) {
return true;
}
merged_ir_size *= (1 << n_splits);
}
VLOG(5) << "IR sizes: " << IrSize(producer) << ", " << IrSize(consumer)
<< " -> " << merged_ir_size;
return merged_ir_size > kMaxIRSize;
}
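// cuBLAS GEMM and cuDNN convolution custom calls get their flop counts from
// the backend config; all other custom calls fall through to the base
// implementation.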
absl::Status GpuHloCostAnalysis::HandleCustomCall(
const HloInstruction* custom_call) {
if (IsCublasGemm(*custom_call)) {
TF_ASSIGN_OR_RETURN(auto gpu_config,
custom_call->backend_config<gpu::GpuBackendConfig>());
const gpu::GemmBackendConfig& gemm_config =
gpu_config.gemm_backend_config();
const Shape& output_shape = custom_call->shape().IsTuple()
? custom_call->shape().tuple_shapes(0)
: custom_call->shape();
current_properties_[kFlopsKey] =
GetDotFlops(custom_call->operand(0)->shape(), output_shape,
gemm_config.dot_dimension_numbers());
return absl::OkStatus();
}
if (IsCustomCallToDnnConvolution(*custom_call)) {
current_properties_[kFlopsKey] = GetConvolutionFlops(custom_call);
if (custom_call->shape().IsTuple()) {
float output_size =
options_.shape_size(custom_call->shape().tuple_shapes(0));
current_properties_[kBytesAccessedKey] -=
current_properties_.output_bytes_accessed();
current_properties_[kBytesAccessedKey] += output_size;
current_properties_.set_output_bytes_accessed(output_size);
}
return absl::OkStatus();
}
return HloCostAnalysis::HandleCustomCall(custom_call);
}
int64_t GpuHloCostAnalysis::GetConvolutionFlops(
const HloInstruction* convolution) {
auto lhs = convolution->operand(0);
auto rhs = convolution->operand(1);
const Shape& lhs_shape = lhs->shape();
const Shape& rhs_shape = rhs->shape();
const Shape& result_shape = [&]() -> const Shape& {
const Shape& shape = convolution->shape();
if (IsCustomCallToDnnConvolution(*convolution) &&
convolution->shape().IsTuple()) {
return shape.tuple_shapes(0);
}
return shape;
}();
return HloCostAnalysis::GetConvolutionFlops(convolution, lhs_shape, rhs_shape,
result_shape);
}
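// Looks up the per-element flop cost of (opcode, element type) in the device's
// op profile, defaulting to 3 flops per element when the pair is not profiled.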
int64_t FlopsPerElement(const se::DeviceDescription* device_info,
const PrimitiveType type, const HloOpcode opcode) {
auto device_profile = HloOpProfiles::Singleton().GetProfile(device_info);
constexpr int64_t kDefaultFlopsPerElement = 3;
return FindOrDefault(device_profile, std::make_pair(opcode, type),
kDefaultFlopsPerElement);
}
int64_t GetFlopsForElementwiseOp(const se::DeviceDescription* gpu_device_info,
const HloOpcode op_code, const Shape& shape) {
int64_t flop_per_element =
FlopsPerElement(gpu_device_info, shape.element_type(), op_code);
return flop_per_element * ShapeUtil::ElementsInRecursive(shape);
}
int64_t GetFlopsForElementwiseOp(const se::DeviceDescription* gpu_device_info,
const HloInstruction* instr) {
return GetFlopsForElementwiseOp(gpu_device_info, instr->opcode(),
instr->shape());
}
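// Models an all-reduce: the number of participating ranks is the size of the
// largest replica group, bytes accessed covers every array subshape of the
// output plus all operands, flops are those of the reduction computation's
// root op applied over the output shape, and the scaling ratio
// num_ranks / (2 * (num_ranks - 1)) is recorded for the performance model.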
absl::Status GpuHloCostAnalysis::HandleAllReduce(
const HloInstruction* allreduce) {
const HloModuleConfig& config = allreduce->GetModule()->config();
TF_ASSIGN_OR_RETURN(
CollectiveOpGroupMode group_mode,
GetCollectiveOpGroupMode(
allreduce->channel_id().has_value(),
Cast<HloAllReduceInstruction>(allreduce)->use_global_device_ids()));
int64_t num_devices = config.num_partitions();
int64_t num_replicas = config.replica_count();
TF_ASSIGN_OR_RETURN(
std::vector<int64_t> participant_counts,
GetPariticipantCountsForReplicaGroups(
num_replicas, num_devices, allreduce->replica_groups(), group_mode));
int64_t num_ranks = 1;
for (auto count : participant_counts) {
num_ranks = std::max(num_ranks, count);
}
VLOG(5) << "Computing cost for " << num_ranks << " ranks in "
<< allreduce->ToString();
int64_t output_bytes_accessed = 0;
ShapeUtil::ForEachSubshape(
allreduce->shape(), [&](const Shape& subshape, const ShapeIndex&) {
if (subshape.IsArray()) {
output_bytes_accessed += GetShapeSize(subshape);
}
});
int64_t bytes_accessed = output_bytes_accessed;
for (const HloInstruction* operand : allreduce->operands()) {
bytes_accessed += GetShapeSize(operand->shape());
}
current_properties_.set_output_bytes_accessed(output_bytes_accessed);
current_properties_[kBytesAccessedKey] = bytes_accessed;
current_properties_[kCollNumDevicesKey] = num_ranks;
current_properties_[kFlopsKey] = GetFlopsForElementwiseOp(
device_info_, allreduce->to_apply()->root_instruction()->opcode(),
allreduce->shape());
int num_intra_steps = 2 * (num_ranks - 1);
float scaling_ratio = (1.0 * num_ranks) / num_intra_steps;
current_properties_[kCollAlgoScaleRatioKey] = scaling_ratio;
return absl::OkStatus();
}
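// Concatenate is modeled at 6 flops per output element, or 400 per element
// when the concat dimension is not the major one and its size in operand 0 is
// not a multiple of 32.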
absl::Status GpuHloCostAnalysis::HandleConcatenate(const HloInstruction* hlo) {
int64_t flop_per_element = 6;
int64_t dim = Cast<HloConcatenateInstruction>(hlo)->concatenate_dimension();
if (dim > 0 && hlo->operand(0)->shape().dimensions()[dim] & 31) {
flop_per_element = 400;
}
current_properties_[kFlopsKey] =
flop_per_element * ShapeUtil::ElementsInRecursive(hlo->shape());
return absl::OkStatus();
}
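// On top of the base reduce handling, recomputes output bytes across all leaf
// shapes of a (possibly variadic) reduce and charges every init value once per
// output element.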
absl::Status GpuHloCostAnalysis::HandleReduce(const HloInstruction* hlo) {
TF_RETURN_IF_ERROR(HloCostAnalysis::HandleReduce(hlo));
const HloReduceInstruction* reduce = DynCast<HloReduceInstruction>(hlo);
auto output_shape = reduce->shape().IsArray()
? reduce->shape()
: reduce->shape().tuple_shapes(0);
int64_t output_bytes_accessed = 0;
ShapeUtil::ForEachLeafShape(
reduce->shape(), [&](const Shape& sub_shape, const ShapeIndex& index) {
output_bytes_accessed += GetShapeSize(sub_shape);
});
current_properties_.set_output_bytes_accessed(output_bytes_accessed);
int64_t bytes_accessed = output_bytes_accessed;
for (int64_t input_operand_id = 0; input_operand_id < reduce->input_count();
++input_operand_id) {
bytes_accessed +=
current_properties_.operand_bytes_accessed(input_operand_id);
}
int64_t output_shape_size = ShapeUtil::ElementsIn(output_shape);
for (int64_t init_operand_id = reduce->input_count();
init_operand_id < reduce->operand_count(); ++init_operand_id) {
auto init_operand = reduce->operand(init_operand_id);
int64_t operand_bytes_accessed =
output_shape_size * GetShapeSize(init_operand->shape());
current_properties_.set_operand_bytes_accessed(init_operand_id,
operand_bytes_accessed);
current_properties_.set_operand_utilization(init_operand_id,
output_shape_size);
bytes_accessed += operand_bytes_accessed;
}
current_properties_[kBytesAccessedKey] = bytes_accessed;
return absl::OkStatus();
}
absl::Status GpuHloCostAnalysis::HandleElementwiseOp(
const HloInstruction* hlo) {
current_properties_[kFlopsKey] = GetFlopsForElementwiseOp(device_info_, hlo);
return absl::OkStatus();
}
absl::Status GpuHloCostAnalysis::HandleElementwiseUnary(
const HloInstruction* hlo) {
return HandleElementwiseOp(hlo);
}
absl::Status GpuHloCostAnalysis::HandleElementwiseBinary(
const HloInstruction* hlo) {
return HandleElementwiseOp(hlo);
}
std::unique_ptr<HloCostAnalysis>
GpuHloCostAnalysis::CreateNestedCostAnalysis() {
return std::make_unique<GpuHloCostAnalysis>(options_, device_info_);
}
bool GpuHloCostAnalysis::KeyToCopyFromSubcomputation(
absl::string_view key) const {
return !absl::StartsWith(key, kBytesAccessedKey) &&
!absl::StartsWith(key, kUtilizationKey) &&
!absl::StartsWith(key, kIRSizeKey) &&
!absl::StartsWith(key, kBasicBlockSplitCountKey);
}
float GpuHloCostAnalysis::IrBasicBlockSplitCount(
const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kBasicBlockSplitCountKey, hlo_properties_);
}
float GpuHloCostAnalysis::IrSize(const HloInstruction& hlo) const {
return GetPropertyForHlo(hlo, kIRSizeKey, hlo_properties_);
}
}
} | #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include <cstdint>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
class GpuHloCostAnalysisTest : public HloTestBase {
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
HloCostAnalysis::Options options_{ShapeSizeBytesFunction(),
{},
true};
GpuHloCostAnalysis analysis_{options_};
GpuHloCostAnalysisTest() : HloTestBase() {}
};
TEST_F(GpuHloCostAnalysisTest, ConvCustomCall) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
ENTRY entry {
p0 = s8[128,12,24,24,4]{4,3,2,1,0} parameter(0)
p1 = s8[16,12,5,5,4]{4,3,2,1,0} parameter(1)
p2 = f32[16]{0} parameter(2)
conv1 = (s8[128,4,24,24,4]{4,3,2,1,0}, u8[0]{0}) custom-call(p0, p1, p2),
window={size=5x5 pad=2_2x2_2},
dim_labels=bf01_oi01->bf01,
custom_call_target="__cudnn$convBiasActivationForward"
ROOT tuple = tuple(conv1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloComputation* comp = module->entry_computation();
const HloInstruction* conv1 = comp->GetInstructionWithName("conv1");
int op0_size = sizeof(int8_t) * 128 * 12 * 24 * 24 * 4;
int op1_size = sizeof(int8_t) * 16 * 12 * 5 * 5 * 4;
int op2_size = sizeof(float) * 16;
int out_size = sizeof(int8_t) * 128 * 4 * 24 * 24 * 4;
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 0), op0_size);
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 1), op1_size);
EXPECT_EQ(analysis_.operand_bytes_accessed(*conv1, 2), op2_size);
EXPECT_EQ(analysis_.output_bytes_accessed(*conv1), out_size);
EXPECT_EQ(analysis_.bytes_accessed(*conv1),
op0_size + op1_size + op2_size + out_size);
EXPECT_EQ(analysis_.flop_count(*conv1), 159694848);
}
TEST_F(GpuHloCostAnalysisTest, ReduceWindowWithOverlapsRepeatedReads) {
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
add {
a0 = f32[] parameter(0)
a1 = f32[] parameter(1)
ROOT _ = f32[] add(a0, a1)
}
ENTRY entry {
p0 = f32[8,8] parameter(0)
c0 = f32[] constant(0)
ROOT _ = f32[3,4] reduce-window(p0, c0), window={size=4x5 stride=2x1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
int n_output_elements = 3 * 4;
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.flop_count(), 3 * n_output_elements * (4 * 5 - 1));
EXPECT_EQ(analysis_.bytes_accessed(),
sizeof(float) * (8 * 8 + 1 + n_output_elements));
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0),
sizeof(float) * n_output_elements * 4 * 5);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 1), sizeof(float) * 1);
EXPECT_EQ(analysis_.output_bytes_accessed(*root),
sizeof(float) * n_output_elements);
}
TEST_F(GpuHloCostAnalysisTest, BroadcastWithRepeats) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[] parameter(0)
c1 = s8[] constant(0)
a1 = s8[] add(p1, c1)
b1 = s8[10000] broadcast(a1), dimensions={}
b2 = s8[10000] broadcast(c1), dimensions={}
ROOT r1 = s8[10000] add(b1, b2)
}
ENTRY e {
p0 = s8[] parameter(0)
ROOT r0 = s8[10000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 10000);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 10000);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2 * 10000);
EXPECT_EQ(analysis_.bytes_accessed(), 2 * 10000);
}
TEST_F(GpuHloCostAnalysisTest, WithoutRepeats) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[] parameter(0)
a1 = s8[] add(p1, p1)
b1 = s8[10000] broadcast(a1), dimensions={}
a2 = s8[10000] add(b1, b1)
slice1 = s8[8000] slice(a2), slice={[0:8000]}
slice2 = s8[8000] slice(a2), slice={[2000:10000]}
c = s8[10000] constant({...})
slicec1 = s8[8000] slice(c), slice={[0:8000]}
slicec2 = s8[8000] slice(c), slice={[2000:10000]}
a3 = s8[8000] add(slice1, slice2)
a4 = s8[8000] add(slicec1, slicec2)
ROOT a5 = s8[8000] add(a3, a4)
}
ENTRY e {
p0 = s8[] parameter(0)
ROOT r0 = s8[8000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
options_.count_multiple_input_accesses = false;
GpuHloCostAnalysis analysis{options_};
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis));
EXPECT_EQ(analysis.output_bytes_accessed(*root), 8000);
EXPECT_EQ(analysis.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis.bytes_accessed(*root), 1 + 8000 + 10000);
EXPECT_EQ(analysis.bytes_accessed(), 1 + 8000 + 10000);
}
TEST_F(GpuHloCostAnalysisTest, BroadcastFlops) {
absl::string_view hlo_string = R"(
HloModule m
f {
i0 = f32[1024] iota(), iota_dimension=0
m0 = f32[1024] add(i0, i0)
s0 = f32[1024] multiply(m0, m0)
b0 = f32[1024,1024] broadcast(s0), dimensions={0}
ROOT r0 = f32[1024,1024] negate(b0)
}
ENTRY e {
ROOT r = f32[1024,1024] fusion(), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto n_elements = 1024 * 1024;
EXPECT_EQ(analysis_.output_bytes_accessed(*root), n_elements * 4);
EXPECT_EQ(analysis_.bytes_accessed(*root), n_elements * 4);
EXPECT_EQ(analysis_.bytes_accessed(), n_elements * 4);
EXPECT_EQ(analysis_.flop_count(), n_elements * 3 * 3);
EXPECT_EQ(analysis_.IrSize(*root), 5);
}
TEST_F(GpuHloCostAnalysisTest, Slice) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[100000000] parameter(0)
i1 = s8[100000000] iota(), iota_dimension=0
a1 = s8[100000000] add(p1, i1)
ROOT r1 = s8[1] slice(a1), slice={[0:1]}
}
ENTRY e {
p0 = s8[100000000] parameter(0)
ROOT r0 = s8[1] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2);
EXPECT_EQ(analysis_.bytes_accessed(), 2);
EXPECT_EQ(analysis_.IrSize(*root), 4);
}
TEST_F(GpuHloCostAnalysisTest, TwoSlices) {
absl::string_view hlo_string = R"(
HloModule m
f {
p1 = s8[100] parameter(0)
i1 = s8[100] iota(), iota_dimension=0
a1 = s8[100] add(p1, i1)
slice1 = s8[1] slice(a1), slice={[0:1]}
slice2 = s8[1] slice(a1), slice={[3:4]}
ROOT r = s8[1] add(slice1, slice2)
}
ENTRY e {
p0 = s8[100] parameter(0)
ROOT r0 = s8[1] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
const HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 2);
EXPECT_EQ(analysis_.bytes_accessed(*root), 3);
EXPECT_EQ(analysis_.bytes_accessed(), 3);
EXPECT_EQ(analysis_.IrSize(*root), 9);
}
TEST_F(GpuHloCostAnalysisTest, MultipleTrivialUsers) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[] parameter(0)
m0 = s8[] multiply(p0, p0)
n0 = s8[] negate(p0)
ROOT a0 = s8[] add(m0, n0)
}
ENTRY e {
param0 = s8[] parameter(0)
ROOT r0 = s8[] fusion(param0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1);
EXPECT_EQ(analysis_.bytes_accessed(*root), 1 + 1);
EXPECT_EQ(analysis_.bytes_accessed(), 1 + 1);
EXPECT_EQ(analysis_.IrSize(*root), 4);
}
TEST_F(GpuHloCostAnalysisTest, MixedUsers) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[10] parameter(0)
n0 = s8[10] negate(p0)
m0 = s8[10] multiply(n0, n0)
a0 = s8[10] add(n0, n0)
s0 = s8[5] slice(a0), slice={[0:5]}
s1 = s8[2] slice(n0), slice={[4:6]}
n1 = s8[2] negate(s1)
ROOT c0 = s8[17] concatenate(s0, m0, n1), dimensions={0}
}
ENTRY e {
param0 = s8[10] parameter(0)
ROOT r0 = s8[17] fusion(param0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 17);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 17);
EXPECT_EQ(analysis_.bytes_accessed(*root), 17 + 17);
EXPECT_EQ(analysis_.bytes_accessed(), 17 + 17);
EXPECT_EQ(analysis_.IrSize(*root->fused_parameter(0)), 3);
EXPECT_EQ(analysis_.IrSize(*root->fused_parameter(0)),
analysis_.IrSize(*root->fused_parameter(0)->users()[0]));
EXPECT_EQ(analysis_.IrSize(*root), 12);
}
TEST_F(GpuHloCostAnalysisTest, FractionalUseRoundingUp) {
absl::string_view hlo_string = R"(
HloModule m
add_s8 {
lhs = s8[] parameter(0)
rhs = s8[] parameter(1)
ROOT add = s8[] add(lhs, rhs)
}
f {
p0 = s8[] parameter(0)
b0 = s8[10] broadcast(p0), dimensions={}
c0 = s8[] constant(0)
r0 = s8[] reduce(b0, c0), dimensions={0}, to_apply=add_s8
bitcast0 = s8[1] bitcast(r0)
i0 = s8[5] iota(), iota_dimension=0
cat0 = s8[6] concatenate(bitcast0, i0), dimensions={0}
p1 = s32[] parameter(1)
ROOT s0 = s8[2] dynamic-slice(cat0, p1), dynamic_slice_sizes={2}
}
ENTRY e {
p0 = s8[] parameter(0)
p1 = s32[] parameter(1)
ROOT r = s8[2] fusion(p0, p1), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 2);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 10);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 1), 4);
EXPECT_EQ(analysis_.bytes_accessed(*root), 2 + 10 + 4);
EXPECT_EQ(analysis_.bytes_accessed(), 2 + 10 + 4);
}
TEST_F(GpuHloCostAnalysisTest, LargeConstant) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = s8[1000] parameter(0)
c0 = s8[1000] constant({...})
ROOT a0 = s8[1000] add(p0, c0)
}
ENTRY e {
p0 = s8[1000] parameter(0)
ROOT r = s8[1000] fusion(p0), kind=kInput, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
EXPECT_EQ(analysis_.output_bytes_accessed(*root), 1000);
EXPECT_EQ(analysis_.operand_bytes_accessed(*root, 0), 1000);
EXPECT_EQ(analysis_.bytes_accessed(*root), 3000);
EXPECT_EQ(analysis_.bytes_accessed(), 3000);
EXPECT_EQ(analysis_.IrSize(*root), 3);
}
TEST_F(GpuHloCostAnalysisTest, DynUpdateSliceUsingOperandData) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
to_update = s8[3,1,1,1] parameter(0)
update = s8[1,1,1,1] constant(0)
a = s32[] constant(0)
dus = s8[3,1,1,1] dynamic-update-slice(to_update, update, a, a, a, a)
ROOT _ = s8[3,1,1,1] negate(dus)
}
ENTRY _ {
to_update = s8[3,1,1,1] parameter(0)
ROOT _ = s8[3,1,1,1] fusion(to_update), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
ASSERT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_EQ(analysis_.operand_bytes_accessed(*fusion, 0), 3 - 1);
EXPECT_EQ(analysis_.output_bytes_accessed(*fusion), 3);
}
TEST_F(GpuHloCostAnalysisTest, DynUpdateSliceNotUsingOperandData) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
to_update = s8[3,1,1,1] parameter(0)
update = s8[1,1,1,1] constant(0)
a = s32[] constant(0)
ROOT dus = s8[3,1,1,1] dynamic-update-slice(to_update, update, a, a, a, a)
}
ENTRY _ {
to_update = s8[3,1,1,1] parameter(0)
ROOT _ = s8[3,1,1,1] fusion(to_update), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
ASSERT_EQ(fusion->opcode(), HloOpcode::kFusion);
EXPECT_EQ(analysis_.operand_bytes_accessed(*fusion, 0), 0);
EXPECT_EQ(analysis_.output_bytes_accessed(*fusion), 1);
}
TEST_F(GpuHloCostAnalysisTest, CommonElementwiseUseTwoParameters) {
const char* hlo_fusion_module_str = R"(
HloModule m
add {
p0 = s8[] parameter(0)
p1 = s8[] parameter(1)
ROOT _ = s8[] add(p0, p1)
}
f {
p0 = s8[10] parameter(0)
p1 = s8[10] parameter(1)
a = s8[10] add(p0, p1)
c0 = s8[] constant(0)
r0 = s8[] reduce(a, c0), dimensions={0}, to_apply=add
c1 = s8[] constant(100)
r1 = s8[] reduce(a, c1), dimensions={0}, to_apply=add
ROOT _ = s8[] add(r0, r1)
}
ENTRY _ {
p0 = s8[10] parameter(0)
p1 = s8[10] parameter(1)
ROOT _ = s8[] fusion(p0, p1), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(analysis_.CommonElementwiseUtilization(fusion->fused_parameter(0),
fusion->fused_parameter(1)),
2.f);
}
TEST_F(GpuHloCostAnalysisTest, CommonElementwiseUseParameterAndRoot) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
p0 = s8[10] parameter(0)
p1 = s8[] parameter(1)
p1b = s8[10] broadcast(p1)
a = s8[10] add(p0, p1b)
ROOT _ = s8[10] negate(a)
}
ENTRY _ {
p0 = s8[10] parameter(0)
p1 = s8[] parameter(1)
ROOT _ = s8[10] fusion(p0, p1), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(analysis_.CommonElementwiseUtilization(
fusion->fused_parameter(0), fusion->fused_expression_root()),
1.f);
EXPECT_EQ(analysis_.CommonElementwiseUtilization(
fusion->fused_parameter(1), fusion->fused_expression_root()),
0.f);
}
TEST_F(GpuHloCostAnalysisTest,
CommonElementwiseUseParameterAndRootMultiOutputFusion) {
const char* hlo_fusion_module_str = R"(
HloModule m
f {
p0 = s8[10] parameter(0)
p1 = s8[] parameter(1)
p1b = s8[10] broadcast(p1)
a = s8[10] add(p0, p1b)
neg = s8[10] negate(a)
ROOT _ = (s8[10], s8[10]) tuple(a, neg)
}
ENTRY _ {
p0 = s8[10] parameter(0)
p1 = s8[] parameter(1)
ROOT _ = (s8[10], s8[10]) fusion(p0, p1), kind=kLoop, calls=f
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_fusion_module_str));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
HloInstruction* fusion = module->entry_computation()->root_instruction();
EXPECT_EQ(analysis_.CommonElementwiseUtilization(
fusion->fused_parameter(0), fusion->fused_expression_root()),
1.f);
EXPECT_EQ(analysis_.CommonElementwiseUtilization(
fusion->fused_parameter(1), fusion->fused_expression_root()),
0.f);
}
TEST_F(GpuHloCostAnalysisTest, Reduce) {
absl::string_view hlo_string = R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add.0 = f32[] add(param_0, param_1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(param_0.3, constant), dimensions={1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
int64_t input_bytes_accessed = 4 * 32 * 40;
int64_t init_bytes_accessed = 4 * 32;
int64_t output_bytes_accessed = 4 * 32;
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 0), input_bytes_accessed);
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 1), init_bytes_accessed);
EXPECT_EQ(analysis_.output_bytes_accessed(*reduce), output_bytes_accessed);
EXPECT_EQ(analysis_.bytes_accessed(*reduce),
input_bytes_accessed + init_bytes_accessed + output_bytes_accessed);
EXPECT_EQ(analysis_.flop_count(*reduce), 32 * 39 * 3);
}
TEST_F(GpuHloCostAnalysisTest, VariadicReduce) {
absl::string_view hlo_string = R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
param_2 = f32[] parameter(2)
param_3 = f32[] parameter(3)
add.0 = f32[] add(param_0, param_2)
add.1 = f32[] add(param_1, param_3)
ROOT t = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
param_1.3 = f32[32,40]{1,0} parameter(1)
param_2.2 = f32[] parameter(2)
constant = f32[] constant(0)
ROOT reduce = (f32[32]{0}, f32[32]{0}) reduce(param_0.3, param_1.3, param_2.2, constant), dimensions={1}, to_apply=add
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
const HloInstruction* reduce =
module->entry_computation()->root_instruction();
int64_t input_bytes_accessed = 4 * 32 * 40;
int64_t init_bytes_accessed = 4 * 32;
int64_t output_bytes_accessed = 2 * 4 * 32;
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 0), input_bytes_accessed);
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 1), input_bytes_accessed);
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 2), init_bytes_accessed);
EXPECT_EQ(analysis_.operand_bytes_accessed(*reduce, 3), init_bytes_accessed);
EXPECT_EQ(analysis_.output_bytes_accessed(*reduce), output_bytes_accessed);
EXPECT_EQ(analysis_.bytes_accessed(*reduce), 2 * input_bytes_accessed +
2 * init_bytes_accessed +
output_bytes_accessed);
EXPECT_EQ(analysis_.flop_count(*reduce), 32 * 39 * 6);
}
}
} | 2,150 |
#ifndef XLA_SERVICE_GPU_MODEL_TILED_HLO_INSTRUCTION_H_
#define XLA_SERVICE_GPU_MODEL_TILED_HLO_INSTRUCTION_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
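// A tile of an HLO instruction: the tile size and stride along every output
// dimension, plus an indexing map from the block id to the offsets of the
// tile.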
class TiledHloInstruction {
public:
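// Hash and equality functors that compare the pointed-to tiles by value, so
// that structurally identical tiles deduplicate in pointer-keyed hash sets.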
struct PtrHash {
size_t operator()(const TiledHloInstruction* tiled_hlo) const;
};
struct PtrEqual {
bool operator()(const TiledHloInstruction* lhs,
const TiledHloInstruction* rhs) const;
};
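// Creates a tile for `hlo`. Returns an error if the number of tile sizes or
// tile strides does not match the rank of the hlo shape, or if
// `block_id_to_tile_offsets_indexing` does not map a single block id
// (1 dimension, 0 symbols) to one offset per output dimension.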
static absl::StatusOr<std::unique_ptr<TiledHloInstruction>> Create(
const HloInstruction* hlo, std::vector<int64_t> tile_sizes,
std::vector<int64_t> tile_strides,
IndexingMap block_id_to_tile_offsets_indexing);
const HloInstruction* hlo() const { return hlo_; }
const std::vector<int64_t>& tile_sizes() const { return tile_sizes_; }
const std::vector<int64_t>& tile_strides() const { return tile_strides_; }
const IndexingMap& block_id_to_tile_offsets_indexing() const {
return block_id_to_tile_offsets_indexing_;
}
const TiledHloInstruction* operand(int64_t operand_id) const {
return operands_[operand_id];
}
const std::vector<TiledHloInstruction*>& operands() const {
return operands_;
}
void AppendOperand(TiledHloInstruction* operand) {
operands_.push_back(operand);
}
std::string ToString() const;
template <typename Sink>
friend void AbslStringify(Sink& sink, const TiledHloInstruction& tiled_hlo) {
sink.Append(tiled_hlo.ToString());
}
private:
TiledHloInstruction(const HloInstruction* hlo,
std::vector<int64_t> tile_sizes,
std::vector<int64_t> tile_strides,
IndexingMap block_id_to_tile_offsets_indexing)
: hlo_(hlo),
tile_sizes_(std::move(tile_sizes)),
tile_strides_(std::move(tile_strides)),
block_id_to_tile_offsets_indexing_(
std::move(block_id_to_tile_offsets_indexing)) {}
const HloInstruction* hlo_;
std::vector<int64_t> tile_sizes_;
std::vector<int64_t> tile_strides_;
IndexingMap block_id_to_tile_offsets_indexing_;
std::vector<TiledHloInstruction*> operands_;
};
bool operator==(const TiledHloInstruction& lhs, const TiledHloInstruction& rhs);
bool operator!=(const TiledHloInstruction& lhs, const TiledHloInstruction& rhs);
template <typename H>
H AbslHashValue(H h, const TiledHloInstruction& tiled_hlo_instruction) {
return H::combine(std::move(h), tiled_hlo_instruction.hlo(),
tiled_hlo_instruction.tile_sizes(),
tiled_hlo_instruction.tile_strides(),
tiled_hlo_instruction.block_id_to_tile_offsets_indexing());
}
}
}
#endif
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/util.h"
namespace xla {
namespace gpu {
size_t TiledHloInstruction::PtrHash::operator()(
const TiledHloInstruction* tiled_hlo) const {
return absl::HashOf(*tiled_hlo);
}
bool TiledHloInstruction::PtrEqual::operator()(
const TiledHloInstruction* lhs, const TiledHloInstruction* rhs) const {
return *lhs == *rhs;
}
bool operator==(const TiledHloInstruction& lhs,
const TiledHloInstruction& rhs) {
return lhs.hlo() == rhs.hlo() && lhs.tile_sizes() == rhs.tile_sizes() &&
lhs.tile_strides() == rhs.tile_strides() &&
lhs.block_id_to_tile_offsets_indexing() ==
rhs.block_id_to_tile_offsets_indexing();
}
bool operator!=(const TiledHloInstruction& lhs,
const TiledHloInstruction& rhs) {
return !(lhs == rhs);
}
absl::StatusOr<std::unique_ptr<TiledHloInstruction>>
TiledHloInstruction::Create(const HloInstruction* hlo,
std::vector<int64_t> tile_sizes,
std::vector<int64_t> tile_strides,
IndexingMap block_id_to_tile_offsets_indexing) {
int rank = hlo->shape().rank();
if (tile_sizes.size() != rank) {
return absl::InvalidArgumentError(
absl::StrCat("Number of tile sizes must be equal to the rank of the "
"hlo shape. tile_sizes = ",
tile_sizes.size(), ", hlo = ", hlo->ToString()));
}
if (tile_strides.size() != rank) {
return absl::InvalidArgumentError(
absl::StrCat("Number of tile strides must be equal to the rank of the "
"hlo shape. tile_strides = ",
tile_strides.size(), ", hlo = ", hlo->ToString()));
}
if (block_id_to_tile_offsets_indexing.GetDimensionCount() != 1 ||
block_id_to_tile_offsets_indexing.GetSymbolCount() != 0) {
return absl::InvalidArgumentError(absl::StrCat(
"block_id_to_tile_offsets_indexing must have 1 dim and 0 symbols. "
"block_id_to_tile_offsets_indexing = ",
block_id_to_tile_offsets_indexing.ToString()));
}
if (block_id_to_tile_offsets_indexing.GetAffineMap().getNumResults() !=
rank) {
return absl::InvalidArgumentError(absl::StrCat(
"block_id_to_tile_offsets_indexing must have the same number of "
"results as the rank of the hlo shape. "
"block_id_to_tile_offsets_indexing = ",
block_id_to_tile_offsets_indexing.ToString(),
", hlo = ", hlo->ToString()));
}
return absl::WrapUnique(new TiledHloInstruction(
hlo, std::move(tile_sizes), std::move(tile_strides),
std::move(block_id_to_tile_offsets_indexing)));
}
std::string TiledHloInstruction::ToString() const {
std::stringstream ss;
ss << "\thlo: " << hlo_->ToString() << "\n";
ss << "\ttile_sizes: (" << absl::StrJoin(tile_sizes_, ", ") << ")\n";
ss << "\ttile_strides: (" << absl::StrJoin(tile_strides_, ", ") << ")\n";
ss << "\tblock_id_to_tile_offsets_indexing: "
<< block_id_to_tile_offsets_indexing_;
return ss.str();
}
}
} | #include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
class TiledHloInstructionTest : public HloTestBase {
public:
mlir::MLIRContext mlir_context_;
};
TEST_F(TiledHloInstructionTest, PtrHashAndPtrEqualWorkCorrectly) {
std::unique_ptr<HloInstruction> hlo = HloInstruction::CreateParameter(
0,
ShapeUtil::MakeShape(PrimitiveType::F32, {32, 64}), "p0");
IndexingMap block_id_to_tile_offsets_indexing = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0 floordiv 16, (d0 mod 16) * 16)",
&mlir_context_),
{8},
{});
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<TiledHloInstruction> tiled_hlo1,
TiledHloInstruction::Create(hlo.get(), {16, 16},
{1, 1},
block_id_to_tile_offsets_indexing));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<TiledHloInstruction> tiled_hlo2,
TiledHloInstruction::Create(hlo.get(), {16, 16},
{1, 1},
block_id_to_tile_offsets_indexing));
TF_ASSERT_OK_AND_ASSIGN(
std::unique_ptr<TiledHloInstruction> tiled_hlo3,
TiledHloInstruction::Create(hlo.get(), {16, 32},
{1, 1},
block_id_to_tile_offsets_indexing));
EXPECT_EQ(*tiled_hlo1, *tiled_hlo2);
EXPECT_NE(*tiled_hlo1, *tiled_hlo3);
absl::flat_hash_set<TiledHloInstruction*, TiledHloInstruction::PtrHash,
TiledHloInstruction::PtrEqual>
tiled_hlo_set = {tiled_hlo1.get(), tiled_hlo2.get(), tiled_hlo3.get()};
EXPECT_EQ(tiled_hlo_set.size(), 2);
}
TEST_F(TiledHloInstructionTest, TileSizesAndStridesShouldMatchHloShapeRank) {
std::unique_ptr<HloInstruction> hlo = HloInstruction::CreateParameter(
0,
ShapeUtil::MakeShape(PrimitiveType::F32, {32, 64}), "p0");
IndexingMap block_id_to_tile_offsets_indexing = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0 floordiv 16, (d0 mod 16) * 16)",
&mlir_context_),
{8},
{});
EXPECT_THAT(
TiledHloInstruction::Create(hlo.get(), {16},
{1, 1},
block_id_to_tile_offsets_indexing)
.status()
.message(),
::testing::HasSubstr("Number of tile sizes must be equal to the rank"));
EXPECT_THAT(
TiledHloInstruction::Create(hlo.get(), {16, 16},
{1, 1, 1},
block_id_to_tile_offsets_indexing)
.status()
.message(),
::testing::HasSubstr("Number of tile strides must be equal to the rank"));
}
TEST_F(TiledHloInstructionTest,
ShouldReturnErrorIfBlockIdToTileOffsetsIndexingIsInvalid) {
std::unique_ptr<HloInstruction> hlo = HloInstruction::CreateParameter(
0,
ShapeUtil::MakeShape(PrimitiveType::F32, {32, 64}), "p0");
IndexingMap block_id_to_tile_offsets_indexing1 = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0) -> (d0 floordiv 16)", &mlir_context_),
{8},
{});
EXPECT_THAT(
TiledHloInstruction::Create(hlo.get(), {16, 16},
{1, 1},
block_id_to_tile_offsets_indexing1)
.status()
.message(),
::testing::HasSubstr(
"must have the same number of results as the rank of the hlo shape"));
IndexingMap block_id_to_tile_offsets_indexing2 = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0)[s0] -> (d0 + s0, d0 floordiv 16)", &mlir_context_),
{8},
{8});
EXPECT_THAT(TiledHloInstruction::Create(hlo.get(), {16, 16},
{1, 1},
block_id_to_tile_offsets_indexing2)
.status()
.message(),
::testing::HasSubstr("must have 1 dim and 0 symbols"));
}
}
}
} | 2,151 |
#ifndef XLA_SERVICE_GPU_MODEL_GPU_INDEXING_PERFORMANCE_MODEL_H_
#define XLA_SERVICE_GPU_MODEL_GPU_INDEXING_PERFORMANCE_MODEL_H_
#include <cstddef>
#include <cstdint>
#include <variant>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/gpu/model/hlo_op_profiles.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/stream_executor/device_description.h"
namespace xla {
namespace gpu {
struct TiledRunTimeData {
EstimateRunTimeData runtime_data;
BlockLevelParameters block_level_parameters;
};
using TiledRunTimeDataOrError = std::variant<TiledRunTimeData, FusionDecision>;
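// Cost model that derives GPU run-time estimates from indexing analysis:
// flops are accumulated for instructions inside a fusion, bytes read are
// derived from the iteration space of each operand's indexing map, and the
// resulting compute and memory-access times are combined into an execution
// time.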
class GpuPerformanceModelWithIndexingAnalysis : public GpuPerformanceModelBase {
public:
explicit GpuPerformanceModelWithIndexingAnalysis(
const se::DeviceDescription* device_info,
HloFusionAnalysisCache* fusion_analysis_cache,
HloCostAnalysis::ShapeSizeFunction shape_size,
mlir::MLIRContext* mlir_context)
: hlo_op_profile_(&HloOpProfiles::Singleton().GetProfile(device_info)),
device_info_(device_info),
fusion_analysis_cache_(fusion_analysis_cache),
shape_size_(shape_size),
mlir_context_(mlir_context) {}
EstimateRunTimeData EstimateRunTimeForFusion(
const HloFusionAnalysis& fusion_analysis, bool is_coalesced = true);
EstimateRunTimeData EstimateRunTimeForInstruction(
const HloInstruction* producer);
EstimateRunTimeData EstimateRunTimeForProducerConsumer(
const HloInstruction* producer, const HloInstruction* consumer);
RunTimes EstimateRunTimes(
const HloInstruction* producer,
absl::Span<const HloInstruction* const> fused_consumers = {});
EstimateRunTimeData EstimateRunTimeForTiledHloComputation(
const HloFusionAdaptor& fusion_adaptor,
const TiledHloComputation& tiled_hlo_computation,
const LaunchDimensions& launch_dimensions);
absl::StatusOr<EstimateRunTimeData> EstimateRunTimeForTiledFusion(
const HloFusionAdaptor& fusion_adaptor,
const LaunchDimensions& launch_dimensions,
absl::Span<const int64_t> output_tile_sizes);
absl::StatusOr<EstimateRunTimeData> EstimateRunTimeForTriton(
const HloInstruction* producer, const HloInstruction* consumer = nullptr);
absl::StatusOr<TiledRunTimeDataOrError> TryFindBestTilingForFusion(
const HloFusionAdaptor& fusion_adaptor);
private:
int64_t FlopsPerElement(const HloInstruction* instr) const;
int64_t GetShapeSizeRecursive(const Shape& shape) const;
const HloOpProfiles::HloOpProfile* hlo_op_profile_;
const se::DeviceDescription* device_info_;
HloFusionAnalysisCache* fusion_analysis_cache_;
HloCostAnalysis::ShapeSizeFunction shape_size_;
mlir::MLIRContext* mlir_context_;
};
}
}
#endif
#include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/fusions/triton.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/coalescing_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/instruction_fusion.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
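// Returns the number of FLOPs per output element of `instr`, computed by
// running GpuHloCostAnalysis on the single instruction and dividing its total
// flop count by the number of output elements (the first tuple shape is used
// for variadic reduces).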
int64_t GpuPerformanceModelWithIndexingAnalysis::FlopsPerElement(
const HloInstruction* instr) const {
GpuHloCostAnalysis::Options cost_analysis_options{
shape_size_,
{},
true};
GpuHloCostAnalysis cost_analysis(cost_analysis_options, device_info_);
TF_CHECK_OK(
cost_analysis.RevisitInstruction(const_cast<HloInstruction*>(instr)));
int64_t num_elements = [&] {
if (instr->opcode() == HloOpcode::kReduce && instr->shape().IsTuple()) {
return ShapeUtil::ElementsInRecursive(instr->shape().tuple_shapes(0));
}
return ShapeUtil::ElementsInRecursive(instr->shape());
}();
return cost_analysis.flop_count(*instr) / num_elements;
}
int64_t GpuPerformanceModelWithIndexingAnalysis::GetShapeSizeRecursive(
const Shape& shape) const {
CHECK(shape.IsArray() || shape.IsTuple());
if (shape.IsArray()) {
return shape_size_(shape);
}
int64_t total_size = 0;
for (const auto& element_shape : shape.tuple_shapes()) {
total_size += GetShapeSizeRecursive(element_shape);
}
return total_size;
}
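// Returns the number of index points in the iteration space of
// `indexing_map` (product of dimension and symbol range sizes). Falls back to
// the element count of `instr` if the map is undefined, and to 0 if the map
// is known to be empty.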
int64_t GetIterationSpaceSize(const IndexingMap& indexing_map,
const HloInstruction* instr) {
if (indexing_map.IsUndefined()) {
return ShapeUtil::ElementsInRecursive(instr->shape());
}
if (indexing_map.IsKnownEmpty()) {
return 0;
}
auto get_ranges_iteration_space_size =
[](const std::vector<Interval>& ranges) {
int64_t num_iters = 1;
for (const Interval& range : ranges) {
num_iters *= range.upper - range.lower + 1;
}
return num_iters;
};
return get_ranges_iteration_space_size(indexing_map.GetSymbolBounds()) *
get_ranges_iteration_space_size(indexing_map.GetDimensionBounds());
}
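// Estimates the run time of a single fusion. Operands outside the fusion
// contribute bytes read and DRAM read time; instructions inside the fusion
// contribute flops. Compute time and memory-access time are then combined
// into the final execution-time estimate.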
EstimateRunTimeData
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForFusion(
const HloFusionAnalysis& fusion_analysis, bool is_coalesced) {
auto& fusion_adaptor = fusion_analysis.fusion();
VLOG(5) << "EstimateRunTimeForFusion: " << fusion_adaptor.ToString();
auto roots = fusion_adaptor.GetRoots();
CHECK_EQ(roots.size(), 1)
<< "Indexing cost model doesn't support multi-output fusions.";
auto root_shape = roots.front().shape();
LaunchDimensions launch_dimensions =
EstimateFusionLaunchDimensions(fusion_analysis);
int64_t num_blocks = launch_dimensions.num_blocks();
auto grouped_fusion_indexing = ComputeGroupedOutputToInputIndexing(
fusion_adaptor, roots[0], mlir_context_);
int64_t flops = 0;
int64_t bytes_read = 0;
absl::Duration read_time = absl::ZeroDuration();
for (const auto& [instr, indexing_maps] : grouped_fusion_indexing) {
VLOG(10) << "instr: " << instr->name();
bool is_operand = !fusion_adaptor.ContainsInstruction(instr);
auto element_type = instr->shape().element_type();
int64_t n_bytes_total = 0;
for (const auto& indexing_map : indexing_maps) {
VLOG(10) << indexing_map.ToString();
int64_t num_iters = GetIterationSpaceSize(indexing_map, instr);
if (is_operand) {
int64_t type_size = ShapeUtil::ByteSizeOfPrimitiveType(element_type);
n_bytes_total += type_size * num_iters;
} else {
int64_t flops_per_element = FlopsPerElement(instr);
flops += flops_per_element * num_iters;
}
}
if (is_operand) {
int64_t operand_size = shape_size_(instr->shape());
int64_t n_bytes_net = std::min(operand_size, n_bytes_total);
bytes_read += n_bytes_total;
VLogOperandRead(instr, n_bytes_total, n_bytes_net, is_coalesced);
read_time +=
ReadTimeWithDRAMHeuristic(*device_info_, num_blocks, n_bytes_net,
n_bytes_total, element_type, is_coalesced);
}
}
int64_t bytes_written = GetShapeSizeRecursive(root_shape);
absl::Duration compute_time =
ComputeTime(*device_info_, flops, num_blocks,
launch_dimensions.num_threads_per_block());
absl::Duration write_time = WriteTime(*device_info_, bytes_written);
absl::Duration memory_access_time = read_time + write_time;
absl::Duration exec_time = CombineComputeAndMemoryAccessTime(
compute_time, memory_access_time,
GpuPerformanceModelOptions::PriorityFusion());
EstimateRunTimeData runtime_data = {flops, bytes_read, bytes_written,
read_time, write_time, compute_time,
exec_time};
VLOG(3) << "Runtime data for HLO fusion: " << fusion_adaptor.ToString()
<< "\n"
<< launch_dimensions.ToString() << "\n"
<< runtime_data.ToString();
return runtime_data;
}
EstimateRunTimeData
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForInstruction(
const HloInstruction* producer) {
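// A standalone bitcast reads and writes nothing, so its estimated cost is
// zero.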
if (producer->opcode() == HloOpcode::kBitcast) {
return EstimateRunTimeData{0,
0,
0,
absl::ZeroDuration(),
absl::ZeroDuration(),
absl::ZeroDuration(),
absl::ZeroDuration()};
}
auto fusion_analysis = AnalyzeFusion(*producer, *device_info_);
bool is_coalesced = IsReadCoalescedHeuristic(
fusion_analysis.GetEmitterFusionKind(), producer);
return EstimateRunTimeForFusion(fusion_analysis, is_coalesced);
}
EstimateRunTimeData
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForProducerConsumer(
const HloInstruction* producer, const HloInstruction* consumer) {
auto fusion_analysis =
AnalyzeProducerConsumerFusion(*producer, *consumer, *device_info_);
bool is_coalesced = IsReadCoalescedHeuristic(
fusion_analysis.GetEmitterFusionKind(), producer, consumer);
return EstimateRunTimeForFusion(fusion_analysis, is_coalesced);
}
GpuPerformanceModelWithIndexingAnalysis::RunTimes
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimes(
const HloInstruction* producer,
absl::Span<const HloInstruction* const> fused_consumers) {
auto producer_runtime = EstimateRunTimeForInstruction(producer);
absl::Duration time_unfused =
kKernelLaunchOverhead * (fused_consumers.size() + 1) +
producer_runtime.exec_time;
absl::Duration time_fused = kKernelLaunchOverhead * fused_consumers.size();
for (const auto& consumer : fused_consumers) {
time_unfused += EstimateRunTimeForInstruction(consumer).exec_time;
time_fused +=
EstimateRunTimeForProducerConsumer(producer, consumer).exec_time;
}
return {time_unfused, time_fused};
}
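// Estimates the run time of a tiled HLO computation. For every tiled
// instruction, the number of processed elements is its number of blocks times
// the tile size; operands outside the fusion contribute bytes read, while
// instructions inside the fusion contribute flops.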
EstimateRunTimeData
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTiledHloComputation(
const HloFusionAdaptor& fusion_adaptor,
const TiledHloComputation& tiled_hlo_computation,
const LaunchDimensions& launch_dimensions) {
absl::flat_hash_map<const HloInstruction*, int64_t> n_bytes_total_map;
int64_t flops = 0;
int64_t bytes_read = 0;
for (const auto& tiled_hlo : tiled_hlo_computation.instructions()) {
int64_t num_blocks = tiled_hlo->block_id_to_tile_offsets_indexing()
.GetDimensionBound(0)
.GetLoopTripCount();
int64_t num_elements = num_blocks * Product(tiled_hlo->tile_sizes());
const HloInstruction* hlo = tiled_hlo->hlo();
if (fusion_adaptor.ContainsInstruction(hlo)) {
flops += FlopsPerElement(hlo) * num_elements;
} else {
int64_t element_type_size =
ShapeUtil::ByteSizeOfPrimitiveType(hlo->shape().element_type());
int64_t tile_bytes_read = element_type_size * num_elements;
bytes_read += tile_bytes_read;
n_bytes_total_map[hlo] += tile_bytes_read;
}
}
int64_t num_blocks = launch_dimensions.num_blocks();
absl::Duration read_time = absl::ZeroDuration();
for (const auto& [hlo, n_bytes_total] : n_bytes_total_map) {
int64_t operand_size = shape_size_(hlo->shape());
int64_t n_bytes_net = std::min(operand_size, n_bytes_total);
read_time += ReadTimeWithDRAMHeuristic(
*device_info_, num_blocks, n_bytes_net, n_bytes_total,
hlo->shape().element_type(),
true);  // Assume coalesced reads.
}
int64_t bytes_written =
GetShapeSizeRecursive(tiled_hlo_computation.GetRoot()->hlo()->shape());
absl::Duration compute_time =
ComputeTime(*device_info_, flops, launch_dimensions.num_blocks(),
launch_dimensions.num_threads_per_block());
absl::Duration write_time = WriteTime(*device_info_, bytes_written);
absl::Duration memory_access_time = read_time + write_time;
absl::Duration exec_time = CombineComputeAndMemoryAccessTime(
compute_time, memory_access_time,
GpuPerformanceModelOptions::PriorityFusion());
return EstimateRunTimeData{flops,
bytes_read,
bytes_written,
read_time,
write_time,
compute_time,
exec_time};
}
absl::StatusOr<EstimateRunTimeData>
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTiledFusion(
const HloFusionAdaptor& fusion_adaptor,
const LaunchDimensions& launch_dimensions,
absl::Span<const int64_t> tile_sizes) {
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeFusion(fusion_adaptor, mlir_context_);
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&analysis_or_error)) {
return absl::FailedPreconditionError(absl::StrCat(
"SymbolicTileAnalysis failed. ", fusion_decision->Explain()));
}
SymbolicTileAnalysis analysis =
std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
TF_ASSIGN_OR_RETURN(TiledHloComputation tiled_hlo_computation,
analysis.ComputeTiledHloInstructions(tile_sizes));
return EstimateRunTimeForTiledHloComputation(
fusion_adaptor, tiled_hlo_computation, launch_dimensions);
}
absl::StatusOr<EstimateRunTimeData>
GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTriton(
const HloInstruction* producer, const HloInstruction* consumer) {
const auto& fusion_analysis =
(consumer == nullptr) ? fusion_analysis_cache_->Get(*producer)
: fusion_analysis_cache_->Get(*producer, *consumer);
auto launch_config = TritonFusion(fusion_analysis).launch_config();
if (!launch_config.has_value()) {
return absl::InvalidArgumentError(
"Could not get launch config for Triton fusion.");
}
return EstimateRunTimeForTiledFusion(
fusion_analysis.fusion(), launch_config->launch_dimensions,
launch_config->block_level_parameters.output_tile_sizes);
}
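// Heuristic that picks the number of warps for a Triton block based on the
// number of elements in the output tile.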
int64_t GetNumWarps(int64_t tile_size) {
if (tile_size <= 512) return 1;
if (tile_size <= 1024) return 2;
if (tile_size <= 16384) return 4;
if (tile_size <= 32768) return 8;
if (tile_size <= 65536) return 16;
return 32;
}
LaunchDimensions GetLaunchDimensionsForTiledFusion(
const TiledHloComputation& tiled_hlo_computation) {
const auto* tiled_root = tiled_hlo_computation.GetRoot();
int64_t num_blocks = tiled_root->block_id_to_tile_offsets_indexing()
.GetDimensionBound(0)
.GetLoopTripCount();
int64_t num_warps = GetNumWarps(Product(tiled_root->tile_sizes()));
return {static_cast<uint64_t>(num_blocks),
static_cast<uint64_t>(num_warps * WarpSize())};
}
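// Enumerates the tilings proposed by SymbolicTileAnalysis, estimates the run
// time of each one, and returns the block-level parameters of the fastest
// tiling, or a FusionDecision if the fusion cannot be tiled symbolically.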
absl::StatusOr<TiledRunTimeDataOrError>
GpuPerformanceModelWithIndexingAnalysis::TryFindBestTilingForFusion(
const HloFusionAdaptor& fusion_adaptor) {
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeFusion(fusion_adaptor, mlir_context_);
if (const auto* fusion_decision =
std::get_if<FusionDecision>(&analysis_or_error)) {
return *fusion_decision;
}
SymbolicTileAnalysis analysis =
std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
TF_ASSIGN_OR_RETURN(auto tilings, analysis.GetGoodTilings());
std::optional<TiledRunTimeData> best_tiled_run_time_data;
for (const auto& tiling : tilings) {
TF_ASSIGN_OR_RETURN(TiledHloComputation tiled_hlo_computation,
analysis.ComputeTiledHloInstructions(tiling));
LaunchDimensions launch_dimensions =
GetLaunchDimensionsForTiledFusion(tiled_hlo_computation);
EstimateRunTimeData estimate_run_time_data =
EstimateRunTimeForTiledHloComputation(
fusion_adaptor, tiled_hlo_computation, launch_dimensions);
if (!best_tiled_run_time_data.has_value() ||
estimate_run_time_data.exec_time <
best_tiled_run_time_data->runtime_data.exec_time) {
BlockLevelParameters block_level_parameters;
block_level_parameters.output_tile_sizes =
std::vector<int64_t>(tiling.begin(), tiling.end());
block_level_parameters.num_warps =
launch_dimensions.num_threads_per_block() / WarpSize();
best_tiled_run_time_data =
TiledRunTimeData{estimate_run_time_data, block_level_parameters};
}
}
if (!best_tiled_run_time_data.has_value()) {
return FusionDecision("No valid tilings found.");
}
return *best_tiled_run_time_data;
}
}
} | #include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include <cstdint>
#include <memory>
#include <variant>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
class GpuIndexingPerformanceModelTest : public HloTestBase {
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
mlir::MLIRContext mlir_context_;
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
HloFusionAnalysisCache fusion_analysis_cache_{device_info_};
GpuPerformanceModelWithIndexingAnalysis indexing_cost_model_{
&device_info_, &fusion_analysis_cache_, ShapeSizeBytesFunction(),
&mlir_context_};
GpuIndexingPerformanceModelTest() : HloTestBase() {}
};
TEST_F(GpuIndexingPerformanceModelTest, BroadcastElementwise) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
R"(
HloModule extracted
ENTRY entry_computation {
param_0 = f32[32]{0} parameter(0)
broadcast = f32[32,1,768]{2,1,0} broadcast(param_0), dimensions={0}
param_1 = f32[32,1,768]{2,1,0} parameter(1)
ROOT multiply = f32[32,1,768]{2,1,0} multiply(broadcast, param_1)
}
)"));
auto producer =
module->entry_computation()->GetInstructionWithName("broadcast");
auto consumer =
module->entry_computation()->GetInstructionWithName("multiply");
auto runtime_data = indexing_cost_model_.EstimateRunTimeForProducerConsumer(
producer, consumer);
EXPECT_EQ(runtime_data.flops, 73728);
EXPECT_EQ(runtime_data.bytes_written, 98304);
EXPECT_NEAR(absl::ToInt64Nanoseconds(runtime_data.write_time), 128, 2);
EXPECT_NEAR(absl::ToInt64Nanoseconds(runtime_data.exec_time), 267, 2);
}
TEST_F(GpuIndexingPerformanceModelTest, Bitcast) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
R"(
HloModule m
ENTRY entry_computation {
param_0 = bf16[4,8,65,128]{3,2,1,0} parameter(0)
ROOT bitcast = bf16[8,4,65,128]{3,2,0,1} bitcast(param_0)
}
)"));
auto instruction =
module->entry_computation()->GetInstructionWithName("bitcast");
auto runtime_data =
indexing_cost_model_.EstimateRunTimeForInstruction(instruction);
EXPECT_EQ(runtime_data.flops, 0);
EXPECT_EQ(runtime_data.bytes_written, 0);
EXPECT_EQ(runtime_data.write_time, absl::ZeroDuration());
EXPECT_EQ(runtime_data.exec_time, absl::ZeroDuration());
}
TEST_F(GpuIndexingPerformanceModelTest, Reduce) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add.0 = f32[] add(param_0, param_1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[32]{0} reduce(param_0.3, constant), dimensions={1}, to_apply=add
}
)"));
auto instruction = module->entry_computation()->root_instruction();
auto runtime_data =
indexing_cost_model_.EstimateRunTimeForInstruction(instruction);
EXPECT_EQ(runtime_data.flops, 3744);
EXPECT_EQ(runtime_data.bytes_written, 128);
EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.write_time), 0, 1);
EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.exec_time), 29, 1);
}
TEST_F(GpuIndexingPerformanceModelTest, VariadicReduce) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(
R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
param_2 = f32[] parameter(2)
param_3 = f32[] parameter(3)
add.0 = f32[] add(param_0, param_2)
add.1 = f32[] add(param_1, param_3)
ROOT t = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
param_1.3 = f32[32,40]{1,0} parameter(1)
param_2.2 = f32[] parameter(2)
constant = f32[] constant(0)
ROOT reduce = (f32[32]{0}, f32[32]{0}) reduce(param_0.3, param_1.3, param_2.2, constant), dimensions={1}, to_apply=add
}
)"));
auto instruction = module->entry_computation()->root_instruction();
auto runtime_data =
indexing_cost_model_.EstimateRunTimeForInstruction(instruction);
EXPECT_EQ(runtime_data.flops, 7488);
EXPECT_EQ(runtime_data.bytes_written, 256);
EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.write_time), 0, 1);
EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.exec_time), 58, 1);
}
TEST_F(GpuIndexingPerformanceModelTest,
TritonSoftmaxFusionInstructionIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[512,911]{1,0} parameter(0)
param_1 = f32[911]{0} parameter(1)
broadcast_0 = f32[512,911]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[512,911]{1,0} multiply(param_0, broadcast_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[512]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[512,911]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[512,911]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[512,911]{1,0} parameter(0)
param_1 = f32[911]{0} parameter(1)
ROOT triton_softmax = f32[512,911]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","911"],"num_warps":"2"}}}
}
)"));
TF_ASSERT_OK_AND_ASSIGN(auto runtime_data,
indexing_cost_model_.EstimateRunTimeForTriton(
module->entry_computation()->root_instruction()));
constexpr int64_t kParam0SizeBytes = 512 * 911 * 4;
constexpr int64_t kParam1SizeBytes = 911 * 4;
constexpr int64_t kOutputSizeBytes = 512 * 911 * 4;
constexpr int64_t kExpectedBytesRead =
kParam0SizeBytes + 512 * kParam1SizeBytes;
EXPECT_EQ(runtime_data.bytes_read, kExpectedBytesRead);
EXPECT_EQ(runtime_data.bytes_written, kOutputSizeBytes);
EXPECT_NEAR(absl::ToDoubleMicroseconds(runtime_data.exec_time), 5, 1);
}
TEST_F(GpuIndexingPerformanceModelTest,
TritonSoftmaxProducerConsumerFusionIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
fusion {
param_0 = f32[512,911] parameter(0)
param_1 = f32[911] parameter(1)
broadcast = f32[512,911] broadcast(param_1), dimensions={1}
ROOT multiply = f32[512,911] multiply(param_0, broadcast)
}
triton_softmax_computation {
param_0 = f32[512,911] parameter(0)
constant_0 = f32[] constant(0)
reduce_0 = f32[512] reduce(param_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[512,911] broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[512,911] multiply(param_0, broadcast_4)
}
ENTRY main {
param_0 = f32[512,911] parameter(0)
param_1 = f32[911] parameter(1)
fusion.1 = f32[512,911] fusion(param_0, param_1), kind=kLoop, calls=fusion
ROOT triton_softmax = f32[512,911] fusion(fusion.1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","911"],"num_warps":"2"}}}
}
)"));
auto consumer = module->entry_computation()->root_instruction();
auto producer = consumer->operand(0);
TF_ASSERT_OK_AND_ASSIGN(
auto runtime_data,
indexing_cost_model_.EstimateRunTimeForTriton(producer, consumer));
constexpr int64_t kParam0SizeBytes = 512 * 911 * 4;
constexpr int64_t kParam1SizeBytes = 911 * 4;
constexpr int64_t kOutputSizeBytes = 512 * 911 * 4;
constexpr int64_t kExpectedBytesRead =
kParam0SizeBytes + 512 * kParam1SizeBytes;
EXPECT_EQ(runtime_data.bytes_read, kExpectedBytesRead);
EXPECT_EQ(runtime_data.bytes_written, kOutputSizeBytes);
EXPECT_NEAR(absl::ToDoubleMicroseconds(runtime_data.exec_time), 5, 1);
}
TEST_F(GpuIndexingPerformanceModelTest,
EstimateBestTiling_TritonSoftmax_IsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule m
add {
Arg_0 = f32[] parameter(0)
Arg_1 = f32[] parameter(1)
ROOT add = f32[] add(Arg_0, Arg_1)
}
triton_softmax_computation {
param_0 = f32[512,911]{1,0} parameter(0)
param_1 = f32[911]{0} parameter(1)
broadcast_0 = f32[512,911]{1,0} broadcast(param_1), dimensions={1}
multiply_0 = f32[512,911]{1,0} multiply(param_0, broadcast_0)
constant_0 = f32[] constant(0)
reduce_0 = f32[512]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add
broadcast_4 = f32[512,911]{1,0} broadcast(reduce_0), dimensions={0}
ROOT multiply = f32[512,911]{1,0} multiply(multiply_0, broadcast_4)
}
ENTRY main {
param_0 = f32[512,911]{1,0} parameter(0)
param_1 = f32[911]{0} parameter(1)
ROOT triton_softmax = f32[512,911]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}}
}
)"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
TF_ASSERT_OK_AND_ASSIGN(
auto tiling_result,
indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor));
ASSERT_TRUE(std::holds_alternative<TiledRunTimeData>(tiling_result));
auto tiled_runtime_data = std::get<TiledRunTimeData>(tiling_result);
constexpr int64_t kParam0SizeBytes = 512 * 911 * 4;
constexpr int64_t kParam1SizeBytes = 911 * 4;
constexpr int64_t kOutputSizeBytes = 512 * 911 * 4;
constexpr int64_t kExpectedBytesRead =
kParam0SizeBytes + 128 * kParam1SizeBytes;
EXPECT_THAT(tiled_runtime_data.block_level_parameters.output_tile_sizes,
ElementsAre(4, 911));
EXPECT_EQ(tiled_runtime_data.block_level_parameters.num_warps, 4);
EXPECT_EQ(tiled_runtime_data.runtime_data.bytes_read, kExpectedBytesRead);
EXPECT_EQ(tiled_runtime_data.runtime_data.bytes_written, kOutputSizeBytes);
EXPECT_NEAR(
absl::ToDoubleMicroseconds(tiled_runtime_data.runtime_data.exec_time), 5,
1);
}
TEST_F(
GpuIndexingPerformanceModelTest,
EstimateRunTimeForTiledFusion_NumberOfTilesLargerThanInt32Max_IsSupported) {
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"(
HloModule softmax
max_computation {
arg_0 = f16[] parameter(0)
arg_1 = f16[] parameter(1)
ROOT maximum = f16[] maximum(arg_0, arg_1)
}
softmax {
param_0 = f16[65538,32768]{1,0} parameter(0)
constant_neg_inf = f16[] constant(-inf)
reduce = f16[65538]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation
broadcast = f16[65538,32768]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = f16[65538,32768]{1,0} subtract(param_0, broadcast)
}
ENTRY main {
param_0 = f16[65538,32768]{1,0} parameter(0)
ROOT fusion = f16[65538,32768]{1,0} fusion(param_0), kind=kCustom, calls=softmax
}
)"));
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(
module->entry_computation()->root_instruction());
LaunchDimensions launch_dimensions{65538LL * 32768LL, 32};
TF_ASSERT_OK_AND_ASSIGN(
auto runtime_data,
indexing_cost_model_.EstimateRunTimeForTiledFusion(
*fusion_adaptor, launch_dimensions, {1, 1}));
EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.read_time), 183, 1);
EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.compute_time), 39, 1);
EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.exec_time), 185, 1);
}
}
}
} | 2,152 |
#ifndef XLA_SERVICE_GPU_MODEL_ANALYTICAL_LATENCY_ESTIMATOR_H_
#define XLA_SERVICE_GPU_MODEL_ANALYTICAL_LATENCY_ESTIMATOR_H_
#include <memory>
#include <optional>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla.pb.h"
namespace xla {
namespace gpu {
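// Latency estimator for the latency-hiding scheduler that prices async
// collective pairs with the GPU collective performance model and regular
// instructions with the analytical GPU performance model, deferring to the
// wrapped estimator for everything else.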
class AnalyticalLatencyEstimator : public LatencyEstimator {
public:
AnalyticalLatencyEstimator(
const SchedulerConfig& config,
std::unique_ptr<LatencyEstimator> latency_estimator,
const se::DeviceDescription& gpu_info,
HloCostAnalysis::ShapeSizeFunction shape_size_function,
HloComputation* computation);
TimeCost GetLatencyBetween(const HloGraphNode& from,
const HloGraphNode& target) const override;
TimeCost NodeCost(const HloInstruction* instr) const override;
int CyclesPerMicrosecond() const override {
return latency_estimator_->CyclesPerMicrosecond();
}
static constexpr TimeCost kLowCost = 1.0;
static constexpr TimeCost kLowLatency = 1.0;
private:
const SchedulerConfig config_;
const se::DeviceDescription& gpu_info_;
std::optional<GpuHloCostAnalysis> cost_analysis_;
std::unique_ptr<LatencyEstimator> latency_estimator_;
HloCostAnalysis::ShapeSizeFunction shape_size_function_;
};
}
}
#endif
#include "xla/service/gpu/model/analytical_latency_estimator.h"
#include <memory>
#include <utility>
#include "absl/log/log.h"
#include "absl/time/time.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/service/gpu/model/gpu_collective_performance_model.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
LatencyEstimator::TimeCost AnalyticalLatencyEstimator::GetLatencyBetween(
const HloGraphNode& from, const HloGraphNode& target) const {
const HloOpcode from_op = from.GetInstr().opcode();
if (!config_.schedule_send_recvs &&
(from_op == HloOpcode::kSend || from_op == HloOpcode::kRecv)) {
return kLowLatency;
}
if (IsAsyncPair(from, target)) {
double coll_time = absl::ToDoubleMicroseconds(
GpuPerformanceWithCollectiveModel::ComputeCollectiveTime(
from.GetInstr(), &*cost_analysis_, gpu_info_));
VLOG(10) << "Analytical estimator calculated latency between "
<< from.GetInstr().name() << " and " << target.GetInstr().name()
<< " to be: " << coll_time << " us.";
return coll_time;
}
return latency_estimator_->GetLatencyBetween(from, target);
}
LatencyEstimator::TimeCost AnalyticalLatencyEstimator::NodeCost(
const HloInstruction* instr) const {
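// Async collective start/done ops themselves are modeled as cheap; their real
// cost is accounted for as latency between the pair in GetLatencyBetween().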
if (hlo_query::IsAsyncCollectiveStartOp(instr, true) ||
hlo_query::IsAsyncCollectiveDoneOp(instr, true)) {
return kLowCost;
}
absl::Duration total_estimated_time =
GpuPerformanceModel::EstimateRunTimeForInstruction(
instr, &*cost_analysis_,
GpuPerformanceModelOptions::ForModule(instr->GetModule()))
.exec_time;
LatencyEstimator::TimeCost cost_in_us =
absl::ToDoubleMicroseconds(total_estimated_time);
VLOG(10) << "Analytical estimator calculated cost for: " << instr->name()
<< ". Cost: " << cost_in_us;
return cost_in_us;
}
AnalyticalLatencyEstimator::AnalyticalLatencyEstimator(
const SchedulerConfig& config,
std::unique_ptr<LatencyEstimator> latency_estimator,
const se::DeviceDescription& gpu_info,
HloCostAnalysis::ShapeSizeFunction shape_size_function,
HloComputation* computation)
: config_(config),
gpu_info_(gpu_info),
latency_estimator_(std::move(latency_estimator)),
shape_size_function_(shape_size_function) {
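// Run the cost analysis over the whole computation up front so that
// NodeCost() can later query per-instruction estimates.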
cost_analysis_.emplace(
GpuHloCostAnalysis::Options{shape_size_function_,
{},
true},
&gpu_info_);
TF_CHECK_OK(computation->Accept(&cost_analysis_.value()));
}
}
} | #include "xla/service/gpu/model/analytical_latency_estimator.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/gpu/tests/gpu_codegen_test.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/latency_hiding_scheduler.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
int64_t GetInstructionIndexInSchedule(
absl::Span<HloInstruction* const> schedule, absl::string_view hlo_name) {
return std::find_if(schedule.begin(), schedule.end(),
[hlo_name](HloInstruction* instruction) {
return instruction->name() == hlo_name;
}) -
schedule.begin();
}
SchedulerConfig GetDefaultSchedulerConfig() {
SchedulerConfig scheduler_config;
return scheduler_config;
}
absl::StatusOr<bool> RunScheduler(
HloModule* module, const SchedulerConfig& sched_config,
std::unique_ptr<LatencyEstimator> latency_estimator =
std::make_unique<ApproximateLatencyEstimator>()) {
HloCostAnalysis::ShapeSizeFunction shape_size_bytes =
[&shape_size_bytes](const Shape& shape) -> int64_t {
int64_t shape_size = 0;
if (shape.IsTuple()) {
for (auto& sub_shape : shape.tuple_shapes()) {
shape_size += shape_size_bytes(sub_shape);
}
return shape_size;
}
return ShapeUtil::ByteSizeOfElements(shape);
};
auto async_tracker = std::make_unique<AsyncTracker>(sched_config);
auto scheduler_core = std::make_unique<DefaultSchedulerCore>(
shape_size_bytes, async_tracker.get(), latency_estimator.get(),
sched_config);
TF_ASSIGN_OR_RETURN(
bool value, LatencyHidingScheduler(
std::move(latency_estimator), std::move(async_tracker),
std::move(scheduler_core), shape_size_bytes)
.Run(module));
return value;
}
class AnalyticalLatencyHidingSchedulerTest : public GpuCodegenTest {
public:
absl::StatusOr<std::unique_ptr<HloModule>> ParseHloText(
absl::string_view hlo_string) {
return ParseAndReturnVerifiedModule(hlo_string, GetModuleConfigForTest());
}
se::CudaComputeCapability GetCudaComputeCapability() {
return backend()
.default_stream_executor()
->GetDeviceDescription()
.cuda_compute_capability();
}
HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
};
TEST_F(AnalyticalLatencyHidingSchedulerTest, TestAnalyticalLatencyEstimator) {
if (!GetCudaComputeCapability().IsAtLeast(
se::CudaComputeCapability::PASCAL_)) {
GTEST_SKIP() << "This test is for Pascal+ GPUs.";
}
const se::DeviceDescription dev_info =
backend().default_stream_executor()->GetDeviceDescription();
absl::string_view hlo_string = R"(
HloModule module, is_scheduled=true
region_20.995 {
Arg_1.997 = f32[] parameter(1)
Arg_0.996 = f32[] parameter(0)
ROOT add.589 = f32[] add(Arg_0.996, Arg_1.997)
}
ENTRY entry {
p0 = f32[16,64,256]{2,1,0} parameter(0)
p1 = f32[16,64,256]{2,1,0} parameter(1)
p2 = f32[1024,2048,2048]{2,1,0} parameter(2)
p3 = f32[2048,2048,2048]{2,1,0} parameter(3)
all-reduce-start.1 = f32[1024,2048,2048]{2,1,0} all-reduce-start(p2), channel_id=8, replica_groups={{0}}, to_apply=region_20.995, backend_config="{\"is_sync\":false}"
all-reduce-start.2 = f32[2048,2048,2048]{2,1,0} all-reduce-start(p3), channel_id=10, replica_groups={{0}}, to_apply=region_20.995, backend_config="{\"is_sync\":false}"
all-reduce-done.1 = f32[1024,2048,2048]{2,1,0} all-reduce-done(all-reduce-start.1)
all-reduce-done.2 = f32[2048,2048,2048]{2,1,0} all-reduce-done(all-reduce-start.2)
conv0 = f32[16,256,256]{2,1,0} convolution(p0, p1),
window={size=16 stride=15 lhs_dilate=16}, dim_labels=0fb_0io->0fb
ROOT tuple.2 = (f32[16,256,256]{2,1,0}, f32[1024,2048,2048]{2,1,0}, f32[2048,2048,2048]{2,1,0}) tuple(conv0, all-reduce-done.1, all-reduce-done.2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto hlo_module, ParseHloText(hlo_string));
hlo_module->mutable_config().set_num_partitions(8);
HloSchedule& module_schedule = hlo_module->schedule();
EXPECT_TRUE(hlo_module->has_entry_computation());
auto scheduler_config = GetDefaultSchedulerConfig();
auto latency_estimator = std::make_unique<AnalyticalLatencyEstimator>(
scheduler_config, std::make_unique<ApproximateLatencyEstimator>(),
dev_info, ShapeSizeBytesFunction(), hlo_module->entry_computation());
EXPECT_TRUE(RunScheduler(hlo_module.get(), scheduler_config,
std::move(latency_estimator))
.ok());
EXPECT_TRUE(hlo_module->has_entry_computation());
std::vector<HloInstruction*> new_instruction_schedule =
module_schedule.sequence(hlo_module->entry_computation()).instructions();
int64_t ar2_index = GetInstructionIndexInSchedule(new_instruction_schedule,
"all-reduce-start.2");
int64_t ar1_done_index = GetInstructionIndexInSchedule(
new_instruction_schedule, "all-reduce-done.1");
int64_t conv0_index =
GetInstructionIndexInSchedule(new_instruction_schedule, "conv0");
EXPECT_LT(ar1_done_index, ar2_index);
EXPECT_LT(ar2_index, conv0_index);
}
}
}
} | 2,153 |
#ifndef XLA_SERVICE_GPU_MODEL_SYMBOLIC_TILE_H_
#define XLA_SERVICE_GPU_MODEL_SYMBOLIC_TILE_H_
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "llvm/ADT/DenseMap.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
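// A constraint over affine expressions kept in disjunctive normal form: a
// disjunction of conjunctions, where every conjunct restricts an affine
// expression to an interval. An expression can also be marked unsatisfiable.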
class ConstraintExpression {
public:
using ConjointConstraints = llvm::DenseMap<mlir::AffineExpr, Interval>;
static ConstraintExpression And(ConstraintExpression first,
ConstraintExpression second);
static ConstraintExpression Or(ConstraintExpression first,
ConstraintExpression second);
static ConstraintExpression GetUnsatisfiableConstraintExpression() {
ConstraintExpression unsatisfiable;
unsatisfiable.is_satisfiable_ = false;
return unsatisfiable;
}
void Or(ConjointConstraints conjunction);
void And(ConjointConstraints conjunction);
bool is_satisfiable() const { return is_satisfiable_; }
bool IsAlwaysSatisfied() const {
return is_satisfiable_ && disjoint_conjoint_constraints_.empty();
}
absl::Span<ConjointConstraints const> DisjointConjointConstraints() const {
return disjoint_conjoint_constraints_;
}
std::string ToString(
const AffineMapPrinter& printer = AffineMapPrinter()) const;
void Print(std::ostream& out, const AffineMapPrinter& printer) const;
private:
bool is_satisfiable_ = true;
std::vector<ConjointConstraints> disjoint_conjoint_constraints_;
};
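// A symbolic tiling of an instruction's output: the offsets, sizes, and
// strides of the tile are affine functions of the tile parameters, valid
// whenever `constraints()` is satisfied.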
class SymbolicTile {
public:
static std::optional<SymbolicTile> FromIndexingMap(IndexingMap indexing_map);
std::string RtVarsToString(
const AffineMapPrinter& printer = AffineMapPrinter()) const;
std::string ToString(
const AffineMapPrinter& printer = AffineMapPrinter()) const;
void Print(std::ostream& out, const AffineMapPrinter& printer) const;
mlir::AffineMap offset_map() const;
mlir::AffineMap size_map() const;
mlir::AffineMap stride_map() const;
const ConstraintExpression& constraints() const {
CHECK(constraints_.is_satisfiable());
return constraints_;
}
bool is_satisfiable() const { return constraints_.is_satisfiable(); }
const IndexingMap& tile_map() const { return tile_map_; }
template <typename Sink>
friend void AbslStringify(Sink& sink, const SymbolicTile& tile) {
sink.Append(tile.ToString());
}
private:
IndexingMap tile_map_;
ConstraintExpression constraints_;
explicit SymbolicTile(IndexingMap tile_map, ConstraintExpression constraints)
: tile_map_(std::move(tile_map)), constraints_(std::move(constraints)) {}
};
}
}
#endif
#include "xla/service/gpu/model/symbolic_tile.h"
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_map.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineConstantExpr;
using ::mlir::AffineDimExpr;
using ::mlir::AffineExpr;
using ::mlir::AffineExprKind;
using ::mlir::AffineMap;
using ::mlir::AffineSymbolExpr;
using ::mlir::getAffineConstantExpr;
using ::mlir::getAffineDimExpr;
using ::mlir::MLIRContext;
using ConjointConstraints = ConstraintExpression::ConjointConstraints;
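// Rewrites `expressions` so that dimension variables become symbols: the
// existing symbols of `indexing_map` are shifted up by the dimension count,
// then every d_i is replaced with s_i.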
std::vector<AffineExpr> DimsToSymbols(std::vector<AffineExpr> expressions,
const IndexingMap& indexing_map) {
MLIRContext* mlir_context = indexing_map.GetMLIRContext();
for (AffineExpr& expression : expressions) {
expression =
expression.shiftSymbols(indexing_map.GetSymbolCount(),
indexing_map.GetDimensionCount());
}
llvm::DenseMap<AffineExpr, AffineExpr> dim_to_symbol_map;
for (int i = 0; i < indexing_map.GetDimensionCount(); i++) {
dim_to_symbol_map[getAffineDimExpr(i, mlir_context)] =
getAffineSymbolExpr(i, mlir_context);
}
for (AffineExpr& expression : expressions) {
expression = expression.replace(dim_to_symbol_map);
}
return expressions;
}
AffineMap SubstituteAllIndicesAndRangeVarSymbolsWithSameValue(
AffineMap affine_map, AffineExpr value, int num_range_vars) {
CHECK_LE(num_range_vars, affine_map.getNumSymbols());
MLIRContext* mlir_context = affine_map.getContext();
int64_t num_dims = affine_map.getNumDims();
int64_t num_symbols = affine_map.getNumSymbols();
llvm::DenseMap<AffineExpr, AffineExpr> indices;
for (int64_t i = 0; i < num_dims; ++i) {
indices[getAffineDimExpr(i, mlir_context)] = value;
}
for (int64_t i = 0; i < num_range_vars; ++i) {
indices[getAffineSymbolExpr(i, mlir_context)] = value;
}
return simplifyAffineMap(affine_map.replace(indices, num_dims, num_symbols));
}
struct SizeAndStrideExpression {
AffineExpr size;
AffineExpr stride;
ConstraintExpression constraints;
SizeAndStrideExpression(
AffineExpr size, AffineExpr stride,
ConstraintExpression constraints = ConstraintExpression())
: size(std::move(size)),
stride(std::move(stride)),
constraints(std::move(constraints)) {}
};
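// Extracts size and stride from an expression of the form `lhs mod modulus`
// where `lhs` is a plain dimension. The result is valid under the constraint
// that the tile size along that dimension either divides the modulus or is a
// multiple of it.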
std::optional<SizeAndStrideExpression> ExtractSizeAndStrideFromMod(
AffineExpr lhs, AffineExpr modulus) {
CHECK(modulus.getKind() == AffineExprKind::Constant);
if (auto dim_expr = llvm::dyn_cast<mlir::AffineDimExpr>(lhs)) {
AffineExpr size =
dim_expr - mlir::getAffineBinaryOpExpr(AffineExprKind::FloorDiv,
dim_expr - 1, modulus) *
modulus;
AffineExpr tile_size_expr =
getAffineSymbolExpr(dim_expr.getPosition(), lhs.getContext());
Interval zero_interval{0, 0};
ConstraintExpression constraints;
constraints.And(
{{tile_size_expr % modulus, zero_interval}});
constraints.Or(
{{modulus % tile_size_expr, zero_interval}});
return SizeAndStrideExpression(
size, getAffineConstantExpr(1, lhs.getContext()),
std::move(constraints));
}
return std::nullopt;
}
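// Handles expressions of the form `d_i floordiv den` with a constant
// denominator: the derived size is (d_i + den - 1) floordiv den (a ceiling
// division) and the stride is 1.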
std::optional<SizeAndStrideExpression> ExtractSizeAndStrideFromFloorDiv(
AffineExpr num, AffineExpr den) {
if (den.getKind() != AffineExprKind::Constant) {
return std::nullopt;
}
if (auto dim_expr = llvm::dyn_cast<mlir::AffineDimExpr>(num)) {
AffineExpr size = mlir::getAffineBinaryOpExpr(AffineExprKind::FloorDiv,
dim_expr + (den - 1), den);
return SizeAndStrideExpression(
size, getAffineConstantExpr(1, num.getContext()));
}
return std::nullopt;
}
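// Recursively flattens a (possibly nested) sum of affine expressions into the
// list of its summands.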
void DestructureSummationImpl(AffineExpr expr,
std::vector<AffineExpr>& summands) {
switch (expr.getKind()) {
case AffineExprKind::Add: {
const auto add = llvm::cast<mlir::AffineBinaryOpExpr>(expr);
DestructureSummationImpl(add.getLHS(), summands);
DestructureSummationImpl(add.getRHS(), summands);
break;
}
default:
summands.push_back(expr);
break;
}
}
std::vector<AffineExpr> DestructureSummation(AffineExpr expr) {
std::vector<AffineExpr> summands;
DestructureSummationImpl(expr, summands);
return summands;
}
std::optional<SizeAndStrideExpression> ExtractSizeAndStride(
AffineExpr strided_indexing, absl::Span<Interval const> dimension_intervals,
absl::Span<Interval const> symbol_intervals);
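// Splits `summation` into its summands and extracts a size and stride for
// each of them. Returns std::nullopt if any summand cannot be handled.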
std::optional<std::vector<SizeAndStrideExpression>>
ExtractSizesAndStridesFromMultivariateSummation(
AffineExpr summation, absl::Span<Interval const> dimension_intervals,
absl::Span<Interval const> symbol_intervals) {
std::vector<AffineExpr> summands = DestructureSummation(summation);
std::vector<SizeAndStrideExpression> sizes_and_strides;
sizes_and_strides.reserve(summands.size());
for (AffineExpr summand : summands) {
std::optional<SizeAndStrideExpression> maybe_size_and_stride =
ExtractSizeAndStride(summand, dimension_intervals, symbol_intervals);
if (!maybe_size_and_stride.has_value()) {
VLOG(1) << "Couldn't extract size and stride from "
<< AffineMapPrinter().ToString(summand);
return std::nullopt;
}
sizes_and_strides.push_back(*maybe_size_and_stride);
}
return sizes_and_strides;
}
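// The combined size of a summation is the product of the sizes of its
// summands.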
AffineExpr CombineSizes(
absl::Span<SizeAndStrideExpression const> sizes_and_strides) {
CHECK(!sizes_and_strides.empty());
AffineExpr product =
getAffineConstantExpr(1, sizes_and_strides[0].size.getContext());
for (const SizeAndStrideExpression& size_and_stride : sizes_and_strides) {
product = product * size_and_stride.size;
}
return product;
}
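// Builds an affine expression that evaluates to `true_expr` when
// `eq_param` != 1 and to `false_expr` when `eq_param` == 1, assuming
// 1 <= eq_param <= eq_param_inclusive_upper_bound. The selector
// (b + 1 - eq_param) floordiv b equals 1 exactly when eq_param == 1.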
AffineExpr IfNeqOne(AffineExpr eq_param, AffineExpr true_expr,
AffineExpr false_expr,
int64_t eq_param_inclusive_upper_bound) {
AffineExpr b = getAffineConstantExpr(eq_param_inclusive_upper_bound,
eq_param.getContext());
AffineExpr condition = mlir::getAffineBinaryOpExpr(AffineExprKind::FloorDiv,
b + 1 - eq_param, b);
return condition * false_expr + (1 - condition) * true_expr;
}
void SortByStride(std::vector<SizeAndStrideExpression>& sizes_and_strides) {
absl::c_sort(sizes_and_strides, [](const SizeAndStrideExpression& sas1,
const SizeAndStrideExpression& sas2) {
int64_t stride1 = llvm::cast<AffineConstantExpr>(sas1.stride).getValue();
int64_t stride2 = llvm::cast<AffineConstantExpr>(sas2.stride).getValue();
return stride1 < stride2;
});
}
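// Returns the number of values a size expression can take: the value itself
// for a constant, or `upper bound + 1` for a dimension whose lower bound is
// 0. Returns std::nullopt if the dimension's lower bound is not 0.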
std::optional<int64_t> TryGetSizeExpressionRangeSize(
AffineExpr size, absl::Span<Interval const> dimension_intervals) {
CHECK(size.getKind() == AffineExprKind::Constant ||
size.getKind() == AffineExprKind::DimId);
if (auto dimension = llvm::dyn_cast<AffineDimExpr>(size)) {
const Interval& interval = dimension_intervals.at(dimension.getPosition());
if (interval.lower != 0) {
VLOG(1) << "Attempted to combine strides but got dimension "
<< AffineMapPrinter().ToString(dimension) << " with lower bound "
<< interval.lower << " != 0";
return std::nullopt;
}
return interval.upper + 1;
}
return llvm::cast<AffineConstantExpr>(size).getValue();
}
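// Combines the strides of the given size/stride pairs into a single stride
// expression. Requires constant strides, sizes that are constants or
// dimensions, and that, after sorting by stride, each stride equals the
// previous stride times the range of the previous size. The result is a
// nested select that yields the stride of the smallest-stride term whose
// tile size is not 1, or 0 if every tile size is 1.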
std::optional<AffineExpr> CombineStrides(
std::vector<SizeAndStrideExpression> sizes_and_strides,
absl::Span<Interval const> dimension_intervals) {
CHECK(!sizes_and_strides.empty());
for (const SizeAndStrideExpression& size_and_stride : sizes_and_strides) {
if (size_and_stride.stride.getKind() != AffineExprKind::Constant) {
VLOG(1) << "Attempted to combine non-constant stride: "
<< AffineMapPrinter().ToString(size_and_stride.stride);
return std::nullopt;
}
if (size_and_stride.size.getKind() != AffineExprKind::Constant &&
size_and_stride.size.getKind() != AffineExprKind::DimId) {
VLOG(1) << "Attempted to combine strides but got non-constant, "
"non-dimension size "
<< AffineMapPrinter().ToString(size_and_stride.size);
return std::nullopt;
}
}
SortByStride(sizes_and_strides);
for (auto [dim_id, size_and_stride] : llvm::enumerate(sizes_and_strides)) {
int64_t stride =
llvm::cast<AffineConstantExpr>(size_and_stride.stride).getValue();
if (dim_id > 0) {
const SizeAndStrideExpression& previous_size_and_stride =
sizes_and_strides[dim_id - 1];
std::optional<int64_t> previous_size_expression_range_size =
TryGetSizeExpressionRangeSize(previous_size_and_stride.size,
dimension_intervals);
if (!previous_size_expression_range_size.has_value()) {
return std::nullopt;
}
int64_t previous_stride =
llvm::cast<AffineConstantExpr>(previous_size_and_stride.stride)
.getValue();
if (*previous_size_expression_range_size * previous_stride != stride) {
VLOG(1) << "Attempted to combine strides but stride did not grow "
<< "exactly as expected: got "
<< *previous_size_expression_range_size << " * "
<< previous_stride << " != " << stride;
return std::nullopt;
}
}
}
MLIRContext* ctx = sizes_and_strides[0].stride.getContext();
AffineExpr nested_if = getAffineConstantExpr(0, ctx);
for (auto size_and_stride_it = sizes_and_strides.rbegin();
size_and_stride_it != sizes_and_strides.rend(); ++size_and_stride_it) {
AffineExpr size = size_and_stride_it->size;
AffineExpr stride = size_and_stride_it->stride;
std::optional<int64_t> size_expression_range_size =
TryGetSizeExpressionRangeSize(size, dimension_intervals);
if (!size_expression_range_size.has_value()) {
return std::nullopt;
}
nested_if = IfNeqOne(size, stride, nested_if, *size_expression_range_size);
}
return nested_if;
}
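// Merges the size/stride pairs extracted from the summands of a strided
// indexing expression into a single size, a single stride, and the
// conjunction of all collected constraints.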
std::optional<SizeAndStrideExpression> CombineSizesAndStrides(
std::vector<SizeAndStrideExpression> sizes_and_strides,
absl::Span<Interval const> dimension_intervals) {
CHECK(!sizes_and_strides.empty());
if (VLOG_IS_ON(1)) {
for (const SizeAndStrideExpression& size_and_stride : sizes_and_strides) {
LOG(INFO) << "CombineSizesAndStrides:";
LOG(INFO) << "size: " << AffineMapPrinter().ToString(size_and_stride.size)
<< " stride: "
<< AffineMapPrinter().ToString(size_and_stride.stride);
}
}
ConstraintExpression constraints;
for (SizeAndStrideExpression& size_and_stride : sizes_and_strides) {
constraints = ConstraintExpression::And(
std::move(constraints), std::move(size_and_stride.constraints));
}
AffineExpr size = CombineSizes(sizes_and_strides);
std::optional<AffineExpr> stride =
CombineStrides(std::move(sizes_and_strides), dimension_intervals);
if (!stride.has_value()) {
return std::nullopt;
}
return SizeAndStrideExpression(size, *stride, std::move(constraints));
}
std::optional<SizeAndStrideExpression> ExtractSizeAndStride(
AffineExpr strided_indexing, absl::Span<Interval const> | #include "xla/service/gpu/model/symbolic_tile.h"
#include <cstdint>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/log/check.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::llvm::SmallVector;
using ::mlir::AffineExpr;
using ::mlir::AffineMap;
using ::testing::ElementsAre;
using ::testing::ExplainMatchResult;
using ::testing::IsEmpty;
using ::testing::Optional;
using ::testing::SizeIs;
using ConjointConstraints = ConstraintExpression::ConjointConstraints;
MATCHER_P(MatchSymbolicTileString, symbolic_tile_string, "") {
return ExplainMatchResult(
true, ApproximateMatch(symbolic_tile_string, arg.ToString()),
result_listener);
}
MATCHER_P(MatchConstraintExpressionString, constraint_expression_string, "") {
return ExplainMatchResult(
true, ApproximateMatch(constraint_expression_string, arg.ToString()),
result_listener);
}
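// Evaluates an affine map that uses only symbols at the given symbol values
// and returns the resulting constants.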
std::vector<int64_t> EvaluateMapAt(AffineMap affine_map,
absl::Span<int64_t const> parameters) {
CHECK_EQ(affine_map.getNumSymbols(), parameters.size());
CHECK_EQ(affine_map.getNumDims(), 0);
SmallVector<AffineExpr> symbol_replacements = llvm::to_vector(
llvm::map_range(parameters, [affine_map](const int64_t v) -> AffineExpr {
return mlir::getAffineConstantExpr(v, affine_map.getContext());
}));
AffineMap simplified_affine_map =
mlir::simplifyAffineMap(affine_map.replaceDimsAndSymbols(
{}, symbol_replacements, 0,
0));
SmallVector<int64_t> results = llvm::to_vector(llvm::map_range(
simplified_affine_map.getResults(), [](AffineExpr result) -> int64_t {
return llvm::cast<mlir::AffineConstantExpr>(result).getValue();
}));
return std::vector<int64_t>(results.begin(), results.end());
}
using SymbolicTileTest = IndexingTestBase;
TEST_F(SymbolicTileTest, CanPropagateTileFromDotOutputToInputs) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[11, 17, 19] parameter(0)
p1 = f32[11, 19, 23] parameter(1)
ROOT dot = f32[11, 17, 23] dot(p0, p1),
lhs_batch_dims={0}, rhs_batch_dims={0},
lhs_contracting_dims={2}, rhs_contracting_dims={1}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2] -> (0, 0, 0)
size_map: ()[s0, s1, s2] -> (s0, s1, 19)
stride_map: ()[s0, s1, s2] -> (1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughTrivialReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[11, 17, 19] parameter(0)
ROOT reshape = f32[1, 11, 17, 19] reshape(p0)
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2, s3] -> (0, 0, 0)
size_map: ()[s0, s1, s2, s3] -> (s1, s2, s3)
stride_map: ()[s0, s1, s2, s3] -> (1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest,
CanPropagateTileThroughNonTrivialMergeReshapeFromOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT reshape = f32[48,4]{1,0} reshape(p0)
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] -> (0, 0, 0, 0)
size_map: ()[s0, s1] -> (1, (s0 + 5) floordiv 6, s0 - ((s0 - 1) floordiv 6) * 6, s1)
stride_map: ()[s0, s1] -> (0, 1, 1, 1)
constraints:
6 mod s0 in [0, 1) || s0 mod 6 in [0, 1)
)")));
}
TEST_F(SymbolicTileTest,
CanPropagateTileThroughNonTrivialSplitReshapeFromOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[192,4]{1,0} parameter(0)
ROOT reshape = f32[4,8,6,4]{3,2,1,0} reshape(p0)
}
)"));
std::optional<SymbolicTile> symbolic_tile =
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin());
EXPECT_THAT(symbolic_tile, Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2, s3] -> (0, 0)
size_map: ()[s0, s1, s2, s3] -> ((s0 * s1) * s2, s3)
stride_map: ()[s0, s1, s2, s3] ->
(((-s2 + 7) floordiv 6) * (((-s1 + 9) floordiv 8) *
((-((-s0 + 5) floordiv 4) + 1) * 48) +
(-((-s1 + 9) floordiv 8) + 1) * 6) + -((-s2 + 7) floordiv 6) + 1, 1)
)")));
EXPECT_THAT(EvaluateMapAt(symbolic_tile->stride_map(), {4, 8, 6, 4}),
ElementsAre(1, 1));
EXPECT_THAT(EvaluateMapAt(symbolic_tile->stride_map(), {1, 1, 6, 4}),
ElementsAre(1, 1));
EXPECT_THAT(EvaluateMapAt(symbolic_tile->stride_map(), {1, 8, 1, 4}),
ElementsAre(6, 1));
EXPECT_THAT(EvaluateMapAt(symbolic_tile->stride_map(), {2, 1, 1, 4}),
ElementsAre(48, 1));
EXPECT_THAT(EvaluateMapAt(symbolic_tile->stride_map(), {2, 8, 1, 4}),
ElementsAre(6, 1));
EXPECT_THAT(EvaluateMapAt(symbolic_tile->stride_map(), {1, 1, 1, 4}),
ElementsAre(0, 1));
}
TEST_F(SymbolicTileTest, FailsToPropagateTileThroughNonTrivialReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[12, 4, 19] parameter(0)
ROOT reshape = f32[4, 12, 19] reshape(p0)
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughElementwiseOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[150] parameter(0)
p1 = f32[150] parameter(1)
ROOT add = f32[150] add(p0, p1)
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0] -> (0)
size_map: ()[s0] -> (s0)
stride_map: ()[s0] -> (1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileFromBroadcastOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[150] parameter(0)
ROOT broadcast = f32[157,150] broadcast(p0), dimensions={1}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] -> (0)
size_map: ()[s0, s1] -> (s1)
stride_map: ()[s0, s1] -> (1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileFromReduceOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY e {
p0 = f32[125,150] parameter(0)
c0 = f32[] constant(-inf)
ROOT reduce = f32[150] reduce(p0, c0), dimensions={0}, to_apply=max
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0] -> (0, 0)
size_map: ()[s0] -> (125, s0)
stride_map: ()[s0] -> (1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughReverse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[179] parameter(0)
ROOT reverse = f32[179] reverse(p0), dimensions={0}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0] -> (-s0 + 179)
size_map: ()[s0] -> (s0)
stride_map: ()[s0] -> (1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileFromSliceOutputToInput) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[120,142] parameter(0)
ROOT slice = f32[10,21] slice(p0), slice={[40:60:2], [20:104:4]}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] -> (40, 20)
size_map: ()[s0, s1] -> (s0, s1)
stride_map: ()[s0, s1] -> (2, 4)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughTranspose) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[21,10] parameter(0)
ROOT transpose = f32[10,21] transpose(p0), dimensions={1,0}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] -> (0, 0)
size_map: ()[s0, s1] -> (s1, s0)
stride_map: ()[s0, s1] -> (1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughConcatenate) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[2,5,7] parameter(0)
p1 = f32[2,11,7] parameter(1)
p2 = f32[2,17,7] parameter(2)
ROOT concat = f32[2,33,7] concatenate(p0, p1, p2), dimensions={1}
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2] -> (0, 0, 0)
size_map: ()[s0, s1, s2] -> (s0, s1, s2)
stride_map: ()[s0, s1, s2] -> (1, 1, 1)
)")));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[1].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2] -> (0, -5, 0)
size_map: ()[s0, s1, s2] -> (s0, s1, s2)
stride_map: ()[s0, s1, s2] -> (1, 1, 1)
)")));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[2].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2] -> (0, -16, 0)
size_map: ()[s0, s1, s2] -> (s0, s1, s2)
stride_map: ()[s0, s1, s2] -> (1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughPadOpWithoutInteriorPadding) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
input = f32[4, 4] parameter(0)
padding_value = f32[] parameter(1)
ROOT pad = f32[8,8] pad(input, padding_value), padding=2_2_0x1_3_0
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] -> (-2, -1)
size_map: ()[s0, s1] -> (s0, s1)
stride_map: ()[s0, s1] -> (1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughDynamicSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
%src = s32[2,2,258] parameter(0)
%of1 = s32[] parameter(1)
%of2 = s32[] parameter(2)
%of3 = s32[] parameter(3)
ROOT %ds = s32[1,2,32] dynamic-slice(s32[2,2,258] %src,
s32[] %of1, s32[] %of2, s32[] %of3),
dynamic_slice_sizes={1, 2, 32}
}
)"));
ASSERT_EQ(input_indexing.indexing_maps.size(), 4);
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2, s3, s4] -> (s3, 0, s4)
size_map: ()[s0, s1, s2] -> (1, s1, s2)
stride_map: ()[s0, s1, s2] -> (0, 1, 1)
rt_vars:
s3 in [0, 2)
hlo: %of1 = s32[] parameter(1)
(d0, d1, d2) -> ()
s4 in [0, 227)
hlo: %of3 = s32[] parameter(3)
(d0, d1, d2) -> ()
)")));
for (int i = 1; i <= 3; i++) {
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[i].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2] -> ()
size_map: ()[s0, s1, s2] -> ()
stride_map: ()[s0, s1, s2] -> ()
)")));
}
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughDynamicUpdateSlice) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
%src = s32[20,30] parameter(0)
%upd = s32[5,10] parameter(1)
%of1 = s32[] parameter(2)
%of2 = s32[] parameter(3)
ROOT %dus = s32[20,30] dynamic-update-slice(
s32[20,30] %src, s32[5,10] %upd, s32[] %of1, s32[] %of2)
}
)"));
ASSERT_EQ(input_indexing.indexing_maps.size(), 4);
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] -> (0, 0)
size_map: ()[s0, s1] -> (s0, s1)
stride_map: ()[s0, s1] -> (1, 1)
)")));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[1].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2, s3] -> (-s2, -s3)
size_map: ()[s0, s1] -> (s0, s1)
stride_map: ()[s0, s1] -> (1, 1)
rt_vars:
s2 in [0, 16)
hlo: %of1 = s32[] parameter(2)
(d0, d1) -> ()
s3 in [0, 21)
hlo: %of2 = s32[] parameter(3)
(d0, d1) -> ()
)")));
for (int i = 2; i <= 3; i++) {
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[i].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] -> ()
size_map: ()[s0, s1] -> ()
stride_map: ()[s0, s1] -> ()
)")));
}
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughGather) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY main {
operand = f32[33,76,70] parameter(0)
indices = s32[1806,2] parameter(1)
ROOT r = f32[1806,7,8,4] gather(operand, indices), offset_dims={1,2,3},
collapsed_slice_dims={}, start_index_map={0,1},
index_vector_dim=1, slice_sizes={7,8,4}
}
)"));
ASSERT_EQ(input_indexing.indexing_maps.size(), 2);
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2, s3, s4, s5] -> (s4, s5, 0)
size_map: ()[s0, s1, s2, s3] -> (s1, s2, s3)
stride_map: ()[s0, s1, s2, s3] -> (1, 1, 1)
rt_vars:
s4 in [0, 27)
hlo: %indices = s32[1806,2]{1,0} parameter(1)
(d0, d1, d2, d3) -> (d0, 0)
s5 in [0, 69)
hlo: %indices = s32[1806,2]{1,0} parameter(1)
(d0, d1, d2, d3) -> (d0, 1)
)")));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[1].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2, s3] -> (0, 0)
size_map: ()[s0, s1, s2, s3] -> (s0, 2)
stride_map: ()[s0, s1, s2, s3] -> (1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanPropagateTileThroughSplitReshapeOfReverse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
reverse = f32[1,8,6,4]{3,2,1,0} reverse(p0), dimensions={1,2}
ROOT reshape = f32[48,4]{1,0} reshape(reverse)
}
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[48,4]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] ->
(0, -((s0 + 5) floordiv 6) + 8, -(s0 - ((s0 - 1) floordiv 6) * 6) + 6, 0)
size_map: ()[s0, s1] ->
(1, (s0 + 5) floordiv 6, s0 - ((s0 - 1) floordiv 6) * 6, s1)
stride_map: ()[s0, s1] -> (0, 1, 1, 1)
)")));
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughSliceOfSplitReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
reshape = f32[48,4]{1,0} reshape(p0)
ROOT slice = f32[5,2]{1,0} slice(reshape), slice={[18:43:5], [0:4:2]}
}
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[5,2]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughMisalignedSliceOfSplitReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
reshape = f32[48,4]{1,0} reshape(p0)
ROOT slice = f32[5,2]{1,0} slice(reshape), slice={[20:45:5], [0:4:2]}
}
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[5,2]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughSliceOfSplitReshapeOnTranspose) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,6,8,4]{3,2,1,0} parameter(0)
transpose = f32[1,8,6,4]{3,2,1,0} transpose(p0), dimensions={0,2,1,3}
reshape = f32[48,4]{1,0} reshape(transpose)
ROOT slice = f32[5,2]{1,0} slice(reshape), slice={[18:43:5], [0:4:2]}
}
ENTRY e {
p0 = f32[1,6,8,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[5,2]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughSliceOfSplitReshapeOfReverse) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
computation {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
reverse = f32[1,8,6,4]{3,2,1,0} reverse(p0), dimensions={1,2}
reshape = f32[48,4]{1,0} reshape(reverse)
ROOT slice = f32[5,2]{1,0} slice(reshape), slice={[18:43:5], [0:4:2]}
}
ENTRY e {
p0 = f32[1,8,6,4]{3,2,1,0} parameter(0)
ROOT fusion = f32[5,2]{1,0} fusion(p0), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest,
FailsGracefullyAtPropagatingTileThroughReductionOfConcatenation) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
max_computation {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(p0, p1)
}
computation {
p0 = f32[10,8]{1,0} parameter(0)
p1 = f32[20,8]{1,0} parameter(1)
concatenate = f32[30,8]{1,0} concatenate(p0, p1), dimensions={0}
neg_inf = f32[] constant(-inf)
ROOT reduce = f32[8] reduce(concatenate, neg_inf), dimensions={0},
to_apply=max_computation
}
ENTRY e {
p0 = f32[10,8]{1,0} parameter(0)
p1 = f32[20,8]{1,0} parameter(1)
ROOT fusion = f32[8] fusion(p0, p1), kind=kLoop, calls=computation
}
)"));
EXPECT_EQ(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[1].begin()),
std::nullopt);
}
TEST_F(SymbolicTileTest, CanCombineCompatibleConstraints) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,8,6,4,8]{4,3,2,1,0} parameter(0)
ROOT reshape = f32[48,32]{1,0} reshape(p0)
}
)"));
EXPECT_THAT(
SymbolicTile::FromIndexingMap(*input_indexing.indexing_maps[0].begin()),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1] -> (0, 0, 0, 0, 0)
size_map: ()[s0, s1] -> (1, (s0 + 5) floordiv 6, s0 - ((s0 - 1) floordiv 6) * 6, (s1 + 7) floordiv 8, s1 - ((s1 - 1) floordiv 8) * 8)
stride_map: ()[s0, s1] -> (0, 1, 1, 1, 1)
constraints:
6 mod s0 in [0, 1) && 8 mod s1 in [0, 1) ||
6 mod s0 in [0, 1) && s1 mod 8 in [0, 1) ||
8 mod s1 in [0, 1) && s0 mod 6 in [0, 1) ||
s0 mod 6 in [0, 1) && s1 mod 8 in [0, 1)
)")));
}
TEST_F(SymbolicTileTest,
CanDeriveTileWhenPreexistingConstraintsCanBeSimplifiedAway) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1, d2)[s0] -> (d0 * 2048 + d1, s0)",
&mlir_context_),
{4, 2048, 50304}, {50304});
indexing_map.AddConstraint(ParseAffineExpr("d0 * 2048 + d1", &mlir_context_),
Interval{0, 8191});
EXPECT_THAT(SymbolicTile::FromIndexingMap(indexing_map),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2] -> (0, 0)
size_map: ()[s0, s1, s2] -> (s0 * s1, 50304)
stride_map: ()[s0, s1, s2] -> (((-s1 + 2049) floordiv 2048) * ((-((-s0 + 5) floordiv 4) + 1) * 2048) + -((-s1 + 2049) floordiv 2048) + 1, 1)
)")));
}
TEST_F(SymbolicTileTest, CanDeriveTileWhenTheIndexingMapHasSymbolsInASum) {
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
ParseAffineMap("(d0, d1, d2)[s0] -> (d0, d1, d2 * 128 + s0)",
&mlir_context_),
{4, 2048, 393}, {128});
EXPECT_THAT(SymbolicTile::FromIndexingMap(indexing_map),
Optional(MatchSymbolicTileString(R"(
Symbolic tile with
offset_map: ()[s0, s1, s2] -> (0, 0, 0)
size_map: ()[s0, s1, s2] -> (s0, s1, s2 * 128)
stride_map: ()[s0, s1, s2] -> (1, 1, 1)
)")));
}
class ConstraintExpressionTest : public IndexingTestBase {
public:
using ConstraintVector = std::vector<std::pair<std::string, Interval>>;
ConjointConstraints GetConjointConstraints(
ConstraintVector&& expr_and_interval_pairs) {
ConjointConstraints conjunction;
for (auto& [string_expr, interval] : expr_and_interval_pairs) {
conjunction.insert(
{ParseAffineExpr(string_expr, &mlir_context_), interval});
}
return conjunction;
}
};
TEST_F(ConstraintExpressionTest,
DefaultConstructedConstraintExpressionIsAlwaysSatisfied) {
EXPECT_TRUE(ConstraintExpression().IsAlwaysSatisfied());
}
TEST_F(ConstraintExpressionTest, PrettyPrintingTest) {
EXPECT_THAT(ConstraintExpression(),
MatchConstraintExpressionString("always satisfied"));
EXPECT_THAT(ConstraintExpression::GetUnsatisfiableConstraintExpression(),
MatchConstraintExpressionString("unsatisfiable"));
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}, {"d1", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d2", Interval{0, 5}}});
ConstraintExpression constraints;
constraints.Or(std::move(conjunction_1));
constraints.Or(std::move(conjunction_2));
EXPECT_THAT(constraints, MatchConstraintExpressionString(
"d0 in [0, 6) && d1 in [0, 6) || d2 in [0, 6)"));
}
TEST_F(ConstraintExpressionTest,
ConjunctionOfConstraintsOnTheSameExpressionAreIntersected) {
ConstraintExpression constraints;
constraints.And(GetConjointConstraints({{"d0", Interval{0, 5}}}));
EXPECT_THAT(constraints, MatchConstraintExpressionString("d0 in [0, 6)"));
constraints.And(GetConjointConstraints({{"d0", Interval{3, 6}}}));
EXPECT_THAT(constraints, MatchConstraintExpressionString("d0 in [3, 6)"));
constraints.And(GetConjointConstraints({{"d0", Interval{7, 8}}}));
EXPECT_THAT(constraints, MatchConstraintExpressionString("unsatisfiable"));
}
TEST_F(ConstraintExpressionTest,
UnsatisfiableConstraintExpressionHoldsNoConstraint) {
ConstraintExpression unsatisfiable_constraint =
ConstraintExpression::GetUnsatisfiableConstraintExpression();
EXPECT_FALSE(unsatisfiable_constraint.is_satisfiable());
EXPECT_THAT(unsatisfiable_constraint.DisjointConjointConstraints(),
IsEmpty());
}
TEST_F(
ConstraintExpressionTest,
CanSuccessfullyPerformConjunctionOfConstraintExpressionWithConjointConstraints) {
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}, {"d1", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d2", Interval{0, 5}}});
ConstraintExpression constraints;
constraints.And(std::move(conjunction_1));
constraints.And(std::move(conjunction_2));
EXPECT_TRUE(constraints.is_satisfiable());
const auto& conjunctions = constraints.DisjointConjointConstraints();
EXPECT_THAT(conjunctions, SizeIs(1));
EXPECT_THAT(conjunctions.front(), SizeIs(3));
}
TEST_F(
ConstraintExpressionTest,
CorrectlyEliminatesConjunctionFromDisjunctionWhenItBecomesUnsatisfiable) {
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d1", Interval{0, 5}}});
ConstraintExpression constraints;
constraints.Or(std::move(conjunction_1));
constraints.Or(std::move(conjunction_2));
EXPECT_THAT(constraints,
MatchConstraintExpressionString("d0 in [0, 6) || d1 in [0, 6)"));
ConjointConstraints conjunction_3 =
GetConjointConstraints({{"d0", Interval{6, 6}}});
constraints.And(std::move(conjunction_3));
EXPECT_THAT(constraints,
MatchConstraintExpressionString("d0 in [6, 7) && d1 in [0, 6)"));
ConjointConstraints conjunction_4 =
GetConjointConstraints({{"d0", Interval{7, 7}}});
constraints.And(std::move(conjunction_4));
EXPECT_THAT(constraints, MatchConstraintExpressionString("unsatisfiable"));
}
TEST_F(
ConstraintExpressionTest,
CanSuccessfullyPerformDisjunctionOfConstraintExpressionWithConjointConstraints) {
ConjointConstraints conjunction_1 =
GetConjointConstraints({{"d0", Interval{0, 5}}, {"d1", Interval{0, 5}}});
ConjointConstraints conjunction_2 =
GetConjointConstraints({{"d2", Interval{0, 5}}});
ConstraintExpression constraints;
constraints.Or(std::move(conjunction_1));
constraints.Or(std::move(conjunction_2));
EXPECT_TRUE(constraints.is_satisfiable());
const auto& conjunctions = constraints.DisjointConjointConstraints(); | 2,154 |
#ifndef XLA_SERVICE_GPU_MODEL_TILED_HLO_COMPUTATION_H_
#define XLA_SERVICE_GPU_MODEL_TILED_HLO_COMPUTATION_H_
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "xla/iterator_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "tsl/lib/gtl/iterator_range.h"
namespace xla {
namespace gpu {
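// Block-level tiling and launch parameters for a fusion: the output tile
// sizes plus the number of warps, CTAs and stages. Only the tile sizes and
// num_warps round-trip through BlockLevelFusionConfig.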
struct BlockLevelParameters {
std::vector<int64_t> output_tile_sizes;
int64_t num_warps = 1;
int num_ctas = 1;
int num_stages = 1;
static BlockLevelParameters FromBlockLevelFusionConfig(
const BlockLevelFusionConfig& config) {
return BlockLevelParameters{
std::vector<int64_t>(config.output_tile_sizes().begin(),
config.output_tile_sizes().end()),
config.num_warps()};
}
BlockLevelFusionConfig ToBlockLevelFusionConfig() const {
BlockLevelFusionConfig config;
config.mutable_output_tile_sizes()->Add(output_tile_sizes.begin(),
output_tile_sizes.end());
config.set_num_warps(num_warps);
return config;
}
};
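// A computation of tiled HLO instructions stored in def-before-use order; the
// root instruction is the last element.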
class TiledHloComputation {
public:
static TiledHloComputation FromSortedTiledHloInstructions(
std::vector<std::unique_ptr<TiledHloInstruction>> instructions) {
return TiledHloComputation(std::move(instructions));
}
tsl::gtl::iterator_range<UnwrappingIterator<
std::vector<std::unique_ptr<TiledHloInstruction>>::const_iterator>>
instructions() const {
return {MakeUnwrappingIterator(instructions_.begin()),
MakeUnwrappingIterator(instructions_.end())};
}
const TiledHloInstruction* GetRoot() const {
return instructions_.back().get();
}
std::string ToString() const;
private:
explicit TiledHloComputation(
std::vector<std::unique_ptr<TiledHloInstruction>> instructions)
: instructions_(std::move(instructions)) {}
std::vector<std::unique_ptr<TiledHloInstruction>> instructions_;
};
}  // namespace gpu
}  // namespace xla
#endif
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include <sstream>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/name_uniquer.h"
namespace xla {
namespace gpu {
std::string TiledHloComputation::ToString() const {
std::stringstream ss;
NameUniquer name_uniquer("_");
absl::flat_hash_map<const TiledHloInstruction*, std::string> tile_names;
for (const auto* tiled_hlo : instructions()) {
std::string tile_name = name_uniquer.GetUniqueName(
absl::StrCat(tiled_hlo->hlo()->name(), ".tile_0"));
tile_names[tiled_hlo] = tile_name;
absl::InlinedVector<std::string, 4> operand_names;
for (const auto& operand : tiled_hlo->operands()) {
operand_names.push_back(tile_names.at(operand));
}
ss << tile_name << " = " << HloOpcodeString(tiled_hlo->hlo()->opcode())
<< "(" << absl::StrJoin(operand_names, ", ") << ")\n";
ss << tiled_hlo->ToString() << "\n";
}
return ss.str();
}
}  // namespace gpu
} | #include "xla/service/gpu/model/tiled_hlo_computation.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "xla/service/gpu/backend_configs.pb.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
TEST(BlockLevelParametersTest,
BlockLevelParametersCanBeParsedFromBlockLevelFusionConfig) {
BlockLevelFusionConfig block_level_fusion_config;
block_level_fusion_config.mutable_output_tile_sizes()->Add(18);
block_level_fusion_config.mutable_output_tile_sizes()->Add(19);
block_level_fusion_config.set_num_warps(12);
BlockLevelParameters block_level_parameters =
BlockLevelParameters::FromBlockLevelFusionConfig(
block_level_fusion_config);
EXPECT_THAT(block_level_parameters.output_tile_sizes, ElementsAre(18, 19));
EXPECT_THAT(block_level_parameters.num_warps, 12);
}
TEST(BlockLevelParametersTest,
BlockLevelParametersCanBeConvertedToBlockLevelFusionConfig) {
BlockLevelParameters block_level_parameters;
block_level_parameters.output_tile_sizes = {18, 19};
block_level_parameters.num_warps = 12;
BlockLevelFusionConfig block_level_fusion_config =
block_level_parameters.ToBlockLevelFusionConfig();
EXPECT_THAT(block_level_fusion_config.output_tile_sizes(),
ElementsAre(18, 19));
EXPECT_THAT(block_level_fusion_config.num_warps(), 12);
}
}  // namespace
}  // namespace gpu
} | 2,155 |
#ifndef XLA_SERVICE_GPU_MODEL_SYMBOLIC_TILE_ANALYSIS_H_
#define XLA_SERVICE_GPU_MODEL_SYMBOLIC_TILE_ANALYSIS_H_
#include <cstdint>
#include <functional>
#include <memory>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/instruction_fusion.h"
namespace xla {
namespace gpu {
class SymbolicTileAnalysis;
using SymbolicTileAnalysisOrError =
std::variant<SymbolicTileAnalysis, FusionDecision>;
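// Constructs and holds the symbolically tiled instructions of a fusion
// together with the constraints that tile parameters must satisfy.
// Instructions are stored in def-before-use order, so the root is the last
// element.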
class SymbolicTileAnalysis {
public:
using Tiling = absl::InlinedVector<int64_t, 4>;
static SymbolicTileAnalysisOrError AnalyzeComputation(
const HloComputation& computation, mlir::MLIRContext* ctx);
static SymbolicTileAnalysisOrError AnalyzeFusion(
const HloFusionAdaptor& fusion, mlir::MLIRContext* ctx);
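  // Returns a graph of HLO instructions tiled with the given tile parameters.
  // Unless `constraints_are_known_satisfied` is set, the parameters are first
  // checked against the analysis's constraints.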
absl::StatusOr<TiledHloComputation> ComputeTiledHloInstructions(
absl::Span<const int64_t> tile_parameters,
bool constraints_are_known_satisfied = false) const;
const SymbolicTiledHloInstruction* GetRoot() const {
return symbolic_tiled_hlo_instructions_.back().get();
}
const std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>&
GetSymbolicTiledHloComputation() const {
return symbolic_tiled_hlo_instructions_;
}
const ConstraintExpression& GetConstraints() const { return constraints_; }
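  // Returns true if the given tile parameters satisfy the constraints
  // collected during the analysis.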
absl::StatusOr<bool> ParametersSatisfyConstraints(
absl::Span<const int64_t> tile_parameters) const;
  mlir::MLIRContext* GetMLIRContext() const { return context_; }
std::string ToString(
const AffineMapPrinter& printer = AffineMapPrinter()) const;
absl::StatusOr<std::vector<Tiling>> GetGoodTilings() const;
private:
SymbolicTileAnalysis(std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>
symbolic_tiled_hlo_instructions,
ConstraintExpression constraints,
mlir::MLIRContext* context)
: symbolic_tiled_hlo_instructions_(
std::move(symbolic_tiled_hlo_instructions)),
constraints_(std::move(constraints)),
context_(context) {}
std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>
symbolic_tiled_hlo_instructions_;
ConstraintExpression constraints_;
mlir::MLIRContext* context_;
};
namespace detail {
std::vector<SymbolicTileAnalysis::Tiling> GetGoodTilings(
absl::Span<const int64_t> dim_sizes,
std::function<bool(absl::Span<const int64_t>)> is_valid);
}  // namespace detail
}  // namespace gpu
}  // namespace xla
#endif
#include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <sstream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_analysis.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/instruction_fusion.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using ::mlir::AffineExpr;
using ::mlir::MLIRContext;
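// Computes the indexing map from the linear block id to the offsets of the
// output tile: each dimension is covered by ceil(dim_size / tile_size) tiles,
// the block id is delinearized over the per-dimension tile counts, and every
// coordinate is scaled by its tile size.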
IndexingMap ComputeBlockIdToOutputTileIndexing(
absl::Span<const int64_t> dimensions, absl::Span<const int64_t> tile_sizes,
mlir::MLIRContext* mlir_context) {
CHECK_EQ(dimensions.size(), tile_sizes.size());
int64_t num_tiles = 1;
std::vector<int64_t> outer_loop_bounds;
outer_loop_bounds.reserve(dimensions.size());
for (auto [dim_size, tile_size] : llvm::zip(dimensions, tile_sizes)) {
int64_t num_tiles_per_dim = (dim_size + tile_size - 1) / tile_size;
num_tiles *= num_tiles_per_dim;
outer_loop_bounds.push_back(num_tiles_per_dim);
}
mlir::AffineExpr program_id = mlir::getAffineDimExpr(0, mlir_context);
auto tile_exprs =
DelinearizeIndex(outer_loop_bounds, program_id, mlir_context);
for (auto [tile_expr, tile_size] : llvm::zip(tile_exprs, tile_sizes)) {
tile_expr = tile_expr * tile_size;
}
return IndexingMap::FromTensorSizes(
mlir::AffineMap::get(
1, 0, tile_exprs, mlir_context),
{num_tiles}, {});
}
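// Computes the block-id-to-tile-offset indexing for a tiled instruction by
// composing the block-id-to-root-tile-offset map with the instruction's
// indexing map, checking that all symbol lower bounds are zero, and then
// substituting zero for the symbols before simplifying.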
absl::StatusOr<IndexingMap> ComputeBlockIdToTileOffsetIndexing(
const SymbolicTiledHloInstruction& tiled_hlo,
const IndexingMap& block_id_to_root_tile_offset,
mlir::MLIRContext* mlir_context) {
IndexingMap block_id_to_tile_offset_indexing = ComposeIndexingMaps(
block_id_to_root_tile_offset, tiled_hlo.indexing_map());
if (absl::c_any_of(block_id_to_tile_offset_indexing.GetSymbolBounds(),
[](const Interval& symbol_bound) {
return symbol_bound.lower != 0;
})) {
return absl::FailedPreconditionError(
absl::StrCat("Symbol lower bound is not zero. ",
block_id_to_tile_offset_indexing.ToString()));
}
std::vector<AffineExpr> symbol_lower_bounds(
block_id_to_tile_offset_indexing.GetSymbolCount(),
mlir::getAffineConstantExpr(0, mlir_context));
mlir::AffineMap simplified_affine_map =
block_id_to_tile_offset_indexing.GetAffineMap().replaceDimsAndSymbols(
{}, symbol_lower_bounds,
block_id_to_tile_offset_indexing.GetDimVarsCount(),
block_id_to_tile_offset_indexing.GetRangeVarsCount());
IndexingMap simplified_indexing_map = IndexingMap{
simplified_affine_map, block_id_to_tile_offset_indexing.GetDimVars(),
block_id_to_tile_offset_indexing.GetRangeVars(),
block_id_to_tile_offset_indexing.GetRTVars()};
simplified_indexing_map.Simplify();
simplified_indexing_map.RescaleSymbols();
simplified_indexing_map.RemoveUnusedSymbols();
return simplified_indexing_map;
}
}  // namespace
SymbolicTileAnalysisOrError SymbolicTileAnalysis::AnalyzeComputation(
const HloComputation& computation, MLIRContext* ctx) {
auto fusion = HloFusionAdaptor::ForComputation(&computation);
return SymbolicTileAnalysis::AnalyzeFusion(*fusion, ctx);
}
SymbolicTileAnalysisOrError SymbolicTileAnalysis::AnalyzeFusion(
const HloFusionAdaptor& fusion, MLIRContext* ctx) {
std::vector<std::unique_ptr<SymbolicTiledHloInstruction>>
tiled_hlo_instructions;
absl::flat_hash_map<std::pair<const HloInstruction*, IndexingMap>,
SymbolicTiledHloInstruction*>
tiled_hlo_instructions_map;
absl::flat_hash_map<SymbolicTiledHloInstruction*, int64_t> topological_order;
std::function<std::variant<SymbolicTiledHloInstruction*, FusionDecision>(
const HloInstructionAdaptor&, IndexingMap)>
get_tiled_hlo_instruction;
ConstraintExpression constraints;
get_tiled_hlo_instruction =
[&](const HloInstructionAdaptor& instruction_adaptor,
IndexingMap indexing_map)
-> std::variant<SymbolicTiledHloInstruction*, FusionDecision> {
const HloInstruction* hlo = &instruction_adaptor.instruction();
auto key = std::make_pair(hlo, indexing_map);
auto it = tiled_hlo_instructions_map.find(key);
if (it != tiled_hlo_instructions_map.end()) {
return it->second;
}
if (hlo->opcode() == HloOpcode::kDot ||
hlo->opcode() == HloOpcode::kConcatenate) {
return FusionDecision{} << "Bailing out on " << hlo->ToString();
}
if (!hlo->shape().IsArray()) {
return FusionDecision{} << hlo->ToString()
<< " outputs more than a single array";
}
auto symbolic_tile = SymbolicTile::FromIndexingMap(indexing_map);
if (!symbolic_tile.has_value()) {
return FusionDecision{} << "Failed to compute symbolic tile for "
<< indexing_map.ToString() << " for HLO "
<< hlo->ToString();
}
if (!symbolic_tile->is_satisfiable()) {
return FusionDecision{} << "Symbolic tile " << symbolic_tile->ToString()
<< " is not satisfiable for "
<< indexing_map.ToString() << " for HLO "
<< hlo->ToString();
}
constraints = ConstraintExpression::And(std::move(constraints),
symbolic_tile->constraints());
if (!constraints.is_satisfiable()) {
return FusionDecision{} << "Fusion has unsatisfiable constraints";
}
tiled_hlo_instructions.push_back(
std::make_unique<SymbolicTiledHloInstruction>(
hlo, std::move(indexing_map), std::move(*symbolic_tile)));
auto tiled_hlo_instruction = tiled_hlo_instructions.back().get();
std::optional<HloInstructionIndexing> operands_indexing =
ComputeOutputToInputIndexing(hlo, 0, ctx);
if (!operands_indexing.has_value()) {
return FusionDecision{} << "Failed to compute operands indexing for "
<< tiled_hlo_instruction->hlo()->ToString();
}
if (fusion.ContainsInstruction(instruction_adaptor)) {
for (auto [operand, operand_indexing_map_set] :
llvm::zip(instruction_adaptor.GetOperands(),
operands_indexing->indexing_maps)) {
CHECK_EQ(operand_indexing_map_set.size(), 1);
IndexingMap operand_indexing_map =
ComposeIndexingMaps(tiled_hlo_instruction->indexing_map(),
*operand_indexing_map_set.begin());
if (operand_indexing_map.IsUndefined()) {
return FusionDecision{}
<< "Couldn't derive indexing map for instruction "
<< tiled_hlo_instruction->hlo()->ToString() << " and operand "
<< operand.instruction().ToString();
}
operand_indexing_map.Simplify();
operand_indexing_map.RescaleSymbols();
operand_indexing_map.RemoveUnusedSymbols();
auto tiled_operand_or =
get_tiled_hlo_instruction(operand, std::move(operand_indexing_map));
if (auto fusion_decison =
std::get_if<FusionDecision>(&tiled_operand_or)) {
return *fusion_decison;
}
tiled_hlo_instruction->AppendOperand(
std::get<SymbolicTiledHloInstruction*>(tiled_operand_or));
}
}
topological_order[tiled_hlo_instruction] = topological_order.size();
tiled_hlo_instructions_map.emplace(key, tiled_hlo_instruction);
return tiled_hlo_instruction;
};
auto roots = fusion.GetRoots();
if (roots.size() > 1) {
return FusionDecision{} << "Multi-output fusions are not supported. "
<< fusion.ToString();
}
auto& root = roots[0];
auto tiled_root =
get_tiled_hlo_instruction(root, CreateIdentityMap(root.shape(), ctx));
if (auto* fusion_decision = std::get_if<FusionDecision>(&tiled_root)) {
return *fusion_decision;
}
absl::c_sort(tiled_hlo_instructions, [&](const auto& i1, const auto& i2) {
return topological_order.at(i1.get()) < topological_order.at(i2.get());
});
return SymbolicTileAnalysis(std::move(tiled_hlo_instructions),
std::move(constraints), ctx);
}
absl::StatusOr<bool> SymbolicTileAnalysis::ParametersSatisfyConstraints(
absl::Span<const int64_t> tile_parameters) const {
if (!constraints_.is_satisfiable()) {
return absl::FailedPreconditionError(
"SymbolicTileAnalysis's constraints are not satisfiable. "
"This should never happen.");
}
if (constraints_.IsAlwaysSatisfied()) {
return true;
}
llvm::SmallVector<AffineExpr> parameters = llvm::to_vector(
llvm::map_range(tile_parameters, [this](const int64_t v) -> AffineExpr {
return mlir::getAffineConstantExpr(v, context_);
}));
bool constraints_are_satisfied = false;
for (const ConstraintExpression::ConjointConstraints& conjunction :
constraints_.DisjointConjointConstraints()) {
bool conjunction_is_satisfied = true;
for (const auto& [constrained_expr, interval] : conjunction) {
AffineExpr constrained_expr_value =
constrained_expr.replaceSymbols(parameters);
if (constrained_expr_value.getKind() != mlir::AffineExprKind::Constant) {
return absl::InvalidArgumentError(absl::StrCat(
"Failed to reduce ", AffineMapPrinter().ToString(constrained_expr),
" to a constant with tile parameters ",
absl::StrJoin(tile_parameters, ", ")));
}
int64_t constrained_value =
llvm::cast<mlir::AffineConstantExpr>(constrained_expr_value)
.getValue();
if (constrained_value < interval.lower ||
constrained_value > interval.upper) {
conjunction_is_satisfied = false;
break;
}
}
constraints_are_satisfied |= conjunction_is_satisfied;
}
return constraints_are_satisfied;
}
absl::StatusOr<TiledHloComputation>
SymbolicTileAnalysis::ComputeTiledHloInstructions(
absl::Span<const int64_t> tile_parameters,
bool constraints_are_known_satisfied) const {
if (!constraints_are_known_satisfied) {
TF_ASSIGN_OR_RETURN(bool constraints_are_satisfied,
ParametersSatisfyConstraints(tile_parameters));
if (!constraints_are_satisfied) {
return absl::InvalidArgumentError(absl::StrCat(
"Tile parameters ", absl::StrJoin(tile_parameters, ", "),
" do not satisfy the SymbolicTileAnalysis's constraints."));
}
}
IndexingMap block_id_to_root_tile_offset = ComputeBlockIdToOutputTileIndexing(
GetRoot()->hlo()->shape().dimensions(), tile_parameters, context_);
std::vector<std::unique_ptr<TiledHloInstruction>> tiled_hlo_instructions;
absl::flat_hash_map<const SymbolicTiledHloInstruction*, TiledHloInstruction*>
symbolic_to_tiled_hlo_map;
absl::flat_hash_set<TiledHloInstruction*, TiledHloInstruction::PtrHash,
TiledHloInstruction::PtrEqual>
tiled_hlo_instructions_set;
absl::flat_hash_map<TiledHloInstruction*, int64_t> topological_order;
std::function<absl::StatusOr<TiledHloInstruction*>(
const SymbolicTiledHloInstruction*)>
get_tiled_hlo_instruction;
get_tiled_hlo_instruction =
[&](const SymbolicTiledHloInstruction* symbolic_tiled_hlo)
-> absl::StatusOr<TiledHloInstruction*> {
auto it1 = symbolic_to_tiled_hlo_map.find(symbolic_tiled_hlo);
if (it1 != symbolic_to_tiled_hlo_map.end()) {
return it1->second;
}
std::vector<int64_t> tile_sizes =
symbolic_tiled_hlo->TileSizes(tile_parameters);
std::vector<int64_t> tile_strides =
symbolic_tiled_hlo->TileStrides(tile_parameters);
TF_ASSIGN_OR_RETURN(
IndexingMap block_id_to_block_offset_indexing,
ComputeBlockIdToTileOffsetIndexing(
*symbolic_tiled_hlo, block_id_to_root_tile_offset, context_));
TF_ASSIGN_OR_RETURN(std::unique_ptr<TiledHloInstruction> tiled_hlo_holder,
TiledHloInstruction::Create(
symbolic_tiled_hlo->hlo(), std::move(tile_sizes),
std::move(tile_strides),
std::move(block_id_to_block_offset_indexing)));
auto it2 = tiled_hlo_instructions_set.find(tiled_hlo_holder.get());
if (it2 != tiled_hlo_instructions_set.end()) {
return *it2;
}
tiled_hlo_instructions.push_back(std::move(tiled_hlo_holder));
TiledHloInstruction* tiled_hlo = tiled_hlo_instructions.back().get();
tiled_hlo_instructions_set.insert(tiled_hlo);
symbolic_to_tiled_hlo_map[symbolic_tiled_hlo] = tiled_hlo;
for (SymbolicTiledHloInstruction* operand :
symbolic_tiled_hlo->operands()) {
TF_ASSIGN_OR_RETURN(TiledHloInstruction * tiled_operand,
get_tiled_hlo_instruction(operand));
tiled_hlo->AppendOperand(tiled_operand);
}
topological_order[tiled_hlo] = topological_order.size();
return tiled_hlo;
};
TF_CHECK_OK(get_tiled_hlo_instruction(GetRoot()).status());
absl::c_sort(tiled_hlo_instructions, [&](const auto& i1, const auto& i2) {
return topological_order.at(i1.get()) < topological_order.at(i2.get());
});
return TiledHloComputation::FromSortedTiledHloInstructions(
std::move(tiled_hlo_instructions));
}
std::string SymbolicTileAnalysis::ToString(
const AffineMapPrinter& printer) const {
std::stringstream ss;
NameUniquer name_uniquer("_");
absl::flat_hash_map<SymbolicTiledHloInstruction*, std::string> tile_names;
for (const auto& tiled_hlo : symbolic_tiled_hlo_instructions_) {
std::string tile_name = name_uniquer.GetUniqueName(
absl::StrCat(tiled_hlo->hlo()->name(), ".tile_0"));
tile_names[tiled_hlo.get()] = tile_name;
absl::InlinedVector<std::string, 4> operand_names;
for (const auto& operand : tiled_hlo->operands()) {
operand_names.push_back(tile_names.at(operand));
}
ss << tile_name << " = " << HloOpcodeString(tiled_hlo->hlo()->opcode())
<< "(" << absl::StrJoin(operand_names, ", ") << ")\n";
ss << tiled_hlo->ToString();
}
return ss.str();
}
namespace {
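// Candidate tile sizes for one dimension: all powers of two smaller than
// `dim_size`, plus `dim_size` itself.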
std::vector<int64_t> PossibleTileSizesForOneDimension(int64_t dim_size) {
CHECK_GE(dim_size, 1);
std::vector<int64_t> result;
result.reserve(absl::bit_width(static_cast<uint64_t>(dim_size)));
for (int64_t tile_size = 1; tile_size < dim_size; tile_size *= 2) {
result.push_back(tile_size);
}
result.push_back(dim_size);
return result;
}
}  // namespace
namespace detail {
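// Enumerates every combination of per-dimension candidate tile sizes and
// keeps only the tilings accepted by `is_valid`.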
std::vector<SymbolicTileAnalysis::Tiling> GetGoodTilings(
absl::Span<const int64_t> dim_sizes,
std::function<bool(absl::Span<const int64_t>)> is_valid) {
CHECK(is_valid != nullptr);
std::vector<SymbolicTileAnalysis::Tiling> tilings;
tilings.push_back({});
for (int dim_size : dim_sizes) {
std::vector<int64_t> possible_tile_sizes =
PossibleTileSizesForOneDimension(dim_size);
std::vector<SymbolicTileAnalysis::Tiling> extended_tilings;
extended_tilings.reserve(tilings.size() * possible_tile_sizes.size());
for (const SymbolicTileAnalysis::Tiling& tiling : tilings) {
for (int64_t tile_size : possible_tile_sizes) {
SymbolicTileAnalysis::Tiling extended_tiling = tiling;
extended_tiling.push_back(tile_size);
extended_tilings.push_back(extended_tiling);
}
}
tilings = std::move(extended_tilings);
}
tilings.erase(
std::remove_if(tilings.begin(), tilings.end(), std::not_fn(is_valid)),
tilings.end());
return tilings;
}
}  // namespace detail
absl::StatusOr<std::vector<SymbolicTileAnalysis::Tiling>>
SymbolicTileAnalysis::GetGoodTilings() const {
TF_RET_CHECK(!symbolic_tiled_hlo_instructions_.empty());
TF_RET_CHECK(symbolic_tiled_hlo_instructions_.back() != nullptr);
const SymbolicTiledHloInstruction& instr =
*symbolic_tiled_hlo_instructions_.back();
TF_RET_CHECK(instr.hlo() != nullptr);
const Shape& shape = instr.hlo()->shape();
if (!absl::c_all_of(shape.dimensions(),
[](int64_t dim_size) { return dim_size >= 1; })) {
return absl::InvalidArgumentError(absl::StrFormat(
"Shape %s has zero or negative dimensions.", shape.ToString()));
}
absl::Status status = absl::OkStatus();
std::vector<SymbolicTileAnalysis::Tiling> result = detail::GetGoodTilings(
shape.dimensions(), [&](absl::Span<const int64_t> tile_sizes) {
absl::StatusOr<bool> is_valid =
ParametersSatisfyConstraints(tile_sizes);
if (!is_valid.ok()) {
status = is_valid.status();
return false;
}
return is_valid.value();
});
if (status.ok()) {
return result;
}
return status;
}
}  // namespace gpu
} | #include "xla/service/gpu/model/symbolic_tile_analysis.h"
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/service/gpu/model/symbolic_tile.h"
#include "xla/service/gpu/model/tiled_hlo_computation.h"
#include "xla/service/gpu/model/tiled_hlo_instruction.h"
#include "xla/service/instruction_fusion.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/verified_hlo_module.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
using detail::GetGoodTilings;
using ::testing::ElementsAreArray;
using ::testing::ExplainMatchResult;
using ::testing::IsEmpty;
using ::testing::Matcher;
using ::testing::Not;
using ::testing::SizeIs;
using ::testing::status::IsOkAndHolds;
using ::testing::status::StatusIs;
using TilingVector = std::vector<SymbolicTileAnalysis::Tiling>;
MATCHER_P3(MatchTiledHloInstructionImpl, tile_sizes, tile_strides,
block_id_to_tile_offsets_indexing, "") {
return ExplainMatchResult(ElementsAreArray(tile_sizes), arg.tile_sizes(),
result_listener) &&
ExplainMatchResult(ElementsAreArray(tile_strides), arg.tile_strides(),
result_listener) &&
ExplainMatchResult(MatchIndexingMap(block_id_to_tile_offsets_indexing),
arg.block_id_to_tile_offsets_indexing(),
result_listener);
}
Matcher<const TiledHloInstruction> MatchTiledHloInstruction(
absl::Span<const int64_t> tile_sizes,
absl::Span<const int64_t> tile_strides,
absl::string_view block_id_to_tile_offsets_indexing) {
return MatchTiledHloInstructionImpl(tile_sizes, tile_strides,
block_id_to_tile_offsets_indexing);
}
class SymbolicTileAnalysisTest : public HloTestBase {
public:
std::optional<SymbolicTileAnalysis> TryAnalyzeModule(HloModule* module) {
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeComputation(
*module->entry_computation()
->root_instruction()
->fused_instructions_computation(),
&mlir_context_);
if (std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error)) {
return std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
}
VLOG(1) << "Cannot analyze module: "
<< std::get<FusionDecision>(analysis_or_error).Explain();
return std::nullopt;
}
mlir::MLIRContext mlir_context_;
};
TEST_F(SymbolicTileAnalysisTest, SimpleNormalizationDiamondIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
max {
p1 = f32[] parameter(1)
p0 = f32[] parameter(0)
ROOT m = f32[] maximum(p0, p1)
}
fusion {
p0 = f32[2,97]{1,0} parameter(0)
constant = f32[] constant(-inf)
reduce = f32[2] reduce(p0, constant), dimensions={1}, to_apply=max
broadcast = f32[2,97]{1,0} broadcast(reduce), dimensions={0}
ROOT subtract = f32[2,97]{1,0} subtract(p0, broadcast)
}
ENTRY main {
p0 = f32[2,97]{1,0} parameter(0)
ROOT fusion = f32[2,97]{1,0} fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions({1, 10}));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
EXPECT_THAT(root->block_id_to_tile_offsets_indexing(), MatchIndexingMap(R"(
(d0) -> (d0 floordiv 10, (d0 mod 10) * 10)
domain:
d0 in [0, 20)
)"));
auto p0_from_subtract0 = root->operand(0);
auto p0_from_subtract1 = root->operand(1)->operand(0)->operand(0);
EXPECT_THAT(*p0_from_subtract0, MatchTiledHloInstruction(
{1, 10},
{1, 1},
R"(
(d0) -> (d0 floordiv 10, (d0 mod 10) * 10)
domain:
d0 in [0, 20)
)"));
EXPECT_THAT(*p0_from_subtract1, MatchTiledHloInstruction(
{1, 97},
{1, 1},
R"(
(d0) -> (d0 floordiv 10, 0)
domain:
d0 in [0, 20)
)"));
}
TEST_F(SymbolicTileAnalysisTest, ElementwiseDiamondCSEIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[2,97] parameter(0)
exp = f32[2,97] exponential(p0)
log = f32[2,97] log(p0)
ROOT subtract = f32[2,97] subtract(exp, log)
}
ENTRY main {
p0 = f32[2,97] parameter(0)
ROOT fusion = f32[2,97] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions({1, 10}));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
auto p0_from_subtract0 = root->operand(0)->operand(0);
auto p0_from_subtract1 = root->operand(1)->operand(0);
EXPECT_EQ(p0_from_subtract0, p0_from_subtract1);
}
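// Unlike the other tests, this one builds an HloFusionAdaptor for a
// producer/consumer pair and calls SymbolicTileAnalysis::AnalyzeFusion
// directly instead of going through TryAnalyzeModule.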
TEST_F(SymbolicTileAnalysisTest, ProducerConsumerFusionIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT m = f32[] maximum(p0, p1)
}
fusion.1 {
p0 = f32[2,97] parameter(0)
constant = f32[] constant(-inf)
exp = f32[2,97] exponential(p0)
ROOT reduce = f32[2] reduce(exp, constant), dimensions={1}, to_apply=max
}
fusion.2 {
p0 = f32[2] parameter(0)
p1 = f32[2,97] parameter(1)
broadcast = f32[2,97]{1,0} broadcast(p0), dimensions={0}
ROOT subtract = f32[2,97] subtract(p1, broadcast)
}
ENTRY main {
p0 = f32[2,97] parameter(0)
producer = f32[2] fusion(p0), kind=kLoop, calls=fusion.1
ROOT consumer = f32[2,97] fusion(producer, p0), kind=kLoop, calls=fusion.2
})"));
const auto* consumer = module->entry_computation()->root_instruction();
const auto* producer = consumer->operand(0);
auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer);
SymbolicTileAnalysisOrError analysis_or_error =
SymbolicTileAnalysis::AnalyzeFusion(*fusion, &mlir_context_);
ASSERT_TRUE(std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error));
SymbolicTileAnalysis analysis =
std::get<SymbolicTileAnalysis>(std::move(analysis_or_error));
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis.ComputeTiledHloInstructions({1, 97}));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
const TiledHloInstruction* p0_from_producer =
root->operand(1)->operand(0)->operand(0)->operand(0);
const TiledHloInstruction* p0_from_consumer = root->operand(0);
EXPECT_EQ(p0_from_producer, p0_from_consumer);
EXPECT_THAT(*p0_from_producer,
MatchTiledHloInstruction(
{1, 97}, {1, 1},
R"(
(d0) -> (d0, 0)
domain: d0 in [0, 2)
)"));
}
TEST_F(SymbolicTileAnalysisTest, TransposeOffsetIndexingIsCorrect) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[8,16,4] parameter(0)
ROOT transpose = f32[4,8,16] transpose(p0), dimensions={2,0,1}
}
ENTRY main {
p0 = f32[8,16,4] parameter(0)
ROOT fusion = f32[4,8,16] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions({2, 4, 2}));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
EXPECT_THAT(*root, MatchTiledHloInstruction(
{2, 4, 2}, {1, 1, 1},
R"(
(d0) -> ((d0 floordiv 16) * 2, ((d0 floordiv 8) mod 2) * 4, (d0 mod 8) * 2)
domain:
d0 in [0, 32)
)"));
EXPECT_THAT(*root->operand(0),
MatchTiledHloInstruction(
{4, 2, 2}, {1, 1, 1},
R"(
(d0) -> (((d0 floordiv 8) mod 2) * 4, (d0 mod 8) * 2, (d0 floordiv 16) * 2)
domain:
d0 in [0, 32)
)"));
}
TEST_F(SymbolicTileAnalysisTest, SliceOffsetIndexingIsCorrect) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[8,16] parameter(0)
slice.0 = f32[4,8] slice(p0), slice={[0:4], [2:10]}
slice.1 = f32[4,8] slice(p0), slice={[3:7], [4:12]}
ROOT add = f32[4,8] add(slice.0, slice.1)
}
ENTRY main {
p0 = f32[8,16] parameter(0)
ROOT fusion = f32[4,8] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions({2, 2}));
const TiledHloInstruction* root = tiled_hlo_computation.GetRoot();
const TiledHloInstruction* p0_from_slice0 = root->operand(0)->operand(0);
const TiledHloInstruction* p0_from_slice1 = root->operand(1)->operand(0);
EXPECT_THAT(*root, MatchTiledHloInstruction(
{2, 2}, {1, 1},
R"(
(d0) -> ((d0 floordiv 4) * 2, (d0 mod 4) * 2)
domain:
d0 in [0, 8)
)"));
EXPECT_THAT(*p0_from_slice0,
MatchTiledHloInstruction(
{2, 2}, {1, 1},
R"(
(d0) -> ((d0 floordiv 4) * 2, (d0 mod 4) * 2 + 2)
domain:
d0 in [0, 8)
)"));
EXPECT_THAT(*p0_from_slice1,
MatchTiledHloInstruction(
{2, 2}, {1, 1},
R"(
(d0) -> ((d0 floordiv 4) * 2 + 3, (d0 mod 4) * 2 + 4)
domain:
d0 in [0, 8)
)"));
}
TEST_F(SymbolicTileAnalysisTest, BailOutOnUnsupportedDot) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,2]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
ROOT dot = f32[1,3]{1,0} dot(p0, p1),
lhs_batch_dims={}, rhs_batch_dims={},
lhs_contracting_dims={1}, rhs_contracting_dims={0}
}
ENTRY main {
p0 = f32[1,2]{1,0} parameter(0)
p1 = f32[2,3]{1,0} parameter(1)
ROOT fusion = f32[1,3]{1,0} fusion(p0, p1), kind=kLoop, calls=fusion
})"));
EXPECT_FALSE(TryAnalyzeModule(module.get()).has_value());
}
TEST_F(SymbolicTileAnalysisTest, DoesNotBailOutOnConstrainedReshape) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[4,2]{1,0} parameter(0)
ROOT reshape = f32[8] reshape(p0)
}
ENTRY main {
p0 = f32[4,2]{1,0} parameter(0)
ROOT fusion = f32[8] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const ConstraintExpression& constraints = analysis->GetConstraints();
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(2));
EXPECT_THAT(constraints.DisjointConjointConstraints().front(), SizeIs(1));
}
TEST_F(SymbolicTileAnalysisTest, DoesNotBailOutOnConstrainedBitcast) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[4,2]{1,0} parameter(0)
ROOT bitcast = f32[8] bitcast(p0)
}
ENTRY main {
p0 = f32[4,2]{1,0} parameter(0)
ROOT fusion = f32[8] fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const ConstraintExpression& constraints = analysis->GetConstraints();
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(2));
EXPECT_THAT(constraints.DisjointConjointConstraints().front(), SizeIs(1));
}
TEST_F(SymbolicTileAnalysisTest, BailOutOnUnsupportedConcatenate) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,3]{1,0} parameter(0)
p1 = f32[1,3]{1,0} parameter(1)
ROOT concatenate = f32[2,3] concatenate(p0, p1), dimensions={0}
}
ENTRY main {
p0 = f32[1,3]{1,0} parameter(0)
p1 = f32[1,3]{1,0} parameter(1)
ROOT fusion = f32[2,3] fusion(p0, p1), kind=kLoop, calls=fusion
})"));
EXPECT_FALSE(TryAnalyzeModule(module.get()).has_value());
}
TEST_F(SymbolicTileAnalysisTest, MultiOutputFusionIsNotSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[32] parameter(0)
p1 = f32[32] parameter(1)
add = f32[32] add(p0, p1)
subtract = f32[32] subtract(p0, p1)
ROOT tuple = (f32[32], f32[32]) tuple(add, subtract)
}
ENTRY main {
p0 = f32[32] parameter(0)
p1 = f32[32] parameter(1)
ROOT fusion = (f32[32], f32[32]) fusion(p0, p1), kind=kLoop, calls=fusion
})"));
EXPECT_FALSE(TryAnalyzeModule(module.get()).has_value());
}
TEST_F(SymbolicTileAnalysisTest, ConstraintSatisfactionIsEvaluatedCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,8,6,4,8]{4,3,2,1,0} parameter(0)
ROOT bitcast = f32[48,32]{1,0} bitcast(p0)
}
ENTRY main {
p0 = f32[1,8,6,4,8]{4,3,2,1,0} parameter(0)
ROOT fusion = f32[48,32]{1,0} fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const ConstraintExpression& constraints = analysis->GetConstraints();
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(4));
  for (const ConstraintExpression::ConjointConstraints& conjunction :
       constraints.DisjointConjointConstraints()) {
    EXPECT_THAT(conjunction, SizeIs(2));
  }
std::vector<int64_t> possible_tile_parameters({6, 8});
EXPECT_THAT(analysis->ParametersSatisfyConstraints(possible_tile_parameters),
IsOkAndHolds(true));
std::vector<int64_t> impossible_tile_parameters({6, 7});
EXPECT_THAT(
analysis->ParametersSatisfyConstraints(impossible_tile_parameters),
IsOkAndHolds(false));
EXPECT_THAT(analysis->ParametersSatisfyConstraints({6}),
StatusIs(absl::StatusCode::kInvalidArgument));
TF_EXPECT_OK(
analysis->ParametersSatisfyConstraints(possible_tile_parameters));
EXPECT_THAT(analysis->ComputeTiledHloInstructions(impossible_tile_parameters),
StatusIs(absl::StatusCode::kInvalidArgument));
TF_EXPECT_OK(analysis->ComputeTiledHloInstructions(
impossible_tile_parameters, true));
}
TEST_F(SymbolicTileAnalysisTest, ConstraintsAreAggregatedCorrectly) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,48,4,8]{3,2,1,0} parameter(0)
p1 = f32[1,8,6,32]{3,2,1,0} parameter(1)
bitcast_p0 = f32[48,32]{1,0} bitcast(p0)
bitcast_p1 = f32[48,32]{1,0} bitcast(p1)
ROOT add = f32[48,32]{1,0} add(bitcast_p0, bitcast_p1)
}
ENTRY main {
p0 = f32[1,48,4,8]{3,2,1,0} parameter(0)
p1 = f32[1,8,6,32]{3,2,1,0} parameter(1)
ROOT fusion = f32[48,32]{1,0} fusion(p0, p1), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
const ConstraintExpression& constraints = analysis->GetConstraints();
EXPECT_THAT(constraints.DisjointConjointConstraints(), SizeIs(4));
EXPECT_THAT(constraints.DisjointConjointConstraints().front(), SizeIs(2));
}
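// AlwaysValid accepts every tiling. As the tests below demonstrate, the
// candidate tile sizes for a dimension of size n are the powers of two smaller
// than n plus n itself, and GetGoodTilings takes the Cartesian product of the
// per-dimension candidates.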
bool AlwaysValid(absl::Span<const int64_t>) { return true; }
TEST(GetGoodTilingsTest, ReturnsOneTilingWhenRankIsZero) {
EXPECT_EQ(GetGoodTilings({}, AlwaysValid),
TilingVector{SymbolicTileAnalysis::Tiling{}});
}
TEST(GetGoodTilingsTest, ReturnsPowersOfTwoAndTheDimSizeForRankOne) {
EXPECT_EQ(GetGoodTilings({1}, AlwaysValid), TilingVector{{1}});
EXPECT_EQ(GetGoodTilings({2}, AlwaysValid), TilingVector({{1}, {2}}));
EXPECT_EQ(GetGoodTilings({3}, AlwaysValid), TilingVector({{1}, {2}, {3}}));
EXPECT_EQ(GetGoodTilings({4}, AlwaysValid), TilingVector({{1}, {2}, {4}}));
EXPECT_EQ(GetGoodTilings({5}, AlwaysValid),
TilingVector({{1}, {2}, {4}, {5}}));
EXPECT_EQ(GetGoodTilings({11}, AlwaysValid),
TilingVector({{1}, {2}, {4}, {8}, {11}}));
}
TEST(GetGoodTilingsTest, CreatesCartesianProductForRankTwo) {
EXPECT_EQ(GetGoodTilings({3, 4}, AlwaysValid), TilingVector({{1, 1},
{1, 2},
{1, 4},
{2, 1},
{2, 2},
{2, 4},
{3, 1},
{3, 2},
{3, 4}}));
}
TEST(GetGoodTilingsTest, CreatesCartesianProductForRankThree) {
EXPECT_EQ(GetGoodTilings({3, 4, 2}, AlwaysValid), TilingVector({{1, 1, 1},
{1, 1, 2},
{1, 2, 1},
{1, 2, 2},
{1, 4, 1},
{1, 4, 2},
{2, 1, 1},
{2, 1, 2},
{2, 2, 1},
{2, 2, 2},
{2, 4, 1},
{2, 4, 2},
{3, 1, 1},
{3, 1, 2},
{3, 2, 1},
{3, 2, 2},
{3, 4, 1},
{3, 4, 2}}));
}
TEST(GetGoodTilingsTest, FiltersTheTilingsUsingThePredicate) {
auto all_even = [](absl::Span<const int64_t> tile_sizes) {
return absl::c_all_of(tile_sizes,
[](int64_t tile_size) { return tile_size % 2 == 0; });
};
EXPECT_EQ(GetGoodTilings({3, 4}, all_even), TilingVector({{2, 2}, {2, 4}}));
auto all_equal = [](absl::Span<const int64_t> tile_sizes) {
return absl::c_all_of(tile_sizes, [&](int64_t tile_size) {
return tile_size == tile_sizes.at(0);
});
};
EXPECT_EQ(GetGoodTilings({3, 3, 3}, all_equal),
TilingVector({{1, 1, 1}, {2, 2, 2}, {3, 3, 3}}));
}
TEST_F(SymbolicTileAnalysisTest,
GetGoodTilingsWorksTakingConstraintsIntoAccount) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
fusion {
p0 = f32[1,8,6,1]{3,2,1,0} parameter(0)
ROOT bitcast = f32[48,1]{1,0} bitcast(p0)
}
ENTRY main {
p0 = f32[1,8,6,1]{3,2,1,0} parameter(0)
ROOT fusion = f32[48,1]{1,0} fusion(p0), kind=kLoop, calls=fusion
})"));
std::optional<SymbolicTileAnalysis> opt_analysis =
TryAnalyzeModule(module.get());
ASSERT_TRUE(opt_analysis.has_value());
const SymbolicTileAnalysis& analysis = opt_analysis.value();
TF_ASSERT_OK_AND_ASSIGN(
std::vector<SymbolicTileAnalysis::Tiling> good_tilings,
analysis.GetGoodTilings());
EXPECT_EQ(good_tilings, std::vector<SymbolicTileAnalysis::Tiling>(
{{1, 1}, {2, 1}, {48, 1}}));
}
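// Logs the given tilings at VLOG level 1. Used by the softmax-style tests
// below, which only assert that the set of good tilings is non-empty.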
void LogTilingsIfVlog1(absl::Span<const SymbolicTileAnalysis::Tiling> tilings) {
if (VLOG_IS_ON(1)) {
LOG(INFO) << "Tilings: {";
for (const SymbolicTileAnalysis::Tiling& tiling : tilings) {
LOG(INFO) << "{" << absl::StrJoin(tiling, ",") << "},";
}
LOG(INFO) << "}";
}
}
TEST_F(SymbolicTileAnalysisTest, GetGoodTilingsWorksForSoftmaxExample) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(param_0, param_1)
}
add_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add = f32[] add(param_0, param_1)
}
fused_computation {
param_0 = f32[8192,50304] parameter(0)
bitcast = f32[4,2048,50304] bitcast(param_0)
constant = f32[] constant(-inf)
reduce = f32[8192] reduce(param_0, constant), dimensions={1}, to_apply=max_computation
bitcast.1 = f32[4,2048] bitcast(reduce)
broadcast = f32[4,2048,50304] broadcast(bitcast.1), dimensions={0,1}
subtract = f32[4,2048,50304] subtract(bitcast, broadcast)
exponential = f32[4,2048,50304] exponential(subtract)
constant.1 = f32[] constant(0)
reduce.1 = f32[4,2048] reduce(exponential, constant.1), dimensions={2}, to_apply=add_computation
log = f32[4,2048] log(reduce.1)
broadcast.1 = f32[4,2048,50304] broadcast(log), dimensions={0,1}
ROOT subtract.1 = f32[4,2048,50304] subtract(subtract, broadcast.1)
}
ENTRY entry_computation {
param_0 = f32[8192,50304] parameter(0)
ROOT fusion = f32[4,2048,50304] fusion(param_0), kind=kCustom, calls=fused_computation, backend_config={"fusion_backend_config":{"kind":"__triton"}}
}
)"));
std::optional<SymbolicTileAnalysis> opt_analysis =
TryAnalyzeModule(module.get());
ASSERT_TRUE(opt_analysis.has_value());
const SymbolicTileAnalysis& analysis = opt_analysis.value();
TF_ASSERT_OK_AND_ASSIGN(
std::vector<SymbolicTileAnalysis::Tiling> good_tilings,
analysis.GetGoodTilings());
EXPECT_THAT(good_tilings, Not(IsEmpty()));
LogTilingsIfVlog1(good_tilings);
}
TEST_F(SymbolicTileAnalysisTest,
GetGoodTilingsWorksForSoftmaxAndReduceExample) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule m
max_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT maximum = f32[] maximum(param_0, param_1)
}
add_computation {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
ROOT add = f32[] add(param_0, param_1)
}
fused_computation {
param_0 = f32[8192,50304] parameter(0)
param_1 = s32[4,2048] parameter(1)
broadcast = s32[4,2048,50304] broadcast(param_1), dimensions={0,1}
iota = s32[4,2048,50304] iota(), iota_dimension=2
compare = pred[4,2048,50304] compare(broadcast, iota), direction=EQ
bitcast = f32[4,2048,50304] bitcast(param_0)
constant = f32[] constant(-inf)
reduce = f32[8192] reduce(param_0, constant), dimensions={1}, to_apply=max_computation
bitcast.1 = f32[4,2048] bitcast(reduce)
broadcast.1 = f32[4,2048,50304] broadcast(bitcast.1), dimensions={0,1}
subtract = f32[4,2048,50304] subtract(bitcast, broadcast.1)
exponential = f32[4,2048,50304] exponential(subtract)
constant.1 = f32[] constant(0)
reduce.1 = f32[4,2048] reduce(exponential, constant.1), dimensions={2}, to_apply=add_computation
log = f32[4,2048] log(reduce.1)
broadcast.2 = f32[4,2048,50304] broadcast(log), dimensions={0,1}
subtract.1 = f32[4,2048,50304] subtract(subtract, broadcast.2)
constant.2 = f32[] constant(0)
broadcast.3 = f32[4,2048,50304] broadcast(constant.2), dimensions={}
select = f32[4,2048,50304] select(compare, subtract.1, broadcast.3)
bitcast.2 = f32[4,2048,393,128] bitcast(select)
ROOT reduce.2 = f32[4,2048,393] reduce(bitcast.2, constant.2), dimensions={3}, to_apply=add_computation
}
ENTRY entry_computation {
param_0 = f32[8192,50304] parameter(0)
param_1 = s32[4,2048] parameter(1)
ROOT fusion = f32[4,2048,393] fusion(param_0, param_1), kind=kCustom, calls=fused_computation, backend_config={"fusion_backend_config":{"kind":"__triton_softmax"}}
}
)"));
std::optional<SymbolicTileAnalysis> opt_analysis =
TryAnalyzeModule(module.get());
ASSERT_TRUE(opt_analysis.has_value());
const SymbolicTileAnalysis& analysis = opt_analysis.value();
TF_ASSERT_OK_AND_ASSIGN(
std::vector<SymbolicTileAnalysis::Tiling> good_tilings,
analysis.GetGoodTilings());
EXPECT_THAT(good_tilings, Not(IsEmpty()));
LogTilingsIfVlog1(good_tilings);
}
TEST_F(SymbolicTileAnalysisTest,
FusionWithNumberOfTilesLargerThanInt32MaxIsSupported) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(R"(
HloModule softmax
fused_computation {
param_0 = f16[65538,32768]{1,0} parameter(0)
ROOT log = f16[65538,32768]{1,0} log(param_0)
}
ENTRY main {
param_0 = f16[65538,32768]{1,0} parameter(0)
ROOT fusion = f16[65538,32768]{1,0} fusion(param_0), kind=kLoop, calls=fused_computation
}
)"));
std::optional<SymbolicTileAnalysis> analysis = TryAnalyzeModule(module.get());
ASSERT_TRUE(analysis.has_value());
TF_ASSERT_OK_AND_ASSIGN(
TiledHloComputation tiled_hlo_computation,
analysis->ComputeTiledHloInstructions({1, 1}));
EXPECT_THAT(*tiled_hlo_computation.GetRoot(),
MatchTiledHloInstruction(
{1, 1},
{1, 1},
R"(
(d0) -> (d0 floordiv 32768, d0 mod 32768)
domain:
d0 in [0, 2147549184)
)"));
}
}
}
} | 2,156 |
#ifndef XLA_SERVICE_GPU_MODEL_GPU_PERFORMANCE_MODEL_H_
#define XLA_SERVICE_GPU_MODEL_GPU_PERFORMANCE_MODEL_H_
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
namespace xla {
namespace gpu {
class GpuPerformanceModel : public GpuPerformanceModelBase {
public:
static EstimateRunTimeData EstimateRunTimeForInstruction(
const HloInstruction* instr, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config);
static EstimateRunTimeData EstimateRunTimeForInstructionCached(
const HloInstruction* instr, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config);
static absl::Duration EstimateRunTimeForFusion(
const HloInstruction* producer, const HloInstruction* consumer,
const EstimateRunTimeData& producer_runtime,
const EstimateRunTimeData& consumer_runtime,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config);
static absl::Duration EstimateRunTimeForFusionCached(
const HloInstruction* producer, const HloInstruction* consumer,
const EstimateRunTimeData& producer_runtime,
const EstimateRunTimeData& consumer_runtime,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config);
static absl::Duration EstimateUnfusedExecTime(
const HloInstruction* producer,
const EstimateRunTimeData& producer_runtime,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers);
static absl::Duration EstimateFusedExecTime(
const HloInstruction* producer,
const EstimateRunTimeData& producer_runtime,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers,
bool multi_output);
static RunTimes EstimateRunTimes(
const HloInstruction* producer, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers = {},
bool multi_output = false);
static RunTimes EstimateRunTimesForPriorityFusion(
const HloInstruction* producer, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers = {},
bool multi_output = false);
static void RecordEstimatedRunTime(HloInstruction* instruction,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config);
};
}
}
#endif
#include "xla/service/gpu/model/gpu_performance_model.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <optional>
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "llvm/ADT/STLExtras.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/hlo_fusion_analysis.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/model/coalescing_analysis.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/stream_executor/device_description.h"
#include "xla/util.h"
#include "tsl/platform/status.h"
namespace xla {
namespace gpu {
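// Estimates the run time of a single instruction by combining a compute-time
// estimate (FLOPs over the estimated launch dimensions) with per-operand read
// time (using the coalescing analysis and a DRAM heuristic) and the write time
// for the output bytes.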
EstimateRunTimeData
GpuPerformanceModel::EstimateRunTimeForInstruction(
const HloInstruction* instr, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
VLOG(8) << "EstimateRunTimeForInstruction: " << instr->name();
const se::DeviceDescription* device_info = cost_analysis->device_info_;
int64_t flops = cost_analysis->flop_count(*instr);
int64_t bytes_written = cost_analysis->output_bytes_accessed(*instr);
std::optional<HloFusionAnalysis> local_analysis;
if (!config.fusion_analysis_cache) {
local_analysis = AnalyzeFusion(*instr, *cost_analysis->device_info_);
}
const auto& fusion_analysis = config.fusion_analysis_cache
? config.fusion_analysis_cache->Get(*instr)
: local_analysis.value();
LaunchDimensions launch_dimensions =
EstimateFusionLaunchDimensions(fusion_analysis);
int64_t num_blocks = launch_dimensions.num_blocks();
absl::Duration compute_time =
ComputeTime(*device_info, flops, num_blocks,
launch_dimensions.num_threads_per_block());
CoalescingAnalysis coalescing_analysis(instr, instr->operands(),
fusion_analysis);
absl::Duration read_time;
int64_t bytes_read = 0;
for (const auto [operand_id, operand] : llvm::enumerate(instr->operands())) {
int64_t operand_size = cost_analysis->GetShapeSize(operand->shape());
int64_t n_bytes_total =
GetOperandBytesAccessed(cost_analysis, instr, operand);
int64_t n_bytes_net = std::min(operand_size, n_bytes_total);
bytes_read += n_bytes_total;
bool coalesced = coalescing_analysis.IsReadCoalesced(operand);
VLogOperandRead(operand, n_bytes_total, n_bytes_net, coalesced);
read_time += ReadTimeWithDRAMHeuristic(
*device_info, num_blocks, n_bytes_net, n_bytes_total,
operand->shape().element_type(), coalesced);
}
absl::Duration write_time = WriteTime(*device_info, bytes_written);
absl::Duration exec_time = CombineComputeAndMemoryAccessTime(
compute_time, read_time + write_time, config);
EstimateRunTimeData runtime_data = {flops, bytes_read, bytes_written,
read_time, write_time, compute_time,
exec_time};
VLOG(3) << "Runtime data for HLO: " << instr->name() << "\n"
<< launch_dimensions.ToString() << "\n"
<< runtime_data.ToString();
return runtime_data;
}
EstimateRunTimeData
GpuPerformanceModel::EstimateRunTimeForInstructionCached(
const HloInstruction* instr, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
if (config.gpu_performance_model_cache) {
if (auto cached_result = config.gpu_performance_model_cache->Get(*instr)) {
return *cached_result;
}
}
auto runtime_data =
EstimateRunTimeForInstruction(instr, cost_analysis, config);
if (config.gpu_performance_model_cache) {
config.gpu_performance_model_cache->Set(*instr, runtime_data);
}
return runtime_data;
}
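// Unfused time: one kernel launch per consumer plus one for the producer, the
// producer's own execution time, and, for every consumer, the time to re-read
// the fraction of the producer's output that the consumer actually uses.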
absl::Duration GpuPerformanceModel::EstimateUnfusedExecTime(
const HloInstruction* producer, const EstimateRunTimeData& producer_runtime,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers) {
const se::DeviceDescription* device_info = cost_analysis->device_info_;
absl::Duration time_unfused =
kKernelLaunchOverhead * (fused_consumers.size() + 1) +
producer_runtime.exec_time;
for (const HloInstruction* fused_consumer : fused_consumers) {
VLOG(8) << "Unfused consumer: " << fused_consumer->name();
float utilization_by_this_consumer =
GetOperandUtilization(cost_analysis, fused_consumer, producer);
std::optional<HloFusionAnalysis> local_analysis;
if (!config.fusion_analysis_cache) {
local_analysis = AnalyzeFusion(*fused_consumer, *device_info);
}
const auto& analysis_unfused =
config.fusion_analysis_cache
? config.fusion_analysis_cache->Get(*fused_consumer)
: local_analysis.value();
LaunchDimensions launch_dimensions_unfused =
EstimateFusionLaunchDimensions(analysis_unfused);
int64_t n_bytes_total = std::llround(producer_runtime.bytes_written *
utilization_by_this_consumer);
int64_t n_bytes_net =
std::min(producer_runtime.bytes_written, n_bytes_total);
auto read_time_unfused =
ReadTime(*device_info, launch_dimensions_unfused.num_blocks(),
n_bytes_net, n_bytes_total);
VLOG(10) << " Read time unfused: " << read_time_unfused;
time_unfused += read_time_unfused;
}
return time_unfused;
}
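// Estimates the run time of fusing `producer` into `consumer`: the producer's
// FLOPs are scaled by the consumer's utilization of it, launch dimensions are
// re-derived for the fused computation, and read time is charged for the
// fusion's external operands on top of the consumer's write time.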
absl::Duration GpuPerformanceModel::EstimateRunTimeForFusion(
const HloInstruction* producer, const HloInstruction* consumer,
const EstimateRunTimeData& producer_runtime,
const EstimateRunTimeData& consumer_runtime,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
VLOG(8) << "EstimateRunTimeForFusion, producer: " << producer->name()
<< " consumer: " << consumer->name();
const se::DeviceDescription* device_info = cost_analysis->device_info_;
float utilization_by_this_consumer = 0;
for (int64_t i = 0; i < consumer->operand_count(); ++i) {
if (consumer->operand(i) == producer ||
(consumer->operand(i)->opcode() == HloOpcode::kGetTupleElement &&
consumer->operand(i)->operand(0) == producer)) {
utilization_by_this_consumer +=
cost_analysis->operand_utilization(*consumer, i);
}
}
std::optional<HloFusionAnalysis> local_analysis_fused;
if (!config.fusion_analysis_cache) {
local_analysis_fused =
AnalyzeProducerConsumerFusion(*producer, *consumer, *device_info);
}
const auto& fusion_analysis =
config.fusion_analysis_cache
? config.fusion_analysis_cache->Get(*producer, *consumer)
: local_analysis_fused.value();
LaunchDimensions launch_dimensions =
EstimateFusionLaunchDimensions(fusion_analysis);
int64_t flops = producer_runtime.flops * utilization_by_this_consumer +
consumer_runtime.flops;
absl::Duration compute_time =
ComputeTime(*device_info, flops, launch_dimensions.num_blocks(),
launch_dimensions.num_threads_per_block());
auto fusion_operands = fusion_analysis.fusion().GetParameters();
CoalescingAnalysis coalescing_analysis(producer, consumer, fusion_operands,
fusion_analysis);
absl::Duration read_time;
int64_t bytes_read = 0;
for (const auto* operand : fusion_operands) {
int64_t operand_size = cost_analysis->GetShapeSize(operand->shape());
int64_t n_bytes_total = GetSharedOperandBytesAccessed(
cost_analysis, producer, consumer, operand);
int64_t n_bytes_net = std::min(operand_size, n_bytes_total);
bytes_read += n_bytes_total;
bool coalesced = coalescing_analysis.IsReadCoalesced(operand);
VLogOperandRead(operand, n_bytes_total, n_bytes_net, coalesced);
read_time += ReadTimeWithDRAMHeuristic(
*device_info, launch_dimensions.num_blocks(), n_bytes_net,
n_bytes_total, operand->shape().element_type(), coalesced);
}
auto exec_time = CombineComputeAndMemoryAccessTime(
compute_time, read_time + consumer_runtime.write_time, config);
VLOG(3) << "Runtime data for producer-consumer fusion:\n"
<< " producer: " << producer->name() << "\n"
<< " consumer: " << consumer->name() << "\n"
<< launch_dimensions.ToString() << "\n"
<< EstimateRunTimeData{flops,
bytes_read,
consumer_runtime.bytes_written,
read_time,
consumer_runtime.write_time,
compute_time,
exec_time}
.ToString();
return exec_time;
}
absl::Duration GpuPerformanceModel::EstimateRunTimeForFusionCached(
const HloInstruction* producer, const HloInstruction* consumer,
const EstimateRunTimeData& producer_runtime,
const EstimateRunTimeData& consumer_runtime,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
if (config.gpu_performance_model_cache) {
if (auto fusion_runtime =
config.gpu_performance_model_cache->Get(*producer, *consumer)) {
return *fusion_runtime;
}
}
auto fusion_runtime =
EstimateRunTimeForFusion(producer, consumer, producer_runtime,
consumer_runtime, cost_analysis, config);
if (config.gpu_performance_model_cache) {
config.gpu_performance_model_cache->Set(*producer, *consumer,
fusion_runtime);
}
return fusion_runtime;
}
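// Fused time across all consumers: one kernel launch per consumer; for each
// consumer, the producer's compute scaled by that consumer's utilization plus
// the producer's input access time. With multi-output fusion the producer's
// output still has to be written out.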
absl::Duration GpuPerformanceModel::EstimateFusedExecTime(
const HloInstruction* producer, const EstimateRunTimeData& producer_runtime,
const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers,
bool multi_output) {
const se::DeviceDescription* device_info = cost_analysis->device_info_;
absl::Duration exec_time_fused =
kKernelLaunchOverhead * fused_consumers.size();
for (auto [idx, fused_consumer] : llvm::enumerate(fused_consumers)) {
VLOG(8) << "Fused consumer: " << fused_consumer->name();
float utilization_by_this_consumer = cost_analysis->operand_utilization(
*fused_consumer, fused_consumer->operand_index(producer));
std::optional<HloFusionAnalysis> local_analysis_fused;
if (!config.fusion_analysis_cache) {
local_analysis_fused = AnalyzeProducerConsumerFusion(
*producer, *fused_consumer, *device_info);
}
const auto& analysis_fused =
config.fusion_analysis_cache
? config.fusion_analysis_cache->Get(*producer, *fused_consumer)
: local_analysis_fused.value();
LaunchDimensions launch_dimensions_fused =
EstimateFusionLaunchDimensions(analysis_fused);
absl::Duration compute_time_by_this_consumer = ComputeTime(
*device_info, producer_runtime.flops * utilization_by_this_consumer,
launch_dimensions_fused.num_blocks(),
launch_dimensions_fused.num_threads_per_block());
absl::Duration input_access_time_by_this_consumer = ProducerInputAccessTime(
cost_analysis, *device_info, launch_dimensions_fused.num_blocks(),
producer, analysis_fused, config, fused_consumer);
VLOG(10) << " Compute time by consumer: " << compute_time_by_this_consumer;
VLOG(10) << " Input access time by consumer: "
<< input_access_time_by_this_consumer;
exec_time_fused += CombineComputeAndMemoryAccessTime(
compute_time_by_this_consumer, input_access_time_by_this_consumer,
config);
}
if (multi_output) {
exec_time_fused += producer_runtime.write_time;
}
return exec_time_fused;
}
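// Priority-fusion variant: the unfused time reuses cached per-instruction
// estimates for the producer and all consumers, and the fused time reuses
// cached producer/consumer fusion estimates.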
GpuPerformanceModel::RunTimes
GpuPerformanceModel::EstimateRunTimesForPriorityFusion(
const HloInstruction* producer, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers,
bool multi_output) {
EstimateRunTimeData producer_runtime =
EstimateRunTimeForInstructionCached(producer, cost_analysis, config);
absl::Duration time_unfused =
kKernelLaunchOverhead * (fused_consumers.size() + 1) +
producer_runtime.exec_time;
absl::Duration time_fused = kKernelLaunchOverhead * fused_consumers.size();
for (auto fused_consumer : fused_consumers) {
VLOG(8) << "Fused consumer: " << fused_consumer->name();
EstimateRunTimeData consumer_runtime = EstimateRunTimeForInstructionCached(
fused_consumer, cost_analysis, config);
time_unfused += consumer_runtime.exec_time;
time_fused += EstimateRunTimeForFusionCached(
producer, fused_consumer, producer_runtime, consumer_runtime,
cost_analysis, config);
}
if (multi_output) {
time_fused += producer_runtime.write_time;
}
if (VLOG_IS_ON(8)) {
LOG(INFO) << "Consumer count: " << fused_consumers.size();
LOG(INFO) << "Unfused time: " << time_unfused;
LOG(INFO) << "Fused time: " << time_fused;
}
return {time_unfused, time_fused};
}
GpuPerformanceModel::RunTimes GpuPerformanceModel::EstimateRunTimes(
const HloInstruction* producer, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config,
absl::Span<const HloInstruction* const> fused_consumers,
bool multi_output) {
VLOG(8) << "Producer: " << producer->name();
if (producer->opcode() == HloOpcode::kFusion) {
VLOG(10) << producer->fused_instructions_computation()->ToString();
}
EstimateRunTimeData producer_runtime =
EstimateRunTimeForInstructionCached(producer, cost_analysis, config);
absl::Duration time_unfused = EstimateUnfusedExecTime(
producer, producer_runtime, cost_analysis, config, fused_consumers);
absl::Duration time_fused =
EstimateFusedExecTime(producer, producer_runtime, cost_analysis, config,
fused_consumers, multi_output);
if (VLOG_IS_ON(8)) {
LOG(INFO) << "Consumer count: " << fused_consumers.size();
LOG(INFO) << "Unfused time: " << time_unfused;
LOG(INFO) << "Fused time: " << time_fused;
}
return {time_unfused, time_fused};
}
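// Writes the estimate into the fusion's GpuBackendConfig as reification_cost:
// end-to-end cycles plus compute, memory-access and total execution times in
// microseconds.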
void GpuPerformanceModel::RecordEstimatedRunTime(
HloInstruction* instruction, const GpuHloCostAnalysis* cost_analysis,
const GpuPerformanceModelOptions& config) {
DCHECK(Cast<const HloFusionInstruction>(instruction)) << "expected fusion";
DCHECK(cost_analysis != nullptr) << "expected cost analysis";
EstimateRunTimeData data =
EstimateRunTimeForInstructionCached(instruction, cost_analysis, config);
double cycles = absl::ToDoubleNanoseconds(data.exec_time) *
cost_analysis->device_info_->clock_rate_ghz();
auto gpu_config = instruction->backend_config<GpuBackendConfig>();
TF_CHECK_OK(gpu_config.status()) << instruction->ToString();
auto reification_cost =
gpu_config->mutable_fusion_backend_config()->mutable_reification_cost();
reification_cost->set_end_to_end_cycles(cycles);
reification_cost->set_compute_time_us(
absl::ToDoubleMicroseconds(data.compute_time));
reification_cost->set_memory_access_time_us(
absl::ToDoubleMicroseconds(data.read_time + data.write_time));
reification_cost->set_exec_time_us(
absl::ToDoubleMicroseconds(data.exec_time));
TF_CHECK_OK(instruction->set_backend_config(*gpu_config));
VLOG(8) << "RecordEstimatedRunTime: " << instruction->ToString();
}
}
} | #include "xla/service/gpu/model/gpu_performance_model.h"
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "absl/time/time.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/model/fusion_analysis_cache.h"
#include "xla/service/gpu/model/gpu_hlo_cost_analysis.h"
#include "xla/service/gpu/model/gpu_indexing_performance_model.h"
#include "xla/service/gpu/model/gpu_performance_model_base.h"
#include "xla/service/hlo_module_config.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/device_description.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
namespace {
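// The fixture wires a GpuHloCostAnalysis and an indexing-based cost model
// against the RTX A6000 device description. The Estimate* helpers wrap the
// static GpuPerformanceModel entry points with the default and priority-fusion
// option sets.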
class GpuPerformanceModelTest : public HloTestBase {
GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const {
return [&](const Shape& shape) {
constexpr int64_t kPointerSize = 8;
return ShapeUtil::ByteSizeOf(shape, kPointerSize);
};
}
public:
GpuPerformanceModel::RunTimes EstimateRunTimesDefault(
const HloInstruction* producer,
std::vector<HloInstruction*> fused_consumers = {}) {
return GpuPerformanceModel::EstimateRunTimes(
producer, &analysis_, GpuPerformanceModelOptions::Default(),
fused_consumers);
}
GpuPerformanceModel::RunTimes EstimateRunTimesForPriorityFusion(
const HloInstruction* producer,
std::vector<HloInstruction*> fused_consumers = {}) {
return GpuPerformanceModel::EstimateRunTimesForPriorityFusion(
producer, &analysis_, GpuPerformanceModelOptions::PriorityFusion(),
fused_consumers);
}
mlir::MLIRContext mlir_context_;
GpuHloCostAnalysis::Options options_{ShapeSizeBytesFunction(),
{},
true};
se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()};
HloFusionAnalysisCache fusion_analysis_cache_{device_info_};
GpuHloCostAnalysis analysis_{options_, &device_info_};
GpuPerformanceModelWithIndexingAnalysis indexing_cost_model_{
&device_info_, &fusion_analysis_cache_, ShapeSizeBytesFunction(),
&mlir_context_};
GpuPerformanceModelTest() : HloTestBase() {}
};
TEST_F(GpuPerformanceModelTest, LargeWrite) {
absl::string_view hlo_string = R"(
HloModule m
f {
c0 = f32[] constant(0)
ROOT b0 = f32[10000000] broadcast(c0)
}
ENTRY e {
ROOT r.1 = f32[10000000] fusion(), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 53, 10);
auto prio_t = EstimateRunTimesForPriorityFusion(root);
EXPECT_NEAR(absl::ToInt64Microseconds(prio_t.time_unfused), 53, 10);
auto indexing_t = indexing_cost_model_.EstimateRunTimes(root);
EXPECT_NEAR(absl::ToInt64Microseconds(indexing_t.time_unfused), 53, 10);
}
TEST_F(GpuPerformanceModelTest, SmallReadWrite) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = f32[1000] parameter(0)
p1 = f32[1000] parameter(1)
ROOT b0 = f32[1000] add(p0, p1)
}
ENTRY e {
p0 = f32[1000] parameter(0)
p1 = f32[1000] parameter(1)
ROOT r.1 = f32[1000] fusion(p0, p1), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 1, 1);
GpuPerformanceModel::RecordEstimatedRunTime(
root, &analysis_, GpuPerformanceModelOptions::Default());
auto reification_cost = root->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.reification_cost();
EXPECT_NEAR(reification_cost.end_to_end_cycles(), 257.7, 0.1);
EXPECT_NEAR(reification_cost.exec_time_us(), 0, 1);
auto indexing_t = indexing_cost_model_.EstimateRunTimes(root);
EXPECT_NEAR(absl::ToInt64Microseconds(indexing_t.time_unfused), 1, 1);
}
TEST_F(GpuPerformanceModelTest, LargeReadWrite) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = f32[10000000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT a0 = f32[10000000] add(p0, p1)
}
ENTRY e {
p0 = f32[10000000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT r.1 = f32[10000000] fusion(p0, p1), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 175, 30);
GpuPerformanceModel::RecordEstimatedRunTime(
root, &analysis_, GpuPerformanceModelOptions::Default());
auto reification_cost = root->backend_config<GpuBackendConfig>()
->fusion_backend_config()
.reification_cost();
EXPECT_NEAR(reification_cost.end_to_end_cycles(), 220284, 100);
EXPECT_NEAR(reification_cost.exec_time_us(), 156, 10);
EXPECT_NEAR(reification_cost.compute_time_us(), 1, 1);
EXPECT_NEAR(reification_cost.memory_access_time_us(), 156, 10);
}
TEST_F(GpuPerformanceModelTest, L1CacheEffect) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = f32[10000] parameter(0)
bc0 = f32[10000,1000] broadcast(p0), dimensions={0}
b0 = f32[10000000] bitcast(bc0)
p1 = f32[10000000] parameter(1)
ROOT a0 = f32[10000000] add(b0, p1)
}
ENTRY e {
p0 = f32[10000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT r.1 = f32[10000000] fusion(p0, p1), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 118, 12);
}
TEST_F(GpuPerformanceModelTest, L2CacheEffect) {
absl::string_view hlo_string = R"(
HloModule m
f {
p0 = f32[1000000] parameter(0)
bc0 = f32[1000000,10] broadcast(p0), dimensions={0}
b0 = f32[10000000] bitcast(bc0)
p1 = f32[10000000] parameter(1)
ROOT a0 = f32[10000000] add(b0, p1)
}
ENTRY e {
p0 = f32[1000000] parameter(0)
p1 = f32[10000000] parameter(1)
ROOT r.1 = f32[10000000] fusion(p0, p1), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(root->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 123, 12);
}
TEST_F(GpuPerformanceModelTest, UnusedParameter) {
Shape shape = ShapeUtil::MakeShape(F32, {100000});
auto module = std::make_unique<HloModule>("m", HloModuleConfig{});
HloComputation::Builder b("b");
auto p0 = b.AddInstruction(HloInstruction::CreateParameter(0, shape, "p0"));
auto p1 = b.AddInstruction(HloInstruction::CreateParameter(1, shape, "p1"));
HloComputation::Builder sub_builder("subcomp");
HloInstruction* p0f = sub_builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "p0f"));
HloInstruction* p1f = sub_builder.AddInstruction(
HloInstruction::CreateParameter(1, shape, "p1f"));
ASSERT_NE(p1f, nullptr);
sub_builder.AddInstruction(
HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p0f));
HloComputation* subcomp = module->AddEmbeddedComputation(sub_builder.Build());
auto fusion = HloInstruction::CreateFusion(
shape, HloInstruction::FusionKind::kLoop, {p0, p1}, subcomp);
b.AddInstruction(std::move(fusion));
module->AddEntryComputation(b.Build());
HloInstruction* root = module->entry_computation()->root_instruction();
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto t = EstimateRunTimesDefault(root);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 1, 1);
}
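// The two modules below differ only in the order in which the 32- and
// 128-sized dimensions are reduced; their fused run-time estimates are
// expected to agree within 2 microseconds.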
TEST_F(GpuPerformanceModelTest, ComputeBoundReducesWithSameLaunchDimensions) {
absl::string_view small_large_reduce_hlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
log0 = f32[] log(p0)
log1 = f32[] log(log0)
log2 = f32[] log(log1)
log3 = f32[] log(log2)
log4 = f32[] log(log3)
ROOT max = f32[] maximum(log4, p1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[150,32,128] parameter(0)
reduce.1 = f32[150,32] reduce(p0, c), dimensions={2}, to_apply=max
ROOT reduce.2 = f32[150] reduce(reduce.1, c), dimensions={1}, to_apply=max
}
)";
absl::string_view large_small_reduce_hlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
log0 = f32[] log(p0)
log1 = f32[] log(log0)
log2 = f32[] log(log1)
log3 = f32[] log(log2)
log4 = f32[] log(log3)
ROOT max = f32[] maximum(log4, p1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[150,128,32] parameter(0)
reduce.1 = f32[150,128] reduce(p0, c), dimensions={2}, to_apply=max
ROOT reduce.2 = f32[150] reduce(reduce.1, c), dimensions={1}, to_apply=max
}
)";
auto run = [&](absl::string_view hlo_text)
-> absl::StatusOr<GpuPerformanceModel::RunTimes> {
TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_text));
GpuHloCostAnalysis analysis(options_, &device_info_);
TF_RETURN_IF_ERROR(module->entry_computation()->Accept(&analysis));
auto* producer =
module->entry_computation()->GetInstructionWithName("reduce.1");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("reduce.2")};
return EstimateRunTimesDefault(producer, consumers);
};
  TF_ASSERT_OK_AND_ASSIGN(auto small_large_reduce_runtime,
                          run(small_large_reduce_hlo));
  TF_ASSERT_OK_AND_ASSIGN(auto large_small_reduce_runtime,
                          run(large_small_reduce_hlo));
EXPECT_NEAR(absl::ToInt64Microseconds(large_small_reduce_runtime.time_fused),
absl::ToInt64Microseconds(small_large_reduce_runtime.time_fused),
2);
}
TEST_F(GpuPerformanceModelTest, FusingTransposeIntoReduceIsSlow) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[1500,32,128] parameter(0)
transpose.1 = f32[1500,128,32] transpose(p0), dimensions={0,2,1}
ROOT reduce.1 = f32[1500,32] reduce(transpose.1, c), dimensions={1}, to_apply=max
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer =
module->entry_computation()->GetInstructionWithName("transpose.1");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("reduce.1")};
auto t = EstimateRunTimesForPriorityFusion(producer, consumers);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 105, 10);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_fused), 514, 10);
}
TEST_F(GpuPerformanceModelTest,
FusingTransposeMultiOutputFusionIntoReduceIsSlow) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
transpose_fusion {
param0 = f32[1500,32,128] parameter(0)
transpose.1 = f32[1500,128,32] transpose(param0), dimensions={0,2,1}
ROOT res = (f32[1500,128,32]) tuple(transpose.1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[1500,32,128] parameter(0)
fusion = (f32[1500,128,32]) fusion(p0), kind=kInput, calls=transpose_fusion
gte = f32[1500,128,32] get-tuple-element(fusion), index=0
ROOT reduce.1 = f32[1500,32] reduce(gte, c), dimensions={1}, to_apply=max
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer =
module->entry_computation()->GetInstructionWithName("fusion");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("reduce.1")};
auto t = EstimateRunTimesForPriorityFusion(producer, consumers);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_unfused), 105, 10);
EXPECT_NEAR(absl::ToInt64Microseconds(t.time_fused), 514, 10);
}
TEST_F(GpuPerformanceModelTest, FusingNonMinorTransposeIntoReduceIsFast) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
ENTRY fusion {
c = f32[] constant(-inf)
p0 = f32[1500,32,128]{1,2,0} parameter(0)
transpose.1 = f32[1500,128,32]{2,0,1} transpose(p0), dimensions={0,2,1}
ROOT reduce.1 = f32[1500,32] reduce(transpose.1, c), dimensions={1}, to_apply=max
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer =
module->entry_computation()->GetInstructionWithName("transpose.1");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("reduce.1")};
auto t = EstimateRunTimesDefault(producer, consumers);
EXPECT_LT(t.time_fused, t.time_unfused);
auto prio_t = EstimateRunTimesForPriorityFusion(producer, consumers);
EXPECT_LT(prio_t.time_fused, prio_t.time_unfused);
}
TEST_F(GpuPerformanceModelTest, DusScalesWithUpdates) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
fusion.1 {
p0 = f32[1073741824] parameter(0)
p1 = f32[1024,1048576] parameter(1)
p2 = s32[] parameter(2)
c0 = f32[] constant(0)
r = f32[1024] reduce(p1, c0), dimensions={1}, to_apply=max
ROOT dus.1 = f32[1073741824] dynamic-update-slice(p0, r, p2)
}
fusion.2 {
p0 = f32[1024] parameter(0)
p1 = f32[1024,1048576] parameter(1)
p2 = s32[] parameter(2)
c0 = f32[] constant(0)
r = f32[1024] reduce(p1, c0), dimensions={1}, to_apply=max
ROOT dus.1 = f32[1024] dynamic-update-slice(p0, r, p2)
}
ENTRY main {
p0 = f32[1073741824] parameter(0)
p1 = f32[1024,1048576] parameter(1)
p2 = s32[] parameter(2)
p3 = f32[1024] parameter(3)
dus1 = f32[1073741824] fusion(p0, p1, p2), kind=kInput, calls=fusion.1
dus2 = f32[1024] fusion(p3, p1, p2), kind=kInput, calls=fusion.2
ROOT tuple = (f32[1073741824], f32[1024]) tuple(dus1, dus2)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* operand0 = module->entry_computation()->root_instruction()->operand(0);
auto* operand1 = module->entry_computation()->root_instruction()->operand(1);
auto t1 = EstimateRunTimesDefault(operand0);
auto t2 = EstimateRunTimesDefault(operand1);
EXPECT_NEAR(absl::ToInt64Microseconds(t1.time_unfused),
absl::ToInt64Microseconds(t2.time_unfused), 10);
auto prio_t1 = EstimateRunTimesForPriorityFusion(operand0);
auto prio_t2 = EstimateRunTimesForPriorityFusion(operand1);
EXPECT_NEAR(absl::ToInt64Microseconds(prio_t1.time_unfused),
absl::ToInt64Microseconds(prio_t2.time_unfused), 10);
}
TEST_F(GpuPerformanceModelTest, EqualCostBeforeAndAfterFusion) {
absl::string_view hlo_string = R"(
HloModule m
f1 {
p0 = f32[4194304] parameter(0)
p1 = f32[4194304] parameter(1)
ROOT tmp_3 = f32[4194304] multiply(f32[4194304] p0, f32[4194304] p1)
}
e1 {
p0 = f32[4194304] parameter(0)
p1 = f32[4194304] parameter(1)
f.1 = f32[4194304] fusion(f32[4194304] p0, f32[4194304] p1), kind=kLoop, calls=f1
ROOT r.1 = f32[4194304] tanh(f32[4194304] f.1)
}
f2 {
p0 = f32[4194304] parameter(0)
p1 = f32[4194304] parameter(1)
mul = f32[4194304] multiply(f32[4194304] p0, f32[4194304] p1)
ROOT res = f32[4194304] tanh(f32[4194304] mul)
}
ENTRY e2 {
p0 = f32[4194304] parameter(0)
p1 = f32[4194304] parameter(1)
ROOT f.2 = f32[4194304] fusion(f32[4194304] p0, f32[4194304] p1), kind=kLoop, calls=f2
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* computation_without_fusion =
module->GetComputationWithName("e1");
ASSERT_IS_OK(computation_without_fusion->Accept(&analysis_));
HloInstruction* consumer = computation_without_fusion->root_instruction();
const HloInstruction* producer = consumer->operand(0);
auto t1 = EstimateRunTimesForPriorityFusion(producer, {consumer});
HloComputation* computation_with_fusion =
module->GetComputationWithName("e2");
ASSERT_IS_OK(computation_with_fusion->Accept(&analysis_));
HloInstruction* root_with_fusion =
computation_with_fusion->root_instruction();
auto t2 = EstimateRunTimesForPriorityFusion(root_with_fusion);
EXPECT_EQ(t1.time_fused, t2.time_unfused);
}
TEST_F(GpuPerformanceModelTest, DoNotFuseDivideIntoSmallReduce) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
ENTRY fusion {
c = f32[] constant(0)
p0 = f32[3072] parameter(0)
p1 = f32[] parameter(1)
reduce = f32[] reduce(p0, c), dimensions={0}, to_apply=add
ROOT divide = f32[] divide(reduce, p1)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* producer =
module->entry_computation()->GetInstructionWithName("reduce");
std::vector<HloInstruction*> consumers{
module->entry_computation()->GetInstructionWithName("divide")};
auto t = EstimateRunTimesForPriorityFusion(producer, consumers);
EXPECT_LT(t.time_unfused, t.time_fused);
}
TEST_F(GpuPerformanceModelTest, PreferFusingExpensiveInstructionsIntoProducer) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation.0 {
p0 = f32[4,8,8] parameter(0)
bc = f32[1,4,1424,8,8] broadcast(p0), dimensions={1,3,4}
p1 = f32[1,4,1424,8,8] parameter(1)
ROOT sub = f32[1,4,1424,8,8] subtract(bc, p1)
}
fused_computation.1 {
p0 = f32[1,4,1424,8,8] parameter(0)
bc = f32[4,1424,8,8] bitcast(p0)
c0 = f32[] constant(0)
ROOT reduce = f32[4,8,8] reduce(bc, c0), to_apply=add, dimensions={1}
}
ENTRY fusion {
p0 = f32[4,8,8] parameter(0)
p1 = f32[1,4,1424,8,8] parameter(1)
fusion.0 = f32[1,4,1424,8,8] fusion(p0, p1), kind=kLoop, calls=fused_computation.0
exp = f32[1,4,1424,8,8] exponential(fusion.0)
ROOT fusion.1 = f32[4,8,8] fusion(exp), kind=kInput, calls=fused_computation.1
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* fusion_0 =
module->entry_computation()->GetInstructionWithName("fusion.0");
auto* exp = module->entry_computation()->GetInstructionWithName("exp");
auto exp_consumer_runtimes =
EstimateRunTimesForPriorityFusion(fusion_0, {exp});
auto exp_producer_runtimes =
EstimateRunTimesForPriorityFusion(exp, exp->users());
auto exp_consumer_priority =
exp_consumer_runtimes.time_unfused - exp_consumer_runtimes.time_fused;
auto exp_producer_priority =
exp_producer_runtimes.time_unfused - exp_producer_runtimes.time_fused;
EXPECT_LT(exp_producer_priority, exp_consumer_priority);
}
TEST_F(GpuPerformanceModelTest, DontFuseExpensiveElementwiseIntoSmallReduce) {
constexpr absl::string_view kHlo = R"(
HloModule testmodule
add {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT add = f32[] add(p0, p1)
}
fused_computation.0 {
p0 = f32[4,28672,32] parameter(0)
tanh = f32[4,28672,32] tanh(p0)
c1 = f32[] constant(72)
broadcast = f32[4,28672,32] broadcast(c1), dimensions={}
ROOT mul = f32[4,28672,32] multiply(tanh, broadcast)
}
ENTRY fusion {
p0 = f32[4,28672,32] parameter(0)
fusion = f32[4,28672,32] fusion(p0), kind=kLoop, calls=fused_computation.0
c0 = f32[] constant(0)
ROOT reduce = f32[4,32] reduce(fusion, c0), to_apply=add, dimensions={1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo));
ASSERT_IS_OK(module->entry_computation()->Accept(&analysis_));
auto* fusion = module->entry_computation()->GetInstructionWithName("fusion");
auto* reduce = module->entry_computation()->GetInstructionWithName("reduce");
auto t = EstimateRunTimesForPriorityFusion(fusion, {reduce});
EXPECT_LT(t.time_unfused, t.time_fused);
}
}
}
} | 2,157 |
#ifndef XLA_SERVICE_GPU_MODEL_INDEXING_ANALYSIS_H_
#define XLA_SERVICE_GPU_MODEL_INDEXING_ANALYSIS_H_
#include <cstdint>
#include <functional>
#include <ostream>
#include <string>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/types/span.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusions/tiling_util.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/shape.h"
namespace xla {
namespace gpu {
using IndexingMapSet = absl::flat_hash_set<IndexingMap>;
struct HloInstructionIndexing {
std::string ToString(
const AffineMapPrinter& printer = AffineMapPrinter()) const;
void Print(std::ostream& out, const AffineMapPrinter& printer) const;
bool Simplify();
static HloInstructionIndexing FromIndexingMaps(
absl::Span<const IndexingMap> indexing_maps);
std::vector<IndexingMapSet> indexing_maps;
};
std::ostream& operator<<(std::ostream& out,
const HloInstructionIndexing& instr_indexing);
HloInstructionIndexing ComputeOutputToInputIndexing(const HloInstruction* instr,
int output_id,
mlir::MLIRContext* ctx);
HloInstructionIndexing ComputeInputToOutputIndexing(const HloInstruction* instr,
int input_id,
mlir::MLIRContext* ctx);
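// Illustrative usage sketch (not from the original header; variable names are
// assumptions): computing the output-to-input indexing of one instruction.
// The broadcast example mirrors the unit tests.
//
//   mlir::MLIRContext ctx;
//   // For `bc0 = f32[10, 20, 30] broadcast(f32[20] p0), dimensions={1}`:
//   HloInstructionIndexing indexing =
//       ComputeOutputToInputIndexing(bc0, /*output_id=*/0, &ctx);
//   // indexing.indexing_maps[0] then holds the map
//   //   (d0, d1, d2) -> (d1)
//   //   domain: d0 in [0, 10), d1 in [0, 20), d2 in [0, 30)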
IndexingMap ComputeEpilogueInputToOutputIndexing(
HloInstructionAdaptor epilogue_parent, HloInstructionAdaptor epilogue_root,
mlir::MLIRContext* mlir_context);
using GroupedByOpIndexingMap =
absl::flat_hash_map<const HloInstruction*, IndexingMapSet>;
GroupedByOpIndexingMap ComputeGroupedOutputToInputIndexing(
const HloFusionAdaptor& fusion_adaptor, HloInstructionAdaptor target_instr,
mlir::MLIRContext* ctx);
absl::flat_hash_map<const HloInstruction*, IndexingMapSet>
GroupIndexingMapsByProducers(const HloInstructionIndexing& indexing,
const HloInstruction* instr);
bool FuseProducerConsumerOutputToInputIndexing(
const HloInstruction* producer_instr,
absl::flat_hash_map<const HloInstruction*, IndexingMapSet>*
consumer_indexing,
mlir::MLIRContext* mlir_context);
IndexingMap GetBitcastMap(const Shape& input_shape, const Shape& output_shape,
mlir::MLIRContext* mlir_context);
IndexingMap GetBitcastMap(absl::Span<const int64_t> input_shape,
const Shape& output_shape,
mlir::MLIRContext* mlir_context);
IndexingMap GetIndexingMapFromPhysicalLayoutToLogical(
const Shape& shape, mlir::MLIRContext* mlir_context);
IndexingMap GetIndexingMapFromLogicalToPhysicalLayout(
const Shape& shape, mlir::MLIRContext* mlir_context);
mlir::AffineMap GetBlockOffsetsForTiling(
absl::Span<const int64_t> num_blocks,
absl::Span<const int64_t> tile_sizes_per_block, int64_t rank,
mlir::MLIRContext* mlir_context);
mlir::AffineMap GetBlockOffsetsForTiling(const Tiling& tiling,
mlir::MLIRContext* mlir_context);
mlir::AffineMap GetThreadOffsetsForTiling(
absl::Span<const int64_t> num_threads,
absl::Span<const int64_t> tile_sizes_per_thread, int64_t rank,
mlir::MLIRContext* mlir_context);
mlir::AffineMap GetThreadOffsetsForTiling(const Tiling& tiling,
mlir::MLIRContext* mlir_context);
IndexingMap GetIndexingMapForTiling(const Tiling& tiling,
mlir::MLIRContext* mlir_context);
IndexingMap GetIndexingMapForTiling(mlir::AffineMap block_offsets,
mlir::AffineMap thread_offsets,
int64_t threads_per_block,
int64_t num_blocks,
absl::Span<const int64_t> thread_tile_sizes,
absl::Span<const int64_t> tiled_shape);
const Shape& GetOutputShape(const HloInstruction* instr, int64_t output_id);
mlir::AffineExpr LinearizeShape(
absl::Span<const int64_t> dims,
absl::Span<const mlir::AffineExpr> dimension_exprs,
mlir::MLIRContext* mlir_context);
std::vector<mlir::AffineExpr> DelinearizeIndex(absl::Span<const int64_t> dims,
mlir::AffineExpr linear_index,
mlir::MLIRContext* mlir_context);
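// Worked example (illustrative, assuming the row-major linearization used by
// the bitcast tests): for dims = {4, 8, 4} and dimension expressions
// (d0, d1, d2), LinearizeShape yields the expression
//   d0 * 32 + d1 * 4 + d2,
// and DelinearizeIndex applied to a linear index l over the same dims yields
// expressions equivalent to (l floordiv 32, (l mod 32) floordiv 4, l mod 4).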
IndexingMap CreateIdentityMap(const Shape& shape,
mlir::MLIRContext* mlir_context);
llvm::SmallVector<mlir::AffineExpr, 4> DelinearizeInBoundsIndex(
mlir::AffineExpr linear, absl::Span<const int64_t> sizes);
}
}
#endif
#include "xla/service/gpu/model/indexing_analysis.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iterator>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/Support/LLVM.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout.h"
#include "xla/permutation_util.h"
#include "xla/service/gather_simplifier.h"
#include "xla/service/gpu/fusions/tiling_util.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/model/affine_map_printer.h"
#include "xla/service/gpu/model/indexing_map.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace gpu {
namespace {
using llvm::SmallVector;
using mlir::AffineExpr;
using mlir::AffineMap;
using mlir::getAffineConstantExpr;
using mlir::getAffineDimExpr;
using mlir::getAffineSymbolExpr;
using mlir::MLIRContext;
HloInstructionIndexing CreateUnknownIndexing(int64_t count = 1) {
HloInstructionIndexing indexing;
indexing.indexing_maps = std::vector<absl::flat_hash_set<IndexingMap>>(
count, {IndexingMap::GetUndefined()});
return indexing;
}
HloInstructionIndexing ComputeOutputToInputCwiseOpIndexing(
const HloInstruction* instr, MLIRContext* mlir_context) {
IndexingMap identity_map = CreateIdentityMap(instr->shape(), mlir_context);
HloInstructionIndexing instr_indexing;
instr_indexing.indexing_maps.resize(instr->operand_count());
int64_t operand_count = instr->operand_count();
for (int64_t operand_id = 0; operand_id < operand_count; ++operand_id) {
instr_indexing.indexing_maps[operand_id].insert(identity_map);
}
return instr_indexing;
}
HloInstructionIndexing ComputeInputToOutputCwiseOpIndexing(
const HloInstruction* instr, MLIRContext* mlir_context) {
IndexingMap identity_map = CreateIdentityMap(instr->shape(), mlir_context);
return HloInstructionIndexing::FromIndexingMaps({identity_map});
}
HloInstructionIndexing ComputeOutputToInputBroadcastOpIndexing(
const HloBroadcastInstruction* bcast, MLIRContext* mlir_context) {
auto output_dims = bcast->shape().dimensions();
std::vector<AffineExpr> exprs;
exprs.reserve(bcast->dimensions().size());
for (int64_t bcast_dim : bcast->dimensions()) {
exprs.push_back(getAffineDimExpr(bcast_dim, mlir_context));
}
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_dims.size(), 0, exprs,
mlir_context),
output_dims, {});
return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
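// Example (mirrors the BroadcastOp unit test): for
//   bc0 = f32[10, 20, 30] broadcast(f32[20] p0), dimensions={1}
// the output-to-input map produced above is
//   (d0, d1, d2) -> (d1)
//   domain: d0 in [0, 10), d1 in [0, 20), d2 in [0, 30)
// i.e. only the broadcasted-from output dimension addresses the input.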
HloInstructionIndexing ComputeInputToOutputBroadcastOpIndexing(
const HloBroadcastInstruction* bcast, MLIRContext* mlir_context) {
absl::Span<const int64_t> bcast_dims = bcast->dimensions();
const Shape& input_shape = bcast->operand(0)->shape();
const Shape& output_shape = bcast->shape();
std::vector<int64_t> added_dims_sizes;
std::vector<AffineExpr> exprs;
exprs.reserve(output_shape.rank());
for (auto [output_dim_id, output_dim] :
llvm::enumerate(output_shape.dimensions())) {
auto bcast_dim =
std::find(bcast_dims.begin(), bcast_dims.end(), output_dim_id);
if (bcast_dim == bcast_dims.end()) {
exprs.push_back(
getAffineSymbolExpr(added_dims_sizes.size(), mlir_context));
added_dims_sizes.push_back(output_dim);
continue;
}
exprs.push_back(getAffineDimExpr(
std::distance(bcast_dims.begin(), bcast_dim), mlir_context));
}
IndexingMap indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(input_shape.rank(), added_dims_sizes.size(), exprs,
mlir_context),
input_shape.dimensions(), added_dims_sizes);
return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
HloInstructionIndexing ComputeOutputToInputConcatenateOpIndexing(
const HloConcatenateInstruction* concat, MLIRContext* mlir_context) {
const auto& operand_0_dims = concat->operand(0)->shape().dimensions();
mlir::MutableAffineMap affine_map =
AffineMap::getMultiDimIdentityMap(operand_0_dims.size(), mlir_context);
std::vector<DimVar> dim_vars = DimVarsFromTensorSizes(operand_0_dims);
HloInstructionIndexing concat_indexing;
concat_indexing.indexing_maps.resize(concat->operand_count());
int64_t concat_dim = concat->concatenate_dimension();
AffineExpr concat_dim_expr = getAffineDimExpr(concat_dim, mlir_context);
int64_t offset = 0;
for (const auto [operand_id, operand] : llvm::enumerate(concat->operands())) {
affine_map.setResult(concat_dim, concat_dim_expr - offset);
int64_t operand_concat_dim = operand->shape().dimensions()[concat_dim];
dim_vars[concat_dim] = DimVar{{offset, offset + operand_concat_dim - 1}};
concat_indexing.indexing_maps[operand_id].insert(
IndexingMap(affine_map.getAffineMap(), dim_vars,
{}, {}));
offset += operand_concat_dim;
}
return concat_indexing;
}
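// Example (mirrors the ConcatenateOp unit test): concatenating
//   f32[2,5,7], f32[2,11,7], f32[2,17,7] along dimension 1
// produces, for the second operand, the map
//   (d0, d1, d2) -> (d0, d1 - 5, d2) with d1 in [5, 16)
// because the running `offset` for that operand is 5 and its extent along the
// concatenation dimension is 11.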
HloInstructionIndexing ComputeInputToOutputConcatenateOpIndexing(
const HloConcatenateInstruction* concat, int input_id,
MLIRContext* mlir_context) {
int64_t concat_dim = concat->concatenate_dimension();
int64_t offset = 0;
for (int64_t operand_id = 0; operand_id < input_id; ++operand_id) {
offset += concat->operand(operand_id)->shape().dimensions()[concat_dim];
}
const auto& operand_dims = concat->operand(input_id)->shape().dimensions();
mlir::MutableAffineMap affine_map =
AffineMap::getMultiDimIdentityMap(operand_dims.size(), mlir_context);
affine_map.setResult(concat_dim,
getAffineDimExpr(concat_dim, mlir_context) + offset);
IndexingMap indexing_map =
IndexingMap::FromTensorSizes(affine_map.getAffineMap(), operand_dims, {});
return HloInstructionIndexing::FromIndexingMaps({indexing_map});
}
HloInstructionIndexing ComputeOutputToInputFusionOpIndexing(
const HloFusionInstruction* fusion, int output_id,
MLIRContext* mlir_context) {
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(fusion);
auto grouped_indexing_maps = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[output_id], mlir_context);
HloInstructionIndexing fusion_indexing;
fusion_indexing.indexing_maps.resize(fusion->operand_count());
for (auto [operand_id, operand] : llvm::enumerate(fusion->operands())) {
fusion_indexing.indexing_maps[operand_id] = grouped_indexing_maps[operand];
}
return fusion_indexing;
}
HloInstructionIndexing ComputeOutputToInputDotOpIndexing(
const HloDotInstruction* dot, MLIRContext* mlir_context) {
CHECK_NE(dot, nullptr);
const DotDimensionNumbers& dim_numbers = dot->dot_dimension_numbers();
absl::Span<const int64_t> lhs_contracting_dims(
dim_numbers.lhs_contracting_dimensions());
absl::Span<const int64_t> rhs_contracting_dims =
dim_numbers.rhs_contracting_dimensions();
absl::Span<const int64_t> lhs_batch_dims = dim_numbers.lhs_batch_dimensions();
absl::Span<const int64_t> rhs_batch_dims = dim_numbers.rhs_batch_dimensions();
const Shape& lhs_shape = dot->operand(0)->shape();
const Shape& rhs_shape = dot->operand(1)->shape();
SmallVector<AffineExpr> lhs_exprs(lhs_shape.rank());
SmallVector<AffineExpr> rhs_exprs(rhs_shape.rank());
int64_t output_dim_id = 0;
for (auto [lhs_batch_dim, rhs_batch_dim] :
llvm::zip(lhs_batch_dims, rhs_batch_dims)) {
AffineExpr output_dim_expr = getAffineDimExpr(output_dim_id, mlir_context);
lhs_exprs[lhs_batch_dim] = output_dim_expr;
rhs_exprs[rhs_batch_dim] = output_dim_expr;
++output_dim_id;
}
auto lhs_non_contracting_dims =
GetNonContractingDims(lhs_shape, lhs_batch_dims, lhs_contracting_dims);
assert(lhs_non_contracting_dims.ok());
for (int64_t lhs_non_contracting_dim : lhs_non_contracting_dims.value()) {
lhs_exprs[lhs_non_contracting_dim] =
getAffineDimExpr(output_dim_id++, mlir_context);
}
auto rhs_non_contracting_dims =
GetNonContractingDims(rhs_shape, rhs_batch_dims, rhs_contracting_dims);
assert(rhs_non_contracting_dims.ok());
for (int64_t rhs_non_contracting_dim : rhs_non_contracting_dims.value()) {
rhs_exprs[rhs_non_contracting_dim] =
getAffineDimExpr(output_dim_id++, mlir_context);
}
int64_t input_dim_id = 0;
std::vector<int64_t> input_dim_sizes;
input_dim_sizes.reserve(lhs_contracting_dims.size());
for (auto [lhs_contracting_dim, rhs_contracting_dim] :
llvm::zip(lhs_contracting_dims, rhs_contracting_dims)) {
AffineExpr input_dim_expr = getAffineSymbolExpr(input_dim_id, mlir_context);
lhs_exprs[lhs_contracting_dim] = input_dim_expr;
rhs_exprs[rhs_contracting_dim] = input_dim_expr;
++input_dim_id;
input_dim_sizes.push_back(lhs_shape.dimensions(lhs_contracting_dim));
}
IndexingMap lhs_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(dot->shape().rank(), input_dim_sizes.size(), lhs_exprs,
mlir_context),
dot->shape().dimensions(), input_dim_sizes);
IndexingMap rhs_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(dot->shape().rank(), input_dim_sizes.size(), rhs_exprs,
mlir_context),
dot->shape().dimensions(), input_dim_sizes);
return HloInstructionIndexing::FromIndexingMaps(
{lhs_indexing_map, rhs_indexing_map});
}
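// Illustrative example (shapes invented for this sketch): for
//   dot = f32[b, m, n] dot(f32[b, m, k] lhs, f32[b, k, n] rhs),
//     lhs_batch_dims={0}, lhs_contracting_dims={2},
//     rhs_batch_dims={0}, rhs_contracting_dims={1}
// the maps constructed above are
//   lhs: (d0, d1, d2)[s0] -> (d0, d1, s0)
//   rhs: (d0, d1, d2)[s0] -> (d0, s0, d2)
// where d0..d2 index the dot output and the symbol s0 in [0, k) ranges over
// the contracted dimension.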
HloInstructionIndexing ComputeOutputToInputDynamicSliceOpIndexing(
const HloDynamicSliceInstruction* dynamic_slice,
MLIRContext* mlir_context) {
const Shape& input_shape = dynamic_slice->operand(0)->shape();
const Shape& output_shape = dynamic_slice->shape();
int64_t rank = output_shape.rank();
const int64_t first_index_num = dynamic_slice->first_index_operand_number();
CHECK(dynamic_slice->operand(first_index_num)->shape().rank() == 0)
<< "b/118437727: Old form, not supported.";
AffineMap empty_results_affine_map = AffineMap::get(
rank, 0, {}, mlir_context);
IndexingMap start_indices_map = IndexingMap::FromTensorSizes(
empty_results_affine_map, output_shape.dimensions(), {});
std::vector<RTVar> offsets_rt_vars;
offsets_rt_vars.reserve(rank);
std::vector<AffineExpr> exprs;
exprs.reserve(rank);
for (auto [dim, slice_size] :
llvm::enumerate(dynamic_slice->dynamic_slice_sizes())) {
exprs.push_back(getAffineDimExpr(dim, mlir_context) +
getAffineSymbolExpr(dim, mlir_context));
offsets_rt_vars.push_back(
RTVar{Interval{0, input_shape.dimensions(dim) - slice_size},
dynamic_slice->operand(dim + first_index_num),
empty_results_affine_map});
}
std::vector<IndexingMap> indexing_maps(dynamic_slice->operand_count(),
start_indices_map);
indexing_maps.front() =
IndexingMap{AffineMap::get(rank, rank, exprs,
mlir_context),
start_indices_map.GetDimVars(), {},
std::move(offsets_rt_vars)};
return HloInstructionIndexing::FromIndexingMaps(indexing_maps);
}
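// Example (mirrors the DynamicSliceOp unit test): for
//   ds = s32[1,2,32] dynamic-slice(s32[2,2,258] src, of1, of2, of3),
//     dynamic_slice_sizes={1, 2, 32}
// the sliced operand gets the map
//   (d0, d1, d2)[s0, s1, s2] -> (d0 + s0, d1 + s1, d2 + s2)
// where each s_i is an RTVar read from the corresponding offset operand and
// clamped to [0, input_dim - slice_size] (e.g. s2 in [0, 227)), while the
// offset operands themselves get the empty map (d0, d1, d2) -> ().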
HloInstructionIndexing ComputeOutputToInputDynamicUpdateSliceOpIndexing(
const HloDynamicUpdateSliceInstruction* dus, MLIRContext* mlir_context) {
const Shape& update_shape = dus->update()->shape();
const Shape& output_shape = dus->shape();
int64_t rank = output_shape.rank();
std::vector<AffineExpr> identity;
for (int64_t dim = 0; dim < rank; ++dim) {
identity.push_back(getAffineDimExpr(dim, mlir_context));
}
IndexingMap operand_map = IndexingMap::FromTensorSizes(
AffineMap::get(rank, 0, identity,
mlir_context),
output_shape.dimensions(), {});
AffineMap empty_results_affine_map = AffineMap::get(
rank, 0, {}, mlir_context);
IndexingMap start_indices_map = IndexingMap::FromTensorSizes(
empty_results_affine_map, output_shape.dimensions(), {});
std::vector<AffineExpr> exprs;
exprs.reserve(rank);
std::vector<RTVar> rt_vars;
rt_vars.reserve(rank);
for (auto [dim, slice_size] : llvm::enumerate(update_shape.dimensions())) {
exprs.push_back(getAffineDimExpr(dim, mlir_context) -
getAffineSymbolExpr(dim, mlir_context));
Interval feasible_values{0, output_shape.dimensions(dim) - slice_size};
rt_vars.push_back(RTVar{feasible_values, dus->operand(2 + dim),
empty_results_affine_map});
}
IndexingMap update_map{AffineMap::get(rank, rank,
exprs, mlir_context),
operand_map.GetDimVars(),
{}, rt_vars};
std::vector<IndexingMap> indexing_maps(dus->operand_count(),
start_indices_map);
indexing_maps[0] = std::move(operand_map);
indexing_maps[1] = std::move(update_map);
return HloInstructionIndexing::FromIndexingMaps(indexing_maps);
}
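// Example (mirrors the DynamicUpdateSliceOp unit test): for
//   dus = s32[20,30] dynamic-update-slice(src, s32[5,10] upd, of1, of2)
// the update operand is addressed with
//   (d0, d1)[s0, s1] -> (d0 - s0, d1 - s1)
// where s0 in [0, 16) and s1 in [0, 21) are runtime start indices (RTVars),
// and the source operand keeps the identity map (d0, d1) -> (d0, d1).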
HloInstructionIndexing ComputeOutputToInputGatherOpIndexing(
const HloGatherInstruction* gather, MLIRContext* mlir_context) {
CHECK(GatherSimplifier::IsSimplifiedGather(gather))
<< "Non-simplified HLO Gather is not supported.";
const Shape& operand_shape = gather->operand(0)->shape();
const Shape& indices_shape = gather->operand(1)->shape();
const GatherDimensionNumbers& dimension_numbers =
gather->gather_dimension_numbers();
int64_t index_vector_length =
indices_shape.dimensions(dimension_numbers.index_vector_dim());
const Shape& output_shape = gather->shape();
int64_t output_rank = output_shape.rank();
AffineExpr indices_id_dim = getAffineDimExpr(0, mlir_context);
std::vector<DimVar> dim_vars =
DimVarsFromTensorSizes(output_shape.dimensions());
IndexingMap indices_map{
AffineMap::get(output_rank, 1,
{indices_id_dim, getAffineSymbolExpr(0, mlir_context)},
mlir_context),
dim_vars,
{RangeVar{{0, index_vector_length - 1}}},
{}};
std::vector<RTVar> rt_vars;
std::vector<AffineExpr> exprs;
exprs.reserve(operand_shape.rank());
for (auto [operand_dim_id, slice_size] :
llvm::enumerate(gather->gather_slice_sizes())) {
int64_t output_dim_id = dimension_numbers.offset_dims(operand_dim_id);
exprs.push_back(getAffineDimExpr(output_dim_id, mlir_context));
if (operand_dim_id >= index_vector_length) continue;
rt_vars.push_back(RTVar{
Interval{0, operand_shape.dimensions(operand_dim_id) - slice_size},
gather->operand(1),
AffineMap::get(output_rank, 0,
{indices_id_dim,
getAffineConstantExpr(operand_dim_id, mlir_context)},
mlir_context)});
exprs.back() =
exprs.back() + getAffineSymbolExpr(operand_dim_id, mlir_context);
}
IndexingMap operand_map = {
AffineMap::get(output_rank,
index_vector_length, exprs, mlir_context),
std::move(dim_vars), {}, std::move(rt_vars)};
return HloInstructionIndexing::FromIndexingMaps({operand_map, indices_map});
}
IndexingMap ComputeOutputToInputPadOpIndexingImpl(
absl::Span<const int64_t> output_dims,
absl::Span<const int64_t> padding_low,
absl::Span<const int64_t> padding_high,
absl::Span<const int64_t> padding_interior, MLIRContext* mlir_context) {
int64_t output_rank = output_dims.size();
std::vector<AffineExpr> exprs;
std::vector<std::pair<AffineExpr, Interval>> constraints;
std::vector<DimVar> dim_vars;
exprs.reserve(output_rank);
constraints.reserve(output_rank);
int64_t output_dim_id = 0;
for (const auto [output_dim, pad_low, pad_high, pad_interior] :
llvm::zip(output_dims, padding_low, padding_high, padding_interior)) {
AffineExpr dim_expr = getAffineDimExpr(output_dim_id, mlir_context);
dim_vars.push_back(
{Interval{std::max(int64_t{0}, pad_low),
std::min(output_dim - 1, output_dim - 1 - pad_high)}});
if (pad_interior == 0) {
exprs.push_back(dim_expr - pad_low);
} else {
exprs.push_back((dim_expr - pad_low).floorDiv(pad_interior + 1));
constraints.push_back(
{(dim_expr - pad_low) % (pad_interior + 1), Interval{0, 0}});
}
++output_dim_id;
}
return IndexingMap{
AffineMap::get(output_rank, 0, exprs, mlir_context),
std::move(dim_vars),
{},
{}, absl::MakeSpan(constraints)};
}
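// Worked example for one padded dimension: with edge_padding_low = 1,
// edge_padding_high = 0 and interior_padding = 1, an output index d maps to
// input index (d - 1) floordiv 2 under the constraint (d - 1) mod 2 == 0,
// so only non-interior-padding positions of the output read from the input.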
HloInstructionIndexing ComputeOutputToInputPadOpIndexing(
const HloPadInstruction* pad, MLIRContext* mlir_context) {
const Shape& output_shape = pad->shape();
int64_t rank = output_shape.rank();
SmallVector<int64_t> padding_low, padding_high, padding_interior;
padding_low.reserve(rank);
padding_high.reserve(rank);
padding_interior.reserve(rank);
for (const auto& dim_config : pad->padding_config().dimensions()) {
padding_low.push_back(dim_config.edge_padding_low());
padding_high.push_back(dim_config.edge_padding_high());
padding_interior.push_back(dim_config.interior_padding());
}
IndexingMap input_indexing_map = ComputeOutputToInputPadOpIndexingImpl(
output_shape.dimensions(), padding_low, padding_high, padding_interior,
mlir_context);
IndexingMap padding_value_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_shape.rank(), 0, {}, mlir_context),
output_shape.dimensions(), {});
return HloInstructionIndexing::FromIndexingMaps(
{input_indexing_map, padding_value_indexing_map});
}
HloInstructionIndexing ComputeOutputToInputReduceOpIndexing(
const HloReduceInstruction* reduce, int output_id,
MLIRContext* mlir_context) {
absl::flat_hash_set<int64_t> reduce_dims_ids(reduce->dimensions().begin(),
reduce->dimensions().end());
const Shape& input_shape = reduce->operand(output_id)->shape();
const Shape& output_shape = GetOutputShape(reduce, 0);
std::vector<int64_t> parallel_dims_sizes;
int64_t output_dim_id = 0;
std::vector<AffineExpr> exprs;
exprs.reserve(input_shape.rank());
for (auto [input_dim_id, input_dim] :
llvm::enumerate(input_shape.dimensions())) {
if (reduce_dims_ids.contains(input_dim_id)) {
exprs.push_back(
getAffineSymbolExpr(parallel_dims_sizes.size(), mlir_context));
parallel_dims_sizes.push_back(input_dim);
continue;
}
exprs.push_back(getAffineDimExpr(output_dim_id++, mlir_context));
}
IndexingMap inputs_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_shape.rank(), reduce_dims_ids.size(), exprs,
mlir_context),
output_shape.dimensions(), parallel_dims_sizes);
IndexingMap inits_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(output_shape.rank(), 0, {}, mlir_context),
output_shape.dimensions(), {});
HloInstructionIndexing instr_indexing;
instr_indexing.indexing_maps.resize(reduce->operand_count());
for (int64_t id = 0; id < reduce->input_count(); ++id) {
instr_indexing.indexing_maps[id].insert(inputs_indexing_map);
}
for (int64_t id = reduce->input_count(); id < reduce->operand_count(); ++id) {
instr_indexing.indexing_maps[id].insert(inits_indexing_map);
}
return instr_indexing;
}
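// Example (mirrors the variadic-reduce unit test): for
//   reduce(f32[32,40] x, f32[] init), dimensions={1}
// the input operand map is
//   (d0)[s0] -> (d0, s0) with d0 in [0, 32), s0 in [0, 40)
// and the init operand map is (d0) -> ().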
HloInstructionIndexing ComputeInputToOutputReduceOpIndexing(
const HloReduceInstruction* reduce, int input_id,
MLIRContext* mlir_context) {
const Shape& output_shape = GetOutputShape(reduce, 0);
int64_t output_rank = output_shape.rank();
HloInstructionIndexing instr_indexing;
int arity = reduce->input_count();
instr_indexing.indexing_maps.resize(arity);
if (input_id >= arity) {
std::vector<AffineExpr> inits_exprs;
inits_exprs.reserve(output_rank);
for (int sym = 0; sym < output_rank; ++sym) {
inits_exprs.push_back(getAffineSymbolExpr(sym, mlir_context));
}
IndexingMap inits_indexing_map = IndexingMap::FromTensorSizes(
AffineMap::get(0, output_rank, inits_exprs,
mlir_context),
{}, output_shape.dimensions());
for (int64_t id = 0; id < arity; ++id) {
instr_indexing.indexing_maps[id].insert(inits_indexing_map);
}
return instr_indexing;
}
const Shape& input_shape = reduce->operand(input_id)->shape();
std::vector<AffineExpr> inputs_exprs;
inputs_exprs.reserve(output_rank);
for (auto [input_dim_id, input_dim] :
llvm::enumerate(input_shape.dimensions())) {
if (!absl::c_linear_search(reduce->dimensions(), input_dim_id)) {
      inputs_exprs.push_back(getAffineDimExpr(input_dim_id, mlir_context));
#include "xla/service/gpu/model/indexing_analysis.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "mlir/IR/MLIRContext.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/fusions/tiling_util.h"
#include "xla/service/gpu/hlo_traversal.h"
#include "xla/service/gpu/model/indexing_test_utils.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test.h"
namespace xla {
namespace gpu {
namespace {
using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::ExplainMatchResult;
using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
MATCHER_P2(MatchInstrIndexing, operand_id, indexing_map_matchers, "") {
return ExplainMatchResult(Eq(operand_id), arg.operand_id, result_listener) &&
ExplainMatchResult(indexing_map_matchers, arg.indexing_maps,
result_listener);
}
using IndexingAnalysisTest = IndexingTestBase;
TEST_F(IndexingAnalysisTest, FuseProducerConsumerOutputToInputIndexing) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
transpose_p0 = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
ROOT a0 = f32[1000, 1000] add(p0, transpose_p0)
}
)");
const HloInstruction* parameter = root->operand(0);
const HloInstruction* transpose = root->operand(1);
auto root_indexing = GetOutputToInputIndexing(root);
auto grouped_by_key = GroupIndexingMapsByProducers(root_indexing, root);
EXPECT_THAT(
grouped_by_key,
UnorderedElementsAre(Pair(parameter, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 1000)
d1 in [0, 1000)
)"))),
Pair(transpose, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 1000)
d1 in [0, 1000)
)")))));
}
TEST_F(IndexingAnalysisTest, ComputeGroupedOutputToInputIndexing) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
transpose_p0 = f32[1000, 1000]{0, 1} transpose(p0), dimensions={1, 0}
ROOT a0 = f32[1000, 1000] add(p0, transpose_p0)
}
)");
const HloInstruction* parameter = root->operand(0);
const HloInstruction* transpose = root->operand(1);
auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(transpose, root);
auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_context_);
EXPECT_THAT(grouped_indexing,
UnorderedElementsAre(
Pair(root, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 1000)
d1 in [0, 1000)
)"))),
Pair(transpose, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 1000)
d1 in [0, 1000)
)"))),
Pair(parameter, UnorderedElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 1000)
d1 in [0, 1000)
)"),
MatchIndexingMap(R"(
(d0, d1) -> (d1, d0)
domain:
d0 in [0, 1000)
d1 in [0, 1000)
)")))));
}
TEST_F(IndexingAnalysisTest,
ComputeGroupedOutputToInputIndexing_VariadicReduce) {
auto root = ParseAndGetRoot(R"(
HloModule m
add {
param_0 = f32[] parameter(0)
param_1 = f32[] parameter(1)
param_2 = f32[] parameter(2)
param_3 = f32[] parameter(3)
add.0 = f32[] add(param_0, param_2)
add.1 = f32[] add(param_1, param_3)
ROOT t = (f32[], f32[]) tuple(add.0, add.1)
}
ENTRY entry_computation {
param_0.3 = f32[32,40]{1,0} parameter(0)
param_1.3 = f32[32,40]{1,0} parameter(1)
param_2.2 = f32[] parameter(2)
constant = f32[] constant(0)
ROOT reduce = (f32[32]{0}, f32[32]{0})
reduce(param_0.3, param_1.3, param_2.2, constant),
dimensions={1}, to_apply=add
}
)");
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, fusion_adaptor->GetRoots()[0], &mlir_context_);
EXPECT_THAT(grouped_indexing,
UnorderedElementsAre(
Pair(root, ElementsAre(MatchIndexingMap(R"(
(d0) -> (d0)
domain:
d0 in [0, 32)
)"))),
Pair(root->operand(0), ElementsAre(MatchIndexingMap(R"(
(d0)[s0] -> (d0, s0)
domain:
d0 in [0, 32)
s0 in [0, 40)
)"))),
Pair(root->operand(1), ElementsAre(MatchIndexingMap(R"(
(d0)[s0] -> (d0, s0)
domain:
d0 in [0, 32)
s0 in [0, 40)
)"))),
Pair(root->operand(2), ElementsAre(MatchIndexingMap(R"(
(d0) -> ()
domain:
d0 in [0, 32)
)"))),
Pair(root->operand(3), ElementsAre(MatchIndexingMap(R"(
(d0) -> ()
domain:
d0 in [0, 32)
)")))));
}
TEST_F(IndexingAnalysisTest, ComputeGroupedOutputToInputIndexing_SingleOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1000, 1000] parameter(0)
p1 = f32[1000, 1000] parameter(1)
exp0 = f32[1000, 1000] exponential(p1)
ROOT a0 = f32[1000, 1000] add(p0, exp0)
}
)");
HloComputation* entry_computation = root->parent();
const HloInstruction* exponential =
entry_computation->GetInstructionWithName("exp0");
const HloInstruction* parameter =
entry_computation->GetInstructionWithName("p1");
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(exponential);
HloInstructionAdaptor parameter_adaptor =
fusion_adaptor->GetRoots()[0].GetOperand(0);
auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, parameter_adaptor, &mlir_context_);
EXPECT_THAT(grouped_indexing, UnorderedElementsAre(Pair(
parameter, ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 1000)
d1 in [0, 1000)
)")))));
}
TEST_F(IndexingAnalysisTest,
ComputeGroupedOutputToInputIndexing_StartNotAtRoot) {
auto root = ParseAndGetRoot(R"(
HloModule m
max {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
ROOT max = f32[] maximum(p0, p1)
}
f {
p0 = f32[15, 20] parameter(0)
p0_init = f32[] parameter(1)
p0_bcast = f32[15, 32, 20, 64] broadcast(p0), dimensions={0, 2}
ROOT reduce_2 = f32[15, 64] reduce(p0_bcast, p0_init),
dimensions={1, 2}, to_apply=max
}
ENTRY e {
p0 = f32[15, 20] parameter(0)
p0_init = f32[] constant(-inf)
ROOT fusion = f32[15, 64] fusion(p0, p0_init), kind=kLoop, calls=f
}
)");
auto fusion_adaptor = HloFusionAdaptor::ForInstruction(root);
auto root_adaptor = fusion_adaptor->GetRoots()[0];
auto bcast = root_adaptor.GetOperand(0);
auto parameter_0 = bcast.GetOperand(0);
auto grouped_indexing = ComputeGroupedOutputToInputIndexing(
*fusion_adaptor, bcast, &mlir_context_);
EXPECT_THAT(
grouped_indexing,
UnorderedElementsAre(
Pair(&bcast.instruction(), ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2, d3) -> (d0, d1, d2, d3)
domain:
d0 in [0, 15)
d1 in [0, 32)
d2 in [0, 20)
d3 in [0, 64)
)"))),
Pair(¶meter_0.instruction(), ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2, d3) -> (d0, d2)
domain:
d0 in [0, 15)
d1 in [0, 32)
d2 in [0, 20)
d3 in [0, 64)
)")))));
}
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestOutputPermutation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20, 30] parameter(0)
ROOT add0 = f32[10, 20, 30]{1, 0, 2} exponential(p0)
}
)");
  auto input_indexing = GetOutputToInputIndexing(root, 0, true);
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d1, d2, d0)
domain:
d0 in [0, 30)
d1 in [0, 10)
d2 in [0, 20)
)"))));
  auto output_indexing = GetInputToOutputIndexing(root, 0, true);
EXPECT_THAT(output_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d2, d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
d2 in [0, 30)
)"))));
}
TEST_F(IndexingAnalysisTest, CopyNothing) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[0, 0]{0,1} parameter(0)
ROOT copy0 = f32[0, 0]{1,0} copy(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root, 0);
input_indexing.Simplify();
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap("KNOWN EMPTY"))));
auto output_indexing = GetInputToOutputIndexing(root, 0);
output_indexing.Simplify();
EXPECT_THAT(output_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap("KNOWN EMPTY"))));
}
TEST_F(IndexingAnalysisTest, ReshapeNothing) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[1,0,0] parameter(0)
ROOT reshape = f32[0] reshape(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root, 0);
input_indexing.Simplify();
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap("KNOWN EMPTY"))));
auto output_indexing = GetInputToOutputIndexing(root, 0);
output_indexing.Simplify();
EXPECT_THAT(output_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap("KNOWN EMPTY"))));
EXPECT_EQ(
output_indexing.indexing_maps[0].begin()->GetAffineMap().getNumResults(),
1);
}
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestInputPermutation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20, 30]{1, 0, 2} parameter(0)
ROOT add0 = f32[10, 20, 30] exponential(p0)
}
)");
  auto input_indexing = GetOutputToInputIndexing(root, 0, true);
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d2, d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
d2 in [0, 30)
)"))));
  auto output_indexing = GetInputToOutputIndexing(root, 0, true);
EXPECT_THAT(output_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d1, d2, d0)
domain:
d0 in [0, 30)
d1 in [0, 10)
d2 in [0, 20)
)"))));
}
TEST_F(IndexingAnalysisTest, PhysicalLayoutTestInputAndOutputPermutation) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20, 30]{1, 0, 2} parameter(0)
ROOT add0 = f32[10, 20, 30]{1, 0, 2} exponential(p0)
}
)");
  auto input_indexing = GetOutputToInputIndexing(root, 0, true);
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1, d2)
domain:
d0 in [0, 30)
d1 in [0, 10)
d2 in [0, 20)
)"))));
  auto output_indexing = GetInputToOutputIndexing(root, 0, true);
EXPECT_THAT(output_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1, d2)
domain:
d0 in [0, 30)
d1 in [0, 10)
d2 in [0, 20)
)"))));
}
TEST_F(IndexingAnalysisTest, ElementwiseOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[10, 20] parameter(0)
p1 = f32[10, 20] parameter(1)
ROOT add0 = f32[10, 20] add(p0, p1)
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
)"))));
auto output_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
)"))));
auto output_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
)"))));
}
TEST_F(IndexingAnalysisTest, Map) {
auto root = ParseAndGetRoot(R"(
HloModule m
mapper {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY e {
p0 = f32[10, 20] parameter(0)
p1 = f32[10, 20] parameter(1)
ROOT add0 = f32[10, 20] map(%p0, %p1), dimensions={}, to_apply=mapper
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
)"))));
auto output_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
)"))));
auto output_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
)"))));
}
TEST_F(IndexingAnalysisTest, BitcastIsReshape) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[4, 32] parameter(0)
ROOT bitcast = f32[4, 8, 4] bitcast(p0)
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1 * 4 + d2)
domain:
d0 in [0, 4)
d1 in [0, 8)
d2 in [0, 4)
)"))));
}
TEST_F(IndexingAnalysisTest, BitcastIsTranspose) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[3, 12288, 6, 128] parameter(0)
ROOT bitcast = f32[3, 6, 128, 12288] {2, 1, 3, 0} bitcast(p0)
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2, d3) -> (d0, d3, d1, d2)
domain:
d0 in [0, 3)
d1 in [0, 6)
d2 in [0, 128)
d3 in [0, 12288)
)"))));
}
TEST_F(IndexingAnalysisTest, BitcastIsTransposeReshapeTranspose) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[16, 17, 3] parameter(0)
ROOT bitcast = f32[51, 16] {0, 1} bitcast(p0)
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d1, d0 floordiv 3, d0 mod 3)
domain:
d0 in [0, 51)
d1 in [0, 16)
)"))));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d1 * 3 + d2, d0)
domain:
d0 in [0, 16)
d1 in [0, 17)
d2 in [0, 3)
)"))));
}
TEST_F(IndexingAnalysisTest, BroadcastOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[20] parameter(0)
ROOT bc0 = f32[10, 20, 30] broadcast(p0), dimensions={1}
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d1)
domain:
d0 in [0, 10)
d1 in [0, 20)
d2 in [0, 30)
)"))));
auto output_indexing = GetInputToOutputIndexing(root);
EXPECT_THAT(output_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0)[s0, s1] -> (s0, d0, s1)
domain:
d0 in [0, 20)
s0 in [0, 10)
s1 in [0, 30)
)"))));
}
TEST_F(IndexingAnalysisTest, ConstantOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
ROOT c1 = bf16[17, 22] constant(1)
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.indexing_maps, IsEmpty());
}
TEST_F(IndexingAnalysisTest, ConcatenateOp) {
auto root = ParseAndGetRoot(R"(
HloModule m
ENTRY e {
p0 = f32[2, 5, 7] parameter(0)
p1 = f32[2, 11, 7] parameter(1)
p2 = f32[2, 17, 7] parameter(2)
ROOT concat = f32[2, 33, 7] concatenate(
f32[2, 5, 7] p0, f32[2, 11, 7] p1, f32[2, 17, 7] p2), dimensions={1}
}
)");
auto input_indexing = GetOutputToInputIndexing(root);
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1, d2)
domain:
d0 in [0, 2)
d1 in [0, 5)
d2 in [0, 7)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1 - 5, d2)
domain:
d0 in [0, 2)
d1 in [5, 16)
d2 in [0, 7)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1 - 16, d2)
domain:
d0 in [0, 2)
d1 in [16, 33)
d2 in [0, 7)
)"))));
auto output_indexing_0 = GetInputToOutputIndexing(root, 0);
EXPECT_THAT(output_indexing_0.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1, d2)
domain:
d0 in [0, 2)
d1 in [0, 5)
d2 in [0, 7)
)"))));
auto output_indexing_1 = GetInputToOutputIndexing(root, 1);
EXPECT_THAT(output_indexing_1.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1 + 5, d2)
domain:
d0 in [0, 2)
d1 in [0, 11)
d2 in [0, 7)
)"))));
auto output_indexing_2 = GetInputToOutputIndexing(root, 2);
EXPECT_THAT(output_indexing_2.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> (d0, d1 + 16, d2)
domain:
d0 in [0, 2)
d1 in [0, 17)
d2 in [0, 7)
)"))));
}
TEST_F(IndexingAnalysisTest, DynamicSliceOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
%src = s32[2,2,258] parameter(0)
%of1 = s32[] parameter(1)
%of2 = s32[] parameter(2)
%of3 = s32[] parameter(3)
ROOT %ds = s32[1,2,32] dynamic-slice(s32[2,2,258] %src,
s32[] %of1, s32[] %of2, s32[] %of3),
dynamic_slice_sizes={1, 2, 32}
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2)[s0, s1, s2] -> (d0 + s0, d1 + s1, d2 + s2)
domain:
d0 in [0, 1)
d1 in [0, 2)
d2 in [0, 32)
s0 in [0, 2)
hlo: %of1 = s32[] parameter(1)
(d0, d1, d2) -> ()
s1 in [0, 1)
hlo: %of2 = s32[] parameter(2)
(d0, d1, d2) -> ()
s2 in [0, 227)
hlo: %of3 = s32[] parameter(3)
(d0, d1, d2) -> ()
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> ()
domain:
d0 in [0, 1)
d1 in [0, 2)
d2 in [0, 32)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> ()
domain:
d0 in [0, 1)
d1 in [0, 2)
d2 in [0, 32)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2) -> ()
domain:
d0 in [0, 1)
d1 in [0, 2)
d2 in [0, 32)
)"))));
}
TEST_F(IndexingAnalysisTest, DynamicUpdateSliceOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
ENTRY e {
%src = s32[20,30] parameter(0)
%upd = s32[5,10] parameter(1)
%of1 = s32[] parameter(2)
%of2 = s32[] parameter(3)
ROOT %dus = s32[20,30] dynamic-update-slice(
s32[20,30] %src, s32[5,10] %upd, s32[] %of1, s32[] %of2)
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> (d0, d1)
domain:
d0 in [0, 20)
d1 in [0, 30)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1)[s0, s1] -> (d0 - s0, d1 - s1)
domain:
d0 in [0, 20)
d1 in [0, 30)
s0 in [0, 16)
hlo: %of1 = s32[] parameter(2)
(d0, d1) -> ()
s1 in [0, 21)
hlo: %of2 = s32[] parameter(3)
(d0, d1) -> ()
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> ()
domain:
d0 in [0, 20)
d1 in [0, 30)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1) -> ()
domain:
d0 in [0, 20)
d1 in [0, 30)
)"))));
}
TEST_F(IndexingAnalysisTest, FusionOpWithSingleBinaryOp) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
HloModule m
f {
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
ROOT a0 = f32[100] add(p0, p1)
}
ENTRY e {
p0 = f32[100] parameter(0)
p1 = f32[100] parameter(1)
ROOT fusion = f32[100] fusion(p0, p1), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0) -> (d0)
domain:
d0 in [0, 100)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0) -> (d0)
domain:
d0 in [0, 100)
)"))));
}
TEST_F(IndexingAnalysisTest, FusionOpWithDot) {
auto input_indexing = GetOutputToInputIndexing(ParseAndGetRoot(R"(
f {
p0 = s8[3,12288,6,128]{3,2,1,0} parameter(0)
bitcast1 = s8[3,6,128,12288]{2,1,3,0} bitcast(p0)
copy1 = s8[3,6,128,12288]{3,2,1,0} copy(bitcast1)
bitcast2 = s8[2304,12288]{1,0} bitcast(copy1)
convert1 = bf16[2304,12288]{1,0} convert(bitcast2)
bitcast3 = bf16[2304,16,768]{2,1,0} bitcast(convert1)
p3 = bf16[16,12288]{1,0} parameter(3)
convert2 = f32[16,12288]{1,0} convert(p3)
p4 = bf16[16,12288]{1,0} parameter(4)
convert3 = f32[16,12288]{1,0} convert(p4)
add1 = f32[16,12288]{1,0} add(convert2, convert3)
p2 = bf16[16]{0} parameter(2)
convert15 = f32[16]{0} convert(p2)
rsqrt = f32[16]{0} rsqrt(convert15)
convert4 = bf16[16]{0} convert(rsqrt)
bcast1 = bf16[16,12288]{1,0} broadcast(convert4), dimensions={0}
convert5 = f32[16,12288]{1,0} convert(bcast1)
multiply1 = f32[16,12288]{1,0} multiply(add1, convert5)
p1 = bf16[12288]{0} parameter(1)
convert6 = f32[12288]{0} convert(p1)
c1 = bf16[] constant(1)
bcast2 = bf16[12288]{0} broadcast(c1), dimensions={}
convert7 = f32[12288]{0} convert(bcast2)
add2 = f32[12288]{0} add(convert6, convert7)
convert8 = bf16[12288]{0} convert(add2)
bcast3 = bf16[16,12288]{1,0} broadcast(convert8), dimensions={1}
convert9 = f32[16,12288]{1,0} convert(bcast3)
multiply2 = f32[16,12288]{1,0} multiply(multiply1, convert9)
convert10 = bf16[16,12288]{1,0} convert(multiply2)
bcast4 = bf16[16,16,768]{2,1,0} bitcast(convert10)
dot = bf16[16,2304,16]{2,1,0} dot(bitcast3, bcast4),
lhs_batch_dims={1}, lhs_contracting_dims={2},
rhs_batch_dims={1}, rhs_contracting_dims={2}
bcast5 = bf16[16,3,6,128,16]{4,3,2,1,0} bitcast(dot)
copy2 = bf16[16,3,6,128,16]{3,2,4,1,0} copy(bcast5)
convert13 = f32[16,3,6,128,16]{3,2,4,1,0} convert(copy2)
p5 = bf16[3,6,128]{2,1,0} parameter(5)
bcast6 = bf16[3,6,128,16]{2,1,3,0} broadcast(p5), dimensions={0,1,2}
convert11 = f32[3,6,128,16]{2,1,3,0} convert(bcast6)
bcast7 = f32[16,3,6,128,16]{3,2,4,1,0} broadcast(convert11),
dimensions={1,2,3,4}
multiply3 = f32[16,3,6,128,16]{3,2,4,1,0} multiply(convert13, bcast7)
convert12 = bf16[16,3,6,128,16]{3,2,4,1,0} convert(multiply3)
ROOT bcast8 = bf16[16,16,3,1,6,128]{5,4,1,3,2,0} bitcast(convert12)
}
ENTRY e {
p0 = s8[3,12288,6,128]{3,2,1,0} parameter(0)
p1 = bf16[12288]{0} parameter(1)
p2 = bf16[16]{0} parameter(2)
p3 = bf16[16,12288]{1,0} parameter(3)
p4 = bf16[16,12288]{1,0} parameter(4)
p5 = bf16[3,6,128]{2,1,0} parameter(5)
ROOT fusion = bf16[16,16,3,1,6,128]{5,4,1,3,2,0}
fusion(p0, p1, p2, p3, p4, p5), kind=kLoop, calls=f
}
)"));
EXPECT_THAT(input_indexing.indexing_maps,
ElementsAre(ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2, d3, d4, d5)[s0] -> (d2, d0 * 768 + s0, d4, d5)
domain:
d0 in [0, 16)
d1 in [0, 16)
d2 in [0, 3)
d3 in [0, 1)
d4 in [0, 6)
d5 in [0, 128)
s0 in [0, 768)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2, d3, d4, d5)[s0] -> (d0 * 768 + s0)
domain:
d0 in [0, 16)
d1 in [0, 16)
d2 in [0, 3)
d3 in [0, 1)
d4 in [0, 6)
d5 in [0, 128)
s0 in [0, 768)
)")),
ElementsAre(MatchIndexingMap(R"(
(d0, d1, d2, d3, d4, d5) -> (d1)
domain:
d0 in [0, 16)
d1 in [0, 16)
d2 in [0, 3)
d3 in [0, 1)
d4 in [0, 6)
d5 in [0, 128)
)")),
ElementsAre(MatchIndexingMap | 2,158 |
#ifndef XLA_SERVICE_GPU_RUNTIME_COMMAND_BUFFER_CMD_H_
#define XLA_SERVICE_GPU_RUNTIME_COMMAND_BUFFER_CMD_H_
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "absl/types/span.h"
#include "xla/ffi/api/c_api.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/collective_ops_utils.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/kernels/custom_kernel.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/runtime/custom_call_thunk.h"
#include "xla/service/gpu/runtime/nccl_api.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include "xla/service/gpu/runtime/nccl_collective_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla::gpu {
class CommandBufferCmd {
public:
explicit CommandBufferCmd(ExecutionStreamId execution_stream_id)
: execution_stream_id_(execution_stream_id) {}
virtual ~CommandBufferCmd() = default;
enum class MemoryAccess { kRead, kWrite };
struct BufferUsage {
BufferUsage(BufferAllocation::Slice slice, MemoryAccess access)
: slice(slice), access(access) {}
template <typename H>
friend H AbslHashValue(H h, const BufferUsage& buffer) {
return H::combine(std::move(h), buffer.slice, buffer.access);
}
bool operator==(const BufferUsage& other) const {
return slice == other.slice && access == other.access;
}
BufferAllocation::Slice slice;
MemoryAccess access;
};
using BufferUsageVector = absl::InlinedVector<BufferUsage, 4>;
class State {
public:
virtual ~State() = default;
};
class StateManager {
public:
virtual ~StateManager() = default;
template <typename ConcreteState>
ConcreteState* GetOrNull(const CommandBufferCmd* cmd) {
static_assert(std::is_base_of_v<State, ConcreteState>);
return static_cast<ConcreteState*>(GetOrNull(cmd));
}
template <typename ConcreteState>
ConcreteState* GetOrCreate(
const CommandBufferCmd* cmd,
absl::FunctionRef<std::unique_ptr<ConcreteState>()> create) {
static_assert(std::is_base_of_v<State, ConcreteState>);
return static_cast<ConcreteState*>(GetOrCreate(
cmd, [&]() -> std::unique_ptr<State> { return create(); }));
}
template <typename ConcreteState>
ConcreteState* GetOrCreate(const CommandBufferCmd* cmd) {
static_assert(std::is_base_of_v<State, ConcreteState>);
return static_cast<ConcreteState*>(
GetOrCreate(cmd, [] { return std::make_unique<ConcreteState>(); }));
}
private:
State* GetOrNull(const CommandBufferCmd* cmd);
State* GetOrCreate(const CommandBufferCmd* cmd,
absl::FunctionRef<std::unique_ptr<State>()> create);
absl::flat_hash_map<const CommandBufferCmd*, std::unique_ptr<State>> state_;
};
struct RecordParams {
StateManager& state;
se::CommandBuffer::ExecutionScopeId execution_scope_id =
se::CommandBuffer::kDefaulExecutionScope;
};
virtual absl::Status Prepare(const Thunk::PrepareParams& params,
Thunk::ResourceRequests& resource_requests) {
return absl::OkStatus();
}
virtual absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) {
return absl::OkStatus();
}
virtual absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) = 0;
virtual bool force_update() { return false; }
virtual BufferUsageVector buffers() = 0;
virtual bool IsNestedCommandBuffer() const { return false; }
se::CommandBuffer::ExecutionScopeId GetExecutionScope(
const RecordParams& record_params,
ExecutionStreamId execution_stream_id) const;
virtual se::CommandBuffer::ExecutionScopeId GetExecutionScope(
const CommandBufferCmd::RecordParams& record_params) const;
std::string_view profile_annotation() const { return profile_annotation_; }
void set_profile_annotation(std::string_view profile_annotation) {
profile_annotation_ = profile_annotation;
}
ExecutionStreamId execution_stream_id() const { return execution_stream_id_; }
private:
std::string profile_annotation_;
ExecutionStreamId execution_stream_id_;
};
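// Illustrative sketch (not part of the original header; `NoOpCmd` is a
// hypothetical name): the minimal surface a CommandBufferCmd subclass has to
// implement. Only Record() and buffers() are pure virtual; Prepare() and
// Initialize() default to returning OkStatus.
//
//   class NoOpCmd : public CommandBufferCmd {
//    public:
//     explicit NoOpCmd(ExecutionStreamId id) : CommandBufferCmd(id) {}
//     absl::Status Record(const Thunk::ExecuteParams&, const RecordParams&,
//                         se::CommandBuffer*) override {
//       return absl::OkStatus();  // records nothing into the command buffer
//     }
//     BufferUsageVector buffers() override { return {}; }
//   };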
class CommandBufferCmdSequence {
public:
enum class SynchronizationMode {
kSerialize,
kAutomatic
};
enum class RecordMode {
kExclusive,
kConditional
};
explicit CommandBufferCmdSequence(SynchronizationMode synchronization_mode =
SynchronizationMode::kAutomatic);
void Append(std::unique_ptr<CommandBufferCmd> cmd);
template <typename T, typename... Args>
void Emplace(Args... args) {
Append(std::make_unique<T>(std::forward<Args>(args)...));
}
absl::Status Prepare(const Thunk::PrepareParams& params,
Thunk::ResourceRequests& resource_requests);
absl::Status Initialize(const Thunk::InitializeParams& params,
CommandBufferCmd::StateManager& state);
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const CommandBufferCmd::RecordParams& record_params,
se::CommandBuffer* command_buffer,
RecordMode mode = RecordMode::kExclusive);
const absl::flat_hash_set<CommandBufferCmd::BufferUsage>& buffers() const;
const absl::flat_hash_set<BufferAllocation::Index>& allocs_indices() const;
std::vector<bool> barriers() const;
bool empty() const { return commands_.empty(); }
size_t size() const { return commands_.size(); }
bool force_update() const {
return absl::c_any_of(commands_, [](const CommandInfo& cmd_info) {
return cmd_info.cmd->force_update();
});
}
private:
struct CommandInfo {
std::unique_ptr<CommandBufferCmd> cmd;
bool requires_barrier;
};
bool HasConflicts(ExecutionStreamId execution_stream_id,
const CommandBufferCmd::BufferUsageVector& buffers);
void TrackBuffers(ExecutionStreamId execution_stream_id,
const CommandBufferCmd::BufferUsageVector& buffers);
void ClearTrackedBuffers(ExecutionStreamId execution_stream_id);
SynchronizationMode synchronization_mode_;
std::vector<CommandInfo> commands_;
absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers_;
absl::flat_hash_set<BufferAllocation::Index> allocs_indices_;
struct ReadWriteSet {
absl::flat_hash_set<BufferAllocation::Slice> read;
absl::flat_hash_set<BufferAllocation::Slice> write;
};
absl::flat_hash_map<ExecutionStreamId, ReadWriteSet> read_write_sets_;
};
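// Typical usage sketch (assumed from the interface above; all variable names
// are illustrative): commands are appended via Emplace<T>() and the whole
// sequence is then driven through the same prepare/initialize/record phases
// as an individual command.
//
//   CommandBufferCmdSequence seq;
//   seq.Emplace<MemzeroCmd>(stream_id, dst_slice);
//   seq.Emplace<MemcpyDeviceToDeviceCmd>(stream_id, dst_slice, src_slice,
//                                        /*num_bytes=*/1024);
//   TF_RETURN_IF_ERROR(seq.Prepare(prepare_params, resource_requests));
//   TF_RETURN_IF_ERROR(seq.Initialize(init_params, state));
//   TF_RETURN_IF_ERROR(seq.Record(execute_params, record_params, cmd_buffer));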
class TracedCommandBuffer : public CommandBufferCmd::State {
public:
explicit TracedCommandBuffer(CommandBufferCmd::BufferUsageVector buffers,
int64_t capacity = 16);
absl::StatusOr<se::CommandBuffer*> GetOrTraceCommandBuffer(
const BufferAllocations* buffer_allocation, se::StreamExecutor* executor,
se::Stream* stream, absl::FunctionRef<absl::Status(se::Stream*)> trace);
private:
std::vector<BufferAllocation::Index> allocs_indices_;
struct Entry {
std::vector<se::DeviceMemoryBase> recorded_allocs;
std::unique_ptr<se::CommandBuffer> command_buffer;
};
int64_t capacity_;
std::vector<Entry> entries_;
};
class TracedCommandBufferCmd : public CommandBufferCmd {
protected:
explicit TracedCommandBufferCmd(ExecutionStreamId execution_stream_id);
absl::Status AddTracedCommandBuffer(
const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params, se::CommandBuffer* command_buffer,
absl::FunctionRef<absl::Status(se::Stream*)> trace);
};
class ComputationIdCmd : public CommandBufferCmd {
public:
enum class Kind { kReplica, kPartition };
ComputationIdCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice dest, Kind kind);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
private:
BufferAllocation::Slice dest_;
Kind kind_;
absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>>
memset_kernels_ ABSL_GUARDED_BY(mutex_);
};
class LaunchCmd : public CommandBufferCmd {
public:
LaunchCmd(ExecutionStreamId execution_stream_id, std::string kernel_name,
absl::Span<const BufferAllocation::Slice> args,
absl::Span<const MemoryAccess> args_access, LaunchDimensions dims,
int64_t shmem_bytes);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
private:
std::string kernel_name_;
std::vector<BufferAllocation::Slice> args_;
std::vector<MemoryAccess> args_access_;
LaunchDimensions dims_;
int64_t shmem_bytes_;
absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>> kernels_
ABSL_GUARDED_BY(mutex_);
};
class CustomKernelLaunchCmd : public CommandBufferCmd {
public:
CustomKernelLaunchCmd(ExecutionStreamId execution_stream_id,
absl::Span<const BufferAllocation::Slice> args,
absl::Span<const MemoryAccess> args_access,
CustomKernel custom_kernel);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
private:
std::vector<BufferAllocation::Slice> args_;
std::vector<MemoryAccess> args_access_;
CustomKernel custom_kernel_;
absl::Mutex mutex_;
absl::flat_hash_map<se::StreamExecutor*, std::unique_ptr<se::Kernel>> kernels_
ABSL_GUARDED_BY(mutex_);
};
class MemcpyDeviceToDeviceCmd : public CommandBufferCmd {
public:
MemcpyDeviceToDeviceCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice dst,
BufferAllocation::Slice src, int64_t num_bytes);
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
private:
BufferAllocation::Slice dst_;
BufferAllocation::Slice src_;
int64_t num_bytes_;
};
class MemzeroCmd : public CommandBufferCmd {
public:
MemzeroCmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice dst);
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
private:
BufferAllocation::Slice dst_;
};
class Memset32Cmd : public CommandBufferCmd {
public:
Memset32Cmd(ExecutionStreamId execution_stream_id,
BufferAllocation::Slice dst, uint32_t bit_pattern);
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
private:
BufferAllocation::Slice dst_;
uint32_t bit_pattern_;
};
class IfCmd : public CommandBufferCmd {
public:
IfCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice pred,
CommandBufferCmdSequence then_commands);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
bool force_update() override;
BufferUsageVector buffers() override;
private:
BufferAllocation::Slice pred_;
CommandBufferCmdSequence then_commands_;
};
class IfElseCmd : public CommandBufferCmd {
public:
IfElseCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice pred,
CommandBufferCmdSequence then_commands,
CommandBufferCmdSequence else_commands);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
bool force_update() override;
BufferUsageVector buffers() override;
private:
BufferAllocation::Slice pred_;
CommandBufferCmdSequence then_commands_;
CommandBufferCmdSequence else_commands_;
};
class CaseCmd : public CommandBufferCmd {
public:
CaseCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice index,
std::vector<CommandBufferCmdSequence> branches_commands);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
bool force_update() override;
BufferUsageVector buffers() override;
private:
BufferAllocation::Slice index_;
std::vector<CommandBufferCmdSequence> branches_commands_;
};
class ForCmd : public CommandBufferCmd {
public:
ForCmd(ExecutionStreamId execution_stream_id, int32_t num_iterations,
BufferAllocation::Slice loop_counter,
CommandBufferCmdSequence body_commands);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
bool force_update() override;
BufferUsageVector buffers() override;
private:
int32_t num_iterations_;
BufferAllocation::Slice loop_counter_;
CommandBufferCmdSequence body_commands_;
};
class WhileCmd : public CommandBufferCmd {
public:
WhileCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice pred,
CommandBufferCmdSequence cond_commands,
CommandBufferCmdSequence body_commands);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
bool force_update() override;
BufferUsageVector buffers() override;
private:
BufferAllocation::Slice pred_;
CommandBufferCmdSequence cond_commands_;
CommandBufferCmdSequence body_commands_;
};
class GemmCmd : public TracedCommandBufferCmd {
public:
GemmCmd(ExecutionStreamId execution_stream_id, GemmConfig config,
const BufferAllocation::Slice& lhs_buffer,
const BufferAllocation::Slice& rhs_buffer,
const BufferAllocation::Slice& output_buffer,
const BufferAllocation::Slice& workspace, bool deterministic);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
bool IsNestedCommandBuffer() const final { return true; }
private:
const GemmConfig config_;
const BufferAllocation::Slice lhs_buffer_;
const BufferAllocation::Slice rhs_buffer_;
const BufferAllocation::Slice output_buffer_;
const BufferAllocation::Slice workspace_;
const bool deterministic_;
};
class CublasLtCmd : public TracedCommandBufferCmd {
public:
CublasLtCmd(ExecutionStreamId execution_stream_id, GemmConfig gemm_config,
se::gpu::BlasLt::Epilogue epilogue, int64_t algorithm_idx,
BufferAllocation::Slice a_buffer,
BufferAllocation::Slice b_buffer,
BufferAllocation::Slice c_buffer,
BufferAllocation::Slice d_buffer,
              BufferAllocation::Slice bias_buffer,
              BufferAllocation::Slice aux_buffer,
              BufferAllocation::Slice a_scale_buffer,
              BufferAllocation::Slice b_scale_buffer,
              BufferAllocation::Slice c_scale_buffer,
              BufferAllocation::Slice d_scale_buffer,
              BufferAllocation::Slice d_amax_buffer,
BufferAllocation::Slice workspace_buffer);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
bool IsNestedCommandBuffer() const final { return true; }
private:
absl::StatusOr<se::gpu::BlasLt::MatmulPlan*> GetMatmulPlan(
const stream_executor::Stream* stream);
absl::StatusOr<se::gpu::BlasLt::MatmulAlgorithm> GetMatmulAlgorithm(
const se::gpu::BlasLt::MatmulPlan* plan, int64_t max_workspace);
absl::flat_hash_map<const stream_executor::Stream*,
se::gpu::BlasLt::MatmulPlanPtr>
matmul_plans_cache_;
absl::flat_hash_map<const se::gpu::BlasLt::MatmulPlan*,
se::gpu::BlasLt::MatmulAlgorithm>
matmul_algorithm_cache_;
const GemmConfig gemm_config_;
const se::gpu::BlasLt::Epilogue epilogue_;
const int64_t algorithm_idx_;
const BufferAllocation::Slice a_buffer_;
const BufferAllocation::Slice b_buffer_;
const BufferAllocation::Slice c_buffer_;
const BufferAllocation::Slice d_buffer_;
const BufferAllocation::Slice bias_buffer_;
const BufferAllocation::Slice aux_buffer_;
const BufferAllocation::Slice a_scale_buffer_;
const BufferAllocation::Slice b_scale_buffer_;
const BufferAllocation::Slice c_scale_buffer_;
const BufferAllocation::Slice d_scale_buffer_;
const BufferAllocation::Slice d_amax_buffer_;
const BufferAllocation::Slice workspace_buffer_;
};
class CuDnnCmd : public TracedCommandBufferCmd {
public:
CuDnnCmd(ExecutionStreamId execution_stream_id,
absl::Span<const BufferAllocation::Slice> args,
std::shared_ptr<se::dnn::LazyDnnGraph> graph);
absl::Status Initialize(const Thunk::InitializeParams& params,
StateManager& state) override;
absl::Status Record(const Thunk::ExecuteParams& execute_params,
const RecordParams& record_params,
se::CommandBuffer* command_buffer) override;
BufferUsageVector buffers() override;
bool IsNestedCommandBuffer() const final { return true; }
private:
std::vector<BufferAllocation::Slice> args_;
con | #include "xla/service/gpu/runtime/command_buffer_cmd.h"
#include <array>
#include <cstdint>
#include <vector>
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/strings/ascii.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/platform_util.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla::gpu {
using BufferUsage = CommandBufferCmd::BufferUsage;
using BufferUsageVector = CommandBufferCmd::BufferUsageVector;
using MemoryAccess = CommandBufferCmd::MemoryAccess;
static se::StreamExecutor* GpuExecutor() {
auto name =
absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value());
auto* platform = se::PlatformManager::PlatformWithName(name).value();
return platform->ExecutorForDevice(0).value();
}
static constexpr auto s0 = ExecutionStreamId(0);
static constexpr auto s1 = ExecutionStreamId(1);
struct TestOnlyCommandBufferCmd : public CommandBufferCmd {
TestOnlyCommandBufferCmd(ExecutionStreamId execution_stream_id,
BufferUsageVector buffer_usage)
: CommandBufferCmd(execution_stream_id), buffer_usage(buffer_usage) {}
absl::Status Record(const Thunk::ExecuteParams&, const RecordParams&,
se::CommandBuffer*) override {
return absl::OkStatus();
}
BufferUsageVector buffers() override { return buffer_usage; }
BufferUsageVector buffer_usage;
};
TEST(CommandBufferCmdTest, SerializeExecution) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kRead);
auto use1 = BufferUsage(slice1, MemoryAccess::kRead);
CommandBufferCmdSequence commands(
CommandBufferCmdSequence::SynchronizationMode::kSerialize);
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1});
ASSERT_EQ(commands.barriers().size(), 2);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), true);
}
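// In kSerialize mode a barrier is recorded before every command after the
// first one, even though both commands above only read overlapping slices;
// the default synchronization mode used in the tests below inserts barriers
// only for genuine read/write conflicts.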
TEST(CommandBufferCmdTest, NoReadBarrier) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kRead);
auto use1 = BufferUsage(slice1, MemoryAccess::kRead);
CommandBufferCmdSequence commands;
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1});
ASSERT_EQ(commands.barriers().size(), 2);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), false);
}
TEST(CommandBufferCmdTest, NoWriteBarrier) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 200, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kWrite);
auto use1 = BufferUsage(slice1, MemoryAccess::kWrite);
CommandBufferCmdSequence commands;
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1});
ASSERT_EQ(commands.barriers().size(), 2);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), false);
}
TEST(CommandBufferCmdTest, WriteConflictBarrier) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kRead);
auto use1 = BufferUsage(slice0, MemoryAccess::kRead);
auto use2 = BufferUsage(slice1, MemoryAccess::kWrite);
CommandBufferCmdSequence commands;
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1});
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use2});
ASSERT_EQ(commands.barriers().size(), 3);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), false);
EXPECT_EQ(commands.barriers().at(2), true);
}
TEST(CommandBufferCmdTest, NoWriteConflictsAcrossStreams) {
BufferAllocation alloc0(0, 1024, 0);
auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100);
auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100);
auto use0 = BufferUsage(slice0, MemoryAccess::kRead);
auto use1 = BufferUsage(slice1, MemoryAccess::kWrite);
CommandBufferCmdSequence commands;
commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0});
commands.Emplace<TestOnlyCommandBufferCmd>(s1, BufferUsageVector{use1});
ASSERT_EQ(commands.barriers().size(), 2);
EXPECT_EQ(commands.barriers().at(0), false);
EXPECT_EQ(commands.barriers().at(1), false);
}
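// The write to slice1 overlaps the read of slice0, but the two commands are
// recorded on different execution streams (s0 and s1), so no barrier is
// inserted between them.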
TEST(CommandBufferCmdTest, MemcpyCmd) {
se::StreamExecutor* executor = GpuExecutor();
auto stream = executor->CreateStream().value();
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
CommandBufferCmdSequence commands;
commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_b, slice_a, byte_length);
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b}, 0, &allocator);
CommandBufferCmd::StateManager state;
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
CommandBufferCmd::RecordParams record_params = {state};
auto command_buffer =
executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value();
TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get()));
TF_ASSERT_OK(executor->Submit(stream.get(), *command_buffer));
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42));
}
TEST(CommandBufferCmdTest, BarrierCmd) {
se::StreamExecutor* executor = GpuExecutor();
auto stream = executor->CreateStream().value();
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> e = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
TF_ASSERT_OK(stream->MemZero(&e, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation alloc_c(2, byte_length, 0);
BufferAllocation alloc_d(3, byte_length, 0);
BufferAllocation alloc_e(4, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
BufferAllocation::Slice slice_c(&alloc_c, 0, byte_length);
BufferAllocation::Slice slice_d(&alloc_d, 0, byte_length);
BufferAllocation::Slice slice_e(&alloc_e, 0, byte_length);
CommandBufferCmdSequence commands;
commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_b, slice_a, byte_length);
commands.Emplace<BarrierCmd>(s1, s0);
commands.Emplace<MemcpyDeviceToDeviceCmd>(s1, slice_c, slice_b, byte_length);
commands.Emplace<BarrierCmd>(s0, s1);
commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_d, slice_c, byte_length);
commands.Emplace<BarrierCmd>(s1, s0);
commands.Emplace<MemcpyDeviceToDeviceCmd>(s1, slice_e, slice_d, byte_length);
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b, c, d, e}, 0, &allocator);
CommandBufferCmd::StateManager state;
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
CommandBufferCmd::RecordParams record_params = {state};
auto command_buffer =
executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value();
TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get()));
TF_ASSERT_OK(executor->Submit(stream.get(), *command_buffer));
std::vector<int32_t> dst_b(4, 0);
std::vector<int32_t> dst_c(4, 0);
std::vector<int32_t> dst_d(4, 0);
std::vector<int32_t> dst_e(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst_b.data(), b, byte_length));
TF_ASSERT_OK(stream->Memcpy(dst_c.data(), c, byte_length));
TF_ASSERT_OK(stream->Memcpy(dst_d.data(), d, byte_length));
TF_ASSERT_OK(stream->Memcpy(dst_e.data(), e, byte_length));
ASSERT_EQ(dst_b, std::vector<int32_t>(4, 42));
ASSERT_EQ(dst_c, std::vector<int32_t>(4, 42));
ASSERT_EQ(dst_d, std::vector<int32_t>(4, 42));
ASSERT_EQ(dst_e, std::vector<int32_t>(4, 42));
}
TEST(CommandBufferCmdTest, LaunchCmd) {
se::StreamExecutor* executor = GpuExecutor();
auto stream = executor->CreateStream().value();
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
auto args = {slice_a, slice_a, slice_b};
auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead,
MemoryAccess::kWrite};
CommandBufferCmdSequence commands;
commands.Emplace<LaunchCmd>(s0, "add", args, args_access,
LaunchDimensions(1, 4),
0);
Thunk::ExecutableSource source = {
#if defined(GOOGLE_CUDA)
se::gpu::internal::kAddI32Kernel,
{}
#elif defined(TENSORFLOW_USE_ROCM)
{},
se::gpu::internal::kAddI32KernelModule
#endif
};
CommandBufferCmd::StateManager state;
TF_ASSERT_OK(commands.Initialize({executor, source}, state));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
CommandBufferCmd::RecordParams record_params = {state};
auto command_buffer =
executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value();
TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get()));
TF_ASSERT_OK(executor->Submit(stream.get(), *command_buffer));
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
}
TEST(CommandBufferCmdStateManageTest, GetOrCreateState) {
struct TestState : public CommandBufferCmd::State {
int32_t value = 0;
};
CommandBufferCmd* cmd = reinterpret_cast<CommandBufferCmd*>(0x1234567);
CommandBufferCmd::StateManager state_manager;
auto* state0 = state_manager.GetOrNull<TestState>(cmd);
ASSERT_EQ(state0, nullptr);
auto* state1 = state_manager.GetOrCreate<TestState>(cmd);
ASSERT_EQ(state1->value, 0);
state1->value += 42;
auto* state2 = state_manager.GetOrCreate<TestState>(cmd);
ASSERT_EQ(state2->value, 42);
ASSERT_EQ(state1, state2);
}
TEST(TracedCommandBuffer, GetOrUpdateCommandBuffer) {
auto run_traced_test = [](int trace_cache_size) {
se::StreamExecutor* executor = GpuExecutor();
auto stream = executor->CreateStream().value();
BufferAllocation alloc0(0, 1024, 0);
BufferAllocation alloc1(1, 1024, 0);
CommandBufferCmd::BufferUsageVector buffers = {
{BufferAllocation::Slice(&alloc0, 0, 1024), MemoryAccess::kRead},
{BufferAllocation::Slice(&alloc1, 0, 1024), MemoryAccess::kWrite}};
TracedCommandBuffer traced_cmd_buffer(buffers,
trace_cache_size);
se::DeviceMemoryBase mem0(reinterpret_cast<void*>(0x01234567));
se::DeviceMemoryBase mem1(reinterpret_cast<void*>(0x12345670));
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({mem0, mem1}, 0, &allocator);
int64_t num_calls = 0;
auto trace = [&](se::Stream*) {
num_calls++;
return absl::OkStatus();
};
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer0,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer1,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_EQ(command_buffer0, command_buffer1);
EXPECT_EQ(num_calls, 1);
se::DeviceMemoryBase mem2(reinterpret_cast<void*>(0x23456701));
allocations = BufferAllocations({mem0, mem2}, 0, &allocator);
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer2,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_NE(command_buffer0, command_buffer2);
EXPECT_EQ(num_calls, 2);
allocations = BufferAllocations({mem0, mem1}, 0, &allocator);
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer3,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_EQ(command_buffer0, command_buffer3);
EXPECT_EQ(num_calls, 2);
allocations = BufferAllocations({mem0, mem0}, 0, &allocator);
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer4,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_NE(command_buffer4, command_buffer3);
ASSERT_NE(command_buffer4, command_buffer2);
EXPECT_EQ(num_calls, 3);
allocations = BufferAllocations({mem0, mem1}, 0, &allocator);
TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer5,
traced_cmd_buffer.GetOrTraceCommandBuffer(
&allocations, executor, stream.get(), trace));
ASSERT_EQ(command_buffer0, command_buffer5);
EXPECT_EQ(num_calls, 3);
};
run_traced_test(2);
run_traced_test(3);
}
static void BM_GetOrTraceCommandBuffer(benchmark::State& state) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
BufferAllocation alloc0(0, 1024, 0);
BufferAllocation alloc1(1, 1024, 0);
CommandBufferCmd::BufferUsageVector buffers = {
{BufferAllocation::Slice(&alloc0, 0, 1024), MemoryAccess::kRead},
{BufferAllocation::Slice(&alloc1, 0, 1024), MemoryAccess::kWrite}};
se::DeviceMemoryBase mem0(reinterpret_cast<void*>(0x01234567));
se::DeviceMemoryBase mem1(reinterpret_cast<void*>(0x12345670));
se::StreamExecutorMemoryAllocator allocator(executor);
std::array<BufferAllocations, 4> allocations = {
BufferAllocations({mem0, mem1}, 0, &allocator),
BufferAllocations({mem1, mem0}, 0, &allocator),
BufferAllocations({mem0, mem0}, 0, &allocator),
BufferAllocations({mem1, mem1}, 0, &allocator),
};
int32_t index = 0;
TracedCommandBuffer traced_cmd_buffer(buffers);
auto trace = [](se::Stream*) { return absl::OkStatus(); };
absl::FunctionRef<absl::Status(se::Stream*)> trace_ref(trace);
for (auto s : state) {
TF_CHECK_OK(traced_cmd_buffer
.GetOrTraceCommandBuffer(&allocations[index++ % 4],
executor, stream.get(), trace_ref)
.status());
}
}
BENCHMARK(BM_GetOrTraceCommandBuffer);
} | 2,159 |
#ifndef XLA_SERVICE_GPU_RUNTIME_COMMAND_BUFFER_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_COMMAND_BUFFER_THUNK_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/gpu/runtime/command_buffer_cmd.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla::gpu {
class CommandBufferThunk : public Thunk {
public:
CommandBufferThunk(CommandBufferCmdSequence commands, ThunkInfo thunk_info,
std::unique_ptr<SequentialThunk> thunks = nullptr);
const std::unique_ptr<SequentialThunk>& thunks() const { return thunks_; }
absl::Status Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
absl::StatusOr<se::DeviceMemoryBase> GetCommandBufferAllocationAddress(
const ExecuteParams& params, int64_t index);
private:
struct ExecutorCommandBuffer {
explicit ExecutorCommandBuffer(
std::unique_ptr<se::CommandBuffer> command_buffer);
bool ShouldUpdateCommandBuffer(const CommandBufferCmdSequence& commands,
const Thunk::ExecuteParams& params)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex);
absl::Mutex mutex;
std::unique_ptr<se::CommandBuffer> command_buffer ABSL_GUARDED_BY(mutex);
CommandBufferCmd::StateManager state ABSL_GUARDED_BY(mutex);
std::vector<se::DeviceMemoryBase> recorded_allocs ABSL_GUARDED_BY(mutex);
int64_t num_executions ABSL_GUARDED_BY(mutex) = 0;
};
struct State {
absl::Mutex mutex;
absl::flat_hash_map<se::StreamExecutor*,
std::shared_ptr<ExecutorCommandBuffer>>
command_buffers ABSL_GUARDED_BY(mutex);
};
absl::StatusOr<std::shared_ptr<ExecutorCommandBuffer>>
GetOrCreateCommandBuffer(se::StreamExecutor* executor);
struct GlobalState;
static GlobalState* GetGlobalState();
static void TrackCommandBuffers(std::weak_ptr<State> state);
static void EvictCommandBuffers();
CommandBufferCmdSequence commands_;
std::unique_ptr<SequentialThunk> thunks_;
std::shared_ptr<State> state_;
};
}
#endif
#include "xla/service/gpu/runtime/command_buffer_thunk.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/runtime/annotation.h"
#include "xla/service/gpu/runtime/command_buffer_cmd.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/stream_executor.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
#include "tsl/profiler/lib/profiler_lock.h"
#include "tsl/profiler/lib/traceme.h"
#include "tsl/profiler/lib/traceme_encode.h"
namespace xla::gpu {
using tsl::profiler::TraceMe;
using tsl::profiler::TraceMeEncode;
CommandBufferThunk::ExecutorCommandBuffer::ExecutorCommandBuffer(
std::unique_ptr<se::CommandBuffer> command_buffer)
: command_buffer(std::move(command_buffer)) {}
CommandBufferThunk::CommandBufferThunk(CommandBufferCmdSequence commands,
ThunkInfo thunk_info,
std::unique_ptr<SequentialThunk> thunks)
: Thunk(Thunk::kCommandBuffer, std::move(thunk_info)),
commands_(std::move(commands)),
thunks_(std::move(thunks)),
state_(std::make_shared<State>()) {
EvictCommandBuffers();
TrackCommandBuffers(state_);
}
bool CommandBufferThunk::ExecutorCommandBuffer::ShouldUpdateCommandBuffer(
const CommandBufferCmdSequence& commands,
const Thunk::ExecuteParams& params) {
if (commands.force_update()) {
return true;
}
bool should_update = false;
const BufferAllocations* allocs = params.buffer_allocations;
for (BufferAllocation::Index index : commands.allocs_indices()) {
se::DeviceMemoryBase alloc = allocs->GetDeviceAddress(index);
if (recorded_allocs.size() <= index) {
recorded_allocs.resize(index + 1);
should_update = true;
}
if (!recorded_allocs[index].IsSameAs(alloc)) {
recorded_allocs[index] = alloc;
should_update = true;
}
}
return should_update;
}
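// In other words, a re-record is requested when (a) the command sequence asks
// for it via force_update(), (b) a tracked allocation index is seen for the
// first time, or (c) the device address bound to a tracked allocation differs
// from the address captured at the previous recording.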
absl::Status CommandBufferThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
if (commands_.empty()) return absl::OkStatus();
TF_RETURN_IF_ERROR(commands_.Prepare(params, resource_requests));
if (thunks_) {
TF_RETURN_IF_ERROR(thunks_->Prepare(params, resource_requests));
}
return absl::OkStatus();
}
absl::Status CommandBufferThunk::Initialize(const InitializeParams& params) {
if (commands_.empty()) return absl::OkStatus();
TF_ASSIGN_OR_RETURN(std::shared_ptr<ExecutorCommandBuffer> cmd_buffer,
GetOrCreateCommandBuffer(params.executor));
absl::MutexLock lock(&cmd_buffer->mutex);
TF_RETURN_IF_ERROR(commands_.Initialize(params, cmd_buffer->state));
if (thunks_) {
TF_RETURN_IF_ERROR(thunks_->Initialize(params));
}
Thunk::ExecuteParams execute_params(
params.buffer_allocations, params.stream,
params.command_buffer_trace_stream, params.collective_params,
params.collective_cliques, nullptr,
nullptr,
nullptr,
nullptr, params.ffi_execution_context);
if (cmd_buffer->command_buffer->state() ==
se::CommandBuffer::State::kCreate &&
cmd_buffer->ShouldUpdateCommandBuffer(commands_, execute_params)) {
VLOG(3) << "Initialize command buffer on device #"
<< params.executor->device_ordinal()
<< " by recoding command buffer cmd sequence"
<< "; num_commands=" << commands_.size();
TraceMe trace([&] {
return TraceMeEncode("command_buffer::initialize",
{{"device", params.executor->device_ordinal()},
{"num_commands", commands_.size()}});
});
uint64_t start_micros = tsl::Env::Default()->NowMicros();
CommandBufferCmd::RecordParams record_params = {cmd_buffer->state};
TF_RETURN_IF_ERROR(commands_.Record(execute_params, record_params,
cmd_buffer->command_buffer.get()));
uint64_t end_micros = tsl::Env::Default()->NowMicros();
VLOG(3) << "Initialized command buffer on device #"
<< params.executor->device_ordinal() << " in "
<< (end_micros - start_micros)
<< " μs; num_commands=" << commands_.size();
cmd_buffer->num_executions = 0;
}
return absl::OkStatus();
}
absl::Status CommandBufferThunk::ExecuteOnStream(const ExecuteParams& params) {
if (commands_.empty()) return absl::OkStatus();
if (tsl::profiler::ProfilerLock::HasActiveSession() && thunks_) {
VLOG(1) << "Execute command buffer thunk as a regular thunk sequence "
"because we detected active profiling session";
TF_RETURN_IF_ERROR(thunks_->ExecuteOnStream(params));
return absl::OkStatus();
}
se::StreamExecutor* executor = params.stream->parent();
TF_ASSIGN_OR_RETURN(std::shared_ptr<ExecutorCommandBuffer> cmd_buffer,
GetOrCreateCommandBuffer(executor));
absl::MutexLock lock(&cmd_buffer->mutex);
if (cmd_buffer->ShouldUpdateCommandBuffer(commands_, params)) {
VLOG(3) << "Update command buffer on device #" << executor->device_ordinal()
<< " by recoding command buffer cmd sequence" << " after "
<< cmd_buffer->num_executions << " executions since last update"
<< "; num_commands=" << commands_.size();
TraceMe trace([&] {
cmd_buffer->mutex.AssertHeld();
return TraceMeEncode("command_buffer::update",
{{"device", executor->device_ordinal()},
{"num_commands", commands_.size()},
{"num_executions", cmd_buffer->num_executions}});
});
uint64_t start_micros = tsl::Env::Default()->NowMicros();
CommandBufferCmd::RecordParams record_params = {cmd_buffer->state};
TF_RETURN_IF_ERROR(commands_.Record(params, record_params,
cmd_buffer->command_buffer.get()));
uint64_t end_micros = tsl::Env::Default()->NowMicros();
VLOG(3) << "Updated command buffer in " << (end_micros - start_micros)
<< " μs; num_commands=" << commands_.size();
cmd_buffer->num_executions = 0;
}
++cmd_buffer->num_executions;
VLOG(3) << "Execute command buffer on device #" << executor->device_ordinal()
<< "; num_executions=" << cmd_buffer->num_executions;
TraceMe trace([&] {
cmd_buffer->mutex.AssertHeld();
return TraceMeEncode("command_buffer::execute",
{{"device", executor->device_ordinal()},
{"num_commands", commands_.size()},
{"num_executions", cmd_buffer->num_executions}});
});
return executor->Submit(params.stream, *cmd_buffer->command_buffer);
}
absl::StatusOr<std::shared_ptr<CommandBufferThunk::ExecutorCommandBuffer>>
CommandBufferThunk::GetOrCreateCommandBuffer(se::StreamExecutor* executor) {
absl::MutexLock lock(&state_->mutex);
if (auto it = state_->command_buffers.find(executor);
it != state_->command_buffers.end()) {
return it->second;
}
TF_ASSIGN_OR_RETURN(
auto command_buffer,
executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary));
auto emplaced = state_->command_buffers.emplace(
executor,
std::make_shared<ExecutorCommandBuffer>(std::move(command_buffer)));
return emplaced.first->second;
}
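// Each StreamExecutor gets its own primary se::CommandBuffer, created lazily
// on first use and cached in state_->command_buffers for later executions on
// the same device.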
struct CommandBufferThunk::GlobalState {
absl::Mutex mutex;
std::vector<std::weak_ptr<CommandBufferThunk::State>> state
ABSL_GUARDED_BY(mutex);
};
CommandBufferThunk::GlobalState* CommandBufferThunk::GetGlobalState() {
static auto* global_state = new GlobalState();
return global_state;
}
void CommandBufferThunk::TrackCommandBuffers(
std::weak_ptr<CommandBufferThunk::State> state) {
auto* global_state = GetGlobalState();
absl::MutexLock global_state_lock(&global_state->mutex);
global_state->state.push_back(state);
}
void CommandBufferThunk::EvictCommandBuffers() {
TraceMe trace([&] { return "EvictCommandBuffers"; });
auto* global_state = GetGlobalState();
absl::MutexLock global_state_lock(&global_state->mutex);
VLOG(3) << "Evict command buffer thunk command buffers; tracked thunks = "
<< global_state->state.size();
global_state->state.erase(
std::remove_if(global_state->state.begin(), global_state->state.end(),
[](auto& weak_ptr) { return weak_ptr.expired(); }),
global_state->state.end());
int64_t num_evicted = 0;
for (auto& weak_ptr : global_state->state) {
auto ptr = weak_ptr.lock();
if (!ptr) continue;
absl::MutexLock state_lock(&ptr->mutex);
num_evicted += ptr->command_buffers.size();
ptr->command_buffers.clear();
}
if (num_evicted > 0) {
VLOG(3) << "Evicted " << num_evicted
<< " command buffer thunk command buffers";
}
}
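// EvictCommandBuffers is called from the CommandBufferThunk constructor above:
// expired weak_ptr entries are pruned and the command buffers of still-alive
// thunks are dropped, to be re-created and re-recorded on their next
// initialization or execution.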
} | #include "xla/service/gpu/runtime/command_buffer_thunk.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <thread>
#include <utility>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/launch_dimensions.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/runtime/command_buffer_cmd.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/platform_util.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_test_kernels.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/kernel.h"
#include "xla/stream_executor/kernel_spec.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#ifdef GOOGLE_CUDA
#include "third_party/gpus/cuda/include/cuda.h"
#endif
namespace xla::gpu {
using MemoryAccess = CommandBufferCmd::MemoryAccess;
using KernelArgsPacking = se::MultiKernelLoaderSpec::KernelArgsPacking;
static se::StreamExecutor* GpuExecutor() {
auto name =
absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value());
auto* platform = se::PlatformManager::PlatformWithName(name).value();
return platform->ExecutorForDevice(0).value();
}
static Thunk::ExecutableSource ExecutableSource() {
Thunk::ExecutableSource source = {
#if defined(GOOGLE_CUDA)
se::gpu::internal::kAddI32Kernel,
{}
#elif defined(TENSORFLOW_USE_ROCM)
{},
se::gpu::internal::kAddI32KernelModule
#endif
};
return source;
}
static KernelArgsPacking CreateDefaultArgsPacking() {
using Packed = absl::StatusOr<std::unique_ptr<se::KernelArgsPackedArrayBase>>;
return [=](const se::Kernel& kernel, const se::KernelArgs& args) -> Packed {
auto* mem_args = se::Cast<se::KernelArgsDeviceMemoryArray>(&args);
return se::PackKernelArgs(mem_args->device_memory_args(),
args.number_of_shared_bytes());
};
}
static bool IsAtLeastCuda12300() {
#if defined(TENSORFLOW_USE_ROCM)
return false;
#endif
#if CUDA_VERSION >= 12030
return true;
#endif
return false;
}
static constexpr auto s0 = ExecutionStreamId(0);
static constexpr auto s1 = ExecutionStreamId(1);
TEST(CommandBufferThunkTest, MemcpyCmd) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
CommandBufferCmdSequence commands;
commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_b, slice_a, byte_length);
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
se::StreamExecutorMemoryAllocator allocator(executor);
ServiceExecutableRunOptions run_options;
BufferAllocations allocations({a, b}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42));
}
TEST(CommandBufferThunkTest, MemzeroCmd) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
CommandBufferCmdSequence commands;
commands.Emplace<MemzeroCmd>(s0, slice_a);
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 0));
}
TEST(CommandBufferThunkTest, Memset32Cmd) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
CommandBufferCmdSequence commands;
commands.Emplace<Memset32Cmd>(s0, slice_a, int32_t{84});
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 84));
}
TEST(CommandBufferThunkTest, Memset32CmdOnDifferentStreams) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(2, 0);
TF_ASSERT_OK(stream->MemZero(&a, 2 * sizeof(int32_t)));
BufferAllocation alloc(0, a.size(), 0);
BufferAllocation::Slice slice0(&alloc, 0 * sizeof(int32_t), sizeof(int32_t));
BufferAllocation::Slice slice1(&alloc, 1 * sizeof(int32_t), sizeof(int32_t));
CommandBufferCmdSequence commands;
commands.Emplace<Memset32Cmd>(s0, slice0, int32_t{12});
commands.Emplace<Memset32Cmd>(s1, slice1, int32_t{34});
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(2, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), a, a.size()));
ASSERT_EQ(dst, std::vector<int32_t>({12, 34}));
}
TEST(CommandBufferThunkTest, LaunchCmd) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
auto args = {slice_a, slice_a, slice_b};
auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead,
MemoryAccess::kWrite};
CommandBufferCmdSequence commands;
commands.Emplace<LaunchCmd>(s0, "add", args, args_access,
LaunchDimensions(1, 4),
0);
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = ExecutableSource();
TF_ASSERT_OK(
thunk.Initialize({executor, source, &allocations, stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
allocations = BufferAllocations({a, c}, 0, &allocator);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
}
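// Rebinding allocation #1 from `b` to `c` above changes its device address, so
// the next ExecuteOnStream observes ShouldUpdateCommandBuffer() == true and
// re-records the command buffer before submission; the third execution reuses
// the updated recording unchanged.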
TEST(CommandBufferThunkTest, CustomAddKernelLaunchCmd) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
auto packing = CreateDefaultArgsPacking();
se::MultiKernelLoaderSpec spec(3, std::move(packing));
spec.AddInProcessSymbol(se::gpu::internal::GetAddI32Kernel(), "add");
auto custom_kernel =
CustomKernel("add", std::move(spec), se::BlockDim(),
se::ThreadDim(4, 1, 1), 0);
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
auto args = {slice_a, slice_a, slice_b};
auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead,
MemoryAccess::kWrite};
CommandBufferCmdSequence commands;
commands.Emplace<LaunchCmd>(s0, "add", args, args_access,
LaunchDimensions(1, 4),
0);
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = ExecutableSource();
TF_ASSERT_OK(
thunk.Initialize({executor, source, &allocations, stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
allocations = BufferAllocations({a, c}, 0, &allocator);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
TF_ASSERT_OK(stream->MemZero(&c, byte_length));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
}
TEST(CommandBufferThunkTest, GemmCmd) {
if (!IsAtLeastCuda12300()) {
GTEST_SKIP() << "CUDA graph tracing is not supported";
}
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t lhs_length = sizeof(float) * 2 * 4;
int64_t rhs_length = sizeof(float) * 4 * 3;
int64_t out_length = sizeof(float) * 2 * 3;
se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4);
std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8};
TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(4 * 3);
std::vector<float> rhs_arr(12, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(2 * 3);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
BufferAllocation alloc_lhs(0, lhs_length, 0);
BufferAllocation alloc_rhs(1, rhs_length, 0);
BufferAllocation alloc_out(2, out_length, 0);
BufferAllocation alloc_workspace(3, 1024 * 1024, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length);
BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, rhs_length);
BufferAllocation::Slice slice_out(&alloc_out, 0, out_length);
BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {4, 3}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {2, 3}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
CommandBufferCmdSequence commands;
commands.Emplace<GemmCmd>(s0, config.value(), slice_lhs, slice_rhs, slice_out,
slice_workspace,
true);
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({lhs, rhs, out, workspace}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(6, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({10, 10, 10, 26, 26, 26}));
se::DeviceMemory<float> updated_out = executor->AllocateArray<float>(2 * 3);
TF_ASSERT_OK(stream->MemZero(&updated_out, out_length));
allocations =
BufferAllocations({lhs, rhs, updated_out, workspace}, 0, &allocator);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), updated_out, out_length));
ASSERT_EQ(dst, std::vector<float>({10, 10, 10, 26, 26, 26}));
TF_ASSERT_OK(stream->MemZero(&updated_out, out_length));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), updated_out, out_length));
ASSERT_EQ(dst, std::vector<float>({10, 10, 10, 26, 26, 26}));
}
TEST(CommandBufferThunkTest, CublasLtCmd) {
if (!IsAtLeastCuda12300()) {
GTEST_SKIP() << "CUDA graph tracing is not supported";
}
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream1, executor->CreateStream());
TF_ASSERT_OK_AND_ASSIGN(auto stream2, executor->CreateStream());
int64_t a_length = sizeof(float) * 2 * 4;
int64_t b_length = sizeof(float) * 4 * 3;
int64_t c_length = sizeof(float) * 2 * 3;
int64_t d_length = sizeof(float) * 2 * 3;
BufferAllocation alloc_a(0, a_length, 0);
BufferAllocation alloc_b(1, b_length, 0);
BufferAllocation alloc_c(2, c_length, 0);
BufferAllocation alloc_d(3, d_length, 0);
BufferAllocation alloc_workspace(4, 1024 * 1024, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, a_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, b_length);
BufferAllocation::Slice slice_c(&alloc_c, 0, c_length);
BufferAllocation::Slice slice_d(&alloc_d, 0, d_length);
BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024);
auto config = GemmConfig::For(
ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}),
{}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {4, 3}),
{}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {2, 3}),
nullptr,
ShapeUtil::MakeShape(PrimitiveType::F32, {2, 3}),
1.0, 0,
1.0,
PrecisionConfig::ALG_UNSET,
std::nullopt,
se::blas::kDefaultComputePrecision,
false, false);
ASSERT_TRUE(config.ok());
CommandBufferCmdSequence commands;
commands.Emplace<CublasLtCmd>(
s0, config.value(), se::gpu::BlasLt::Epilogue::kDefault, 0, slice_a,
slice_b, slice_c, slice_d, BufferAllocation::Slice(),
BufferAllocation::Slice(), BufferAllocation::Slice(),
BufferAllocation::Slice(), BufferAllocation::Slice(),
BufferAllocation::Slice(), BufferAllocation::Slice(), slice_workspace);
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
std::vector<float> a_arr_1{1, 2, 3, 4, 5, 6, 7, 8};
std::vector<float> a_arr_2{2, 3, 4, 5, 6, 7, 8, 9};
std::vector<float> result_1{11, 11, 11, 27, 27, 27};
std::vector<float> result_2{15, 15, 15, 31, 31, 31};
auto run_cublaslt_test = [&](std::unique_ptr<se::Stream>& stream,
std::vector<float> a_arr,
std::vector<float> result) {
se::DeviceMemory<float> a = executor->AllocateArray<float>(2 * 4);
TF_ASSERT_OK(stream->Memcpy(&a, a_arr.data(), a_length));
se::DeviceMemory<float> b = executor->AllocateArray<float>(4 * 3);
std::vector<float> b_arr(12, 1);
TF_ASSERT_OK(stream->Memcpy(&b, b_arr.data(), b_length));
se::DeviceMemory<float> c = executor->AllocateArray<float>(2 * 3);
std::vector<float> c_arr(6, 1);
TF_ASSERT_OK(stream->Memcpy(&c, c_arr.data(), c_length));
se::DeviceMemory<float> d = executor->AllocateArray<float>(2 * 3);
TF_ASSERT_OK(stream->MemZero(&d, d_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b, c, d, workspace}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(6, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, d_length));
ASSERT_EQ(dst, result);
se::DeviceMemory<float> updated_d = executor->AllocateArray<float>(2 * 3);
TF_ASSERT_OK(stream->MemZero(&updated_d, d_length));
allocations =
BufferAllocations({a, b, c, updated_d, workspace}, 0, &allocator);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), updated_d, d_length));
ASSERT_EQ(dst, result);
TF_ASSERT_OK(stream->MemZero(&updated_d, d_length));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), updated_d, d_length));
ASSERT_EQ(dst, result);
};
std::thread t1(run_cublaslt_test, std::ref(stream1), a_arr_1, result_1);
std::thread t2(run_cublaslt_test, std::ref(stream2), a_arr_2, result_2);
t1.join();
t2.join();
}
TEST(CommandBufferThunkTest, MultipleLaunchCmd) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = 4;
int64_t byte_length = sizeof(int32_t) * length;
se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0);
se::DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length));
TF_ASSERT_OK(stream->MemZero(&b, byte_length));
TF_ASSERT_OK(stream->Memset32(&c, 21, byte_length));
TF_ASSERT_OK(stream->MemZero(&d, byte_length));
BufferAllocation alloc_a(0, byte_length, 0);
BufferAllocation alloc_b(1, byte_length, 0);
BufferAllocation alloc_c(2, byte_length, 0);
BufferAllocation alloc_d(3, byte_length, 0);
BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);
BufferAllocation::Slice slice_c(&alloc_c, 0, byte_length);
BufferAllocation::Slice slice_d(&alloc_d, 0, byte_length);
auto args = {slice_a, slice_a, slice_b};
auto args_1 = {slice_c, slice_c, slice_d};
auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead,
MemoryAccess::kWrite};
CommandBufferCmdSequence commands;
commands.Emplace<LaunchCmd>(s0, "add", args, args_access,
LaunchDimensions(1, 4),
0);
commands.Emplace<LaunchCmd>(s0, "add", args_1, args_access,
LaunchDimensions(1, 4),
0);
CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({a, b, c, d}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = ExecutableSource();
TF_ASSERT_OK(
thunk.Initialize({executor, source, &allocations, stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> dst(4, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 21 + 21));
BufferAllocation alloc_e(3, byte_length, 0);
BufferAllocation::Slice slice_e(&alloc_e, 0, byte_length);
se::DeviceMemory<int32_t> e = executor->AllocateArray<int32_t>(length, 0);
TF_ASSERT_OK(stream->MemZero(&e, byte_length));
allocations = BufferAllocations({a, b, c, e}, 0, &allocator);
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42));
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), e, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 21 + 21));
TF_ASSERT_OK(stream->MemZero(&e, byte_length));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::fill(dst.begin(), dst.end(), 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); | 2,160 |
#ifndef XLA_SERVICE_GPU_RUNTIME_NCCL_CLIQUE_KEY_H_
#define XLA_SERVICE_GPU_RUNTIME_NCCL_CLIQUE_KEY_H_
#include <array>
#include <cstdint>
#include <functional>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/service/global_device_id.h"
#include "tsl/lib/gtl/int_type.h"
namespace xla::gpu {
TSL_LIB_GTL_DEFINE_INT_TYPE(NcclStreamId, uint64_t);
enum class AsyncStreamKind : int64_t {
kCollective = 0,
kP2P0 = 1,
kP2P1 = 2,
};
constexpr static int64_t kAsyncStreamTotal =
static_cast<int64_t>(AsyncStreamKind::kP2P1) + 1;
inline NcclStreamId GetStreamId(
bool is_async, AsyncStreamKind stream_kind = AsyncStreamKind::kCollective) {
return NcclStreamId(is_async ? static_cast<uint64_t>(stream_kind) + 1 : 0);
}
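// Resulting stream ids, derived directly from the definition above:
//   GetStreamId(false)                              -> NcclStreamId(0)
//   GetStreamId(true, AsyncStreamKind::kCollective) -> NcclStreamId(1)
//   GetStreamId(true, AsyncStreamKind::kP2P0)       -> NcclStreamId(2)
//   GetStreamId(true, AsyncStreamKind::kP2P1)       -> NcclStreamId(3)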
class NcclCliqueKey {
public:
explicit NcclCliqueKey(
std::vector<GlobalDeviceId> devices,
NcclStreamId stream_id = NcclStreamId(0),
AsyncStreamKind stream_kind = AsyncStreamKind::kCollective);
absl::Span<const GlobalDeviceId> devices() const;
NcclStreamId stream_id() const;
std::optional<int64_t> rank(GlobalDeviceId id) const;
bool IsSubsetOf(const NcclCliqueKey& other) const;
AsyncStreamKind stream_kind() const { return stream_kind_; }
std::string ToString() const;
template <typename H>
friend H AbslHashValue(H h, const NcclCliqueKey& k);
friend bool operator==(const NcclCliqueKey& a, const NcclCliqueKey& b);
friend bool operator<(const NcclCliqueKey& a, const NcclCliqueKey& b);
friend bool operator>(const NcclCliqueKey& a, const NcclCliqueKey& b);
private:
std::vector<GlobalDeviceId> devices_;
NcclStreamId stream_id_;
AsyncStreamKind stream_kind_;
};
template <typename H>
H AbslHashValue(H h, const NcclCliqueKey& k) {
return H::combine(std::move(h), k.devices_, k.stream_id_);
}
bool operator==(const NcclCliqueKey& a, const NcclCliqueKey& b);
bool operator<(const NcclCliqueKey& a, const NcclCliqueKey& b);
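// Opaque fixed-size (128-byte) identifier used to bootstrap a NCCL clique.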
class NcclCliqueId {
public:
static constexpr int32_t kSize = 128;
static absl::StatusOr<NcclCliqueId> FromString(std::string_view str);
NcclCliqueId();
explicit NcclCliqueId(char bytes[kSize]);
absl::Span<const char> data() const;
std::string ToString() const;
template <typename H>
friend H AbslHashValue(H h, const NcclCliqueId& id);
private:
std::array<char, kSize> data_;
};
template <typename H>
H AbslHashValue(H h, const NcclCliqueId& id) {
return H::combine(std::move(h), id.data());
}
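// Callback for resolving a NcclCliqueKey to a NcclCliqueId.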
using NcclCliqueIdCallback =
std::function<absl::StatusOr<NcclCliqueId>(const NcclCliqueKey&)>;
}
#endif
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include <algorithm>
#include <cstdint>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/service/global_device_id.h"
namespace xla::gpu {
NcclCliqueKey::NcclCliqueKey(std::vector<GlobalDeviceId> devices,
NcclStreamId stream_id,
AsyncStreamKind stream_kind)
: devices_(std::move(devices)),
stream_id_(stream_id),
stream_kind_(stream_kind) {}
absl::Span<const GlobalDeviceId> NcclCliqueKey::devices() const {
return devices_;
}
NcclStreamId NcclCliqueKey::stream_id() const { return stream_id_; }
std::optional<int64_t> NcclCliqueKey::rank(GlobalDeviceId id) const {
if (auto it = absl::c_find(devices_, id); it != devices_.end()) {
return it - devices_.begin();
}
return std::nullopt;
}
bool NcclCliqueKey::IsSubsetOf(const NcclCliqueKey& other) const {
return stream_id_ == other.stream_id_ &&
absl::c_all_of(devices_, [&](GlobalDeviceId id) {
return absl::c_linear_search(other.devices_, id);
});
}
std::string NcclCliqueKey::ToString() const {
return absl::StrFormat("devices=[%s]; stream=%d",
GlobalDeviceIdsToString(devices_), stream_id_.value());
}
bool operator==(const NcclCliqueKey& a, const NcclCliqueKey& b) {
return a.devices_ == b.devices_ && a.stream_id_ == b.stream_id_;
}
bool operator<(const NcclCliqueKey& a, const NcclCliqueKey& b) {
if (a.devices_.size() < b.devices_.size()) return true;
if (b.devices_.size() < a.devices_.size()) return false;
if (a.devices_ < b.devices_) return true;
if (b.devices_ < a.devices_) return false;
return a.stream_id_.value() < b.stream_id_.value();
}
bool operator>(const NcclCliqueKey& a, const NcclCliqueKey& b) {
if (a.devices_.size() > b.devices_.size()) return true;
if (b.devices_.size() > a.devices_.size()) return false;
if (a.devices_ > b.devices_) return true;
if (b.devices_ > a.devices_) return false;
return a.stream_id_.value() < b.stream_id_.value();
}
NcclCliqueId::NcclCliqueId() { std::fill(data_.begin(), data_.end(), 0); }
NcclCliqueId::NcclCliqueId(char bytes[kSize]) {
std::copy(bytes, bytes + kSize, data_.data());
}
absl::StatusOr<NcclCliqueId> NcclCliqueId::FromString(std::string_view str) {
if (str.size() != kSize) {
return absl::InvalidArgumentError(
absl::StrFormat("Invalid NCCL clique id size: %d , expected %d bytes",
str.size(), kSize));
}
char bytes[kSize];
std::copy(str.data(), str.data() + kSize, bytes);
return NcclCliqueId(bytes);
}
absl::Span<const char> NcclCliqueId::data() const { return data_; }
std::string NcclCliqueId::ToString() const {
return std::string(data_.data(), data_.size());
}
}
#include "xla/service/gpu/runtime/nccl_clique_key.h"
#include <cstdint>
#include <functional>
#include "absl/container/btree_map.h"
#include "xla/service/global_device_id.h"
#include "tsl/platform/test.h"
namespace xla::gpu {
TEST(NcclCliqueKeyTest, IsSubsetOf) {
GlobalDeviceId id0 = GlobalDeviceId(0);
GlobalDeviceId id1 = GlobalDeviceId(1);
GlobalDeviceId id2 = GlobalDeviceId(2);
GlobalDeviceId id3 = GlobalDeviceId(3);
NcclCliqueKey key0({id0, id1}, NcclStreamId(0));
NcclCliqueKey key1({id0, id1, id2, id3}, NcclStreamId(0));
NcclCliqueKey key2({id0, id1, id2, id3}, NcclStreamId(1));
NcclCliqueKey key3({id1, id2, id3}, NcclStreamId(0));
EXPECT_TRUE(key0.IsSubsetOf(key1));
EXPECT_FALSE(key0.IsSubsetOf(key2));
EXPECT_FALSE(key0.IsSubsetOf(key3));
}
TEST(NcclCliqueKeyTest, Compare) {
GlobalDeviceId id0 = GlobalDeviceId(0);
GlobalDeviceId id1 = GlobalDeviceId(1);
GlobalDeviceId id2 = GlobalDeviceId(2);
GlobalDeviceId id3 = GlobalDeviceId(3);
NcclCliqueKey key0({id0, id1}, NcclStreamId(0));
NcclCliqueKey key1({id1, id2, id3}, NcclStreamId(0));
EXPECT_LT(key0, key1);
EXPECT_GT(key1, key0);
}
TEST(NcclCliqueKeyTest, BtreeIterationOrder) {
GlobalDeviceId id0 = GlobalDeviceId(0);
GlobalDeviceId id1 = GlobalDeviceId(1);
GlobalDeviceId id2 = GlobalDeviceId(2);
GlobalDeviceId id3 = GlobalDeviceId(3);
NcclCliqueKey key0({id0, id2}, NcclStreamId(0));
NcclCliqueKey key1({id0, id1, id2, id3}, NcclStreamId(0));
absl::btree_map<NcclCliqueKey, int64_t, std::greater<NcclCliqueKey>> map;
map[key0] = 0;
map[key1] = 1;
EXPECT_EQ(map.begin()->first, key1);
}
}
#ifndef XLA_SERVICE_GPU_RUNTIME_FOR_ALL_THUNKS_H_
#define XLA_SERVICE_GPU_RUNTIME_FOR_ALL_THUNKS_H_
#include "absl/functional/function_ref.h"
#include "xla/service/gpu/runtime/thunk.h"
namespace xla::gpu {
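// Invokes `fn` on `thunk` (or on every thunk in `thunks`) and recursively on
// all thunks nested inside command buffer, conditional, sequential, while and
// dynamic slice thunks.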
void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn, const Thunk* thunk);
void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn,
const ThunkSequence* thunks);
}
#endif
#include "xla/service/gpu/runtime/for_all_thunks.h"
#include <memory>
#include <optional>
#include "absl/functional/function_ref.h"
#include "xla/service/gpu/runtime/command_buffer_thunk.h"
#include "xla/service/gpu/runtime/conditional_thunk.h"
#include "xla/service/gpu/runtime/dynamic_slice_thunk.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/runtime/while_thunk.h"
#include "tsl/platform/casts.h"
namespace xla::gpu {
void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn,
const Thunk* thunk) {
fn(thunk);
switch (thunk->kind()) {
case Thunk::kAddressComputation:
ForAllThunks(fn, tensorflow::down_cast<const DynamicSliceThunk*>(thunk)
->embedded_thunk());
break;
case Thunk::kCommandBuffer:
if (const std::unique_ptr<SequentialThunk>& sequence =
tensorflow::down_cast<const CommandBufferThunk*>(thunk)->thunks();
sequence != nullptr) {
ForAllThunks(fn, sequence.get());
}
break;
case Thunk::kConditional:
for (const std::unique_ptr<SequentialThunk>& branch :
tensorflow::down_cast<const ConditionalThunk*>(thunk)
->branch_thunks()) {
ForAllThunks(fn, branch.get());
}
break;
case Thunk::kSequential:
ForAllThunks(
fn, &tensorflow::down_cast<const SequentialThunk*>(thunk)->thunks());
break;
case Thunk::kWhile:
ForAllThunks(fn, tensorflow::down_cast<const WhileThunk*>(thunk)
->condition_thunk_sequence());
ForAllThunks(fn, tensorflow::down_cast<const WhileThunk*>(thunk)
->body_thunk_sequence());
break;
case Thunk::kCholesky:
case Thunk::kConvolution:
case Thunk::kConvolutionReorder:
case Thunk::kCopy:
case Thunk::kCopyDone:
case Thunk::kCubSort:
case Thunk::kCublasLtMatmul:
case Thunk::kCustomCall:
case Thunk::kCustomKernel:
case Thunk::kCuDnn:
case Thunk::kFft:
case Thunk::kFusedMHA:
case Thunk::kGemm:
case Thunk::kInfeed:
case Thunk::kKernel:
case Thunk::kMemset32BitValue:
case Thunk::kMemzero:
case Thunk::kNcclAllGather:
case Thunk::kNcclAllGatherStart:
case Thunk::kNcclAllGatherDone:
case Thunk::kNcclAllReduce:
case Thunk::kNcclAllReduceStart:
case Thunk::kNcclAllReduceDone:
case Thunk::kNcclCollectiveBroadcast:
case Thunk::kNcclCollectiveBroadcastStart:
case Thunk::kNcclCollectiveBroadcastDone:
case Thunk::kNcclCollectivePermute:
case Thunk::kNcclCollectivePermuteStart:
case Thunk::kNcclCollectivePermuteDone:
case Thunk::kNcclReduceScatter:
case Thunk::kNcclReduceScatterStart:
case Thunk::kNcclReduceScatterDone:
case Thunk::kNcclAllToAll:
case Thunk::kNcclAllToAllStart:
case Thunk::kNcclAllToAllDone:
case Thunk::kNcclSend:
case Thunk::kNcclSendDone:
case Thunk::kNcclRecv:
case Thunk::kNcclRecvDone:
case Thunk::kNorm:
case Thunk::kOutfeed:
case Thunk::kPartitionId:
case Thunk::kRecv:
case Thunk::kRecvDone:
case Thunk::kReplicaId:
case Thunk::kSend:
case Thunk::kSendDone:
case Thunk::kTriangularSolve:
case Thunk::kWaitForStreams:
break;
}
}
void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn,
const ThunkSequence* thunks) {
for (const std::unique_ptr<Thunk>& thunk : *thunks) {
ForAllThunks(fn, thunk.get());
}
}
}
#include "xla/service/gpu/runtime/for_all_thunks.h"
#include <memory>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/status/status.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/command_buffer_cmd.h"
#include "xla/service/gpu/runtime/command_buffer_thunk.h"
#include "xla/service/gpu/runtime/conditional_thunk.h"
#include "xla/service/gpu/runtime/dynamic_slice_thunk.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/runtime/while_thunk.h"
namespace xla::gpu {
namespace {
using ::testing::IsSupersetOf;
using ::testing::UnorderedElementsAre;
std::vector<const Thunk*> GetAllThunks(Thunk* root) {
std::vector<const Thunk*> thunks;
ForAllThunks([&](const Thunk* thunk) { thunks.push_back(thunk); }, root);
return thunks;
}
struct DummyThunk : public Thunk {
DummyThunk() : Thunk(Thunk::Kind::kGemm, Thunk::ThunkInfo()) {}
absl::Status ExecuteOnStream(const ExecuteParams& params) override {
return absl::OkStatus();
}
};
TEST(ForAllThunksTest, SingleThunk) {
DummyThunk thunk;
EXPECT_THAT(GetAllThunks(&thunk), UnorderedElementsAre(&thunk));
}
TEST(ForAllThunksTest, DynamicSliceThunk) {
auto thunk = std::make_unique<DummyThunk>();
Thunk* thunk_ptr = thunk.get();
auto thunk_sequence = std::make_unique<ThunkSequence>();
thunk_sequence->push_back(std::move(thunk));
DynamicSliceThunk dynamic_slice_thunk(
Thunk::ThunkInfo(), std::move(thunk_sequence), {}, {}, {}, {}, {}, {});
EXPECT_THAT(GetAllThunks(&dynamic_slice_thunk),
IsSupersetOf<const Thunk*>({thunk_ptr, &dynamic_slice_thunk}));
}
TEST(ForAllThunksTest, CommandBufferThunk) {
auto thunk = std::make_unique<DummyThunk>();
Thunk* thunk_ptr = thunk.get();
ThunkSequence thunk_sequence;
thunk_sequence.push_back(std::move(thunk));
auto sequential_thunk = std::make_unique<SequentialThunk>(
Thunk::ThunkInfo(), std::move(thunk_sequence));
Thunk* sequential_thunk_ptr = sequential_thunk.get();
CommandBufferThunk command_buffer_thunk(CommandBufferCmdSequence(),
Thunk::ThunkInfo(),
std::move(sequential_thunk));
EXPECT_THAT(GetAllThunks(&command_buffer_thunk),
UnorderedElementsAre(thunk_ptr, &command_buffer_thunk,
sequential_thunk_ptr));
}
TEST(ForAllThunksTest, ConditionalThunk) {
auto thunk = std::make_unique<DummyThunk>();
Thunk* thunk_ptr = thunk.get();
ThunkSequence thunk_sequence;
thunk_sequence.push_back(std::move(thunk));
auto sequential_thunk = std::make_unique<SequentialThunk>(
Thunk::ThunkInfo(), std::move(thunk_sequence));
SequentialThunk* sequential_thunk_ptr = sequential_thunk.get();
ConditionalThunkConfig config;
config.branch_thunks.push_back(std::move(sequential_thunk));
ConditionalThunk conditional_thunk(Thunk::ThunkInfo(), std::move(config),
BufferAllocation::Slice());
EXPECT_THAT(GetAllThunks(&conditional_thunk),
UnorderedElementsAre(thunk_ptr, sequential_thunk_ptr,
&conditional_thunk));
}
TEST(ForAllThunksTest, WhileThunk) {
auto condition_thunk = std::make_unique<DummyThunk>();
Thunk* condition_thunk_ptr = condition_thunk.get();
ThunkSequence condition_thunk_sequence;
condition_thunk_sequence.push_back(std::move(condition_thunk));
auto body_thunk = std::make_unique<DummyThunk>();
Thunk* body_thunk_ptr = body_thunk.get();
ThunkSequence body_thunk_sequence;
body_thunk_sequence.push_back(std::move(body_thunk));
WhileThunk while_thunk(
Thunk::ThunkInfo(), BufferAllocation::Slice(),
std::make_unique<SequentialThunk>(Thunk::ThunkInfo(),
std::move(condition_thunk_sequence)),
std::make_unique<SequentialThunk>(Thunk::ThunkInfo(),
std::move(body_thunk_sequence)));
EXPECT_THAT(GetAllThunks(&while_thunk),
IsSupersetOf<const Thunk*>(
{condition_thunk_ptr, body_thunk_ptr, &while_thunk}));
}
}
}
#ifndef XLA_SERVICE_GPU_RUNTIME_DYNAMIC_SLICE_THUNK_H_
#define XLA_SERVICE_GPU_RUNTIME_DYNAMIC_SLICE_THUNK_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <variant>
#include <vector>
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/shape.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream_executor.h"
namespace xla {
namespace gpu {
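// Wraps an embedded thunk sequence and, before every execution, replaces
// selected argument buffers with dynamically computed slices of the original
// allocations, so the embedded thunks operate on the sliced regions.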
class DynamicSliceThunk : public Thunk {
public:
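  // An offset for a sliced dimension is either a compile-time constant, the
  // current iteration of the enclosing while loop (LoopIter), or a slice of
  // device memory holding the offset value.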
struct LoopIter {};
using Offset = std::variant<uint64_t, LoopIter, BufferAllocation::Slice>;
DynamicSliceThunk(
ThunkInfo thunk_info, std::unique_ptr<ThunkSequence> embedded_thunk,
std::vector<std::optional<BufferAllocation::Slice>> arguments,
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations_,
std::vector<std::optional<std::vector<Offset>>> offsets,
std::vector<std::optional<Shape>> orig_shapes,
std::vector<std::optional<Shape>> sliced_shapes,
std::vector<std::optional<uint64_t>> offset_byte_sizes);
DynamicSliceThunk(const DynamicSliceThunk&) = delete;
DynamicSliceThunk& operator=(const DynamicSliceThunk&) = delete;
const Thunk* embedded_thunk() const { return embedded_thunk_.get(); }
absl::Status Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) override;
absl::Status Initialize(const InitializeParams& params) override;
absl::Status ExecuteOnStream(const ExecuteParams& params) override;
private:
std::unique_ptr<SequentialThunk> embedded_thunk_;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations_;
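  // Per-argument slicing specification. Arguments without offsets are
  // forwarded to the embedded thunks unchanged.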
struct SliceDef {
std::optional<BufferAllocation::Slice> embedded_thunk_argument;
std::optional<std::vector<Offset>> offsets;
std::optional<Shape> orig_shape;
std::optional<Shape> sliced_shape;
std::optional<uint64_t> offset_byte_size;
};
std::vector<SliceDef> slices_;
absl::Mutex mutex_;
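  // Host staging buffers (allocated lazily, one per executor) holding the
  // offset values of each slice, including values copied back from device.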
absl::flat_hash_map<se::StreamExecutor*,
std::unique_ptr<se::MemoryAllocation>>
offsets_allocs_ ABSL_GUARDED_BY(mutex_);
int64_t offsets_allocs_size_ = 0;
std::vector<int64_t> offsets_allocs_base_;
};
}
}
#endif
#include "xla/service/gpu/runtime/dynamic_slice_thunk.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/synchronization/mutex.h"
#include "llvm/ADT/STLExtras.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/ir_emission_utils.h"
#include "xla/service/gpu/runtime/sequential_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/gpu/runtime/while_thunk.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/memory_allocation.h"
#include "xla/stream_executor/stream.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace gpu {
DynamicSliceThunk::DynamicSliceThunk(
ThunkInfo thunk_info, std::unique_ptr<ThunkSequence> embedded_thunk,
std::vector<std::optional<BufferAllocation::Slice>> arguments,
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations,
std::vector<std::optional<std::vector<Offset>>> offsets,
std::vector<std::optional<Shape>> orig_shapes,
std::vector<std::optional<Shape>> sliced_shapes,
std::vector<std::optional<uint64_t>> offset_byte_sizes)
: Thunk(Kind::kAddressComputation, thunk_info),
embedded_thunk_(std::make_unique<SequentialThunk>(
ThunkInfo(), std::move(*embedded_thunk))),
fake_allocations_(std::move(fake_allocations)) {
for (auto [arg, offsets, orig_shape, sliced_shape, offset_byte_size] :
llvm::zip_equal(arguments, offsets, orig_shapes, sliced_shapes,
offset_byte_sizes)) {
slices_.push_back(SliceDef{
std::move(arg),
std::move(offsets),
std::move(orig_shape),
std::move(sliced_shape),
std::move(offset_byte_size),
});
}
for (SliceDef& slice : slices_) {
offsets_allocs_base_.push_back(offsets_allocs_size_);
if (slice.sliced_shape.has_value()) {
offsets_allocs_size_ += slice.sliced_shape->rank() * sizeof(int64_t);
}
}
}
absl::Status DynamicSliceThunk::Prepare(const PrepareParams& params,
ResourceRequests& resource_requests) {
for (SliceDef& slice : slices_) {
if (slice.offsets.has_value()) {
TF_RET_CHECK(slice.embedded_thunk_argument.has_value());
TF_RET_CHECK(slice.orig_shape.has_value());
TF_RET_CHECK(slice.sliced_shape.has_value());
TF_RET_CHECK(slice.offset_byte_size.has_value());
TF_RET_CHECK(slice.orig_shape->IsArray());
TF_RET_CHECK(slice.sliced_shape->IsArray());
TF_RET_CHECK(slice.offsets->size() == slice.orig_shape->rank());
TF_RET_CHECK(slice.sliced_shape->rank() == slice.orig_shape->rank());
}
}
TF_RETURN_IF_ERROR(embedded_thunk_->Prepare(params, resource_requests));
return absl::OkStatus();
}
absl::Status DynamicSliceThunk::Initialize(const InitializeParams& params) {
TF_RETURN_IF_ERROR(embedded_thunk_->Initialize(params));
absl::MutexLock lock(&mutex_);
if (offsets_allocs_.contains(params.executor)) return absl::OkStatus();
VLOG(2) << "Allocate " << offsets_allocs_size_
<< " bytes for transferring offsets on executor: " << params.executor;
TF_ASSIGN_OR_RETURN(
std::unique_ptr<se::MemoryAllocation> allocation,
params.executor->HostMemoryAllocate(offsets_allocs_size_));
offsets_allocs_.emplace(params.executor, std::move(allocation));
return absl::OkStatus();
}
absl::Status DynamicSliceThunk::ExecuteOnStream(const ExecuteParams& params) {
se::Stream& stream = *params.stream;
const BufferAllocations& orig_allocations = *params.buffer_allocations;
absl::InlinedVector<se::DeviceMemoryBase, 8> slice_buffers(
slices_.size(), se::DeviceMemoryBase());
int64_t* offsets_alloc = [&] {
absl::MutexLock lock(&mutex_);
return reinterpret_cast<int64_t*>(
offsets_allocs_.at(stream.parent())->opaque());
}();
auto offset_value = [&](int64_t arg_idx, int64_t offset_idx) -> int64_t& {
return offsets_alloc[offsets_allocs_base_.at(arg_idx) + offset_idx];
};
VLOG(2) << "Execute address computation thunk: slices=" << slices_.size();
for (auto [argument_idx, slice] : llvm::enumerate(slices_)) {
if (!slice.embedded_thunk_argument.has_value()) {
continue;
}
se::DeviceMemoryBase argument_buffer =
orig_allocations.GetDeviceAddress(*slice.embedded_thunk_argument);
if (!slice.offsets.has_value()) {
slice_buffers[argument_idx] = argument_buffer;
continue;
}
const Shape& src_shape = *slice.orig_shape;
const Shape& dst_shape = *slice.sliced_shape;
TF_RET_CHECK(IsContiguousSlice(*slice.orig_shape, *slice.sliced_shape));
absl::InlinedVector<int64_t, 4> slice_starts;
slice_starts.reserve(dst_shape.rank());
int64_t num_transfers = 0;
for (auto [offset_idx, values] : llvm::enumerate(llvm::zip(
*slice.offsets, src_shape.dimensions(), dst_shape.dimensions()))) {
auto [offset, src_dim, dst_dim] = values;
if (uint64_t* const_offset = std::get_if<uint64_t>(&offset)) {
VLOG(2) << " - arg " << argument_idx << "[" << offset_idx
<< "]: constant offset = " << *const_offset;
offset_value(argument_idx, offset_idx) = *const_offset;
} else if (std::holds_alternative<LoopIter>(offset)) {
TF_ASSIGN_OR_RETURN(int64_t iter, WhileThunk::CurrentLoopIteration());
VLOG(2) << " - arg " << argument_idx << "[" << offset_idx
<< "]: loop iteration offset = " << iter;
offset_value(argument_idx, offset_idx) = iter;
} else {
auto alloc_slice = std::get<BufferAllocation::Slice>(offset);
VLOG(2) << " - arg " << argument_idx << "[" << offset_idx
<< "]: transfer offset from device " << alloc_slice.ToString();
se::DeviceMemoryBase offset_src =
orig_allocations.GetDeviceAddress(alloc_slice);
int64_t* offset_dst = &offset_value(argument_idx, offset_idx);
TF_RETURN_IF_ERROR(
stream.Memcpy(offset_dst, offset_src, *slice.offset_byte_size));
++num_transfers;
}
}
if (num_transfers > 0) {
VLOG(2) << "Wait for completion of " << num_transfers << " transfer";
TF_RETURN_IF_ERROR(stream.BlockHostUntilDone());
}
for (auto [offset_idx, values] : llvm::enumerate(
llvm::zip(src_shape.dimensions(), dst_shape.dimensions()))) {
auto [src_dim, dst_dim] = values;
int64_t start_index =
std::min(std::max(offset_value(argument_idx, offset_idx), 0L),
src_dim - dst_dim);
slice_starts.push_back(start_index);
}
int64_t new_size = ShapeUtil::ByteSizeOf(dst_shape);
int64_t new_offset = 0;
for (auto [start, stride] :
llvm::zip(slice_starts, *ShapeUtil::ByteStrides(src_shape))) {
new_offset += start * stride;
}
VLOG(2) << "Create sliced argument " << argument_idx << " of shape "
<< slice.sliced_shape->ToString()
<< " by slicing argument of shape " << slice.orig_shape->ToString()
<< " at offset " << new_offset << " with " << new_size;
slice_buffers[argument_idx] =
argument_buffer.GetByteSlice(new_offset, new_size);
}
BufferAllocations slice_allocations(slice_buffers,
orig_allocations.device_ordinal(),
orig_allocations.memory_allocator());
Thunk::ExecuteParams new_params =
Thunk::ExecuteParams::CloneWithNewAllocations(params, slice_allocations);
TF_RETURN_IF_ERROR(embedded_thunk_->ExecuteOnStream(new_params));
return absl::OkStatus();
}
}
}
#include "xla/service/gpu/runtime/dynamic_slice_thunk.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/status/statusor.h"
#include "absl/strings/ascii.h"
#include "xla/ffi/ffi.h"
#include "xla/ffi/ffi_api.h"
#include "xla/service/buffer_assignment.h"
#include "xla/service/gpu/buffer_allocations.h"
#include "xla/service/gpu/matmul_utils.h"
#include "xla/service/gpu/runtime/custom_call_thunk.h"
#include "xla/service/gpu/runtime/gemm_thunk.h"
#include "xla/service/gpu/runtime/thunk.h"
#include "xla/service/platform_util.h"
#include "xla/service/service_executable_run_options.h"
#include "xla/shape_util.h"
#include "xla/stream_executor/blas.h"
#include "xla/stream_executor/command_buffer.h"
#include "xla/stream_executor/device_memory.h"
#include "xla/stream_executor/device_memory_allocator.h"
#include "xla/stream_executor/gpu/gpu_types.h"
#include "xla/stream_executor/platform.h"
#include "xla/stream_executor/platform_manager.h"
#include "xla/stream_executor/stream.h"
#include "xla/stream_executor/stream_executor.h"
#include "xla/stream_executor/stream_executor_memory_allocator.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#if GOOGLE_CUDA
#define PLATFORM "CUDA"
#elif TENSORFLOW_USE_ROCM
#define PLATFORM "ROCM"
#endif
namespace xla::gpu {
namespace {
static se::StreamExecutor* GpuExecutor() {
auto name =
absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value());
auto* platform = se::PlatformManager::PlatformWithName(name).value();
return platform->ExecutorForDevice(0).value();
}
}
TEST(DynamicSliceThunkTest, SlicedGemm) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t lhs_length = sizeof(float) * 2 * 4;
int64_t rhs_length = sizeof(float) * 3 * 1;
int64_t out_length = sizeof(float) * 1 * 1;
int64_t offset_length = sizeof(int64_t);
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(0, rhs_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
rhs_length);
BufferAllocation alloc_lhs(0, lhs_length, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(1, rhs_length, 0));
BufferAllocation::Slice slice_rhs(fake_allocations.back().get(), 0,
rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc_lhs_offset_0(4, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(5, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs, slice_out,
slice_workspace, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, std::nullopt, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt,
std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt,
std::nullopt, std::nullopt},
{sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt});
se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4);
std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8};
TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1);
std::vector<float> rhs_arr(3, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{lhs, rhs, out, workspace, lhs_offset_0, lhs_offset_1}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(1, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({9}));
}
TEST(DynamicSliceThunkTest, SlicedNonContiguousGemm) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t lhs_length = sizeof(float) * 2 * 4;
int64_t rhs_length = sizeof(float) * 4 * 3;
int64_t out_length = sizeof(float) * 2 * 2;
int64_t offset_length = sizeof(int64_t);
int64_t slice_length = sizeof(float) * 2 * 2;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
0, slice_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
slice_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
1, slice_length, 0));
BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0,
slice_length);
BufferAllocation alloc_lhs(0, lhs_length, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length);
BufferAllocation alloc_rhs(1, rhs_length, 0);
BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, rhs_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc_lhs_offset_0(4, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(5, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
BufferAllocation alloc_rhs_offset_0(6, offset_length,
0);
BufferAllocation::Slice slice_rhs_offset_0(&alloc_rhs_offset_0, 0,
offset_length);
BufferAllocation alloc_rhs_offset_1(7, offset_length,
0);
BufferAllocation::Slice slice_rhs_offset_1(&alloc_rhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {2, 2}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {2, 2}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {2, 2}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake,
slice_out, slice_workspace, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
std::vector<DynamicSliceThunk::Offset> rhs_offsets{slice_rhs_offset_0,
slice_rhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, rhs_offsets, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}),
ShapeUtil::MakeShape(PrimitiveType::F32, {4, 3}), std::nullopt,
std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 2}),
ShapeUtil::MakeShape(PrimitiveType::F32, {2, 2}), std::nullopt,
std::nullopt},
{sizeof(int64_t), sizeof(int64_t), std::nullopt, std::nullopt});
se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4);
std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8};
TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(4 * 3);
std::vector<float> rhs_arr(12, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(2 * 2);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
se::DeviceMemory<int64_t> rhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> rhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> rhs_offset_arr{2, 1};
TF_ASSERT_OK(
stream->Memcpy(&rhs_offset_0, &rhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&rhs_offset_1, &rhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({lhs, rhs, out, workspace, lhs_offset_0,
lhs_offset_1, rhs_offset_0, rhs_offset_1},
0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
ASSERT_FALSE(thunk.ExecuteOnStream(params).ok());
}
TEST(DynamicSliceThunkTest, MultipleSlicedOperandsGemm) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t length = sizeof(float) * 2 * 4;
int64_t out_length = sizeof(float) * 1;
int64_t offset_length = sizeof(int64_t);
int64_t slice_length = sizeof(float) * 3;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
0, slice_length, 0));
BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0,
slice_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
1, slice_length, 0));
BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0,
slice_length);
BufferAllocation alloc_lhs(0, length, 0);
BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, length);
BufferAllocation alloc_rhs(1, length, 0);
BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(2, out_length, 0));
BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0,
out_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
3, 1024 * 1024, 0));
BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0,
1024 * 1024);
BufferAllocation alloc_lhs_offset_0(4, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0,
offset_length);
BufferAllocation alloc_lhs_offset_1(5, offset_length,
0);
BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0,
offset_length);
BufferAllocation alloc_rhs_offset_0(6, offset_length,
0);
BufferAllocation::Slice slice_rhs_offset_0(&alloc_rhs_offset_0, 0,
offset_length);
BufferAllocation alloc_rhs_offset_1(7, offset_length,
0);
BufferAllocation::Slice slice_rhs_offset_1(&alloc_rhs_offset_1, 0,
offset_length);
auto config =
GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1},
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0},
ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0,
0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt,
se::blas::kDefaultComputePrecision, false, false);
ASSERT_TRUE(config.ok());
ThunkSequence seq;
seq.emplace_back(std::make_unique<GemmThunk>(
Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake,
slice_out, slice_workspace, true));
std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0,
slice_lhs_offset_1};
std::vector<DynamicSliceThunk::Offset> rhs_offsets{slice_rhs_offset_0,
slice_rhs_offset_1};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_lhs, slice_rhs, slice_out, slice_workspace},
std::move(fake_allocations),
{lhs_offsets, rhs_offsets, std::nullopt, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}),
ShapeUtil::MakeShape(PrimitiveType::F32, {8, 1}), std::nullopt,
std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}),
ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), std::nullopt,
std::nullopt},
{sizeof(int64_t), sizeof(int64_t), std::nullopt, std::nullopt});
std::vector<float> arr{1, 2, 3, 4, 5, 6, 7, 8};
se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4);
TF_ASSERT_OK(stream->Memcpy(&lhs, arr.data(), length));
se::DeviceMemory<float> rhs = executor->AllocateArray<float>(8);
std::vector<float> rhs_arr(8, 1);
TF_ASSERT_OK(stream->Memcpy(&rhs, arr.data(), length));
se::DeviceMemory<float> out = executor->AllocateArray<float>(1);
TF_ASSERT_OK(stream->MemZero(&out, out_length));
se::DeviceMemory<float> workspace =
executor->AllocateArray<float>(1024 * 1024);
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024));
se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> lhs_offset_arr{0, 1};
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length));
se::DeviceMemory<int64_t> rhs_offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> rhs_offset_1 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> rhs_offset_arr{2, 0};
TF_ASSERT_OK(
stream->Memcpy(&rhs_offset_0, &rhs_offset_arr[0], offset_length));
TF_ASSERT_OK(
stream->Memcpy(&rhs_offset_1, &rhs_offset_arr[1], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations({lhs, rhs, out, workspace, lhs_offset_0,
lhs_offset_1, rhs_offset_0, rhs_offset_1},
0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<float> dst(1, 0);
TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length));
ASSERT_EQ(dst, std::vector<float>({2 * 3 + 3 * 4 + 4 * 5}));
}
static absl::Status Memcpy(se::Stream* stream, ffi::AnyBuffer src,
ffi::Result<ffi::AnyBuffer> dst) {
return stream->MemcpyD2D(
&dst->data, src.data,
absl::c_accumulate(src.dimensions, 1.0, std::multiplies<int64_t>()) *
sizeof(float));
}
XLA_FFI_DEFINE_HANDLER(kMemcpy, Memcpy,
ffi::Ffi::Bind()
.Ctx<ffi::Stream>()
.Arg<ffi::AnyBuffer>()
.Ret<ffi::AnyBuffer>()
);
XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$memcpy", PLATFORM,
kMemcpy);
TEST(DynamicSliceThunkTest, SlicedMemcpy) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t src_count = 8 * 8 * 10 * 8;
int64_t dst_count = 8 * 8;
int64_t src_length = sizeof(int32_t) * src_count;
int64_t dst_length = sizeof(int32_t) * dst_count;
int64_t offset_length = sizeof(int64_t);
int64_t slice_length = sizeof(int32_t) * dst_count;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(2);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
0, slice_length, 0));
BufferAllocation::Slice slice_src_fake(fake_allocations.back().get(), 0,
slice_length);
BufferAllocation alloc_src(0, src_length, 0);
BufferAllocation::Slice slice_src(&alloc_src, 0, src_length);
fake_allocations.push_back(
std::make_unique<BufferAllocation>(1, dst_length, 0));
BufferAllocation::Slice slice_dst(fake_allocations.back().get(), 0,
dst_length);
BufferAllocation alloc_offset_0(2, offset_length, 0);
BufferAllocation::Slice slice_offset_0(&alloc_offset_0, 0, offset_length);
BufferAllocation alloc_offset_1(3, offset_length, 0);
BufferAllocation::Slice slice_offset_1(&alloc_offset_1, 0, offset_length);
BufferAllocation alloc_offset_2(4, offset_length, 0);
BufferAllocation::Slice slice_offset_2(&alloc_offset_2, 0, offset_length);
BufferAllocation alloc_offset_3(5, offset_length, 0);
BufferAllocation::Slice slice_offset_3(&alloc_offset_3, 0, offset_length);
auto registration = xla::ffi::FindHandler("__xla_test$$memcpy", PLATFORM);
ASSERT_TRUE(registration.ok());
std::vector<std::optional<CustomCallThunk::Slice>> operands{
CustomCallThunk::Slice{slice_src_fake,
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8})}};
std::vector<std::optional<CustomCallThunk::Slice>> results{
CustomCallThunk::Slice{slice_dst,
ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8})}};
ThunkSequence seq;
seq.emplace_back(std::make_unique<CustomCallThunk>(
Thunk::ThunkInfo(), registration->bundle, operands, results,
CustomCallThunk::AttributesMap(),
nullptr));
std::vector<DynamicSliceThunk::Offset> slice_offsets{
slice_offset_0, slice_offset_1, slice_offset_2, slice_offset_3};
DynamicSliceThunk thunk(
Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)),
{slice_src, slice_dst}, std::move(fake_allocations),
{slice_offsets, std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8, 10, 8}), std::nullopt},
{ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 8, 8}), std::nullopt},
{sizeof(int64_t), std::nullopt});
se::DeviceMemory<int32_t> src = executor->AllocateArray<int32_t>(src_count);
std::vector<int32_t> src_arr(src_count, 0);
for (unsigned i = 0; i < src_count; ++i) src_arr[i] = i;
TF_ASSERT_OK(stream->Memcpy(&src, src_arr.data(), src_length));
se::DeviceMemory<int32_t> dst = executor->AllocateArray<int32_t>(dst_count);
TF_ASSERT_OK(stream->MemZero(&dst, dst_length));
se::DeviceMemory<int64_t> offset_0 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> offset_1 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> offset_2 = executor->AllocateArray<int64_t>(1);
se::DeviceMemory<int64_t> offset_3 = executor->AllocateArray<int64_t>(1);
std::vector<int64_t> offset_arr{3, 5, 2, 0};
TF_ASSERT_OK(stream->Memcpy(&offset_0, &offset_arr[0], offset_length));
TF_ASSERT_OK(stream->Memcpy(&offset_1, &offset_arr[1], offset_length));
TF_ASSERT_OK(stream->Memcpy(&offset_2, &offset_arr[2], offset_length));
TF_ASSERT_OK(stream->Memcpy(&offset_3, &offset_arr[3], offset_length));
ServiceExecutableRunOptions run_options;
se::StreamExecutorMemoryAllocator allocator(executor);
BufferAllocations allocations(
{src, dst, offset_0, offset_1, offset_2, offset_3}, 0, &allocator);
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);
Thunk::ExecutableSource source = {"", {}};
TF_ASSERT_OK(thunk.Initialize(
{executor, source, &allocations, stream.get(), stream.get()}));
TF_ASSERT_OK(thunk.ExecuteOnStream(params));
TF_ASSERT_OK(stream->BlockHostUntilDone());
std::vector<int32_t> out(dst_count, 0);
TF_ASSERT_OK(stream->Memcpy(out.data(), dst, dst_length));
std::vector<int32_t> ref(dst_count, 0);
int64_t offset_val =
offset_arr[3] +
8 * (offset_arr[2] + 10 * (offset_arr[1] + 8 * offset_arr[0]));
std::copy(src_arr.begin() + offset_val,
src_arr.begin() + offset_val + dst_count, ref.begin());
ASSERT_EQ(out, ref);
}
TEST(DynamicSliceThunkTest, SlicedOutputMemcpy) {
se::StreamExecutor* executor = GpuExecutor();
TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());
int64_t src_count = 8 * 8 * 10 * 2;
int64_t dst_count = 2 * 2 * 2 * 2;
int64_t slice_count = 2 * 2;
int64_t src_length = sizeof(int32_t) * src_count;
int64_t dst_length = sizeof(int32_t) * dst_count;
int64_t offset_length = sizeof(int64_t);
int64_t slice_length = sizeof(int32_t) * slice_count;
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(2);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
0, slice_length, 0));
BufferAllocation::Slice slice_src_fake(fake_allocations.back().get(), 0,
slice_length);
fake_allocations.push_back(std::make_unique<BufferAllocation>(
1, slice_length, 0));
BufferAllocation::Slice slice_dst_fake(fake_allocations.back().get(), 0,
slice_length);
BufferAllocation alloc_src(0, src_length, 0);
BufferAllocation::Slice slice_src(&alloc_src, 0, src_length);
BufferAllocation alloc_dst(1, dst_length, 0);
BufferAllocation::Slice slice_dst(&alloc_dst, 0, dst_length);
BufferAllocation alloc_src_offset_0(2, offset_length, 0);
BufferAllocation::Slice slice_src_offset_0(&alloc_src_offset_0, 0,
offset_length);
BufferAllocation alloc_src_offset_1(3, offset_length, 0);
BufferAllocation::Slice slice_src_offset_1(&alloc_src_offset_1, 0,
offset_length);
BufferAllocation alloc_src_offset_2(4, offset_length, 0);
BufferAllocation::Slice slice_src_offset_2(&alloc_src_offset_2, 0,
offset_length);
BufferAllocation alloc_src_offset_3(5, offset_length, 0);
BufferAllocation::Slice slice_src_offset_3(&alloc_src_offset_3, 0,
offset_length);
BufferAllocation alloc_dst_offset_0(6, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_0(&alloc_dst_offset_0, 0,
offset_length);
BufferAllocation alloc_dst_offset_1(7, offset_length, 0);
BufferAllocation::Slice slice_dst_offset_1(&alloc_dst_offset_1, 0, offset_length);
#ifndef MLIR_HLO_UTILS_CYCLE_DETECTOR_H
#define MLIR_HLO_UTILS_CYCLE_DETECTOR_H
#include <optional>
#include <vector>
#include "llvm/ADT/DenseMap.h"
namespace mlir {
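// A set that remembers insertion order and provides O(1) insert, lookup and
// erase. Erase swaps the removed element with the last one, so it perturbs
// the order of the remaining sequence.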
template <typename T>
class OrderedSet {
public:
bool Insert(T value) {
bool new_insertion =
value_to_index_.insert({value, value_sequence_.size()}).second;
if (new_insertion) {
value_sequence_.push_back(value);
}
return new_insertion;
}
void Erase(T value) {
auto it = value_to_index_.find(value);
value_to_index_[value_sequence_.back()] = it->second;
std::swap(value_sequence_[it->second], value_sequence_.back());
value_sequence_.pop_back();
value_to_index_.erase(it);
}
void Reserve(size_t new_size) {
value_to_index_.reserve(new_size);
value_sequence_.reserve(new_size);
}
void Clear() {
value_to_index_.clear();
value_sequence_.clear();
}
bool Contains(T value) const { return value_to_index_.count(value); }
size_t Size() const { return value_sequence_.size(); }
const std::vector<T>& GetSequence() const { return value_sequence_; }
private:
std::vector<T> value_sequence_;
llvm::DenseMap<T, int> value_to_index_;
};
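// Incremental cycle detection over a fixed set of nodes. A topological rank
// is maintained per node; InsertEdge rejects edges that would create a cycle
// and ContractEdge merges nodes that are safe to fuse.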
class GraphCycles {
public:
explicit GraphCycles(int32_t num_nodes);
~GraphCycles();
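  // Attempts to add edge x->y. Returns false and leaves the graph unchanged
  // if the edge would introduce a cycle; returns true otherwise (including
  // when the edge already exists).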
bool InsertEdge(int32_t x, int32_t y);
void RemoveEdge(int32_t x, int32_t y);
bool HasEdge(int32_t x, int32_t y) const;
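  // Contracts edge a->b into a single node if this cannot introduce a cycle.
  // Returns the id of the surviving node, or std::nullopt if the nodes must
  // stay separate because another path from a to b exists.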
std::optional<int32_t> ContractEdge(int32_t a, int32_t b);
bool IsReachable(int32_t x, int32_t y);
std::vector<int32_t> SuccessorsCopy(int32_t node) const;
std::vector<int32_t> AllNodesInPostOrder() const;
struct Rep;
private:
GraphCycles(const GraphCycles&) = delete;
GraphCycles& operator=(const GraphCycles&) = delete;
Rep* rep_;
};
}
#endif
#include "utils/cycle_detector.h"
#include <algorithm>
#include <optional>
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
namespace mlir {
namespace {
using NodeSet = llvm::DenseSet<int32_t>;
using OrderedNodeSet = OrderedSet<int32_t>;
template <typename T>
struct VecStruct {
using type = llvm::SmallVector<T, 4>;
};
template <typename T>
using Vec = typename VecStruct<T>::type;
struct Node {
int32_t rank;
bool visited;
void* data;
OrderedNodeSet in;
OrderedNodeSet out;
};
}
struct GraphCycles::Rep {
Vec<Node*> nodes;
Vec<int32_t> freeNodes;
Vec<int32_t> deltaf;
Vec<int32_t> deltab;
Vec<int32_t> list;
Vec<int32_t> merged;
Vec<int32_t> stack;
};
GraphCycles::GraphCycles(int32_t numNodes) : rep_(new Rep) {
rep_->nodes.reserve(numNodes);
for (int32_t i = 0; i < numNodes; ++i) {
Node* n = new Node;
n->visited = false;
n->data = nullptr;
n->rank = rep_->nodes.size();
rep_->nodes.push_back(n);
}
}
GraphCycles::~GraphCycles() {
for (Vec<Node*>::size_type i = 0, e = rep_->nodes.size(); i < e; ++i) {
delete rep_->nodes[i];
}
delete rep_;
}
bool GraphCycles::HasEdge(int32_t x, int32_t y) const {
return rep_->nodes[x]->out.Contains(y);
}
void GraphCycles::RemoveEdge(int32_t x, int32_t y) {
rep_->nodes[x]->out.Erase(y);
rep_->nodes[y]->in.Erase(x);
}
static bool forwardDfs(GraphCycles::Rep* r, int32_t n, int32_t upperBound);
static void backwardDfs(GraphCycles::Rep* r, int32_t n, int32_t lowerBound);
static void reorder(GraphCycles::Rep* r);
static void sort(const Vec<Node*>&, Vec<int32_t>* delta);
static void moveToList(GraphCycles::Rep* r, Vec<int32_t>* src,
Vec<int32_t>* dst);
static void clearVisitedBits(GraphCycles::Rep* r, const Vec<int32_t>& nodes);
bool GraphCycles::InsertEdge(int32_t x, int32_t y) {
if (x == y) return false;
Rep* r = rep_;
Node* nx = r->nodes[x];
if (!nx->out.Insert(y)) {
return true;
}
Node* ny = r->nodes[y];
ny->in.Insert(x);
if (nx->rank <= ny->rank) {
return true;
}
if (forwardDfs(r, y, nx->rank)) {
nx->out.Erase(y);
ny->in.Erase(x);
clearVisitedBits(r, r->deltaf);
return false;
}
backwardDfs(r, x, ny->rank);
reorder(r);
return true;
}
static bool forwardDfs(GraphCycles::Rep* r, int32_t n, int32_t upperBound) {
r->deltaf.clear();
r->stack.clear();
r->stack.push_back(n);
while (!r->stack.empty()) {
n = r->stack.back();
r->stack.pop_back();
Node* nn = r->nodes[n];
if (nn->visited) continue;
nn->visited = true;
r->deltaf.push_back(n);
for (auto w : nn->out.GetSequence()) {
Node* nw = r->nodes[w];
if (nw->rank == upperBound) {
return true;
}
if (!nw->visited && nw->rank < upperBound) {
r->stack.push_back(w);
}
}
}
return false;
}
static void backwardDfs(GraphCycles::Rep* r, int32_t n, int32_t lowerBound) {
r->deltab.clear();
r->stack.clear();
r->stack.push_back(n);
while (!r->stack.empty()) {
n = r->stack.back();
r->stack.pop_back();
Node* nn = r->nodes[n];
if (nn->visited) continue;
nn->visited = true;
r->deltab.push_back(n);
for (auto w : nn->in.GetSequence()) {
Node* nw = r->nodes[w];
if (!nw->visited && lowerBound < nw->rank) {
r->stack.push_back(w);
}
}
}
}
static void reorder(GraphCycles::Rep* r) {
sort(r->nodes, &r->deltab);
sort(r->nodes, &r->deltaf);
r->list.clear();
moveToList(r, &r->deltab, &r->list);
moveToList(r, &r->deltaf, &r->list);
r->merged.resize(r->deltab.size() + r->deltaf.size());
std::merge(r->deltab.begin(), r->deltab.end(), r->deltaf.begin(),
r->deltaf.end(), r->merged.begin());
for (Vec<int32_t>::size_type i = 0, e = r->list.size(); i < e; ++i) {
r->nodes[r->list[i]]->rank = r->merged[i];
}
}
static void sort(const Vec<Node*>& nodes, Vec<int32_t>* delta) {
struct ByRank {
const Vec<Node*>* nodes;
bool operator()(int32_t a, int32_t b) const {
return (*nodes)[a]->rank < (*nodes)[b]->rank;
}
};
ByRank cmp;
cmp.nodes = &nodes;
std::sort(delta->begin(), delta->end(), cmp);
}
static void moveToList(GraphCycles::Rep* r, Vec<int32_t>* src,
Vec<int32_t>* dst) {
for (Vec<int32_t>::size_type i = 0, e = src->size(); i < e; i++) {
int32_t w = (*src)[i];
(*src)[i] = r->nodes[w]->rank;
r->nodes[w]->visited = false;
dst->push_back(w);
}
}
static void clearVisitedBits(GraphCycles::Rep* r, const Vec<int32_t>& nodes) {
for (Vec<int32_t>::size_type i = 0, e = nodes.size(); i < e; i++) {
r->nodes[nodes[i]]->visited = false;
}
}
bool GraphCycles::IsReachable(int32_t x, int32_t y) {
if (x == y) return true;
Rep* r = rep_;
Node* nx = r->nodes[x];
Node* ny = r->nodes[y];
if (nx->rank >= ny->rank) {
return false;
}
bool reachable = forwardDfs(r, x, ny->rank);
clearVisitedBits(r, r->deltaf);
return reachable;
}
std::optional<int32_t> GraphCycles::ContractEdge(int32_t a, int32_t b) {
assert(HasEdge(a, b));
RemoveEdge(a, b);
if (IsReachable(a, b)) {
InsertEdge(a, b);
return {};
}
if (rep_->nodes[b]->in.Size() + rep_->nodes[b]->out.Size() >
rep_->nodes[a]->in.Size() + rep_->nodes[a]->out.Size()) {
std::swap(a, b);
}
Node* nb = rep_->nodes[b];
OrderedNodeSet out = std::move(nb->out);
OrderedNodeSet in = std::move(nb->in);
for (int32_t y : out.GetSequence()) {
rep_->nodes[y]->in.Erase(b);
}
for (int32_t y : in.GetSequence()) {
rep_->nodes[y]->out.Erase(b);
}
rep_->freeNodes.push_back(b);
rep_->nodes[a]->out.Reserve(rep_->nodes[a]->out.Size() + out.Size());
for (int32_t y : out.GetSequence()) {
InsertEdge(a, y);
}
rep_->nodes[a]->in.Reserve(rep_->nodes[a]->in.Size() + in.Size());
for (int32_t y : in.GetSequence()) {
InsertEdge(y, a);
}
return a;
}
std::vector<int32_t> GraphCycles::SuccessorsCopy(int32_t node) const {
return rep_->nodes[node]->out.GetSequence();
}
namespace {
void sortInPostOrder(const Vec<Node*>& nodes, std::vector<int32_t>* toSort) {
std::sort(toSort->begin(), toSort->end(), [&](int32_t a, int32_t b) {
return nodes[a]->rank > nodes[b]->rank;
});
}
}
std::vector<int32_t> GraphCycles::AllNodesInPostOrder() const {
llvm::DenseSet<int32_t> freeNodesSet;
for (int32_t n : rep_->freeNodes) freeNodesSet.insert(n);
std::vector<int32_t> allNodes;
allNodes.reserve(rep_->nodes.size() - freeNodesSet.size());
for (size_t i = 0, e = rep_->nodes.size(); i < e; i++) {
if (!freeNodesSet.count(i)) {
allNodes.push_back(i);
}
}
sortInPostOrder(rep_->nodes, &allNodes);
return allNodes;
}
}
#include "utils/cycle_detector.h"
#include "xla/test.h"
class GraphCyclesTest : public ::testing::Test {
public:
GraphCyclesTest() : g_(100) {}
bool AddEdge(int x, int y) { return g_.InsertEdge(x, y); }
void AddMultiples() {
for (int x = 1; x < 25; x++) {
EXPECT_TRUE(AddEdge(x, 2 * x)) << x;
EXPECT_TRUE(AddEdge(x, 3 * x)) << x;
}
}
mlir::GraphCycles g_;
};
TEST_F(GraphCyclesTest, NoCycle) { AddMultiples(); }
TEST_F(GraphCyclesTest, SimpleCycle) {
AddMultiples();
EXPECT_FALSE(AddEdge(8, 4));
}
TEST_F(GraphCyclesTest, IndirectCycle) {
AddMultiples();
EXPECT_TRUE(AddEdge(16, 9));
EXPECT_FALSE(AddEdge(9, 2));
}
TEST_F(GraphCyclesTest, RemoveEdge) {
EXPECT_TRUE(AddEdge(1, 2));
EXPECT_TRUE(AddEdge(2, 3));
EXPECT_TRUE(AddEdge(3, 4));
EXPECT_TRUE(AddEdge(4, 5));
g_.RemoveEdge(2, 3);
EXPECT_FALSE(g_.HasEdge(2, 3));
}
TEST_F(GraphCyclesTest, IsReachable) {
EXPECT_TRUE(AddEdge(1, 2));
EXPECT_TRUE(AddEdge(2, 3));
EXPECT_TRUE(AddEdge(3, 4));
EXPECT_TRUE(AddEdge(4, 5));
EXPECT_TRUE(g_.IsReachable(1, 5));
EXPECT_FALSE(g_.IsReachable(5, 1));
}
TEST_F(GraphCyclesTest, ContractEdge) {
ASSERT_TRUE(AddEdge(1, 2));
ASSERT_TRUE(AddEdge(1, 3));
ASSERT_TRUE(AddEdge(2, 3));
ASSERT_TRUE(AddEdge(2, 4));
ASSERT_TRUE(AddEdge(3, 4));
EXPECT_FALSE(g_.ContractEdge(1, 3).has_value());
EXPECT_TRUE(g_.HasEdge(1, 3));
EXPECT_EQ(*g_.ContractEdge(1, 2), 2);
EXPECT_TRUE(g_.HasEdge(2, 3));
EXPECT_TRUE(g_.HasEdge(2, 4));
EXPECT_TRUE(g_.HasEdge(3, 4));
EXPECT_EQ(*g_.ContractEdge(2, 3), 2);
EXPECT_TRUE(g_.HasEdge(2, 4));
}
#ifndef XLA_HLO_UTILS_HLO_SHARDING_UTIL_H_
#define XLA_HLO_UTILS_HLO_SHARDING_UTIL_H_
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/inlined_vector.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/service/call_graph.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/shape.h"
#include "xla/util.h"
namespace xla {
namespace hlo_sharding_util {
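// Parallel (batch-like) dimensions of the indices and operand of a gather or
// scatter operation.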
struct GatherScatterParallelDims {
absl::InlinedVector<int64_t, 1> indices_parallel_dims;
absl::InlinedVector<int64_t, 1> operand_parallel_dims;
std::vector<int64_t> index_parallel_in_dim;
};
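// Returns true if `potential_subsharding` is equal to, or a sub-tiling of,
// `sharding` for the given `shape`.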
bool IsSubTilingOrEqualSharding(const Shape& shape,
const HloSharding& potential_subsharding,
const HloSharding& sharding);
bool IsShardingMoreSpecific(const HloSharding& lhs, const HloSharding& rhs);
bool MergeSharding(const HloSharding& to_merge, HloSharding* dst,
bool may_combine_partial_sharding);
bool MergeShardingIfCompatible(const HloSharding& to_merge,
int64_t minimum_tiles, HloSharding* dst);
HloSharding FindCommonSharding(
absl::Span<const HloSharding> shardings,
std::optional<HloSharding> default_sharding = std::nullopt);
std::optional<int64_t> SelectDominantDevice(
const std::map<int64_t, int64_t>& device_map, int64_t* top_count);
void AssignComputationDevice(HloComputation* computation, int64_t device);
std::optional<int64_t> GetMostOccurringDevice(
absl::Span<HloInstruction* const> instructions);
std::optional<int64_t> GetDominantDevice(
absl::Span<HloComputation* const> computations, double dominant_factor);
HloSharding TransposeSharding(const HloSharding& sharding,
absl::Span<const int64_t> dimensions);
std::optional<HloSharding> ReshapeSharding(const Shape& source_shape,
const Shape& target_shape,
const HloSharding& sharding);
HloSharding PropagateShardingThroughReshape(const Shape& source_shape,
const Shape& target_shape,
const HloSharding& sharding);
HloSharding ReverseSharding(const HloSharding& sharding,
absl::Span<const int64_t> dimensions);
HloSharding ReshapeToTileDimension(const HloSharding& sharding, int64_t dim,
absl::Span<const int64_t> dims);
bool ContainsTileSharding(const HloModule& module);
HloSharding GatherOutputShardingFromIndexIndexPassthroughDimensions(
const HloSharding& index_sharding, const HloInstruction* hlo);
HloSharding GatherIndexShardingFromOutputIndexPassthroughDimensions(
const HloSharding& output_sharding, const HloInstruction* hlo);
HloSharding GatherEffectiveOutputSharding(const HloInstruction& hlo);
HloSharding ScatterIndexShardingFromUpdateIndexPassthroughDimensions(
const HloSharding& update_sharding, const HloScatterInstruction* scatter);
HloSharding ScatterUpdateShardingFromIndexIndexPassthroughDimensions(
const HloSharding& index_sharding, const HloScatterInstruction* scatter);
HloSharding ScatterEffectiveIndexSharding(const HloSharding& index_sharding,
const HloScatterInstruction& scatter);
HloSharding ScatterEffectiveDataSharding(const HloSharding& data_sharding,
const HloScatterInstruction& scatter);
std::optional<HloSharding>
GatherOutputShardingFromOperandOperandPassthroughDimensions(
const HloSharding& operand_sharding, const HloInstruction& hlo);
std::optional<HloSharding>
GatherOutputShardingFromOperandOperandPassthroughDimensions(
const Shape& operand_shape, const HloSharding& operand_sharding,
const HloInstruction& hlo, absl::Span<const int64_t> slice_sizes);
std::optional<HloSharding> GatherOperandShardingFromOutputParallelDimensions(
const HloSharding& output_sharding, const HloScatterInstruction& scatter,
const CallGraph& call_graph);
std::optional<HloSharding> GatherOperandShardingFromOutput(
const HloSharding& output_sharding, const HloInstruction& hlo,
const CallGraph& call_graph);
std::vector<int64_t> GetScatterSliceSize(const Shape& operand_shape,
const Shape& update_shape,
const ScatterDimensionNumbers& dnums);
std::optional<HloSharding> ScatterOutputShardingFromUpdate(
const HloSharding& update_sharding, const HloScatterInstruction& scatter);
std::optional<HloSharding> ScatterUpdateShardingFromOutput(
const HloSharding& per_output_sharding,
const HloScatterInstruction& scatter, const CallGraph& call_graph);
std::optional<HloSharding>
ScatterUpdateShardingFromOutputOperandPassthroughDimensions(
const HloSharding& output_sharding, const HloInstruction& hlo);
std::optional<HloSharding>
ScatterUpdateShardingFromOutputOperandPassthroughDimensions(
const Shape& output_shape, const HloSharding& output_sharding,
const HloInstruction& hlo, absl::Span<const int64_t> slice_sizes);
std::optional<HloSharding> ScatterUpdateShardingFromOutputParallelDimensions(
const HloSharding& output_sharding, const HloScatterInstruction& scatter,
const CallGraph& call_graph);
HloSharding GatherOutputOrScatterUpdateShardingFromIndicesParallelDimensions(
const HloSharding& indices_sharding,
const int64_t output_or_update_shape_rank,
absl::Span<const int64_t> indices_parallel_dims,
absl::Span<const int64_t> output_or_update_parallel_dims);
absl::StatusOr<std::pair<std::unique_ptr<HloInstruction>, HloOpcode>>
IdentityValueAndHloOpcodeForScatterReduceComputation(
const HloScatterInstruction& scatter);
std::vector<int64_t> DevicesForSharding(
const HloSharding& sharding, absl::Span<const int64_t> available_devices);
HloSharding PartiallyReplicateTiledShardingOnDims(
const HloSharding& sharding, absl::Span<const int64_t> dims_to_replicate);
HloSharding PartiallyReplicateTiledShardingOnAllDimsExcept(
const HloSharding& sharding, absl::Span<const int64_t> dims_to_keep);
HloSharding ReplicateAllDataDims(const HloSharding& sharding,
int64_t data_rank = -1);
HloSharding RemoveShapeDimensions(const HloSharding& sharding,
absl::Span<const int64_t> dims_to_remove);
std::optional<HloSharding> TransposeShardingWithCollapsedDims(
const HloSharding& source, absl::Span<int64_t const> src_to_tgt,
absl::Span<int64_t const> tgt_to_src);
std::optional<int64_t> GetDimensionForIota(const HloInstruction* maybe_iota,
const CallGraph& call_graph);
std::optional<GatherScatterParallelDims> GetGatherParallelBatchDims(
const HloInstruction& hlo, const CallGraph& call_graph);
std::optional<GatherScatterParallelDims> GetScatterParallelBatchDims(
const HloInstruction& hlo, const CallGraph& call_graph);
absl::InlinedVector<int64_t, 1> GetGatherParallelOutputDims(
const HloInstruction& hlo, const GatherScatterParallelDims& parallel_dim);
absl::InlinedVector<int64_t, 1> GetScatterParallelUpdateDims(
const HloInstruction& hlo, const GatherScatterParallelDims& parallel_dim);
absl::InlinedVector<int64_t, 1> GetGatherOperandPassthroughOperandDims(
const Shape& operand_shape, const HloInstruction& hlo,
absl::Span<const int64_t> slice_sizes);
absl::InlinedVector<int64_t, 1> GetScatterOperandPassthroughOperandDims(
const Shape& operand_shape, const HloSharding& operand_sharding,
const HloInstruction& hlo, absl::Span<const int64_t> slice_sizes);
absl::InlinedVector<int64_t, 1> GetGatherOperandPassthroughOutputDims(
const Shape& output_shape, const Shape& operand_shape,
const HloInstruction& hlo, absl::Span<const int64_t> slice_sizes);
absl::InlinedVector<int64_t, 1> GetScatterOperandPassthroughUpdateDims(
const Shape& update_shape, const Shape& operand_shape,
const HloSharding& operand_sharding, const HloInstruction& hlo,
absl::Span<const int64_t> slice_sizes);
absl::InlinedVector<int64_t, 1> GetGatherScatterIndexPassthroughIndexDims(
const int64_t indices_rank, const int64_t index_vector_dim);
absl::InlinedVector<int64_t, 1>
GetGatherScatterIndexPassthroughOutputOrUpdateDims(
const int64_t output_or_update_rank,
absl::Span<const int64_t> offset_or_window_dims);
HloSharding InferGatherScatterParallelShardingFromOperandSharding(
const HloSharding& operand_sharding, const Shape& operand_shape,
const Shape& shape,
absl::Span<const int64_t> output_aligned_operand_parallel_dims,
absl::Span<const int64_t> output_parallel_dims);
absl::InlinedVector<int64_t, 1> IndexAlignedOperandParallelDims(
const GatherScatterParallelDims& parallel_dims);
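// A sharding expressed relative to groups of devices: `device_groups` lists the
// devices in each group, `group_dims`/`group_dim_sizes` describe which tile
// dimensions were grouped away, and `sharding` is the per-group sharding over
// the remaining `data_rank` data dimensions.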
struct GroupedSharding {
GroupedSharding(std::vector<std::vector<int64_t>> device_groups,
DimensionVector group_dims, DimensionVector group_dim_sizes,
int64_t data_rank, HloSharding grouped_sharding,
bool subgroup_manual = false)
: device_groups(std::move(device_groups)),
group_dims(std::move(group_dims)),
group_dim_sizes(std::move(group_dim_sizes)),
data_rank(data_rank),
sharding(std::move(grouped_sharding)),
subgroup_manual(subgroup_manual) {}
std::string ToString() const;
std::vector<std::vector<int64_t>> device_groups;
DimensionVector group_dims;
DimensionVector group_dim_sizes;
int64_t data_rank;
HloSharding sharding;
bool subgroup_manual;
};
GroupedSharding GroupShardingOnDims(const HloSharding& sharding,
absl::Span<const int64_t> group_dims,
absl::Span<const int64_t> group_dim_shards,
bool subgroup_manual = false);
GroupedSharding GroupShardingOnDims(const HloSharding& sharding,
absl::Span<const int64_t> group_dims,
bool subgroup_manual = false);
GroupedSharding GroupShardingOnAllDimsExcept(
const HloSharding& sharding, absl::Span<const int64_t> non_group_dims,
bool subgroup_manual = false);
GroupedSharding GroupShardingOnReplicatedDim(
const HloSharding& sharding, int64_t num_groups, int64_t num_tiles,
int64_t data_rank, absl::Span<const int64_t> replicable_dims = {});
GroupedSharding GetGroupedReplicatedSharding(const int64_t num_groups,
const int64_t num_tiles,
const int64_t data_rank);
GroupedSharding GetManualSubgroupSharding(const HloSharding& sharding);
std::optional<GroupedSharding>
PartialReplicatedGroupShardingWithAssignedDeviceGroups(
const HloSharding& sharding, int64_t num_shards,
const std::vector<std::vector<int64_t>>& device_groups);
HloSharding UngroupSharding(const GroupedSharding& grouped_sharding);
bool DeviceGroupsAreMatch(GroupedSharding& lhs, GroupedSharding& rhs,
bool ignore_group_order = true);
HloSharding SplitShardingDimension(const HloSharding& sharding,
int64_t dimension, int64_t new_dim_size);
HloSharding MergeShardingDimension(const HloSharding& sharding,
int64_t dimension);
std::shared_ptr<const HloSharding> CreateTupleSharding(
const Shape& shape, absl::Span<const HloInstruction* const> elements);
bool IsSortOperandShardingMovable(const HloInstruction* sort_operand,
int64_t sort_dim);
std::optional<GatherScatterParallelDims> GetGatherScatterBatchParallelDims(
const HloInstruction* indices, absl::Span<const int64_t> slice_sizes,
int64_t index_vector_dim, absl::Span<const int64_t> index_map,
const CallGraph& call_graph);
std::optional<HloSharding> GetOutputSharding(const HloInstruction* instruction);
Shape UntileShape(const HloSharding& sharding, const Shape& shape);
Shape UntileLeafShape(const HloSharding& sharding, const Shape& shape);
Shape TileShape(const HloSharding& sharding, const Shape& shape);
Shape TileLeafShape(const HloSharding& sharding, const Shape& shape);
absl::Status CanonicalizeLayoutAfterShardingPropagation(
HloModule* module, bool update_output_layout,
bool update_parameters_layout);
bool IsSpatiallyPartitioned(const HloSharding& sharding);
inline bool IsSpatiallyPartitioned(const HloInstruction* hlo) {
return hlo->has_sharding() && IsSpatiallyPartitioned(hlo->sharding());
}
std::optional<HloSharding> ReturnImprovedShardingImpl(
HloSharding from, const HloSharding* to_improved,
const Shape& to_improved_shape, bool may_combine_partial_sharding,
bool allow_aggressive_resharding = false);
HloSharding InferDotOperandSharding(
const HloInstruction* dot, int64_t operand_index,
const dot_as_convolution_util::DotConvolutionDimsInfo& dnums,
bool consider_other_operand, bool may_combine_partial_sharding);
}
}
#endif
#include "xla/hlo/utils/hlo_sharding_util.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <map>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/literal_util.h"
#include "xla/map_util.h"
#include "xla/protobuf_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace hlo_sharding_util {
bool IsSubTilingOrEqualSharding(const Shape& potential_sharded_shape,
const HloSharding& potential_subsharding,
const HloSharding& sharding) {
if (potential_subsharding.IsManual() || sharding.IsManual()) {
return false;
}
if (sharding.IsTileMaximal()) {
return true;
}
if (potential_subsharding.IsTileMaximal()) {
return false;
}
const int32_t tiled_data_rank = potential_subsharding.TiledDataRank();
if (tiled_data_rank != sharding.TiledDataRank() ||
tiled_data_rank != potential_sharded_shape.dimensions_size()) {
return false;
}
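  // Shard sizes of each dimension under the two shardings. `shortcut` remains
  // true only when, in every dimension, `sharding`'s tile count divides
  // `potential_subsharding`'s evenly and neither tiling has degenerate
  // (padding-only) shards, so the comparison can be done directly on the tile
  // assignments below.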
DimensionVector potential_base_tile(tiled_data_rank);
DimensionVector base_tile(tiled_data_rank);
bool shortcut = true;
int64_t diff_dim_counter = 0;
DimensionVector reshape_dims(
potential_subsharding.tile_assignment().dimensions().begin(),
potential_subsharding.tile_assignment().dimensions().end());
for (int64_t i = 0; i < tiled_data_rank; ++i) {
const auto shape_i = potential_sharded_shape.dimensions(i);
const auto p_tile_dim_i = potential_subsharding.tile_assignment().dim(i);
const auto s_tile_dim_i = sharding.tile_assignment().dim(i);
if (p_tile_dim_i < s_tile_dim_i) {
return false;
}
potential_base_tile[i] = CeilOfRatio(shape_i, p_tile_dim_i);
base_tile[i] = CeilOfRatio(shape_i, s_tile_dim_i);
if (s_tile_dim_i != 1 &&
(p_tile_dim_i % s_tile_dim_i != 0 ||
base_tile[i] % potential_base_tile[i] != 0 ||
shape_i <= (p_tile_dim_i - 1) * potential_base_tile[i] ||
shape_i <= (s_tile_dim_i - 1) * base_tile[i])) {
shortcut = false;
}
if (shortcut && p_tile_dim_i != s_tile_dim_i) {
reshape_dims[i + diff_dim_counter] = s_tile_dim_i;
reshape_dims.insert(reshape_dims.begin() + i + diff_dim_counter + 1,
p_tile_dim_i / s_tile_dim_i);
diff_dim_counter++;
}
}
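  // Fast path: reinterpret the finer tile assignment so that the extra split
  // factors become trailing (replicated) dimensions, then compare it against
  // `sharding`'s tile assignment directly.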
if (shortcut) {
if (!sharding.HasPartialReplication()) {
return potential_subsharding == sharding;
}
std::vector<int> perm(reshape_dims.size());
absl::c_iota(perm, 0);
for (int64_t i = 0; i < tiled_data_rank; ++i) {
if (potential_subsharding.tile_assignment().dim(i) !=
sharding.tile_assignment().dim(i)) {
auto element = perm[i + 1];
perm.erase(perm.begin() + i + 1);
perm.push_back(element);
}
}
auto reshaped_ta = potential_subsharding.tile_assignment()
.Reshape(reshape_dims)
.Transpose(perm)
.Reshape(sharding.tile_assignment().dimensions());
return HloSharding::PartialTile(reshaped_ta).tile_assignment() ==
sharding.tile_assignment();
}
auto storage = std::make_unique<int32_t[]>(
sharding.tile_assignment().num_elements() * tiled_data_rank);
int32_t* storage_cursor = storage.get();
  absl::flat_has
#include "xla/hlo/utils/hlo_sharding_util.h"
#include <cstdint>
#include <initializer_list>
#include <optional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/log/log.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/service/dot_as_convolution_util.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace hlo_sharding_util {
namespace {
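// In these tests, TileAssignment(dims, reshape_dims, transpose_perm) denotes an
// iota tile assignment: device ids 0..N-1 laid out as `reshape_dims`, permuted
// by `transpose_perm`, and finally reshaped to `dims`.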
TEST(HloShardingUtilTest, MergeShardingIfCompatible1) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 4, 2, 16}, {16, 8}, {1, 0}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
EXPECT_EQ(dst, HloSharding::PartialTile(
TileAssignment({4, 4, 2, 4}, {4, 4, 8}, {0, 2, 1})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible2) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 2, 4, 16}, {16, 8}, {1, 0}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
EXPECT_EQ(dst, HloSharding::PartialTile(
TileAssignment({4, 2, 4, 4}, {4, 4, 8}, {0, 2, 1})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible3) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({4, 2, 1, 16}, {16, 8}, {1, 0}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({1, 1, 4, 32}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
EXPECT_EQ(dst, HloSharding::PartialTile(
TileAssignment({4, 2, 4, 4}, {16, 8}, {1, 0})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible4) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 4, 2, 16}, {16, 8}, {1, 0}));
HloSharding dst =
HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}, {4, 32}, {1, 0}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
EXPECT_EQ(dst, HloSharding::PartialTile(
TileAssignment({4, 4, 2, 4}, {4, 32}, {1, 0})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible5) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 4, 2, 16}, {16, 8}, {1, 0}));
HloSharding dst =
HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}, {32, 4}, {1, 0}));
EXPECT_FALSE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible6) {
HloSharding to_merge =
HloSharding::PartialTile(TileAssignment({1, 4, 2, 16}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({4, 1, 1, 32}));
EXPECT_FALSE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible7) {
HloSharding to_merge = HloSharding::PartialTile(
TileAssignment({2, 1, 2, 2}, {2, 2, 2}, {2, 1, 0}));
HloSharding dst = HloSharding::PartialTile(TileAssignment({1, 2, 1, 4}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
EXPECT_EQ(dst,
HloSharding::Tile(TileAssignment({2, 2, 2}, {2, 2, 2}, {2, 0, 1})));
}
TEST(HloShardingUtilTest, MergeShardingIfCompatible8) {
HloSharding to_merge = HloSharding::PartialTile(TileAssignment({2, 1, 4}));
HloSharding dst =
HloSharding::PartialTile(TileAssignment({1, 4, 2}, {2, 2, 2}, {2, 1, 0}));
EXPECT_TRUE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
EXPECT_EQ(dst,
HloSharding::Tile(TileAssignment({2, 4}, {2, 2, 2}, {0, 2, 1})));
}
TEST(HloShardingUtilTest, TransposeShardingReplicated) {
EXPECT_EQ(TransposeSharding(HloSharding::Replicate(), {0, 1, 2}),
HloSharding::Replicate());
}
TEST(HloShardingUtilTest, TransposeShardingTiled) {
HloSharding input = HloSharding::IotaTile({1, 2, 1, 2});
HloSharding output = HloSharding::IotaTile({2, 1, 2, 1}, {2, 2}, {1, 0});
EXPECT_EQ(TransposeSharding(input, {3, 0, 1, 2}), output);
}
TEST(HloShardingUtilTest, TransposeShardingWithCollapsedDimsSubgroupManual) {
HloSharding input =
HloSharding::Subgroup(TileAssignment({1, 2, 4}), {OpSharding::MANUAL});
HloSharding output =
HloSharding::Subgroup(TileAssignment({1, 1, 2, 4}), {OpSharding::MANUAL});
EXPECT_EQ(TransposeShardingWithCollapsedDims(input, {-1, 2}, {-1, -1, 1}),
output);
}
TEST(HloShardingUtilTest, ReshapeShardingMaximal) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 5, 2});
HloSharding sharding = HloSharding::AssignDevice(7);
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledInvalid) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 5, 2});
HloSharding sharding = HloSharding::IotaTile({1, 2, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_FALSE(result.has_value());
}
TEST(HloShardingUtilTest, ReshapeShardingTiledMerge) {
Shape input_shape = ShapeUtil::MakeShape(F32, {4, 5, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {20, 7});
HloSharding input_sharding = HloSharding::IotaTile({2, 1, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledSplit) {
Shape input_shape = ShapeUtil::MakeShape(F32, {16, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 4, 7});
HloSharding input_sharding = HloSharding::IotaTile({2, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledSplit2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {16, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 4, 7});
HloSharding input_sharding = HloSharding::IotaTile({16, 1});
HloSharding output_sharding = HloSharding::IotaTile({4, 4, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledSplit3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {36});
Shape output_shape = ShapeUtil::MakeShape(F32, {6, 6});
HloSharding input_sharding = HloSharding::IotaTile({4});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 2}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledSplitThenMerge) {
Shape input_shape = ShapeUtil::MakeShape(F32, {16, 4, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 16, 7});
HloSharding input_sharding = HloSharding::IotaTile({2, 1, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledArbitraryMinorDimensions) {
Shape input_shape = ShapeUtil::MakeShape(F32, {16, 7, 5, 3});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 15, 2, 14});
HloSharding sharding = HloSharding::IotaTile({2, 1, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTiledTrivialDimensions) {
Shape input_shape = ShapeUtil::MakeShape(F32, {3, 1, 5, 7});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 5, 1, 7});
HloSharding input_sharding = HloSharding::IotaTile({1, 1, 2, 1});
HloSharding output_sharding = HloSharding::IotaTile({1, 2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTrivialDimensionInsertedToEnd) {
Shape input_shape = ShapeUtil::MakeShape(F32, {8, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {8, 16, 1});
HloSharding input_sharding = HloSharding::IotaTile({2, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, NoopReshapeShardingEmptyTile) {
Shape shape = ShapeUtil::MakeShape(F32, {7, 1, 1});
HloSharding sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result = ReshapeSharding(shape, shape, sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingScalar) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 1, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {});
HloSharding sharding = HloSharding::IotaTile({2, 1, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_FALSE(result.has_value());
}
TEST(HloShardingUtilTest, ReshapeShardingSuffixShapeSizeOne1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {64, 1, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {64, 1});
HloSharding input_sharding = HloSharding::IotaTile({4, 1, 1});
HloSharding output_sharding = HloSharding::IotaTile({4, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
result = ReshapeSharding(output_shape, input_shape, output_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), input_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingSuffixShapeSizeOne2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {64, 1, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {64, 1});
HloSharding input_sharding = HloSharding::IotaTile({4, 2, 8});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({4, 2, 8}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingSuffixShapeSizeOne3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {64, 1});
Shape output_shape = ShapeUtil::MakeShape(F32, {64, 1, 1});
HloSharding input_sharding = HloSharding::IotaTile({4, 2});
HloSharding output_sharding = HloSharding::IotaTile({4, 2, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingPrefixShapeSizeOne1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 1, 64});
Shape output_shape = ShapeUtil::MakeShape(F32, {1, 64});
HloSharding input_sharding = HloSharding::IotaTile({1, 1, 4});
HloSharding output_sharding = HloSharding::IotaTile({1, 4});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
result = ReshapeSharding(output_shape, input_shape, output_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), input_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingPrefixShapeSizeOne2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {1, 1, 64});
Shape output_shape = ShapeUtil::MakeShape(F32, {1, 64});
HloSharding input_sharding = HloSharding::IotaTile({2, 1, 1});
HloSharding output_sharding = HloSharding::IotaTile({2, 1});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
result = ReshapeSharding(output_shape, input_shape, output_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), input_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTranspose1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {6, 2, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 3, 5});
HloSharding sharding = HloSharding::IotaTile({2, 1, 5});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTranspose2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5, 7, 11});
Shape output_shape = ShapeUtil::MakeShape(F32, {10, 21, 11});
HloSharding input_sharding = HloSharding::IotaTile({2, 1, 1, 1, 13});
HloSharding output_sharding = HloSharding::IotaTile({2, 1, 13});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
TEST(HloShardingUtilTest, ReshapeShardingTranspose3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 10});
HloSharding input_sharding = HloSharding::IotaTile({1, 1, 5});
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_FALSE(result.has_value());
}
TEST(HloShardingUtilTest, ReshapeShardingTranspose4) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 3, 5, 7, 11, 13, 17, 19});
Shape output_shape = ShapeUtil::MakeShape(F32, {3, 2, 55, 91, 19, 17});
HloSharding input_sharding = HloSharding::IotaTile({1, 1, 5, 1, 1, 13, 1, 1});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({1, 1, 5, 1, 1, 1, 13}));
std::optional<HloSharding> result =
ReshapeSharding(input_shape, output_shape, input_sharding);
EXPECT_TRUE(result.has_value());
EXPECT_EQ(result.value(), output_sharding);
}
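// ReshapeToTileDimension(sharding, dim, dims) collapses the tiling of all of
// `dims` onto the single dimension `dim`, leaving the other listed dimensions
// untiled; e.g. a 2x2 tiling reshaped onto dimension 0 becomes a 4x1 tiling.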
TEST(HloShardingUtilTest, ReshapeToTileDimension2D) {
std::vector<HloSharding> shardings = {HloSharding::IotaTile({2, 2}),
HloSharding::Tile({{0, 1}, {2, 3}})};
for (const HloSharding& sharding : shardings) {
EXPECT_EQ(ReshapeToTileDimension(sharding, 0, {0, 1})
.tile_assignment(),
TileAssignment((absl::Span<const int64_t>){4, 1}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1})
.tile_assignment(),
TileAssignment({1, 4}, {2, 2}, {1, 0}));
}
}
TEST(HloShardingUtilTest, ReshapeToTileDimension3D_Case1) {
std::vector<HloSharding> shardings = {
HloSharding::IotaTile({2, 2, 2}),
HloSharding::Tile({{{0, 1}, {2, 3}}, {{4, 5}, {6, 7}}})};
for (const HloSharding& sharding : shardings) {
EXPECT_EQ(ReshapeToTileDimension(sharding, 0, {0, 1, 2})
.tile_assignment(),
TileAssignment({8, 1, 1}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 2})
.tile_assignment(),
TileAssignment({1, 8, 1}, {2, 2, 2}, {1, 0, 2}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 2, {0, 1, 2})
.tile_assignment(),
TileAssignment({1, 1, 8}, {4, 2}, {1, 0}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 2,
{1, 2})
.tile_assignment(),
TileAssignment({2, 1, 4}, {2, 2, 2}, {0, 2, 1}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 0,
{0, 2})
.tile_assignment(),
TileAssignment({4, 2, 1}, {2, 2, 2}, {1, 0, 2}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 2,
{0, 2})
.tile_assignment(),
TileAssignment({1, 2, 4}, {2, 2, 2}, {1, 2, 0}));
}
}
TEST(HloShardingUtilTest, ReshapeToTileDimension3D_Case2) {
std::vector<HloSharding> shardings = {
HloSharding::IotaTile({2, 2, 2}, {4, 2}, {1, 0}),
HloSharding::Tile({{{0, 2}, {4, 6}}, {{1, 3}, {5, 7}}})};
for (const HloSharding& sharding : shardings) {
EXPECT_EQ(ReshapeToTileDimension(sharding, 0, {0, 1, 2})
.tile_assignment(),
TileAssignment({8, 1, 1}, {4, 2}, {1, 0}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 2})
.tile_assignment(),
TileAssignment({1, 8, 1}, {2, 2, 2}, {0, 2, 1}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 2, {0, 1, 2})
.tile_assignment(),
TileAssignment({1, 1, 8}, {2, 4}, {1, 0}));
}
}
TEST(HloShardingUtilTest, ReshapeToTileDimension4D) {
HloSharding sharding1 = HloSharding::IotaTile({2, 3, 5, 7});
HloSharding sharding2 =
HloSharding::Tile(sharding1.tile_assignment().array());
std::vector<HloSharding> shardings = {sharding1, sharding2};
for (const HloSharding& sharding : shardings) {
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1})
.tile_assignment(),
TileAssignment({1, 6, 5, 7}, {2, 3, 5, 7}, {2, 3, 1, 0}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {1, 2})
.tile_assignment(),
TileAssignment({2, 15, 1, 7}, {2, 3, 5, 7}, {0, 3, 1, 2}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {1, 3})
.tile_assignment(),
TileAssignment({2, 21, 5, 1}, {2, 3, 5, 7}, {0, 2, 1, 3}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 2})
.tile_assignment(),
TileAssignment({1, 30, 1, 7}, {2, 3, 5, 7}, {3, 1, 0, 2}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 3})
.tile_assignment(),
TileAssignment({1, 42, 5, 1}, {2, 3, 5, 7}, {2, 1, 0, 3}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {1, 2, 3})
.tile_assignment(),
TileAssignment({2, 105, 1, 1}, {2, 3, 5, 7}, {0, 1, 2, 3}));
EXPECT_EQ(ReshapeToTileDimension(sharding, 1, {0, 1, 2, 3})
.tile_assignment(),
TileAssignment({1, 210, 1, 1}, {2, 3, 5, 7}, {1, 0, 2, 3}));
}
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTranspose1) {
Shape input_shape = ShapeUtil::MakeShape(F32, {6, 4});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 2, 3, 2});
HloSharding input_sharding = HloSharding::IotaTile({6, 1});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 1, 1, 3}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTranspose2) {
Shape input_shape = ShapeUtil::MakeShape(F32, {6, 4});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 6});
HloSharding input_sharding = HloSharding::IotaTile({6, 1});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 3}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTranspose3) {
Shape input_shape = ShapeUtil::MakeShape(F32, {4, 6, 5});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 2, 2, 5, 3});
HloSharding input_sharding = HloSharding::IotaTile({2, 6, 1});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({2, 1, 2, 1, 1, 3}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTiledSplitPartialMatch) {
Shape input_shape = ShapeUtil::MakeShape(F32, {14, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 7, 4, 4});
HloSharding input_sharding = HloSharding::IotaTile({4, 8});
HloSharding output_sharding =
HloSharding::PartialTile(TileAssignment({1, 1, 4, 2, 4}, {4, 8}, {1, 0}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, PropagateReshapeShardingTiledMergeSplitPartialMatch) {
Shape input_shape = ShapeUtil::MakeShape(F32, {2, 2, 14, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {4, 2, 7, 4, 4});
HloSharding input_sharding = HloSharding::IotaTile({2, 2, 4, 8});
HloSharding output_sharding = HloSharding::PartialTile(
TileAssignment({4, 1, 1, 4, 2, 4}, {2, 2, 4, 8}, {0, 1, 3, 2}));
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest,
PropagateReshapeShardingTiledSplitPartialMatchManual) {
Shape input_shape = ShapeUtil::MakeShape(F32, {14, 16});
Shape output_shape = ShapeUtil::MakeShape(F32, {2, 7, 4, 4});
HloSharding input_sharding =
HloSharding::Subgroup(TileAssignment({4, 8, 2}), {OpSharding::MANUAL});
HloSharding output_sharding = HloSharding::Subgroup(
TileAssignment({1, 1, 4, 2, 4, 2}, {4, 8, 2}, {1, 0, 2}),
{OpSharding::REPLICATED, OpSharding::MANUAL});
HloSharding result = PropagateShardingThroughReshape(
input_shape, output_shape, input_sharding);
EXPECT_EQ(result, output_sharding);
}
TEST(HloShardingUtilTest, MergeManualSubgroupSharding) {
TileAssignment tile_assignment({16, 4});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
HloSharding dst = HloSharding::Subgroup(tile_assignment, subgroup_types);
HloSharding to_merge = dst;
EXPECT_FALSE(MergeShardingIfCompatible(to_merge, dst.NumTiles() + 1, &dst));
}
TEST(HloShardingUtilTest, GetManualSubgroupSharding_ManualOnly) {
TileAssignment tile_assignment({1, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL};
HloSharding sharding = HloSharding::Subgroup(tile_assignment, subgroup_types);
GroupedSharding group_sharding = GetManualSubgroupSharding(sharding);
EXPECT_EQ(group_sharding.sharding.tile_assignment(),
TileAssignment((absl::Span<const int64_t>){1, 2}));
EXPECT_THAT(group_sharding.device_groups[0],
::testing::ElementsAreArray({0, 2}));
EXPECT_THAT(group_sharding.device_groups[1],
::testing::ElementsAreArray({1, 3}));
}
TEST(HloShardingUtilTest, GetManualSubgroupSharding_ManualAndReplicated) {
TileAssignment tile_assignment({1, 2, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::REPLICATED,
OpSharding::MANUAL};
HloSharding sharding = HloSharding::Subgroup(tile_assignment, subgroup_types);
GroupedSharding group_sharding = GetManualSubgroupSharding(sharding);
EXPECT_EQ(group_sharding.sharding.ToString(),
"{devices=[1,2,2]<=[4] last_tile_dim_replicate}");
EXPECT_THAT(group_sharding.device_groups[0],
::testing::ElementsAreArray({0, 2, 4, 6}));
EXPECT_THAT(group_sharding.device_groups[1],
::testing::ElementsAreArray({1, 3, 5, 7}));
}
TEST(HloShardingUtilTest, GetManualSubgroupSharding_ReplicatedAndManual) {
TileAssignment tile_assignment({1, 2, 2, 2});
std::vector<OpSharding::Type> subgroup_types = {OpSharding::MANUAL,
OpSharding::REPLICATED};
HloSharding sharding = HloSharding::Subgroup(tile_assignment, subgroup_types);
GroupedSharding group_sharding = GetManualSubgroupSharding(sharding);
EXPECT_EQ(group_sharding.sharding.ToString(),
"{devices=[1,2,2]<=[4] last_tile_dim_replicate}");
EXPECT_THAT(group_sharding.device_groups[0],
::testing::ElementsAreArray({0, 1, 4, 5}));
EXPECT_THAT(group_sharding.device_groups[1],
::testing::ElementsAreArray({2, 3, 6, 7}));
}
TEST(HloShardingUtilTest, UngroupSharding_ManualOnly) {
HloSharding sharding = HloSharding::IotaTile({1, 2});
std::vector<std::vector<int64_t>> device_groups = {{0, 2}, {1, 3}};
DimensionVector group_dims = {2};
DimensionVector group_dim_sizes = {2};
auto grouped = GroupedSharding(
std::move(device_groups), std::move(group_dims),
std::move(group_dim_sizes), sharding.tile_assignment().num_dimensions(),
sharding, true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
EXPECT_EQ(ungroup_sharding.ToString(),
"{devices=[1,2,2]0,1,2,3 last_tile_dims={manual}}");
}
TEST(HloShardingUtilTest, UngroupSharding_ReplicatedAndManual) {
HloSharding sharding = HloSharding::PartialTile(TileAssignment({1, 2, 2}));
std::vector<std::vector<int64_t>> device_groups = {{0, 2, 4, 6},
{1, 3, 5, 7}};
DimensionVector group_dims = {3};
DimensionVector group_dim_sizes = {2};
auto grouped =
GroupedSharding(std::move(device_groups), std::move(group_dims),
std::move(group_dim_sizes),
sharding.tile_assignment().num_dimensions() - 1, sharding,
true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
VLOG(1) << "ungroup_sharding: " << ungroup_sharding.ToString();
EXPECT_EQ(
ungroup_sharding.ToString(),
"{devices=[1,2,2,2]0,2,1,3,4,6,5,7 last_tile_dims={manual, replicated}}");
}
TEST(HloShardingUtilTest, UngroupSharding_ManualAndReplicated) {
HloSharding sharding = HloSharding::PartialTile(TileAssignment({1, 2, 2}));
std::vector<std::vector<int64_t>> device_groups = {{0, 1, 4, 5},
{2, 3, 6, 7}};
DimensionVector group_dims = {2};
DimensionVector group_dim_sizes = {2};
auto grouped =
GroupedSharding(std::move(device_groups), std::move(group_dims),
std::move(group_dim_sizes),
sharding.tile_assignment().num_dimensions() - 1, sharding,
true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
VLOG(1) << "ungroup_sharding: " << ungroup_sharding.ToString();
EXPECT_EQ(
ungroup_sharding.ToString(),
"{devices=[1,2,2,2]0,1,2,3,4,5,6,7 last_tile_dims={manual, replicated}}");
}
TEST(HloShardingUtilTest, UngroupSharding_Replicated) {
HloSharding sharding = HloSharding::Replicate();
DimensionVector group_dims = {3};
DimensionVector group_dim_sizes = {2};
std::vector<std::vector<int64_t>> device_groups = {{0, 1}, {2, 3}};
auto grouped =
GroupedSharding(std::move(device_groups), std::move(group_dims),
std::move(group_dim_sizes), 2, sharding,
true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
VLOG(1) << "ungroup_sharding: " << ungroup_sharding.ToString();
EXPECT_EQ(ungroup_sharding.ToString(),
"{devices=[1,1,2,2]0,1,2,3 last_tile_dims={manual, replicated}}");
}
TEST(HloShardingUtilTest, UngroupSharding_Replicated2) {
HloSharding sharding = HloSharding::Replicate();
DimensionVector group_dims = {2};
DimensionVector group_dim_sizes = {2};
std::vector<std::vector<int64_t>> device_groups = {{0, 2}, {1, 3}};
auto grouped =
GroupedSharding(std::move(device_groups), std::move(group_dims),
std::move(group_dim_sizes), 2, sharding,
true);
HloSharding ungroup_sharding = UngroupSharding(grouped);
VLOG(1) << "ungroup_sharding: " << ungroup_sharding.ToString();
EXPECT_EQ(ungroup_sharding.ToString(),
"{devices=[1,1,2,2]0,2,1,3 last_tile_dims={manual, replicated}}");
}
TEST(HloShardingUtilTest, GroupedAndUngroupedReplicatedSharding) {
  GroupedSharding group_sharding = GetGroupedReplicatedSharding(
      /*num_groups=*/3, /*num_tiles=*/12, /*data_rank=*/2);
EXPECT_EQ(UngroupSharding(group_sharding), HloSharding::Replicate());
}
TEST(HloShardingUtilTest, GroupedAndUngroupedIotaSharding) {
std::vector<std::vector<int64_t>> device_groups = {{0, 1, 2, 3, 4, 5},
{6, 7, 8, 9, 10, 11}};
GroupedSharding group_sharding = GroupedSharding(
device_groups, {0}, {2},
2, HloSharding::IotaTile({1, 2, 3}, {2, 3}, {1, 0}));
EXPECT_EQ(UngroupSharding(group_sharding),
HloSharding::IotaTile({2, 2, 3}, {2, 2, 3}, {0, 2, 1}));
}
TEST(HloShardingUtilTest, GroupedAndUngroupedShardingWithUnsortedGroupDims) {
HloSharding sharding = HloSharding::IotaTile({4, 3, 5, 7});
GroupedSharding group_sharding =
GroupShardingOnDims(sharding, {2, 0}, {1, 2});
EXPECT_EQ(group_sharding.sharding, HloSharding::IotaTile({2, 3, 1, 7}));
EXPECT_EQ(UngroupSharding(group_sharding), sharding);
}
TEST(HloShardingUtilTest, UngroupShardingWithUnsortedGroupDims) {
GroupedSharding group_sharding({{0}, {1}, {2}, {3}}, {1, 0}, {2, 2}, 4,
HloSharding::Replicate());
EXPECT_EQ(UngroupSharding(group_sharding),
HloSharding::IotaTile({2, 2, 1, 1}, {2, 2}, {1, 0}));
}
TEST(HloShardingUtilTest, DeviceGroupsDoesNotMatch) {
HloSharding sharding = HloSharding::PartialTile(
TileAssignment((absl::Span<const int64_t>){2, 2}));
DimensionVector group_dim_sizes = {2};
std::vector<std::vector<int64_t>> lhs_device_groups = {{0, 2, 4, 6},
{1, 3, 5, 7}};
DimensionVector lhs_group_dims = {3};
auto lhs =
GroupedSharding(std::move(lhs_device_groups), std::move(lhs_group_dims),
group_dim_sizes, 2, sharding,
true);
std::vector<std::vector<int64_t>> rhs_device_groups = {{0, 1, 4, 5},
{2, 3, 6, 7}};
  DimensionVector
#ifndef XLA_HLO_UTILS_HLO_LIVE_RANGE_H_
#define XLA_HLO_UTILS_HLO_LIVE_RANGE_H_
#include <cstdint>
#include <memory>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_value.h"
namespace xla {
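// Computes the live range (start and end logical time) of every HLO value,
// based on a flattened, totally ordered view of the given schedule and on the
// buffer aliasing information from HloAliasAnalysis.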
class HloLiveRange {
public:
static absl::StatusOr<std::unique_ptr<HloLiveRange>> Run(
const HloSchedule& schedule, const HloAliasAnalysis& alias_analysis,
const HloComputation* computation, bool module_scoped_analysis = true);
using LogicalTime = int64_t;
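  // Live range of a value: `start` and `end` are indices into the flattened
  // instruction sequence, and `end_position` is the position at which the
  // value is last live.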
struct TimeBound {
LogicalTime start;
LogicalTime end;
HloPosition end_position;
    friend bool operator==(const TimeBound& a, const TimeBound& b) {
      return a.start == b.start && a.end == b.end;
    }
    friend bool operator!=(const TimeBound& a, const TimeBound& b) {
      return !(a == b);
    }
};
std::string ToString() const;
const HloInstructionSequence& flattened_instruction_sequence() const {
return flattened_instruction_sequence_;
}
const absl::flat_hash_map<const HloInstruction*, LogicalTime>&
instruction_schedule() const {
return instruction_schedule_;
}
const absl::flat_hash_map<const HloValue*, TimeBound>& buffer_live_ranges()
const {
return buffer_live_ranges_;
}
absl::flat_hash_map<const HloValue*, TimeBound>& buffer_live_ranges() {
return buffer_live_ranges_;
}
const absl::flat_hash_map<const HloComputation*, TimeBound>&
computation_span_times() const {
return computation_span_times_;
}
LogicalTime schedule_end_time() const {
return flattened_instruction_sequence_.size();
}
bool total_order_scheduled() const { return total_order_scheduled_; }
private:
explicit HloLiveRange(const HloSchedule& schedule,
const HloAliasAnalysis& alias_analysis,
bool module_scoped_analysis)
: schedule_(schedule),
alias_analysis_(alias_analysis),
module_scoped_analysis_(module_scoped_analysis) {}
void FlattenSchedule(const HloComputation& computation,
const HloComputation* async_context = nullptr);
TimeBound GetLastPosition(const HloValue& value,
LogicalTime definition_end_time) const;
LogicalTime GetLastUsageTime(const HloValue& value) const;
void CalculateBufferStartEndMap();
void NormalizeAliasedBuffers();
LogicalTime ComputePeakMemoryMoment() const;
const HloSchedule& schedule_;
const HloAliasAnalysis& alias_analysis_;
bool module_scoped_analysis_;
bool total_order_scheduled_ = true;
HloInstructionSequence flattened_instruction_sequence_;
absl::flat_hash_map<const HloInstruction*, LogicalTime> instruction_schedule_;
absl::flat_hash_map<const HloComputation*, TimeBound> computation_span_times_;
absl::flat_hash_map<const HloValue*, TimeBound> buffer_live_ranges_;
absl::flat_hash_map<const HloComputation*, const HloComputation*>
computations_in_async_context_;
};
}
#endif
#include "xla/hlo/utils/hlo_live_range.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_value.h"
#include "xla/shape_util.h"
#include "tsl/platform/logging.h"
namespace xla {
absl::StatusOr<std::unique_ptr<HloLiveRange>> HloLiveRange::Run(
const HloSchedule& schedule, const HloAliasAnalysis& alias_analysis,
const HloComputation* computation, bool module_scoped_analysis) {
std::unique_ptr<HloLiveRange> hlo_live_range(
new HloLiveRange(schedule, alias_analysis, module_scoped_analysis));
hlo_live_range->FlattenSchedule(*computation);
hlo_live_range->CalculateBufferStartEndMap();
hlo_live_range->NormalizeAliasedBuffers();
return std::move(hlo_live_range);
}
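// Values that share an HloBuffer alias the same memory, so their live ranges
// must not overlap arbitrarily: sort the aliased ranges by (start, end, id),
// clamp each range to end no later than the next one starts, and extend the
// later range to cover the remainder.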
void HloLiveRange::NormalizeAliasedBuffers() {
absl::flat_hash_map<HloBuffer::Id,
std::vector<std::pair<TimeBound*, HloValue::Id>>>
live_ranges_by_buffer;
for (auto& entry : buffer_live_ranges_) {
const HloValue& value = *entry.first;
const HloBuffer& buffer = alias_analysis_.GetBufferContainingValue(value);
live_ranges_by_buffer[buffer.id()].push_back({&entry.second, value.id()});
}
for (auto& entry : live_ranges_by_buffer) {
auto& aliased_live_ranges = entry.second;
absl::c_sort(
aliased_live_ranges, [](std::pair<const TimeBound*, HloValue::Id> a,
std::pair<const TimeBound*, HloValue::Id> b) {
return std::forward_as_tuple(a.first->start, a.first->end, a.second) <
std::forward_as_tuple(b.first->start, b.first->end, b.second);
});
for (int64_t i = 0; i + 1 < aliased_live_ranges.size(); ++i) {
TimeBound& live_range1 = *aliased_live_ranges[i].first;
TimeBound& live_range2 = *aliased_live_ranges[i + 1].first;
live_range2.end = std::max(live_range1.end, live_range2.end);
live_range1.end = std::min(live_range1.end, live_range2.start);
}
}
}
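// Appends the computation's scheduled instructions to the global flattened
// sequence. With module-scoped analysis, called computations (kCall,
// kConditional, kAsyncStart, and while condition/body) are flattened inline
// before their caller instruction, and computations inside an async context
// are recorded in computations_in_async_context_. A computation without a
// sequence marks the schedule as not totally ordered.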
void HloLiveRange::FlattenSchedule(const HloComputation& computation,
const HloComputation* async_context) {
auto it = schedule_.sequences().find(computation.unique_id());
if (it == schedule_.sequences().end()) {
total_order_scheduled_ = false;
return;
}
if (computation_span_times_.contains(&computation)) return;
if (async_context != nullptr) {
computations_in_async_context_[&computation] = async_context;
}
LogicalTime start_time = flattened_instruction_sequence_.size();
const HloInstructionSequence& instruction_sequence = it->second;
for (HloInstruction* instruction : instruction_sequence.instructions()) {
if (module_scoped_analysis_) {
if (instruction->opcode() == HloOpcode::kCall ||
instruction->opcode() == HloOpcode::kConditional ||
instruction->opcode() == HloOpcode::kAsyncStart) {
for (const HloComputation* called_computation :
instruction->called_computations()) {
FlattenSchedule(*called_computation,
instruction->opcode() == HloOpcode::kAsyncStart
? called_computation
: async_context);
}
} else if (instruction->opcode() == HloOpcode::kWhile) {
FlattenSchedule(*instruction->while_condition(), async_context);
FlattenSchedule(*instruction->while_body(), async_context);
}
}
LogicalTime time = flattened_instruction_sequence_.size();
CHECK(instruction_schedule_.insert({instruction, time}).second);
flattened_instruction_sequence_.push_back(instruction);
}
LogicalTime end_time = flattened_instruction_sequence_.size();
computation_span_times_[&computation] = {start_time, end_time};
}
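// Returns the latest logical time at which `value` appears in one of its
// positions. A position at the root of a computation is treated as live until
// that computation's end time. `start` is left as -1 for the caller to fill in.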
HloLiveRange::TimeBound HloLiveRange::GetLastPosition(
const HloValue& value,
HloLiveRange::LogicalTime definition_end_time) const {
LogicalTime end_time = definition_end_time;
const HloPosition* end_position = &value.defining_position();
for (const HloPosition& position :
absl::Span<const HloPosition>(value.positions()).subspan(1)) {
const HloInstruction* position_inst = position.instruction;
LogicalTime position_time;
if (position_inst->IsRoot()) {
auto it = computation_span_times_.find(position_inst->parent());
if (it == computation_span_times_.end()) continue;
position_time = it->second.end;
} else {
auto it = instruction_schedule_.find(position_inst);
if (it == instruction_schedule_.end()) continue;
position_time = it->second;
}
if (position_time > end_time) {
end_time = position_time;
end_position = &position;
}
}
return {-1, end_time, *end_position};
}
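// Returns the latest scheduled time of any use of `value`. With module-scoped
// analysis, uses by kCall are ignored (the callee is flattened separately) and
// a use by kWhile is attributed to the while body's parameter instead.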
HloLiveRange::LogicalTime HloLiveRange::GetLastUsageTime(
const HloValue& value) const {
LogicalTime end_time = -1;
for (const HloUse& use : value.GetUses()) {
const HloInstruction* used = use.instruction;
if (module_scoped_analysis_ && used->opcode() == HloOpcode::kCall) continue;
if (module_scoped_analysis_ && used->opcode() == HloOpcode::kWhile) {
used = used->while_body()->parameter_instruction(0);
VLOG(1) << "Moved value " << value.ToShortString()
<< " to while param: " << used->ToString();
}
auto it = instruction_schedule_.find(used);
if (it != instruction_schedule_.end()) {
end_time = std::max(end_time, it->second);
}
}
return end_time;
}
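// Computes the initial live range of every value: values start at their
// defining instruction (parameters at the start of their computation) and end
// at their last position or use. Roots extend to the end of their computation,
// values defined inside an async context extend to the matching async-done,
// and non-aliased entry parameters stay live until the end of the schedule.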
void HloLiveRange::CalculateBufferStartEndMap() {
for (const auto& entry : instruction_schedule_) {
const HloInstruction& instruction = *entry.first;
const HloComputation* computation = instruction.parent();
LogicalTime start_time = (instruction.opcode() == HloOpcode::kParameter)
? computation_span_times_[computation].start
: entry.second;
LogicalTime definition_end_time =
instruction.IsRoot() ? computation_span_times_[computation].end
: entry.second;
auto async_context_it = computations_in_async_context_.find(computation);
if (async_context_it != computations_in_async_context_.end()) {
const HloComputation* async_context = async_context_it->second;
CHECK(async_context->IsAsyncComputation());
auto async_done = async_context->AsyncStart()->async_chain_done();
auto async_done_it = instruction_schedule_.find(async_done);
CHECK(async_done_it != instruction_schedule_.end());
definition_end_time =
std::max(definition_end_time, async_done_it->second);
VLOG(2) << "Setting the definition end time for op in async context: "
<< definition_end_time;
}
const InstructionValueSet& value_set_tree =
alias_analysis_.dataflow_analysis().GetInstructionValueSet(
&instruction);
for (const auto& entry : value_set_tree) {
for (const HloValue* value : entry.second.values()) {
if (value->defining_instruction() != &instruction) continue;
TimeBound live_range = GetLastPosition(*value, definition_end_time);
live_range.start = start_time;
const HloModule& module = *computation->parent();
if (instruction.opcode() == HloOpcode::kParameter &&
computation == module.entry_computation() &&
!module.input_output_alias_config().ParameterHasAlias(
instruction.parameter_number(), value->index())) {
live_range.end = schedule_end_time();
} else {
live_range.end = std::max(live_range.end, GetLastUsageTime(*value));
}
CHECK_LE(live_range.start, live_range.end) << instruction.ToString();
CHECK(buffer_live_ranges_.insert({value, live_range}).second);
}
}
}
}
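// Sweep-line over live-range start/end events to find the logical time at
// which simulated memory usage peaks; buffer sizes come from
// ShapeUtil::ByteSizeOf with an 8-byte pointer size.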
int64_t HloLiveRange::ComputePeakMemoryMoment() const {
  std::vector<std::tuple<int64_t /*time*/, bool /*is_end*/, const HloValue*>>
      events;
for (const HloValue* value : alias_analysis_.dataflow_analysis().values()) {
auto it = buffer_live_ranges_.find(value);
if (it != buffer_live_ranges_.end()) {
events.emplace_back(it->second.start, false, value);
events.emplace_back(it->second.end + 1, true, value);
}
}
std::sort(events.begin(), events.end());
int64_t memory_usage = 0;
int64_t peak_usage = 0;
std::optional<int64_t> peak_time;
for (const auto& event : events) {
int64_t time;
bool is_end;
const HloValue* value;
std::tie(time, is_end, value) = event;
auto buffer_size = ShapeUtil::ByteSizeOf(value->instruction()->shape(), 8);
if (is_end) {
memory_usage -= buffer_size;
} else {
memory_usage += buffer_size;
}
if (peak_usage < memory_usage) {
peak_usage = memory_usage;
peak_time = time;
}
}
return peak_time.value_or(0);
}
std::string HloLiveRange::ToString() const {
std::string output;
absl::StrAppendFormat(&output, "HloLiveRange (max %d):\n",
schedule_end_time());
absl::StrAppendFormat(&output, " InstructionSequence:\n");
auto& instructions = flattened_instruction_sequence().instructions();
for (int64_t i = 0; i < instructions.size(); ++i) {
absl::StrAppendFormat(&output, " %d:%s\n", i, instructions[i]->name());
}
absl::StrAppendFormat(&output, " BufferLiveRange:\n");
for (const HloValue* value : alias_analysis_.dataflow_analysis().values()) {
auto it = buffer_live_ranges_.find(value);
if (it != buffer_live_ranges_.end()) {
absl::StrAppendFormat(
&output, " %s%s:%d-%d\n", value->instruction()->name(),
value->index().ToString(), it->second.start, it->second.end);
}
}
int64_t peak_moment = ComputePeakMemoryMoment();
absl::StrAppendFormat(&output, " Live ranges at %lld (peak):\n",
peak_moment);
for (const HloValue* value : alias_analysis_.dataflow_analysis().values()) {
auto it = buffer_live_ranges_.find(value);
if (it != buffer_live_ranges_.end()) {
if (it->second.start <= peak_moment && peak_moment <= it->second.end) {
int64_t bytes = ShapeUtil::ByteSizeOf(value->instruction()->shape(), 8);
absl::StrAppendFormat(&output, " %s: %lld bytes\n",
value->instruction()->name(), bytes);
}
}
}
return output;
}
}
#include "xla/hlo/utils/hlo_live_range.h"
#include <cstdint>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_value.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using TimeBound = HloLiveRange::TimeBound;
class HloLiveRangeTest : public HloTestBase {
protected:
HloLiveRangeTest() : module_(CreateNewVerifiedModule()) {}
~HloLiveRangeTest() override {}
void Analyze(const HloSchedule& schedule) {
alias_analysis_ = HloAliasAnalysis::Run(module_.get()).value();
hlo_live_range_ = HloLiveRange::Run(schedule, *alias_analysis_,
module_->entry_computation())
.value();
}
std::unique_ptr<HloModule> module_;
std::unique_ptr<HloLiveRange> hlo_live_range_;
std::unique_ptr<HloAliasAnalysis> alias_analysis_;
Shape f32scalar_ = ShapeUtil::MakeShape(xla::F32, {});
Shape f32vec4_ = ShapeUtil::MakeShape(F32, {4});
const HloValue* BufferAt(const HloInstruction* instruction,
const ShapeIndex& index) const {
return &alias_analysis_->dataflow_analysis().GetUniqueValueAt(instruction,
index);
}
HloLiveRange::TimeBound LiveRangeAt(const HloInstruction* instruction,
const ShapeIndex& index = {}) const {
auto* value = BufferAt(instruction, index);
return hlo_live_range_->buffer_live_ranges().at(value);
}
void CheckSchedule() const {
const auto& flattened_instructions =
hlo_live_range_->flattened_instruction_sequence().instructions();
EXPECT_EQ(flattened_instructions.size(),
hlo_live_range_->instruction_schedule().size());
for (const auto& inst_and_time : hlo_live_range_->instruction_schedule()) {
EXPECT_EQ(flattened_instructions.at(inst_and_time.second),
inst_and_time.first)
<< "(flattened_inst[" << inst_and_time.second
<< "] = " << flattened_instructions.at(inst_and_time.second)->name()
<< ") != (inst_schedule[" << inst_and_time.second
<< "] = " << inst_and_time.first->name() << ")";
}
}
};
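// With the schedule {paramA, paramX, mul}, logical times are 0, 1, 2 and the
// schedule end time is 3. Entry parameters and values that are live out of the
// computation keep their ranges open until the schedule end.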
TEST_F(HloLiveRangeTest, Multiply) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(), {paramA, paramX, mul});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 3}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 3}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 3}));
}
TEST_F(HloLiveRangeTest, MultiplyAdd) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(),
{paramA, paramX, mul, paramY, add});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(paramY), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 4}));
EXPECT_EQ(LiveRangeAt(add), TimeBound({4, 5}));
}
TEST_F(HloLiveRangeTest, LiveOutBuffers) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({mul, add}));
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(),
{paramA, paramX, mul, paramY, add, tuple});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 6}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 6}));
EXPECT_EQ(LiveRangeAt(paramY), TimeBound({0, 6}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 6}));
EXPECT_EQ(LiveRangeAt(add), TimeBound({4, 6}));
}
TEST_F(HloLiveRangeTest, InstructionScheduledAfterRoot) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
auto add2 = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({mul, add}));
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(),
{paramA, paramX, mul, paramY, add, tuple, add2});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 7}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 7}));
EXPECT_EQ(LiveRangeAt(paramY), TimeBound({0, 7}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 7}));
EXPECT_EQ(LiveRangeAt(add), TimeBound({4, 7}));
EXPECT_EQ(LiveRangeAt(tuple), TimeBound({5, 7}));
EXPECT_EQ(LiveRangeAt(add2), TimeBound({6, 6}));
}
TEST_F(HloLiveRangeTest, AliasedParameter) {
auto builder = HloComputation::Builder(TestName());
auto paramA = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32vec4_, "paramA"));
auto paramX = builder.AddInstruction(
HloInstruction::CreateParameter(1, f32vec4_, "paramX"));
auto mul = builder.AddInstruction(HloInstruction::CreateBinary(
f32vec4_, HloOpcode::kMultiply, paramA, paramX));
auto paramY = builder.AddInstruction(
HloInstruction::CreateParameter(2, f32vec4_, "paramY"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(f32vec4_, HloOpcode::kAdd, mul, paramY));
module_->AddEntryComputation(builder.Build());
TF_ASSERT_OK(module_->input_output_alias_config().SetUpAlias({}, 0, {}));
HloSchedule schedule(module_.get());
schedule.set_sequence(module_->entry_computation(),
{paramA, paramX, mul, paramY, add});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(paramA), TimeBound({0, 2}));
EXPECT_EQ(LiveRangeAt(paramX), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(paramY), TimeBound({0, 5}));
EXPECT_EQ(LiveRangeAt(mul), TimeBound({2, 4}));
EXPECT_EQ(LiveRangeAt(add), TimeBound({4, 5}));
}
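// The while loop threads its operand tuple through the condition and body
// parameters, so each value's live range ends exactly where the next aliased
// value's range begins.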
TEST_F(HloLiveRangeTest, While) {
Shape shape = ShapeUtil::MakeShape(xla::F32, {2, 3});
Shape scalar_shape = ShapeUtil::MakeShape(xla::F32, {});
Shape tuple_shape = ShapeUtil::MakeTupleShape({shape, scalar_shape});
auto cond_builder = HloComputation::Builder("WhileCond");
HloInstruction* cond_param = cond_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "cond_param"));
HloInstruction* cond_iter = cond_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, cond_param, 1));
HloInstruction* cond_limit = cond_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(50.f)));
HloInstruction* cond_lt = cond_builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), cond_iter,
cond_limit, ComparisonDirection::kLt));
HloComputation* cond_computation =
module_->AddEmbeddedComputation(cond_builder.Build());
auto body_builder = HloComputation::Builder("WhileBody");
HloInstruction* body_param = body_builder.AddInstruction(
HloInstruction::CreateParameter(0, tuple_shape, "body_param"));
HloInstruction* body_iter = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(scalar_shape, body_param, 1));
HloInstruction* body_data = body_builder.AddInstruction(
HloInstruction::CreateGetTupleElement(shape, body_param, 0));
HloInstruction* body_iter_increment = body_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.f)));
HloInstruction* body_iter_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kAdd, body_iter, body_iter_increment));
HloInstruction* body_data_increment =
body_builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<float>({{1.f, 2.f, 3.f}, {4.f, 5.f, 6.f}})));
HloInstruction* body_data_mul =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kMultiply, body_data, body_data));
HloInstruction* body_data_add =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data, body_data_increment));
HloInstruction* body_data_next =
body_builder.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, body_data_add, body_data_mul));
HloInstruction* body_out = body_builder.AddInstruction(
HloInstruction::CreateTuple({body_data_next, body_iter_next}));
HloComputation* body_computation =
module_->AddEmbeddedComputation(body_builder.Build());
auto builder = HloComputation::Builder(TestName());
HloInstruction* data = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param_iter"));
HloInstruction* iter = builder.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "param_data"));
HloInstruction* tuple =
builder.AddInstruction(HloInstruction::CreateTuple({data, iter}));
HloInstruction* while_op = builder.AddInstruction(HloInstruction::CreateWhile(
tuple_shape, cond_computation, body_computation, tuple));
HloComputation* entry_computation =
module_->AddEntryComputation(builder.Build());
HloSchedule schedule(module_.get());
schedule.set_sequence(cond_computation,
{cond_param, cond_iter, cond_limit, cond_lt});
schedule.set_sequence(body_computation,
{body_param, body_iter, body_data, body_iter_increment,
body_iter_next, body_data_increment, body_data_mul,
body_data_add, body_data_next, body_out});
schedule.set_sequence(entry_computation, {iter, data, tuple, while_op});
Analyze(schedule);
CheckSchedule();
EXPECT_EQ(LiveRangeAt(iter).end, LiveRangeAt(cond_iter).start);
EXPECT_EQ(LiveRangeAt(cond_iter).end, LiveRangeAt(body_iter).start);
EXPECT_EQ(LiveRangeAt(body_iter).end, LiveRangeAt(body_iter_next).start);
}
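// Repeated HloLiveRange runs over the same schedule and alias analysis must
// produce identical live ranges for every value.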
TEST_F(HloLiveRangeTest, Determinism) {
std::string hlo_string = R"(
HloModule While, is_scheduled=true
%WhileBody {
%body_param = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) parameter(0)
%get-tuple-element.2 = f32[2,3]{1,0} get-tuple-element(%body_param), index=0
%constant.2 = f32[2,3]{1,0} constant({ { 1, 2, 3 }, { 4, 5, 6 } })
%add.1 = f32[2,3]{1,0} add(f32[2,3]{1,0} %get-tuple-element.2, f32[2,3]{1,0} %constant.2)
%multiply = f32[2,3]{1,0} multiply(f32[2,3]{1,0} %get-tuple-element.2, f32[2,3]{1,0} %get-tuple-element.2)
%add.2 = f32[2,3]{1,0} add(f32[2,3]{1,0} %add.1, f32[2,3]{1,0} %multiply)
%get-tuple-element.1 = f32[] get-tuple-element(%body_param), index=1
%constant.1 = f32[] constant(1)
%add = f32[] add(f32[] %get-tuple-element.1, f32[] %constant.1)
%get-tuple-element.3 = f32[2,3]{1,0} get-tuple-element(%body_param), index=2
%add.3 = f32[2,3]{1,0} add(f32[2,3]{1,0} %get-tuple-element.3, f32[2,3]{1,0} %constant.2)
ROOT %tuple = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) tuple(f32[2,3]{1,0} %add.2, f32[] %add, f32[2,3]{1,0} %add.3)
}
%WhileCond {
%cond_param = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) parameter(0)
%get-tuple-element = f32[] get-tuple-element(%cond_param), index=1
%constant = f32[] constant(50)
ROOT %compare = pred[] compare(f32[] %get-tuple-element, f32[] %constant), direction=LT
}
ENTRY %While {
%param_iter = f32[2,3]{1,0} parameter(0)
%param_data = f32[] parameter(1)
%tuple.1 = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) tuple(f32[2,3]{1,0} %param_iter, f32[] %param_data, f32[2,3]{1,0} %param_iter)
%while = (f32[2,3]{1,0}, f32[], f32[2,3]{1,0}) while(%tuple.1), condition=%WhileCond, body=%WhileBody
ROOT %get-tuple-element.4 = f32[2,3]{1,0} get-tuple-element(%while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
const HloSchedule& schedule = module_->schedule();
const int32_t num_runs = 20;
std::vector<std::unique_ptr<HloLiveRange>> hlo_live_ranges;
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(module_.get()).value();
for (int i = 0; i < num_runs; ++i) {
hlo_live_ranges.push_back(HloLiveRange::Run(schedule, *alias_analysis,
module_->entry_computation())
.value());
}
absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
buffer_live_ranges_0 = hlo_live_ranges[0]->buffer_live_ranges();
for (const auto& iter : buffer_live_ranges_0) {
for (size_t i = 1; i < num_runs; i++) {
absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
buffer_live_ranges_i = hlo_live_ranges[i]->buffer_live_ranges();
auto found_iter = buffer_live_ranges_i.find(iter.first);
EXPECT_TRUE(found_iter != buffer_live_ranges_i.end())
<< "value does not exist: " << iter.first->ToString();
EXPECT_EQ(found_iter->second.start, iter.second.start)
<< "value " << iter.first->ToString()
<< " has different start: " << found_iter->second.start << " vs "
<< iter.second.start;
EXPECT_EQ(found_iter->second.end, iter.second.end)
<< "value " << iter.first->ToString()
<< " has different end: " << found_iter->second.end << " vs "
<< iter.second.end;
}
}
}
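// Values fed into async-start (negate_0, negate_1) must remain live across
// the asynchronous region until the matching async-done executes.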
TEST_F(HloLiveRangeTest, AsyncCall) {
std::string hlo_string = R"(
HloModule AsyncCall, is_scheduled=true, entry_computation_layout={(f32[4096]{0},f32[4096]{0})->f32[4096]{0}}
%called_computation (param_0: f32[4096], param_1: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
%param_1 = f32[4096]{0} parameter(1)
%negate_2 = f32[4096]{0} negate(f32[4096]{0} %param_0)
%negate_3 = f32[4096]{0} negate(f32[4096]{0} %param_1)
ROOT %result.1 = f32[4096]{0} add(f32[4096]{0} %negate_2, f32[4096]{0} %negate_3)
}
%async_wrapped (async_param: f32[4096], async_param.1: f32[4096]) -> f32[4096] {
%async_param = f32[4096]{0} parameter(0)
%async_param.1 = f32[4096]{0} parameter(1)
ROOT %call = f32[4096]{0} call(f32[4096]{0} %async_param, f32[4096]{0} %async_param.1), to_apply=%called_computation
}
ENTRY %main (a: f32[4096], b: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} parameter(1)
%negate_0 = f32[4096]{0} negate(f32[4096]{0} %a)
%negate_1 = f32[4096]{0} negate(f32[4096]{0} %b)
%async-start = ((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) async-start(f32[4096]{0} %negate_0, f32[4096]{0} %negate_1), calls=%async_wrapped
%add_0 = f32[4096]{0} add(f32[4096]{0} %negate_0, f32[4096]{0} %negate_1)
%async-done = f32[4096]{0} async-done(((f32[4096]{0}, f32[4096]{0}), f32[4096]{0}, u32[]) %async-start)
ROOT %add_1 = f32[4096]{0} add(f32[4096]{0} %add_0, f32[4096]{0} %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
const HloSchedule& schedule = module_->schedule();
Analyze(schedule);
CheckSchedule();
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> aa,
HloAliasAnalysis::Run(module_.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(module_->schedule(), *aa,
module_->entry_computation()));
absl::flat_hash_map<std::string, std::pair<int32_t, int32_t>> inst_ranges;
for (auto& [value, time_bound] : hlo_live_range->buffer_live_ranges()) {
inst_ranges[value->instruction()->name()] = {time_bound.start,
time_bound.end};
}
EXPECT_EQ(inst_ranges["a"], std::make_pair(0, 16));
EXPECT_EQ(inst_ranges["b"], std::make_pair(0, 16));
EXPECT_EQ(inst_ranges["add_0"], std::make_pair(13, 15));
EXPECT_EQ(inst_ranges["add_1"], std::make_pair(15, 16));
EXPECT_EQ(inst_ranges["negate_0"], std::make_pair(2, 14));
EXPECT_EQ(inst_ranges["negate_1"], std::make_pair(3, 14));
}
TEST_F(HloLiveRangeTest, Call) {
std::string hlo_string = R"(
HloModule Call, is_scheduled=true
%called_computation (param_0: f32[4096]) -> f32[4096] {
%param_0 = f32[4096]{0} parameter(0)
ROOT %negate_0 = f32[4096]{0} negate(f32[4096]{0} %param_0)
}
ENTRY %main (a: f32[4096]) -> f32[4096] {
%a = f32[4096]{0} parameter(0)
%b = f32[4096]{0} negate(%a)
%c = f32[4096]{0} call(%b), to_apply=%called_computation
%d = f32[4096]{0} negate(%c)
ROOT %e = f32[4096]{0} add(%c, %d)
})";
TF_ASSERT_OK_AND_ASSIGN(module_, ParseAndReturnVerifiedModule(hlo_string));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloAliasAnalysis> aa,
HloAliasAnalysis::Run(module_.get()));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(module_->schedule(), *aa,
module_->entry_computation()));
absl::flat_hash_map<std::string, std::pair<int32_t, int32_t>> inst_ranges;
for (auto& [value, time_bound] : hlo_live_range->buffer_live_ranges()) {
inst_ranges[value->instruction()->name()] = {time_bound.start,
time_bound.end};
}
EXPECT_EQ(inst_ranges["a"], std::make_pair(0, 7));
EXPECT_EQ(inst_ranges["b"], std::make_pair(1, 3));
EXPECT_EQ(inst_ranges["negate_0"], std::make_pair(3, 6));
EXPECT_EQ(inst_ranges["d"], std::make_pair(5, 6));
EXPECT_EQ(inst_ranges["e"], std::make_pair(6, 7));
}
}
} | 2,166 |
#ifndef XLA_HLO_UTILS_HLO_QUERY_H_
#define XLA_HLO_UTILS_HLO_QUERY_H_
#include <cstdint>
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
namespace xla {
namespace hlo_query {
bool IsCollectiveCommunicationOp(HloOpcode op);
bool IsAsyncCollectiveStartOp(const HloInstruction* instruction,
bool include_send_recv = false);
bool IsAsyncCollectiveDoneOp(const HloInstruction* instruction,
bool include_send_recv = false);
bool IsConstantR0F32(HloInstruction* instruction, float* out);
bool AllOperandsAreParametersOrConstants(const HloInstruction& instruction);
bool AllOperandsAreParametersOrConstantsWithSingleUser(
const HloInstruction& instruction);
bool AllOperandsAreParameters(const HloInstruction& instruction);
bool AllOperandsAreConstants(const HloInstruction& instruction);
bool IsScalarConstant(const HloInstruction* instruction);
bool IsBroadcastedConstantOrScalar(const HloInstruction& instr);
bool IsBroadcastOfScalarConstant(const HloInstruction& instr);
bool IsBroadcastOfParameter(const HloInstruction& instr);
HloInstruction* GetFirstInstructionWithOpcode(const HloComputation& computation,
HloOpcode opcode);
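// Invokes `fn` on every instruction in the computation (or, in the overload
// below, in every computation of the module) whose opcode equals `opcode`.
// Usage sketch, assuming an existing HloComputation& comp:
//   hlo_query::ForEachInstructionWithOpcode(
//       comp, HloOpcode::kAdd,
//       [](HloInstruction* instr) { /* inspect or mutate instr */ });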
template <typename Fn>
void ForEachInstructionWithOpcode(HloComputation& computation, HloOpcode opcode,
Fn&& fn) {
for (HloInstruction* instr : computation.instructions()) {
if (instr->opcode() == opcode) {
fn(instr);
}
}
}
template <typename Fn>
void ForEachInstructionWithOpcode(HloModule& module, HloOpcode opcode,
Fn&& fn) {
for (HloComputation* computation : module.computations()) {
ForEachInstructionWithOpcode(*computation, opcode, fn);
}
}
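// Returns true if `comp`, or any computation it calls (transitively), contains
// an instruction whose opcode is in `opcodes`.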
bool ContainsInstrWithOpcode(const HloComputation* comp,
const absl::flat_hash_set<HloOpcode>& opcodes);
HloInstruction* GetMatchingOperand(const HloPredicate& matcher,
HloInstruction* instruction);
bool MatchBinaryInstructionOperand(const HloPredicate& matcher,
HloInstruction* instruction,
HloInstruction** matching_operand,
HloInstruction** other_operand);
bool MatchBinaryInstructionOperandOpcode(HloOpcode opcode,
HloInstruction* instruction,
HloInstruction** matching_operand,
HloInstruction** other_operand);
bool ContainsLayoutConstrainedCollective(const HloModule& module, HloOpcode op);
inline bool ContainsLayoutConstrainedAllReduce(const HloModule& module) {
return ContainsLayoutConstrainedCollective(module, HloOpcode::kAllReduce);
}
int64_t NextChannelId(const HloModule& module);
bool HasX64TransformedHostTransfer(const HloModule& module);
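// Returns the get-tuple-element instruction that reads `index` from `operand`,
// or nullptr if there is no such instruction or more than one.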
HloInstruction* GetUniqueGteInstruction(const HloInstruction* operand,
int64_t index);
}
}
#endif
#include "xla/hlo/utils/hlo_query.h"
#include <algorithm>
#include <cstdint>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/service/pattern_matcher.h"
#include "xla/shape_util.h"
namespace xla {
namespace hlo_query {
bool IsCollectiveCommunicationOp(HloOpcode op) {
return op == HloOpcode::kAllReduce || op == HloOpcode::kAllGather ||
op == HloOpcode::kAllToAll || op == HloOpcode::kCollectivePermute ||
op == HloOpcode::kCollectiveBroadcast ||
op == HloOpcode::kReduceScatter || op == HloOpcode::kAllReduceStart ||
op == HloOpcode::kAllGatherStart ||
op == HloOpcode::kCollectivePermuteStart;
}
bool IsAsyncCollectiveStartOp(const HloInstruction* instruction,
bool include_send_recv) {
HloOpcode op = instruction->opcode();
if (op == HloOpcode::kAsyncStart) {
return IsCollectiveCommunicationOp(instruction->async_wrapped_opcode());
}
return op == HloOpcode::kAllReduceStart || op == HloOpcode::kAllGatherStart ||
op == HloOpcode::kCollectivePermuteStart ||
(include_send_recv &&
(op == HloOpcode::kSend || op == HloOpcode::kRecv));
}
bool IsAsyncCollectiveDoneOp(const HloInstruction* instruction,
bool include_send_recv) {
HloOpcode op = instruction->opcode();
if (op == HloOpcode::kAsyncDone) {
return IsCollectiveCommunicationOp(instruction->async_wrapped_opcode());
}
return op == HloOpcode::kAllReduceDone || op == HloOpcode::kAllGatherDone ||
op == HloOpcode::kCollectivePermuteDone ||
(include_send_recv &&
(op == HloOpcode::kSendDone || op == HloOpcode::kRecvDone));
}
bool IsConstantR0F32(HloInstruction* instruction, float* out) {
if (instruction->opcode() == HloOpcode::kConstant &&
ShapeUtil::IsScalarWithElementType(instruction->shape(), F32)) {
*out = instruction->literal().Get<float>({});
return true;
}
return false;
}
bool AllOperandsAreParametersOrConstants(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter &&
operand->opcode() != HloOpcode::kConstant) {
return false;
}
}
return true;
}
bool AllOperandsAreParametersOrConstantsWithSingleUser(
const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter &&
operand->opcode() != HloOpcode::kConstant) {
return false;
}
if (operand->user_count() > 1) {
return false;
}
}
return true;
}
bool AllOperandsAreParameters(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kParameter) {
return false;
}
}
return true;
}
bool AllOperandsAreConstants(const HloInstruction& instruction) {
for (const auto& operand : instruction.operands()) {
if (operand->opcode() != HloOpcode::kConstant) {
return false;
}
}
return true;
}
HloInstruction* GetMatchingOperand(const HloPredicate& matcher,
HloInstruction* instruction) {
for (HloInstruction* op : instruction->operands()) {
if (matcher(op)) {
return op;
}
}
return nullptr;
}
bool MatchBinaryInstructionOperand(const HloPredicate& matcher,
HloInstruction* instruction,
HloInstruction** matching_operand,
HloInstruction** other_operand) {
CHECK_EQ(instruction->operand_count(), 2);
if (matcher(instruction->operand(0))) {
*matching_operand = instruction->mutable_operand(0);
*other_operand = instruction->mutable_operand(1);
return true;
}
if (matcher(instruction->operand(1))) {
*matching_operand = instruction->mutable_operand(1);
*other_operand = instruction->mutable_operand(0);
return true;
}
return false;
}
bool MatchBinaryInstructionOperandOpcode(HloOpcode opcode,
HloInstruction* instruction,
HloInstruction** matching_operand,
HloInstruction** other_operand) {
return MatchBinaryInstructionOperand(
[opcode](const HloInstruction* instruction) {
return instruction->opcode() == opcode;
},
instruction, matching_operand, other_operand);
}
bool IsScalarConstant(const HloInstruction* instruction) {
return instruction->IsConstant() && ShapeUtil::IsScalar(instruction->shape());
}
bool IsBroadcastedConstantOrScalar(const HloInstruction& instr) {
return instr.IsConstant() || ShapeUtil::IsScalar(instr.shape()) ||
(HloOpcode::kBroadcast == instr.opcode() &&
(instr.operand(0)->IsConstant() ||
ShapeUtil::IsScalar(instr.operand(0)->shape())));
}
bool IsBroadcastOfScalarConstant(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kBroadcast &&
IsScalarConstant(instr.operand(0));
}
bool IsBroadcastOfParameter(const HloInstruction& instr) {
return instr.opcode() == HloOpcode::kBroadcast &&
instr.operand(0)->opcode() == HloOpcode::kParameter;
}
HloInstruction* GetFirstInstructionWithOpcode(const HloComputation& computation,
const HloOpcode opcode) {
auto instructions = computation.instructions();
auto it = absl::c_find_if(instructions, [&](HloInstruction* instr) {
return instr->opcode() == opcode;
});
return it == instructions.end() ? nullptr : *it;
}
bool ContainsInstrWithOpcode(const HloComputation* comp,
const absl::flat_hash_set<HloOpcode>& opcodes) {
for (const auto* instr : comp->instructions()) {
if (opcodes.count(instr->opcode())) {
return true;
}
for (const HloComputation* subcomp : instr->called_computations()) {
if (ContainsInstrWithOpcode(subcomp, opcodes)) {
return true;
}
}
}
return false;
}
bool ContainsLayoutConstrainedCollective(const HloModule& module,
HloOpcode op) {
CHECK(IsCollectiveCommunicationOp(op));
for (auto computation : module.computations()) {
for (auto hlo : computation->instructions()) {
if (hlo->opcode() == op &&
DynCast<HloCollectiveInstruction>(hlo)->constrain_layout()) {
return true;
}
}
}
return false;
}
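// Returns the smallest channel id, starting from 1, that is strictly larger
// than every channel id already present in the module.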
int64_t NextChannelId(const HloModule& module) {
int64_t next_channel_id = 1;
for (const HloComputation* comp : module.computations()) {
for (const HloInstruction* hlo : comp->instructions()) {
const HloChannelInstruction* channel_instr =
DynCast<HloChannelInstruction>(hlo);
if (channel_instr && channel_instr->channel_id()) {
next_channel_id =
std::max(next_channel_id, *channel_instr->channel_id() + 1);
}
}
}
return next_channel_id;
}
bool HasX64TransformedHostTransfer(const HloModule& module) {
for (auto computation : module.computations()) {
for (auto hlo : computation->instructions()) {
if (hlo->opcode() == HloOpcode::kSend) {
auto send = DynCast<HloSendInstruction>(hlo);
if (send->is_host_transfer() && send->operand(0)->shape().IsTuple()) {
return true;
}
} else if (hlo->opcode() == HloOpcode::kRecv) {
auto recv = DynCast<HloRecvInstruction>(hlo);
if (recv->is_host_transfer() &&
recv->shape().tuple_shapes(0).IsTuple()) {
return true;
}
}
}
}
return false;
}
HloInstruction* GetUniqueGteInstruction(const HloInstruction* operand,
int64_t index) {
HloInstruction* gte = nullptr;
for (HloInstruction* instr : operand->parent()->MakeInstructionPostOrder()) {
if (!Match(instr, match::GetTupleElement().WithTupleIndex(index))) {
continue;
}
if (instr->operand(0) != operand) {
continue;
}
if (gte != nullptr) {
return nullptr;
}
gte = instr;
}
return gte;
}
}
} | #include "xla/hlo/utils/hlo_query.h"
#include <memory>
#include <gtest/gtest.h>
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_parser.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using HloQueryTest = HloTestBase;
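// Counts the instructions with the given opcode in a module or computation by
// exercising hlo_query::ForEachInstructionWithOpcode.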
template <typename Hlo>
int CountInstructions(Hlo& module, HloOpcode opcode) {
int counter = 0;
hlo_query::ForEachInstructionWithOpcode(
module, opcode, [&counter](auto& instr) { counter++; });
return counter;
}
TEST_F(HloQueryTest,
GetInstructionWithOpCodeReturnsMatchingInstructionForModule) {
constexpr absl::string_view kHloString = R"(
HloModule m
computation.0 {
param.0 = f32[32]{0} parameter(0)
ROOT _ = f32[32]{0} rsqrt(param.0)
}
ENTRY main {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
add.1 = f32[32]{0} add(param.1,param.2)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
mul.1 = f32[32]{0} multiply(param.1,param.2)
mul.2 = f32[32]{0} multiply(param.2,param.3)
comp.0 = call(param.0), to_apply=computation.0
ROOT _ = (f32[32],f32[32],f32[32],f32[32],f32[32],f32[32],f32[32]) tuple(comp.0,add.0,add.1,sub.0,mul.0,mul.1,mul.2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
EXPECT_EQ(CountInstructions(*module, HloOpcode::kAdd), 2);
EXPECT_EQ(CountInstructions(*module, HloOpcode::kSubtract), 1);
EXPECT_EQ(CountInstructions(*module, HloOpcode::kMultiply), 3);
}
TEST_F(HloQueryTest,
GetInstructionWithOpCodeReturnsMatchingInstructionForComputation) {
constexpr absl::string_view kHloString = R"(
HloModule m
computation.0 {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
add.1 = f32[32]{0} add(param.1,param.2)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
mul.1 = f32[32]{0} multiply(param.1,param.2)
ROOT mul.2 = f32[32]{0} multiply(param.2,param.3)
}
ENTRY main {
param.0 = f32[32]{0} parameter(0)
param.1 = f32[32]{0} parameter(1)
param.2 = f32[32]{0} parameter(2)
param.3 = f32[32]{0} parameter(3)
add.0 = f32[32]{0} add(param.0,param.1)
sub.0 = f32[32]{0} subtract(param.0,param.1)
mul.0 = f32[32]{0} multiply(param.0,param.1)
comp.0 = f32[32]{0} call(param.0,param.1,param.2), to_apply=computation.0
ROOT _ = (f32[32],f32[32],f32[32],f32[32]) tuple(add.0,sub.0,mul.0,comp.0)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
HloComputation* computation = module->GetComputationWithName("computation.0");
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kAdd), 2);
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kSubtract), 1);
EXPECT_EQ(CountInstructions(*computation, HloOpcode::kMultiply), 3);
}
TEST_F(HloQueryTest, GetUniqueGteTest) {
constexpr absl::string_view kHloString = R"(
HloModule m
ENTRY main {
param.0 = (f32[32]{0}, f32[32]{0}, f32[32]{0}, f32[32]{0}) parameter(0)
gte1 = f32[32]{0} get-tuple-element(param.0), index=0
gte2 = f32[32]{0} get-tuple-element(param.0), index=1
dup_gte2 = f32[32]{0} get-tuple-element(param.0), index=1
gte3 = f32[32]{0} get-tuple-element(param.0), index=2
ROOT gte4 = f32[32]{0} get-tuple-element(param.0), index=3
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloString));
HloInstruction* param = module->entry_computation()->parameter_instruction(0);
HloInstruction* gte1 = hlo_query::GetUniqueGteInstruction(param, 0);
EXPECT_NE(gte1, nullptr);
HloInstruction* gte2 = hlo_query::GetUniqueGteInstruction(param, 1);
EXPECT_EQ(gte2, nullptr);
}
}
} | 2,167 |
#ifndef XLA_HLO_EXPERIMENTAL_AUTO_SHARDING_AUTO_SHARDING_SOLVER_H_
#define XLA_HLO_EXPERIMENTAL_AUTO_SHARDING_AUTO_SHARDING_SOLVER_H_
#include <string>
#include <vector>
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding.pb.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "ortools/linear_solver/linear_solver.h"
namespace xla {
namespace spmd {
struct AutoShardingSolverOutput {
std::vector<NodeStrategyIdx> s_val;
std::vector<EdgeStrategyIdx> e_val;
double cost = -1.0;
absl::flat_hash_set<LivenessIdx> peak_times;
bool operator==(const AutoShardingSolverOutput& other) const;
};
struct AutoShardingSolverResult {
public:
AutoShardingSolverResult(absl::StatusOr<AutoShardingSolverOutput> status,
bool skip_auto_sharding)
: status(status), skip_auto_sharding(skip_auto_sharding) {}
bool operator==(const AutoShardingSolverResult& other) const;
absl::StatusOr<AutoShardingSolverOutput> status;
bool skip_auto_sharding;
};
AutoShardingSolverResult CallORToolsSolver(
const AutoShardingSolverRequest& request);
enum AutoShardingViolationCode {
kAliasViolationCode,
kFollowerViolationCode,
kInfiniteCostViolationCode,
kMemoryViolationCode,
kMaxDeparturesViolationCode,
};
struct CostComponents {
double communication_cost = 0.0;
double computation_cost = 0.0;
double resharding_cost = 0.0;
double overbudget_cost = 0.0;
double makespan_cost = 0.0;
double cost() const;
bool operator==(const CostComponents& other) const;
};
struct AutoShardingEvaluation {
absl::flat_hash_set<AutoShardingViolationCode> violation_codes;
CostComponents total;
CostComponents lower_bound;
double total_departures = 0.0;
double total_makespan = 0.0;
bool operator==(const AutoShardingEvaluation& other) const;
};
AutoShardingEvaluation Evaluate(const AutoShardingSolverRequest& request,
const AutoShardingSolverResult& result);
std::vector<std::string> Rationalize(const AutoShardingSolverRequest& request,
const AutoShardingSolverResult& result,
const AutoShardingSolverResult& subopt);
operations_research::MPVariable* CreateMakespanVar(
const AutoShardingSolverRequest& request,
const std::vector<std::vector<operations_research::MPVariable*>>& e,
operations_research::MPSolver& solver);
double EvaluateMakespan(const AutoShardingSolverRequest& request,
const AutoShardingSolverResult& result,
AutoShardingEvaluation& evaluation);
AutoShardingSolverRequest ScaleRequest(
const AutoShardingSolverRequest& request);
bool CheckDominance(const AutoShardingSolverRequest& request,
const std::vector<EdgeIdx>& src_edges,
const std::vector<EdgeIdx>& dst_edges,
const std::vector<AliasIdx>& src_aliases,
const std::vector<AliasIdx>& dst_aliases, NodeIdx node_idx,
NodeStrategyIdx first, NodeStrategyIdx second);
class StrategyShaver {
public:
explicit StrategyShaver(const AutoShardingSolverRequest& request);
NodeStrategies FindShavedStrategies() const;
private:
const AutoShardingSolverRequest& request_;
std::vector<std::vector<EdgeIdx>> src_edge_map_;
std::vector<std::vector<EdgeIdx>> dst_edge_map_;
std::vector<std::vector<AliasIdx>> src_alias_map_;
std::vector<std::vector<AliasIdx>> dst_alias_map_;
std::vector<std::vector<NodeIdx>> followers_;
};
absl::Status ValidateRequest(const AutoShardingSolverRequest& request);
}
}
#endif
#include "xla/hlo/experimental/auto_sharding/auto_sharding_solver.h"
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include "absl/container/btree_set.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding.pb.h"
#ifdef PLATFORM_GOOGLE
#include "file/base/options.h"
#endif
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/fingerprint.h"
#include "tsl/platform/hash.h"
#include "tsl/platform/types.h"
#include "ortools/linear_solver/linear_solver.h"
#include "ortools/linear_solver/linear_solver.pb.h"
#ifdef PLATFORM_GOOGLE
#include "file/base/helpers.h"
#include "util/task/status.pb.h"
#endif
namespace xla {
namespace spmd {
using ::operations_research::MPConstraint;
using ::operations_research::MPSolver;
using ::operations_research::MPVariable;
constexpr double kMaxCostEpsilon = 1.0001;
bool AutoShardingSolverOutput::operator==(
const AutoShardingSolverOutput& other) const {
return s_val == other.s_val && e_val == other.e_val && cost == other.cost &&
peak_times == other.peak_times;
}
bool AutoShardingSolverResult::operator==(
const AutoShardingSolverResult& other) const {
return status == other.status &&
skip_auto_sharding == other.skip_auto_sharding;
}
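// Logs the highest-memory time steps implied by the chosen node strategies and
// the largest individual tensors (over 100 MiB) live at those steps. Only
// meaningful when no node intervals are supplied, i.e. for the legacy
// liveness representation.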
void PrintLargestInstructions(
const std::vector<NodeStrategyIdx>& chosen_strategy,
const AutoShardingSolverRequest& request) {
if (!request.node_intervals().empty()) return;
std::vector<std::pair<LivenessIdx, double>> time_memory_usage;
for (LivenessIdx time_idx = 0; time_idx < request.live_size(); ++time_idx) {
double mem = 0.0;
for (NodeIdx node_idx : request.live(time_idx).nodes()) {
mem += request.memory_costs(node_idx).costs(chosen_strategy[node_idx]);
}
time_memory_usage.push_back({time_idx, mem});
}
struct {
bool operator()(std::pair<LivenessIdx, double> a,
std::pair<LivenessIdx, double> b) const {
return a.second > b.second;
}
} MemLarger;
std::sort(time_memory_usage.begin(), time_memory_usage.end(), MemLarger);
LOG(INFO) << "using m[] and L[], max memory usage: "
<< time_memory_usage.front().second / (1024 * 1024 * 1024)
<< " GB at time " << time_memory_usage.front().first;
size_t k = 3;
k = std::min(k, time_memory_usage.size());
std::vector<std::pair<NodeIdx, double>> instruction_mem;
absl::flat_hash_set<NodeIdx> instruction_set;
for (auto usage_idx = 0; usage_idx < k; ++usage_idx) {
LivenessIdx time_idx = time_memory_usage.at(usage_idx).first;
for (NodeIdx node_idx : request.live(time_idx).nodes()) {
double mem =
request.memory_costs(node_idx).costs(chosen_strategy[node_idx]);
if (mem > 100 * 1024 * 1024 &&
instruction_set.find(node_idx) == instruction_set.end()) {
instruction_mem.push_back({node_idx, mem});
instruction_set.insert(node_idx);
}
}
}
std::sort(instruction_mem.begin(), instruction_mem.end(), MemLarger);
size_t top_tensors = 10;
top_tensors = std::min(top_tensors, instruction_mem.size());
VLOG(1) << "Top " << top_tensors << " largest tensors:";
for (size_t i = 0; i < top_tensors; ++i) {
VLOG(1) << "instruction name: "
<< request.instruction_names(instruction_mem.at(i).first)
<< " memory usage: "
<< instruction_mem.at(i).second / (1024 * 1024 * 1024) << "GB";
}
}
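// Deterministically perturbs an objective coefficient by an amount derived
// from hashing `name` and scaled by `saltiplier`, giving otherwise equal-cost
// strategies a stable tie-break. No-op when `saltiplier` is not positive.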
void AddSalt(const std::string& name, const double saltiplier, double* coeff) {
if (saltiplier <= 0.0) return;
const tsl::uint64 hash = tsl::Hash64(name);
double salt = saltiplier * hash / std::numeric_limits<tsl::uint64>::max();
*coeff = *coeff * (1.0 + salt) + salt;
}
AutoShardingSolverResult SolveAndExtractSolution(
const AutoShardingSolverRequest& request,
const std::vector<std::vector<MPVariable*>>& s,
const std::vector<std::vector<MPVariable*>>& e,
const MPVariable* overbudget_var, const MPVariable* makespan_var,
MPSolver& solver);
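// A lower bound on the memory budget: for each liveness point, sum the
// cheapest memory cost of every live node, then take the maximum over all
// liveness points.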
double MinimumMemoryBudgetRequired(const AutoShardingSolverRequest& request) {
double min_memory_budget_required_estimate = 0.0;
for (LivenessIdx time_idx = 0; time_idx < request.live_size(); ++time_idx) {
double min_memory_budget_required_estimate_local = 0.0;
for (NodeIdx node_idx : request.live(time_idx).nodes()) {
const auto& m = request.memory_costs(node_idx).costs();
const double fixed_memory_cost = *std::min_element(m.begin(), m.end());
min_memory_budget_required_estimate_local += fixed_memory_cost;
}
min_memory_budget_required_estimate =
std::max(min_memory_budget_required_estimate,
min_memory_budget_required_estimate_local);
}
return min_memory_budget_required_estimate;
}
double MaxCoeff(
const tsl::protobuf::RepeatedPtrField<AutoShardingSolverRequest_Costs>&
cost_mat) {
double max_coeff = 0.0;
for (auto& costs : cost_mat) {
for (auto& cost : costs.costs()) {
if (cost < kInfinityCost) {
max_coeff = std::max(max_coeff, cost);
}
}
}
return max_coeff;
}
void ScaleCoeffs(
double scaling_factor,
tsl::protobuf::RepeatedPtrField<AutoShardingSolverRequest_Costs>*
cost_mat) {
for (auto& costs : *cost_mat) {
for (auto& cost : *costs.mutable_costs()) {
if (cost < kInfinityCost) {
cost = floor(cost * scaling_factor);
}
}
}
}
AutoShardingSolverRequest ScaleRequest(
const AutoShardingSolverRequest& request) {
if (!request.has_coeff_limit()) return request;
VLOG(0) << "Scaling request by coefficient limit: "
<< request.coeff_limit().coeff();
double max_coeff = 0.0;
max_coeff = std::max(max_coeff, MaxCoeff(request.communication_costs()));
max_coeff = std::max(max_coeff, MaxCoeff(request.computation_costs()));
max_coeff = std::max(max_coeff, MaxCoeff(request.resharding_costs()));
if (max_coeff <= request.coeff_limit().coeff()) return request;
const double scaling_factor = request.coeff_limit().coeff() / max_coeff;
AutoShardingSolverRequest scaled_request = request;
ScaleCoeffs(scaling_factor, scaled_request.mutable_communication_costs());
ScaleCoeffs(scaling_factor, scaled_request.mutable_computation_costs());
ScaleCoeffs(scaling_factor, scaled_request.mutable_resharding_costs());
return scaled_request;
}
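// Runs the MemoryTermReducer over the node (or edge) liveness data unless
// precomputed groups are supplied, and creates one integer variable per
// reduced group that is constrained to be at least the total memory usage of
// the group's members (as a percentage of the budget). Also records which
// time steps still need explicit memory constraints. Returns the
// (original, reduced) term counts when the reducer actually ran, nullopt
// otherwise.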
std::optional<std::pair<int64_t, int64_t>> ReduceMemoryTerms(
const AutoShardingSolverRequest& request, MPSolver& solver,
int64_t num_lives, int64_t num_primitives,
const std::function<
tsl::protobuf::RepeatedField<int64_t>(int64_t)>&
live,
const tsl::protobuf::RepeatedPtrField<
AutoShardingSolverRequest_Pair>& intervals,
const tsl::protobuf::RepeatedPtrField<
AutoShardingSolverRequest_Group>& groups,
const tsl::protobuf::RepeatedPtrField<
AutoShardingSolverRequest_Costs>& memory_costs,
std::string_view prim_type,
std::vector<std::vector<MPVariable*>>& prim_vars,
std::vector<std::pair<int64_t, int64_t>>& reduced_intervals,
std::vector<MPVariable*>& group_vars,
absl::flat_hash_set<int64_t>& reduced_times) {
std::optional<std::pair<int64_t, int64_t>> num_terms = std::nullopt;
std::vector<absl::btree_set<int64_t>> reduced_groups;
if (groups.empty()) {
for (const auto& interval : intervals) {
if (interval.first() > interval.second()) continue;
num_lives = std::max(num_lives, interval.second() + 1);
}
auto Intervals =
[intervals](int64_t prim_idx) -> std::pair<int64_t, int64_t> {
return {intervals.at(prim_idx).first(), intervals.at(prim_idx).second()};
};
MemoryTermReducer reducer;
num_terms =
intervals.empty()
? reducer.Reduce(num_lives, num_primitives, live)
: reducer.Reduce(num_lives, num_primitives, std::move(Intervals));
reduced_intervals = reducer.GetReducedIntervals();
reduced_groups = reducer.GetReducedGroups();
} else {
for (const auto& interval : intervals) {
reduced_intervals.push_back({interval.first(), interval.second()});
}
for (const auto& group : groups) {
reduced_groups.push_back({group.prims().begin(), group.prims().end()});
}
}
solver.MakeIntVarArray(reduced_groups.size(), 0.0, MPSolver::infinity(),
absl::StrCat("group_", prim_type), &group_vars);
for (int64_t group_idx = 0; group_idx < group_vars.size(); ++group_idx) {
MPConstraint* constraint = solver.MakeRowConstraint(
-MPSolver::infinity(), 0.0,
absl::StrCat("group_", prim_type, "[", group_idx, "]"));
constraint->SetCoefficient(group_vars[group_idx], -1.0);
for (const int64_t prim_idx : reduced_groups[group_idx]) {
for (int64_t j = 0; j < prim_vars[prim_idx].size(); ++j) {
double memory_cost = memory_costs.at(prim_idx).costs(j);
memory_cost /= request.memory_budget() / 100.0;
const double accumulated_coefficient =
constraint->GetCoefficient(prim_vars[prim_idx][j]);
constraint->SetCoefficient(prim_vars[prim_idx][j],
accumulated_coefficient + memory_cost);
}
}
}
const absl::flat_hash_set<int64_t> times = MemoryTermReducer::GetReducedTimes(
num_primitives, reduced_intervals, reduced_groups);
reduced_times.insert(times.begin(), times.end());
return num_terms;
}
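// Adds, for every time step that survived term reduction, a row constraint
// bounding total memory usage by 100% of the budget (optionally relaxed by the
// overbudget variable). Primitive indices at or beyond `num_primitives` refer
// to the group variables created by ReduceMemoryTerms.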
void AddMemoryTerms(
const AutoShardingSolverRequest& request, MPSolver& solver,
int64_t num_primitives,
const std::vector<std::pair<int64_t, int64_t>>& intervals,
const tsl::protobuf::RepeatedPtrField<
AutoShardingSolverRequest_Costs>& memory_costs,
const MPVariable* overbudget_var,
const absl::flat_hash_set<int64_t>& reduced_times,
std::vector<std::vector<MPVariable*>>& prim_vars,
std::vector<MPVariable*>& group_vars,
absl::flat_hash_map<LivenessIdx, MPConstraint*>& constraints) {
for (int64_t prim_idx = 0; prim_idx < intervals.size(); ++prim_idx) {
for (int64_t time_idx = intervals[prim_idx].first;
time_idx <= intervals[prim_idx].second; ++time_idx) {
if (!reduced_times.contains(time_idx)) continue;
if (!constraints.contains(time_idx)) {
MPConstraint* constraint = solver.MakeRowConstraint(
-MPSolver::infinity(), 100.0, absl::StrCat("mem[", time_idx, "]"));
if (overbudget_var) constraint->SetCoefficient(overbudget_var, -100.0);
constraints[time_idx] = constraint;
}
MPConstraint* constraint = constraints[time_idx];
if (prim_idx >= num_primitives) {
constraint->SetCoefficient(group_vars[prim_idx - num_primitives], 1.0);
continue;
}
for (int64_t j = 0; j < prim_vars[prim_idx].size(); ++j) {
double memory_cost = memory_costs.at(prim_idx).costs(j);
memory_cost /= request.memory_budget() / 100.0;
const double accumulated_coefficient =
constraint->GetCoefficient(prim_vars[prim_idx][j]);
constraint->SetCoefficient(prim_vars[prim_idx][j],
accumulated_coefficient + memory_cost);
}
}
}
}
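// Builds the sharding ILP: boolean strategy variables per node and per edge
// (with follower nodes and edges sharing variables), objective terms for
// computation, communication, and resharding costs, optional overbudget and
// makespan variables, plus memory constraints, and solves it with OR-tools'
// SAT backend.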
AutoShardingSolverResult CallORToolsSolver(
const AutoShardingSolverRequest& unscaled_request) {
const absl::Time start_time = absl::Now();
const AutoShardingSolverRequest& request = ScaleRequest(unscaled_request);
const size_t num_edges = request.edges_size();
const int num_workers = 32;
  std::unique_ptr<MPSolver> solver =
      std::make_unique<MPSolver>("", MPSolver::SAT_INTEGER_PROGRAMMING);
CHECK(solver);
solver->MutableObjective()->SetMinimization();
std::string solver_parameter_str;
#ifdef PLATFORM_GOOGLE
if (solver->ProblemType() ==
operations_research::MPSolver::SAT_INTEGER_PROGRAMMING) {
solver_parameter_str =
request.deterministic_mode()
? absl::StrCat(
"share_binary_clauses:false,random_seed:1,interleave_"
"search:true,num_workers:",
num_workers)
: absl::StrCat("num_workers:", num_workers);
solver->SetSolverSpecificParametersAsString(solver_parameter_str);
}
#endif
std::vector<std::vector<MPVariable*>> s(request.num_nodes());
std::vector<std::vector<MPVariable*>> e(num_edges);
MPVariable* overbudget_var = nullptr;
MPVariable* makespan_var = nullptr;
size_t unique_nodes = 0;
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (request.s_follow(node_idx) < 0) {
unique_nodes += 1;
solver->MakeBoolVarArray(request.s_len(node_idx),
absl::StrCat("s[", node_idx, "]"), &s[node_idx]);
}
}
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (request.s_follow(node_idx) >= 0) {
CHECK_EQ(request.s_len(node_idx),
request.s_len(request.s_follow(node_idx)));
s[node_idx] = s[request.s_follow(node_idx)];
}
}
size_t unique_edges = 0;
std::vector<EdgeIdx> e_follow(num_edges, -1);
absl::flat_hash_map<std::pair<NodeIdx, NodeIdx>, EdgeIdx> edge_map;
for (EdgeIdx edge_idx = 0; edge_idx < num_edges; ++edge_idx) {
const auto& raw_edge = request.edges(edge_idx);
const std::pair<NodeIdx, NodeIdx> edge(raw_edge.first(), raw_edge.second());
auto followed_edge = edge;
if (int f = request.s_follow(edge.first); f >= 0) followed_edge.first = f;
if (int f = request.s_follow(edge.second); f >= 0) followed_edge.second = f;
if (const auto& it = edge_map.find(followed_edge); it != edge_map.end()) {
e[edge_idx] = e[it->second];
e_follow[edge_idx] = it->second;
continue;
}
unique_edges += 1;
solver->MakeBoolVarArray(
request.s_len(edge.first) * request.s_len(edge.second),
absl::StrCat("e[", edge.first, ",", edge.second, "]"), &e[edge_idx]);
edge_map.insert({followed_edge, edge_idx});
}
if (request.memory_budget() > 0 && request.has_overbudget_coeff()) {
overbudget_var =
solver->MakeNumVar(0.0, MPSolver::infinity(), "overbudget");
}
if (request.has_makespan_coeff()) {
makespan_var = CreateMakespanVar(request, e, *solver);
}
absl::flat_hash_set<MPVariable*> infinity_vars;
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
double accumulated_coefficient =
solver->MutableObjective()->GetCoefficient(s[node_idx][j]);
double coefficient = request.computation_costs(node_idx).costs(j) +
request.communication_costs(node_idx).costs(j);
if (coefficient >= kInfinityCost) {
infinity_vars.insert(s[node_idx][j]);
continue;
}
AddSalt(absl::StrCat(node_idx, "S", j), request.saltiplier(),
&coefficient);
solver->MutableObjective()->SetCoefficient(
s[node_idx][j], accumulated_coefficient + coefficient);
}
}
for (EdgeIdx edge_idx = 0; edge_idx < num_edges; ++edge_idx) {
for (EdgeStrategyIdx j = 0; j < e[edge_idx].size(); ++j) {
double accumulated_coefficient =
solver->MutableObjective()->GetCoefficient(e[edge_idx][j]);
double coefficient = request.resharding_costs(edge_idx).costs(j);
if (coefficient >= kInfinityCost) {
infinity_vars.insert(e[edge_idx][j]);
continue;
}
AddSalt(absl::StrCat(edge_idx, "E", j), request.saltiplier(),
&coefficient);
solver->MutableObjective()->SetCoefficient(
e[edge_idx][j], accumulated_coefficient + coefficient);
}
}
LOG(INFO) << "Number of infinity terms: " << infinity_vars.size();
const NodeStrategies shaved_strategies =
StrategyShaver(request).FindShavedStrategies();
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (s[node_idx].empty() || request.s_follow(node_idx) >= 0) continue;
bool all_infinity = true;
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
if (infinity_vars.contains(s[node_idx][j]) ||
shaved_strategies.contains({node_idx, j})) {
MPConstraint* constraint = solver->MakeRowConstraint(
0.0, 0.0,
absl::StrCat("infinitycost: s[", node_idx, "][", j, "] = 0"));
constraint->SetCoefficient(s[node_idx][j], 1.0);
} else {
all_infinity = false;
}
}
if (all_infinity) {
LOG(FATAL) << "All of s[" << node_idx << "][*] have infinity costs";
}
}
for (EdgeIdx edge_idx = 0; edge_idx < num_edges; ++edge_idx) {
if (e[edge_idx].empty() || e_follow[edge_idx] >= 0) continue;
bool all_infinity = true;
for (EdgeStrategyIdx j = 0; j < e[edge_idx].size(); ++j) {
if (infinity_vars.contains(e[edge_idx][j])) {
MPConstraint* constraint = solver->MakeRowConstraint(
0.0, 0.0,
absl::StrCat("infinitycost: e[", edge_idx, "][", j, "] = 0"));
constraint->SetCoefficient(e[edge_idx][j], 1.0);
} else {
all_infinity = false;
}
}
if (all_infinity) {
auto err_msg = absl::StrCat("All of e[", request.edges(edge_idx).first(),
"][", request.edges(edge_idx).second(),
"][*] have infinity costs");
if (request.crash_at_infinity_costs_check()) {
LOG(FATAL) << err_msg;
} else {
LOG(WARNING) << err_msg;
return AutoShardingSolverResult(absl::InternalError(err_msg), false);
}
}
}
for (NodeIdx node_idx = 0; node_idx < request.num_nodes(); ++node_idx) {
if (request.s_follow(node_idx) >= 0) continue;
MPConstraint* constraint = solver->MakeRowConstraint(
1.0, 1.0,
absl::StrCat("sum(s[", node_idx, "][j] for j = [0 .. ",
s[node_idx].size(), ")) = 1"));
for (NodeStrategyIdx j = 0; j < s[node_idx].size(); ++j) {
constraint->SetCoefficient(s[node_idx][j], 1.0);
}
}
if (request.memory_budget() > 0) {
auto LiveNodes =
[request](int64_t live_idx) -> tsl::protobuf::RepeatedField<int64_t> {
return request.live(live_idx).nodes();
};
auto LiveEdges =
[request](int64_t live_idx) -> tsl::protobuf::RepeatedField<int64_t> {
return request.live_edges(live_idx).edges();
};
std::vector<std::pair<int64_t, int64_t>> reduced_intervals_nodes,
reduced_intervals_edges;
absl::flat_hash_set<int64_t> reduced_times;
std::vector<MPVariable*> group_node_vars, group_edge_vars;
const absl::Time term_reduction_start_time = absl::Now();
auto num_node_terms = ReduceMemoryTerms(
request, *solver, request.live_size(), request.num_nodes(),
std::move(LiveNodes), request.node_intervals(), request.node_groups(),
request.memory_costs(), "node", s, reduced_intervals_nodes,
group_node_vars, reduced_times);
auto num_edge_terms = ReduceMemoryTerms(
request, *solver, request.live_edges_size(), request.edges_size(),
std::move(LiveEdges), request.edge_intervals(), request.edge_groups(),
request.memory_edge_costs(), "edge", e, reduced_intervals_edges,
group_edge_vars, reduced_times);
const absl::Time term_reduction_end_time = absl::Now();
if (num_node_terms && num_edge_terms) {
const auto term_reduction_duration =
term_reduction_end_time - term_reduction_start_time;
LOG(INFO) << "Memory Term Reducer took "
<< absl::ToInt64Milliseconds(term_reduction_duration)
<< " ms and reduced the number of terms from "
<< num_node_terms->first + num_edge_terms->first << " to "
<< num_node_terms->second + num_edge_terms->second;
}
absl::flat_hash_map<LivenessIdx, MPConstraint*> constraints;
AddMemoryTerms(request, *solver, request.num_nodes(),
reduced_intervals_nodes, request.memory_costs(),
overbudget_var, | #include "xla/hlo/experimental/auto_sharding/auto_sharding_solver.h"
#include <cstdint>
#include <string>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding.pb.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
namespace xla {
namespace spmd {
namespace {
using CostMatrix = std::vector<std::vector<double>>;
using NodeMatrix = std::vector<std::vector<int64_t>>;
using EdgeMatrix = std::vector<std::vector<int64_t>>;
void AddCosts(proto2::RepeatedPtrField<AutoShardingSolverRequest_Costs>* costs,
const CostMatrix& cost_matrix) {
for (const auto& cost_row : cost_matrix) {
AutoShardingSolverRequest_Costs cost;
cost.mutable_costs()->Add(cost_row.begin(), cost_row.end());
costs->Add(std::move(cost));
}
}
void AddNodes(proto2::RepeatedPtrField<AutoShardingSolverRequest_Nodes>* nodes,
const NodeMatrix& node_matrix) {
for (const auto& node_row : node_matrix) {
AutoShardingSolverRequest_Nodes node;
node.mutable_nodes()->Add(node_row.begin(), node_row.end());
nodes->Add(std::move(node));
}
}
void AddEdges(proto2::RepeatedPtrField<AutoShardingSolverRequest_Edges>* edges,
const EdgeMatrix& edge_matrix) {
for (const auto& edge_row : edge_matrix) {
AutoShardingSolverRequest_Edges edge;
edge.mutable_edges()->Add(edge_row.begin(), edge_row.end());
edges->Add(std::move(edge));
}
}
void AddIntervals(
proto2::RepeatedPtrField<AutoShardingSolverRequest_Pair>* pairs,
const std::vector<std::pair<int64_t, int64_t>>& intervals) {
for (const auto& interval : intervals) {
AutoShardingSolverRequest_Pair pair;
pair.set_first(interval.first);
pair.set_second(interval.second);
pairs->Add(std::move(pair));
}
}
void AddGroups(
proto2::RepeatedPtrField<AutoShardingSolverRequest_Group>* groups,
const std::vector<std::vector<int64_t>>& reduced_groups) {
for (const auto& reduced_group : reduced_groups) {
AutoShardingSolverRequest_Group group;
group.mutable_prims()->Add(reduced_group.begin(), reduced_group.end());
groups->Add(std::move(group));
}
}
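// Builds a small five-node, two-edge request (node 3 follows node 2 and nodes
// 1 and 4 are aliased) with hand-written cost matrices; most tests below start
// from this request and tweak individual fields.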
AutoShardingSolverRequest DefaultAutoShardingSolverRequest() {
const auto s_len = {4, 3, 4, 4, 3};
const auto s_follow = {-1, -1, -1, 2, -1};
AutoShardingSolverRequest_Pair edge1, edge2;
edge1.set_first(0);
edge1.set_second(2);
edge2.set_first(1);
edge2.set_second(2);
const auto edges = {edge1, edge2};
const NodeMatrix live = {{1, 0},
{1, 0},
{1, 2, 0},
{1, 2, 3, 0},
{1, 3, 0}};
const CostMatrix c = {{10, 11, 12, 13},
{20, 21, 22},
{30, 31, 32, 33},
{40, 41, 42, 43},
{50, 51, 52}};
const CostMatrix d = {{100, 110, 120, 130},
{200, 210, 220},
{300, 310, 320, 330},
{400, 410, 420, 430},
{500, 510, 520}};
const CostMatrix m = {{100000, 110000, 990000, 130000},
{200000, 210000, 220000},
{300000, 310000, 320000, 330000},
{400000, 410000, 420000, 430000},
{500000, 510000, 520000}};
const CostMatrix p = {{1.0, 0.0, 1.0, 1.0},
{1.0, 0.0, 1.0},
{1.0, 0.0, 1.0, 1.0},
{1.0, 0.0, 1.0, 1.0},
{1.0, 0.0, 1.0}};
const CostMatrix r = {{1000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
const CostMatrix t = {{73000, 72000, 71000, 70000,
63000, 62000, 61000, 60000,
53000, 52000, 51000, 50000,
43000, 42000, 41000, 40000},
{33000, 32000, 31000, 30000,
23000, 22000, 21000, 20000,
13000, 12000, 11000, 10000}};
AutoShardingSolverRequest_Pair alias;
alias.set_first(1);
alias.set_second(4);
const auto aliases = {alias};
const CostMatrix v = {{0, 1, 1,
1, 0, 1,
1, 1, 0}};
const std::vector<std::string> instruction_names = {"A", "B", "C", "D", "E"};
AutoShardingSolverRequest request;
request.set_num_nodes(5);
request.set_memory_budget(1500000);
request.mutable_s_len()->Add(s_len.begin(), s_len.end());
request.mutable_s_follow()->Add(s_follow.begin(), s_follow.end());
request.mutable_edges()->Add(edges.begin(), edges.end());
AddNodes(request.mutable_live(), live);
AddCosts(request.mutable_computation_costs(), c);
AddCosts(request.mutable_communication_costs(), d);
AddCosts(request.mutable_memory_costs(), m);
AddCosts(request.mutable_departure_costs(), p);
AddCosts(request.mutable_resharding_costs(), r);
AddCosts(request.mutable_duration_costs(), t);
request.mutable_aliases()->Add(aliases.begin(), aliases.end());
AddCosts(request.mutable_value_costs(), v);
request.mutable_instruction_names()->Add(instruction_names.begin(),
instruction_names.end());
return request;
}
AutoShardingSolverRequest AutoShardingSolverRequestWithEquivalences() {
const auto s_len = {4, 3, 7, 7, 3};
const auto s_follow = {-1, -1, -1, 2, -1};
AutoShardingSolverRequest_Pair edge1, edge2;
edge1.set_first(0);
edge1.set_second(2);
edge2.set_first(1);
edge2.set_second(2);
const auto edges = {edge1, edge2};
const NodeMatrix live = {{1, 0},
{1, 0},
{1, 2, 0},
{1, 2, 3, 0},
{1, 3, 0}};
const CostMatrix c = {{10, 10, 10, 10},
{20, 20, 20},
{30, 30, 31, 30, 30, 30, 30},
{40, 40, 40, 40, 40, 40, 40},
{50, 50, 50}};
const CostMatrix d = {{100, 100, 100, 100},
{200, 200, 200},
{300, 300, 300, 300, 300, 300, 300},
{400, 400, 400, 400, 400, 400, 410},
{500, 500, 500}};
const CostMatrix m = {{10000, 10000, 10000, 10000},
{20000, 20000, 20000},
{30000, 30000, 30000, 31000, 30000, 30000, 30000},
{40000, 40000, 40000, 40000, 40000, 40000, 40000},
{50000, 50000, 50000}};
const CostMatrix p = {{1.0, 0.0, 1.0, 1.0},
{1.0, 0.0, 1.0},
{1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0},
{1.0, 0.0, 1.0, 1.0, 1.0, 1.0, 1.0},
{1.0, 0.0, 1.0}};
const CostMatrix r = {{1000, 1000, 1000, 1000, 1000, 1000, 1000,
2000, 2000, 2000, 2000, 2000, 2000, 2000,
3000, 3000, 3000, 3000, 3100, 3000, 3000,
4000, 4000, 4000, 4000, 4000, 4000, 4000},
{5000, 5000, 5000, 5000, 5000, 5000, 5000,
6000, 6000, 6000, 6000, 6000, 6000, 6000,
7000, 7000, 7000, 7000, 7000, 7000, 7000}};
const CostMatrix t = {{70000, 70000, 70000, 70000, 70000, 70000, 70000,
60000, 60000, 60000, 60000, 60000, 60000, 60000,
50000, 50000, 50000, 50000, 50000, 50000, 50000,
40000, 40000, 40000, 40000, 40000, 40000, 40000},
{30000, 30000, 30000, 30000, 30000, 30000, 30000,
20000, 20000, 20000, 20000, 20000, 20000, 20000,
10000, 10000, 10000, 10000, 10000, 10000, 10000}};
AutoShardingSolverRequest_Pair alias;
alias.set_first(2);
alias.set_second(4);
const auto aliases = {alias};
const CostMatrix v = {{0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
0, 1, 0,
1, 0, 1,
0, 1, 0}};
const std::vector<std::string> instruction_names = {"A", "B", "C", "D", "E"};
AutoShardingSolverRequest request;
request.set_num_nodes(5);
request.set_memory_budget(1500000);
request.mutable_s_len()->Add(s_len.begin(), s_len.end());
request.mutable_s_follow()->Add(s_follow.begin(), s_follow.end());
request.mutable_edges()->Add(edges.begin(), edges.end());
AddNodes(request.mutable_live(), live);
AddCosts(request.mutable_computation_costs(), c);
AddCosts(request.mutable_communication_costs(), d);
AddCosts(request.mutable_memory_costs(), m);
AddCosts(request.mutable_departure_costs(), p);
AddCosts(request.mutable_resharding_costs(), r);
AddCosts(request.mutable_duration_costs(), t);
request.mutable_aliases()->Add(aliases.begin(), aliases.end());
AddCosts(request.mutable_value_costs(), v);
request.mutable_instruction_names()->Add(instruction_names.begin(),
instruction_names.end());
return request;
}
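// The CallORToolsSolverTest cases below run the MIP solver end-to-end on the
// requests constructed above and compare the returned node strategies, edge
// strategies, and objective value against a hand-computed optimum. (Informal
// note: absent an overbudget penalty, the objective reduces to the sum of the
// chosen node computation and communication costs plus the chosen edge
// resharding costs.)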
TEST(CallORToolsSolverTest, SolvesOptimally) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const std::vector<EdgeStrategyIdx> e_val = {0, 0};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, SolvesOverbudget) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.set_memory_budget(100000);
request.mutable_overbudget_coeff()->set_coeff(10.0);
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const std::vector<EdgeStrategyIdx> e_val = {0, 0};
const double objective_value = 9007650.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, SolvesMaxDepartures) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_max_departures()->set_coeff(3.0);
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const std::vector<EdgeStrategyIdx> e_val = {1, 1};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, AvoidsInfiniteNodeCosts) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_computation_costs(0)->set_costs(0, kInfinityCost);
request.mutable_computation_costs(0)->set_costs(1, kInfinityCost);
request.mutable_computation_costs(0)->set_costs(2, kInfinityCost);
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {3, 0, 0, 0, 0};
const std::vector<EdgeStrategyIdx> e_val = {12, 0};
const double objective_value = 10683.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, AvoidsInfiniteEdgeCosts) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_resharding_costs(0)->set_costs(0, kInfinityCost);
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const std::vector<EdgeStrategyIdx> e_val = {1, 1};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, HandlesFollowedEdges) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
AutoShardingSolverRequest_Pair edge;
edge.set_first(1);
edge.set_second(3);
*request.mutable_edges()->Add() = edge;
const CostMatrix r = {{5000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
AddCosts(request.mutable_resharding_costs(), r);
const CostMatrix t = {{50000, 51000, 52000, 53000,
60000, 61000, 62000, 63000,
70000, 71000, 72000, 73000}};
AddCosts(request.mutable_duration_costs(), t);
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const std::vector<EdgeStrategyIdx> e_val = {0, 0, 0};
const double objective_value = 12650.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, UsesHint) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const auto s_hint = {1, 0, 0, 0, 0};
request.mutable_s_hint()->Add(s_hint.begin(), s_hint.end());
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 0, 0, 0};
const std::vector<EdgeStrategyIdx> e_val = {0, 0};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, HonorsMaxCost) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_max_cost()->set_coeff(7600.0);
const AutoShardingSolverResult result = CallORToolsSolver(request);
EXPECT_TRUE(absl::IsInternal(result.status.status()));
}
TEST(CallORToolsSolverTest, HandlesMemoryEdgeCosts) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const EdgeMatrix live_edges = {{}, {0}, {0, 1}, {1}, {}};
const CostMatrix memory_edge_costs = {{1000000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
AddEdges(request.mutable_live_edges(), live_edges);
AddCosts(request.mutable_memory_edge_costs(), memory_edge_costs);
request.set_enable_memory_edge_costs(true);
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const std::vector<EdgeStrategyIdx> e_val = {1, 1};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, HandlesIntervals) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{0, 4}, {0, 4}, {2, 3}, {3, 4}, {100, -1}};
const std::vector<std::pair<int64_t, int64_t>> edge_intervals =
{{1, 2}, {2, 3}};
const CostMatrix memory_edge_costs = {{1000000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
AddIntervals(request.mutable_edge_intervals(), edge_intervals);
AddCosts(request.mutable_memory_edge_costs(), memory_edge_costs);
request.set_enable_memory_edge_costs(true);
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const std::vector<EdgeStrategyIdx> e_val = {1, 1};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, HandlesReducedIntervalsAndGroups) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{5, -1}, {5, -1}, {2, 3}, {3, 4}, {100, -1}, {0, 4}};
const std::vector<std::pair<int64_t, int64_t>> edge_intervals =
{{1, 2}, {2, 3}};
const std::vector<std::vector<int64_t>> node_groups = {{0, 1}};
const std::vector<std::vector<int64_t>> edge_groups = {};
const CostMatrix memory_edge_costs = {{1000000, 1100, 1200, 1300,
2000, 2100, 2200, 2300,
3000, 3100, 3200, 3300,
4000, 4100, 4200, 4300},
{5000000, 5100, 5200, 5300,
6000, 6100, 6200, 6300,
7000, 7100, 7200, 7300}};
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
AddIntervals(request.mutable_edge_intervals(), edge_intervals);
AddGroups(request.mutable_node_groups(), node_groups);
AddGroups(request.mutable_edge_groups(), edge_groups);
AddCosts(request.mutable_memory_edge_costs(), memory_edge_costs);
request.set_enable_memory_edge_costs(true);
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 1, 1, 0};
const std::vector<EdgeStrategyIdx> e_val = {1, 1};
const double objective_value = 7872.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
TEST(CallORToolsSolverTest, SolvesWithEquivalences) {
const AutoShardingSolverRequest request =
AutoShardingSolverRequestWithEquivalences();
const AutoShardingSolverResult result = CallORToolsSolver(request);
const std::vector<NodeStrategyIdx> s_val = {0, 0, 5, 5, 1};
const std::vector<EdgeStrategyIdx> e_val = {5, 5};
const double objective_value = 7650.0;
const AutoShardingSolverOutput expected_output =
{s_val, e_val, objective_value};
const AutoShardingSolverResult expected_result = {expected_output, false};
EXPECT_EQ(result, expected_result);
}
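// The AutoShardingEvaluatorTest cases below feed a fixed (s_val, e_val,
// objective_value) solution into Evaluate() and check the aggregated totals,
// the corresponding lower bounds, the departure count, and any violation codes
// (follower, alias, memory, infinite cost, max departures) that it reports.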
TEST(AutoShardingEvaluatorTest, NoViolations) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<NodeStrategyIdx> s_val = {3, 1, 2, 2, 1};
const std::vector<EdgeStrategyIdx> e_val = {14, 6};
const double objective_value = 12149.0;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.total.computation_cost = 159.0;
expected_evaluation.total.communication_cost = 1590.0;
expected_evaluation.total.resharding_cost = 10400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, EvaluatesOverbudget) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.set_memory_budget(100000);
request.mutable_overbudget_coeff()->set_coeff(10.0);
const std::vector<NodeStrategyIdx> s_val = {2, 1, 2, 2, 1};
const std::vector<EdgeStrategyIdx> e_val = {10, 6};
const double objective_value = 11138.0;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 9400.0;
expected_evaluation.total.overbudget_cost = 18400000.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.lower_bound.overbudget_cost = 9000000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, EvaluatesOverbudgetWithIntervals) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{0, 4}, {0, 4}, {2, 3}, {3, 4}, {100, -1}};
request.set_memory_budget(100000);
request.mutable_overbudget_coeff()->set_coeff(10.0);
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
const std::vector<NodeStrategyIdx> s_val = {2, 1, 2, 2, 1};
const std::vector<EdgeStrategyIdx> e_val = {10, 6};
const double objective_value = 11138.0;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 9400.0;
expected_evaluation.total.overbudget_cost = 18400000.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.lower_bound.overbudget_cost = 9000000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest,
EvaluatesOverbudgetWithReducedIntervalsAndGroups) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<std::pair<int64_t, int64_t>> node_intervals =
{{5, -1}, {5, -1}, {2, 3}, {3, 4}, {100, -1}, {0, 4}};
const std::vector<std::vector<int64_t>> node_groups = {{0, 1}};
request.set_memory_budget(100000);
request.mutable_overbudget_coeff()->set_coeff(10.0);
request.clear_live();
AddIntervals(request.mutable_node_intervals(), node_intervals);
AddGroups(request.mutable_node_groups(), node_groups);
const std::vector<NodeStrategyIdx> s_val = {2, 1, 2, 2, 1};
const std::vector<EdgeStrategyIdx> e_val = {10, 6};
const double objective_value = 11138.0;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 9400.0;
expected_evaluation.total.overbudget_cost = 18400000.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.lower_bound.overbudget_cost = 9000000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesFollower) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<NodeStrategyIdx> s_val = {3, 1, 2, 1, 1};
const std::vector<EdgeStrategyIdx> e_val = {14, 6};
const double objective_value = 12138.0;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kFollowerViolationCode};
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 10400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 2.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesAlias) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<NodeStrategyIdx> s_val = {3, 1, 2, 2, 0};
const std::vector<EdgeStrategyIdx> e_val = {14, 6};
const double objective_value = 12138.0;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kAliasViolationCode};
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 10400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 4.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesMemory) {
const AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
const std::vector<NodeStrategyIdx> s_val = {2, 1, 2, 2, 1};
const std::vector<EdgeStrategyIdx> e_val = {10, 6};
const double objective_value = 11138.0;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kMemoryViolationCode};
expected_evaluation.total.computation_cost = 158.0;
expected_evaluation.total.communication_cost = 1580.0;
expected_evaluation.total.resharding_cost = 9400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesInfiniteCostForNode) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_computation_costs(0)->set_costs(0, kInfinityCost);
request.mutable_computation_costs(0)->set_costs(1, kInfinityCost);
request.mutable_computation_costs(0)->set_costs(2, kInfinityCost);
const std::vector<NodeStrategyIdx> s_val = {0, 1, 2, 2, 1};
const std::vector<EdgeStrategyIdx> e_val = {2, 6};
const double objective_value = 1e+20;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kInfiniteCostViolationCode};
expected_evaluation.total.computation_cost = 1e+20;
expected_evaluation.total.communication_cost = 1560.0;
expected_evaluation.total.resharding_cost = 7400.0;
expected_evaluation.lower_bound.computation_cost = 153.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesInfiniteCostForEdge) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_resharding_costs(0)->set_costs(2, kInfinityCost);
const std::vector<NodeStrategyIdx> s_val = {0, 1, 2, 2, 1};
const std::vector<EdgeStrategyIdx> e_val = {2, 6};
const double objective_value = 1e+20;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kInfiniteCostViolationCode};
expected_evaluation.total.computation_cost = 156.0;
expected_evaluation.total.communication_cost = 1560.0;
expected_evaluation.total.resharding_cost = 1e+20;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
TEST(AutoShardingEvaluatorTest, ViolatesMaxDepartures) {
AutoShardingSolverRequest request = DefaultAutoShardingSolverRequest();
request.mutable_max_departures()->set_coeff(2.0);
const std::vector<NodeStrategyIdx> s_val = {3, 1, 2, 2, 1};
const std::vector<EdgeStrategyIdx> e_val = {14, 6};
const double objective_value = 12149.0;
const AutoShardingSolverOutput output = {s_val, e_val, objective_value};
const AutoShardingSolverResult result = {output, false};
const AutoShardingEvaluation evaluation = Evaluate(request, result);
AutoShardingEvaluation expected_evaluation;
expected_evaluation.violation_codes = {kMaxDeparturesViolationCode};
expected_evaluation.total.computation_cost = 159.0;
expected_evaluation.total.communication_cost = 1590.0;
expected_evaluation.total.resharding_cost = 10400.0;
expected_evaluation.lower_bound.computation_cost = 150.0;
expected_evaluation.lower_bound.communication_cost = 1500.0;
expected_evaluation.lower_bound.resharding_cost = 6000.0;
expected_evaluation.total_departures = 3.0;
EXPECT_EQ(evaluation, expected_evaluation);
}
#ifndef XLA_HLO_EXPERIMENTAL_AUTO_SHARDING_AUTO_SHARDING_H_
#define XLA_HLO_EXPERIMENTAL_AUTO_SHARDING_AUTO_SHARDING_H_
#include <cstddef>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_cost_graph.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_option.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_solver.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/hlo/experimental/auto_sharding/cluster_environment.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/service/call_graph.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_pass_interface.h"
#include "xla/shape.h"
namespace xla {
class DummyAutoSharding : public HloModulePass {
public:
DummyAutoSharding() = default;
~DummyAutoSharding() override = default;
absl::string_view name() const override { return "dummy_auto_sharding"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
};
enum class AutoShardingResult {
kModuleUnchanged,
kModuleChangedShardingPerformed,
kModuleUnchangedNoShardingPerformed
};
class AutoShardingImplementation {
public:
explicit AutoShardingImplementation(const AutoShardingOption& option);
~AutoShardingImplementation() = default;
absl::StatusOr<AutoShardingResult> RunAutoSharding(
HloModule* module,
const absl::flat_hash_set<std::string>& replicated_small_tensors,
const absl::flat_hash_set<absl::string_view>& execution_threads,
const absl::flat_hash_map<std::string, const HloInstruction*>&
sharding_propagation_solution = {});
std::pair<absl::flat_hash_map<std::string, std::vector<HloSharding>>, bool>
SaveAndRemoveShardingAnnotation(
HloModule* module,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
const absl::flat_hash_set<std::string>& replicated_small_tensors,
const absl::flat_hash_set<absl::string_view>& execution_threads);
absl::Status CanonicalizeLayouts(HloModule* module);
double GetSolverOptimalObjectiveValue() {
return solver_optimal_objective_value_;
}
private:
AutoShardingOption option_;
double solver_optimal_objective_value_ = -1.0;
};
class AutoSharding : public HloModulePass {
public:
explicit AutoSharding(const AutoShardingOption& option);
~AutoSharding() override = default;
absl::string_view name() const override { return "auto_sharding"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
double GetSolverOptimalObjectiveValue() {
return solver_optimal_objective_value_;
}
std::vector<int64_t> GetChosenDeviceMeshShape() { return chosen_mesh_shape_; }
private:
AutoShardingOption option_;
double solver_optimal_objective_value_ = -1.0;
std::vector<int64_t> chosen_mesh_shape_;
};
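// Minimal usage sketch (illustrative only; the option fields mirror those used
// in the auto-sharding tests and assume a 2x2 device mesh):
//
//   AutoShardingOption option;
//   option.enable = true;
//   option.device_mesh_shape = {2, 2};
//   option.device_mesh_ids = {0, 1, 2, 3};
//   option.device_mesh_alpha = {1.0, 1.0};
//   option.device_mesh_beta = {0.01, 1.0};
//   TF_ASSIGN_OR_RETURN(bool changed, AutoSharding(option).Run(module));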
namespace spmd {
HloSharding Tile(const Shape& shape, absl::Span<const int64_t> tensor_dims,
absl::Span<const int64_t> mesh_dims,
const Array<int64_t>& device_mesh);
std::vector<double> CommunicationReshardingCostVector(
const StrategyGroup* strategy_group, const Shape& shape,
const HloSharding& required_sharding,
const ClusterEnvironment& cluster_env);
std::vector<double> MemoryReshardingCostVector(
const StrategyGroup* strategy_group, const Shape& operand_shape,
const HloSharding& required_sharding,
const ClusterEnvironment& cluster_env);
std::vector<double> FollowInsCostVector(int64_t source_len, int64_t index);
std::unique_ptr<StrategyGroup> CreateLeafStrategyGroup(
size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, StrategyGroups& strategy_groups);
void SetInNodesWithInstruction(std::unique_ptr<StrategyGroup>& strategy_group,
const HloInstruction* ins,
const StrategyMap& strategy_map);
void RemoveDuplicatedStrategy(std::unique_ptr<StrategyGroup>& strategy_group);
absl::Status FilterStrategy(const HloInstruction* ins, const Shape& shape,
std::unique_ptr<StrategyGroup>& strategy_group,
const ClusterEnvironment& cluster_env,
const InstructionBatchDimMap& batch_map,
const AutoShardingOption& option);
absl::Status HandleDot(std::unique_ptr<StrategyGroup>& strategy_group,
StrategyGroups& strategy_groups,
StrategyMap& strategy_map, const HloInstruction* ins,
size_t instruction_id,
const HloInstructionSequence& instruction_sequence,
const HloCostAnalysis& hlo_cost_analysis,
const ClusterEnvironment& cluster_env,
const InstructionBatchDimMap& batch_map,
const AutoShardingOption& option,
const CallGraph& call_graph);
absl::Status HandleConv(std::unique_ptr<StrategyGroup>& strategy_group,
StrategyGroups& strategy_groups,
StrategyMap& strategy_map, const HloInstruction* ins,
size_t instruction_id,
const HloInstructionSequence& instruction_sequence,
const HloCostAnalysis& hlo_cost_analysis,
const ClusterEnvironment& cluster_env,
const InstructionBatchDimMap& batch_map,
const AutoShardingOption& option,
const CallGraph& call_graph);
void AnnotateShardingWithSimpleHeuristic(HloModule* module,
const std::string& heuristic,
const AliasMap& alias_map,
const ClusterEnvironment& cluster_env);
AliasMap BuildAliasMap(const HloModule* module);
AliasSet BuildAliasSet(const HloModule* module,
const StrategyMap& strategy_map);
absl::Status CheckAliasSetCompatibility(const AliasSet& alias_set,
const StrategyGroups& strategy_groups,
const HloInstructionSequence& sequence,
bool crash_on_error);
absl::Status GenerateReduceScatter(
const HloInstructionSequence& sequence, const AliasMap& alias_map,
const InstructionDepthMap& depth_map, const StrategyMap& strategy_map,
const CostGraph& cost_graph, absl::Span<const int64_t> s_val,
const ClusterEnvironment& cluster_env, const AutoShardingOption& option);
bool HasReduceScatterOpportunity(
const HloInstruction* inst, const StrategyMap& strategy_map,
const CostGraph& cost_graph, absl::Span<const int64_t> s_val,
const StableHashSet<const HloInstruction*>& modified);
HloSharding GetReduceScatterOutput(const HloInstruction* ins,
const ShardingStrategy& strategy,
const ClusterEnvironment& cluster_env);
AutoShardingSolverResult Solve(
const HloModule& hlo_module, const HloLiveRange& hlo_live_range,
const StrategyMap& strategy_map, const StrategyGroups& strategy_groups,
const CostGraph& cost_graph, const AliasSet& alias_set,
const std::vector<std::pair<LivenessIdx, LivenessIdx>>& node_intervals,
const std::vector<std::pair<LivenessIdx, LivenessIdx>>& edge_intervals,
const std::vector<absl::btree_set<int64_t>>& node_groups,
const std::vector<absl::btree_set<int64_t>>& edge_groups,
const AutoShardingOption& option, absl::string_view request_prefix,
const absl::flat_hash_map<std::string, const HloInstruction*>&
sharding_propagation_solution = {});
void PopulateTemporalValues(const CostGraph& cost_graph,
AutoShardingSolverRequest& request);
void AddReplicatedStrategy(
const HloInstruction* ins, const Shape& shape,
const ClusterEnvironment& cluster_env, const StrategyMap& strategy_map,
std::unique_ptr<StrategyGroup>& strategy_group, double replicated_penalty,
absl::flat_hash_set<int64_t> operands_to_consider_all_strategies_for = {});
void CheckMemoryCosts(StrategyGroup* strategy_group, const Shape& shape);
std::pair<int64_t, bool> ChooseOperandToFollow(
const StrategyMap& strategy_map, const InstructionDepthMap& depth_map,
const AliasMap& alias_map, int64_t max_depth, const HloInstruction* ins);
void FillAllStrategiesForArray(
std::unique_ptr<StrategyGroup>& strategy_group, const HloInstruction* ins,
const Shape& shape, const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map, const AutoShardingOption& option,
double replicated_penalty, const InstructionBatchDimMap& batch_dim_map,
const CallGraph& call_graph, bool only_allow_divisible,
bool create_replicated_strategies,
bool create_partially_replicated_strategies);
absl::StatusOr<std::unique_ptr<StrategyGroup>> CreateAllStrategiesGroup(
const HloInstruction* ins, const Shape& shape, size_t instruction_id,
StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map, const AutoShardingOption& option,
double replicated_penalty, const InstructionBatchDimMap& batch_dim_map,
const CallGraph& call_graph, bool only_allow_divisible,
bool create_replicated_strategies,
bool create_partially_replicated_strategies);
std::unique_ptr<StrategyGroup> CreateElementwiseOperatorStrategies(
size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const InstructionDepthMap& depth_map, const AliasMap& alias_map,
const StableHashMap<int64_t, std::vector<ShardingStrategy>>&
pretrimmed_strategy_map,
int64_t max_depth, StrategyGroups& strategy_groups,
AssociativeDotPairs& associative_dot_pairs);
std::unique_ptr<StrategyGroup> HandleManuallyShardedInstruction(
const HloInstruction* ins, const Shape& shape, size_t instruction_id,
StrategyGroups& strategy_groups, StrategyMap& strategy_map);
std::unique_ptr<StrategyGroup> HandlePartialReduce(
const HloInstruction* ins, size_t instruction_id,
StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
StrategyMap& strategy_map, const CallGraph& call_graph);
std::unique_ptr<StrategyGroup> CreateLeafStrategyGroupWithoutInNodes(
size_t instruction_id, StrategyGroups& strategy_groups);
std::unique_ptr<StrategyGroup> CreateReshapeStrategies(
size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
bool only_allow_divisible, double replicated_penalty,
const InstructionBatchDimMap& batch_dim_map,
const AutoShardingOption& option, StrategyGroups& strategy_groups,
const CallGraph& call_graph);
std::unique_ptr<StrategyGroup> CreateTupleStrategyGroup(size_t instruction_id);
void EnumerateAll1DPartition(const HloInstruction* ins, const Shape& shape,
const Array<int64_t>& device_mesh,
const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map,
std::unique_ptr<StrategyGroup>& strategy_group,
bool only_allow_divisible,
const std::string& suffix,
const CallGraph& call_graph);
void EnumerateAllPartition(const HloInstruction* ins, const Shape& shape,
const Array<int64_t>& device_mesh,
const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map,
std::unique_ptr<StrategyGroup>& strategy_group,
const InstructionBatchDimMap& batch_dim_map,
bool only_allow_divisible,
const CallGraph& call_graph,
int64_t partition_dimensions,
const std::vector<int64_t>& tensor_dims = {});
absl::StatusOr<std::unique_ptr<StrategyGroup>> FollowReduceStrategy(
const HloInstruction* ins, const Shape& output_shape,
const HloInstruction* operand, const HloInstruction* unit,
size_t instruction_id, StrategyMap& strategy_map,
StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
bool allow_mixed_mesh_shape, bool crash_at_error);
void GenerateOutfeedStrategy(const HloInstruction* ins, const Shape& shape,
const ClusterEnvironment& cluster_env,
const StrategyMap& strategy_map,
std::unique_ptr<StrategyGroup>& strategy_group,
double replicated_penalty);
std::pair<ReshardingCosts, ReshardingCosts>
GenerateReshardingCostsAndMissingShardingsForAllOperands(
const HloInstruction* ins, const HloSharding& output_sharding,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const CallGraph& call_graph,
std::vector<std::optional<HloSharding>>& input_shardings);
std::unique_ptr<StrategyGroup> MaybeFollowInsStrategyGroup(
const StrategyGroup* src_strategy_group, const Shape& shape,
size_t instruction_id, bool have_memory_cost,
StrategyGroups& strategy_groups, const ClusterEnvironment& cluster_env,
const StableHashMap<NodeIdx, std::vector<ShardingStrategy>>&
pretrimmed_strategy_map);
void RemoveShardingsWhereSmallDimsShardedAcrossManyDevices(
const Shape& shape, StrategyGroup* strategy_group,
bool instruction_has_user_sharding);
void ScaleCostsWithExecutionCounts(StrategyGroup* strategy_group,
int64_t execution_count);
void TrimOrGenerateStrategiesBasedOnExistingSharding(
const Shape& output_shape, StrategyGroup* strategy_group,
const StrategyMap& strategy_map,
const std::vector<HloInstruction*>& instructions,
const HloSharding& existing_sharding, const ClusterEnvironment& cluster_env,
StableHashMap<int64_t, std::vector<ShardingStrategy>>&
pretrimmed_strategy_map,
const CallGraph& call_graph, bool strict);
absl::StatusOr<std::tuple<StrategyMap, StrategyGroups, AssociativeDotPairs>>
BuildStrategyAndCost(
const HloInstructionSequence& sequence, const HloModule* module,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
const absl::flat_hash_map<const HloInstruction*, int64_t>&
instruction_execution_counts,
const InstructionDepthMap& depth_map,
const InstructionBatchDimMap& batch_dim_map, const AliasMap& alias_map,
const ClusterEnvironment& cluster_env, AutoShardingOption& option,
const CallGraph& call_graph, const HloCostAnalysis& hlo_cost_analysis,
bool trying_multiple_mesh_shapes);
int64_t MemoryBudgetLowerBound(
const HloModule& module,
const absl::flat_hash_set<const HloInstruction*>& instructions_to_shard,
const LivenessSet& liveness_set, const HloAliasAnalysis& alias_analysis,
int64_t num_devices,
const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserved_shardings);
}
}
#endif
#include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/match.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_cost_graph.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_option.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_solver.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_util.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_wrapper.h"
#include "xla/hlo/experimental/auto_sharding/cluster_environment.h"
#include "xla/hlo/experimental/auto_sharding/matrix.h"
#include "xla/hlo/experimental/auto_sharding/metrics.h"
#include "xla/hlo/experimental/auto_sharding/profiling_result.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_sharding_util.h"
#include "xla/service/buffer_value.h"
#include "xla/service/call_graph.h"
#include "xla/service/computation_layout.h"
#include "xla/service/dump.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_buffer.h"
#include "xla/service/hlo_cost_analysis.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_value.h"
#include "xla/service/optimize_input_output_buffer_alias.h"
#include "xla/service/sharding_propagation.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace spmd {
namespace {
constexpr double kSaltiplier = 0.0;  // A value of 0.0 disables cost salting.
}
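// Returns, for each strategy in `strategy_group`, the communication cost of
// resharding a value of `operand_shape` from that strategy's output sharding
// to `required_sharding`. Tile-maximal required shardings are treated as
// replicated when computing the cost.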
std::vector<double> CommunicationReshardingCostVector(
const StrategyGroup* strategy_group, const Shape& operand_shape,
const HloSharding& required_sharding,
const ClusterEnvironment& cluster_env) {
CHECK(!strategy_group->is_tuple) << "Only works with strategy vector.";
std::vector<double> ret;
ret.reserve(strategy_group->strategies.size());
auto required_sharding_for_resharding = required_sharding.IsTileMaximal()
? HloSharding::Replicate()
: required_sharding;
for (const auto& x : strategy_group->strategies) {
ret.push_back(cluster_env.ReshardingCost(operand_shape, x.output_sharding,
required_sharding_for_resharding));
}
return ret;
}
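// Estimates the temporary memory overhead of resharding `shape` from
// `src_sharding` to `dst_sharding`: the peak of the source, destination, and
// (when the two shardings tile a different number of dimensions) intermediate
// sharded sizes, minus the bytes already held under the source sharding.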
double ComputeMemoryReshardingCost(const Shape& shape,
const HloSharding& src_sharding,
const HloSharding& dst_sharding,
const Array<int64_t>& device_mesh) {
int64_t src_n_dim = NumTileDimensions(src_sharding);
int64_t dst_n_dim = NumTileDimensions(dst_sharding);
int64_t src_sharded_bytes = GetShardedInstructionSize(
shape, device_mesh.num_elements(), src_sharding);
double result = std::max(
src_sharded_bytes, GetShardedInstructionSize(
shape, device_mesh.num_elements(), dst_sharding));
if (src_n_dim != dst_n_dim && src_n_dim != -1 && dst_n_dim != -1) {
absl::StatusOr<Shape> inter_shape = ComputeIntermediateShape(
src_sharding, dst_sharding, shape, device_mesh);
if (inter_shape.ok()) {
std::optional<HloSharding> src_inter_sharding =
hlo_sharding_util::ReshapeSharding(shape, *inter_shape, src_sharding);
std::optional<HloSharding> dst_inter_sharding =
hlo_sharding_util::ReshapeSharding(shape, *inter_shape, dst_sharding);
if (!src_inter_sharding.has_value() || !dst_inter_sharding.has_value()) {
src_inter_sharding = HloSharding::Replicate();
dst_inter_sharding = HloSharding::Replicate();
}
result = std::max(
result,
static_cast<double>(std::max(
GetShardedInstructionSize(
*inter_shape, device_mesh.num_elements(), src_inter_sharding),
GetShardedInstructionSize(*inter_shape,
device_mesh.num_elements(),
dst_inter_sharding))));
}
}
return result - src_sharded_bytes;
}
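// Per-strategy counterpart of CommunicationReshardingCostVector: for each
// strategy in `strategy_group`, computes the memory resharding cost from that
// strategy's output sharding to `required_sharding` on the cluster's device
// mesh.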
std::vector<double> MemoryReshardingCostVector(
const StrategyGroup* strategy_group, const Shape& operand_shape,
const HloSharding& required_sharding,
const ClusterEnvironment& cluster_env) {
CHECK(!strategy_group->is_tuple) << "Only works with strategy vector.";
std::vector<double> ret;
ret.reserve(strategy_group->strategies.size());
auto required_sharding_for_resharding = required_sharding.IsTileMaximal()
? HloSharding::Replicate()
: required_sharding;
CHECK_OK(required_sharding.Validate(operand_shape))
<< strategy_group->ToString();
for (const auto& x : strategy_group->strategies) {
ret.push_back(ComputeMemoryReshardingCost(operand_shape, x.output_sharding,
required_sharding_for_resharding,
cluster_env.device_mesh_));
}
return ret;
}
std::unique_ptr<StrategyGroup> CreateLeafStrategyGroupWithoutInNodes(
const size_t instruction_id, StrategyGroups& strategy_groups) {
auto strategy_group = std::make_unique<StrategyGroup>();
strategy_group->is_tuple = false;
strategy_group->node_idx = strategy_groups.size();
strategy_groups.push_back(strategy_group.get());
strategy_group->instruction_id = instruction_id;
return strategy_group;
}
std::unique_ptr<StrategyGroup> CreateLeafStrategyGroup(
const size_t instruction_id, const HloInstruction* ins,
const StrategyMap& strategy_map, StrategyGroups& strategy_groups) {
auto strategy_group =
CreateLeafStrategyGroupWithoutInNodes(instruction_id, strategy_groups);
for (int64_t i = 0; i < ins->operand_count(); ++i) {
strategy_group->in_nodes.push_back(strategy_map.at(ins->operand(i)).get());
}
return strategy_group;
}
std::unique_ptr<StrategyGroup> CreateTupleStrategyGroup(
const size_t instruction_id) {
auto strategy_group = std::make_unique<StrategyGroup>();
strategy_group->is_tuple = true;
strategy_group->node_idx = -1;
strategy_group->instruction_id = instruction_id;
return strategy_group;
}
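// For every operand of `ins`, fills in a missing entry of `input_shardings`
// (defaulting to replicated for tokens, scalars, and a few special cases such
// as gather/scatter and custom calls) and returns the per-strategy
// communication and memory resharding cost vectors for all operands.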
std::pair<ReshardingCosts, ReshardingCosts>
GenerateReshardingCostsAndMissingShardingsForAllOperands(
const HloInstruction* ins, const HloSharding& output_sharding,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const CallGraph& call_graph,
std::vector<std::optional<HloSharding>>& input_shardings) {
ReshardingCosts communication_resharding_costs;
ReshardingCosts memory_resharding_costs;
if (input_shardings.empty() && ins->operand_count() > 0) {
input_shardings.resize(ins->operand_count());
}
for (int64_t k = 0; k < ins->operand_count(); ++k) {
auto operand = ins->operand(k);
if (operand->shape().IsToken() || operand->shape().rank() == 0) {
communication_resharding_costs.push_back(std::vector<double>(
strategy_map.at(operand)->strategies.size(), 0.0));
memory_resharding_costs.push_back(std::vector<double>(
strategy_map.at(operand)->strategies.size(), 0.0));
if (!input_shardings[k].has_value()) {
input_shardings[k] = HloSharding::Replicate();
}
} else {
std::optional<HloSharding> cur_input_sharding;
CHECK_EQ(input_shardings.size(), ins->operand_count());
if (input_shardings[k].has_value()) {
cur_input_sharding = input_shardings[k];
} else {
cur_input_sharding = GetInputSharding(
ins, k, output_sharding, call_graph, cluster_env.NumDevices());
}
bool is_sharding_default_replicated = false;
if (!cur_input_sharding.has_value()) {
if ((ins->opcode() == HloOpcode::kGather && k == 0) ||
(ins->opcode() == HloOpcode::kScatter && k != 0)) {
is_sharding_default_replicated = true;
cur_input_sharding = HloSharding::Replicate();
} else if (ins->opcode() == HloOpcode::kCustomCall) {
is_sharding_default_replicated = true;
cur_input_sharding = HloSharding::Replicate();
} else if (ins->opcode() == HloOpcode::kRngBitGenerator) {
cur_input_sharding = HloSharding::Replicate();
}
}
CHECK(cur_input_sharding.has_value());
if (!input_shardings[k].has_value()) {
input_shardings[k] = cur_input_sharding;
}
auto operand_strategies = strategy_map.at(operand).get();
auto operand_shape = operand->shape();
if (ins->opcode() == HloOpcode::kGather && k == 0 &&
is_sharding_default_replicated) {
VLOG(2) << "Zeroing out operand 0 resharding costs for gather sharding "
<< output_sharding.ToString();
communication_resharding_costs.push_back(
std::vector<double>(operand_strategies->strategies.size(), 0));
memory_resharding_costs.push_back(
std::vector<double>(operand_strategies->strategies.size(), 0));
input_shardings[k] = std::nullopt;
} else {
communication_resharding_costs.push_back(
CommunicationReshardingCostVector(
operand_strategies, ins->operand(k)->shape(),
*cur_input_sharding, cluster_env));
memory_resharding_costs.push_back(MemoryReshardingCostVector(
operand_strategies, ins->operand(k)->shape(), *cur_input_sharding,
cluster_env));
}
}
}
return std::make_pair(communication_resharding_costs,
memory_resharding_costs);
}
std::tuple<ReshardingCosts, ReshardingCosts,
std::vector<std::optional<HloSharding>>>
GenerateReshardingCostsAndShardingsForAllOperands(
const HloInstruction* ins, const HloSharding& output_sharding,
const StrategyMap& strategy_map, const ClusterEnvironment& cluster_env,
const CallGraph& call_graph) {
std::vector<std::optional<HloSharding>> input_shardings_optional;
std::pair<ReshardingCosts, ReshardingCosts> resharding_costs =
GenerateReshardingCostsAndMissingShardingsForAllOperands(
ins, output_sharding, strategy_map, cluster_env, call_graph,
input_shardings_optional);
for (const auto& sharding_optional : input_shardings_optional) {
CHECK(sharding_optional.has_value());
}
return {resharding_costs.first, resharding_costs.second,
input_shardings_optional};
}
void FollowArrayOrTokenStrategyGroup(
const StrategyGroup& src_strategy_group, const Shape& shape,
const size_t instruction_id, const bool have_memory_cost,
const ClusterEnvironment& cluster_env,
const StableHashMap<NodeIdx, std::vector<ShardingStrategy>>&
pretrimmed_strategy_map,
StrategyGroup& strategy_group) {
CHECK(shape.IsArray() || shape.IsToken());
#include "xla/hlo/experimental/auto_sharding/auto_sharding.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include <numeric>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_option.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_strategy.h"
#include "xla/hlo/experimental/auto_sharding/auto_sharding_util.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/utils/hlo_live_range.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/buffer_value.h"
#include "xla/service/hlo_alias_analysis.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/hlo_value.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace op = xla::testing::opcode_matchers;
namespace xla {
namespace spmd {
namespace {
using ::testing::Contains;
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::Eq;
using ::testing::FieldsAre;
using ::testing::IsEmpty;
using ::testing::IsFalse;
using ::testing::IsTrue;
using ::testing::Not;
using ::testing::Pair;
using ::testing::ResultOf;
using ::testing::UnorderedElementsAre;
using DummyAutoShardingTest = HloTestBase;
TEST_F(DummyAutoShardingTest, ReplicatedShardingDummy) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[5,7,11,13]{3,2,1,0} parameter(0)
%param1 = f32[5,7,11,13]{3,2,1,0} parameter(1)
%add = f32[5,7,11,13]{3,2,1,0} add(%param0, %param1)
ROOT %copy = f32[5,7,11,13]{3,2,1,0} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, DummyAutoSharding().Run(module.get()));
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
class AutoShardingTest : public HloTestBase {
protected:
const absl::string_view kDotHloString = R"(
HloModule module
ENTRY matmul {
parameter.1 = f32[32,64]{1,0} parameter(0)
parameter.2 = f32[64,128]{1,0} parameter(1)
ROOT root = f32[32,128]{1,0} dot(parameter.1, parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}
})";
const absl::string_view kAddHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16,32,64]{2,1,0} parameter(0)
%param1 = f32[16,32,64]{2,1,0} parameter(1)
ROOT root = f32[16,32,64]{2,1,0} add(%param0, %param1)
})";
void RunMatMulAutoShardingWithOptions(
AutoShardingOption option, size_t expected_num_tiles,
size_t expected_sharded_dimensions = 1) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kDotHloString));
RunAutoShardingWithOptions(module.get(), option, expected_num_tiles,
expected_sharded_dimensions);
}
void RunAddAutoShardingWithOptions(AutoShardingOption option,
size_t expected_num_tiles,
size_t expected_sharded_dimensions = 1) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kAddHloString));
RunAutoShardingWithOptions(module.get(), option, expected_num_tiles,
expected_sharded_dimensions);
}
void RunAutoShardingWithOptions(HloModule* module, AutoShardingOption option,
size_t expected_num_tiles,
size_t expected_sharded_dimensions = 1) {
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module));
EXPECT_TRUE(changed);
auto* root = FindInstruction(module, "root");
ASSERT_NE(root, nullptr);
EXPECT_EQ(root->sharding().NumTiles(), expected_num_tiles);
EXPECT_EQ(VectorGreaterThanOneElementCount(
root->sharding().tile_assignment().dimensions(),
root->sharding().ReplicateOnLastTileDim()),
expected_sharded_dimensions);
}
void RunMatMulAutoShardingWithOptionsExpectFail(AutoShardingOption option) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kDotHloString));
RunAutoShardingWithOptionsExpectFail(module.get(), option);
}
void RunAutoShardingWithOptionsExpectFail(HloModule* module,
AutoShardingOption option) {
EXPECT_FALSE(AutoSharding(option).Run(module).ok());
}
void RunMatMulAutoShardingWithOptionsNoDeviceIds(
AutoShardingOption option, std::vector<int64_t> expected_tile,
bool expected_last_dim_replicate = false) {
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kDotHloString));
RunAutoShardingWithOptionsNoDeviceIds(module.get(), option, expected_tile,
expected_last_dim_replicate);
}
void RunAutoShardingWithOptionsNoDeviceIds(HloModule* module,
AutoShardingOption option,
std::vector<int64_t> expected_tile,
bool expected_last_dim_replicate) {
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module));
EXPECT_TRUE(changed);
HloInstruction* root = FindInstruction(module, "root");
ASSERT_NE(root, nullptr);
EXPECT_EQ(root->sharding().ReplicateOnLastTileDim(),
expected_last_dim_replicate);
EXPECT_THAT(root->sharding().tile_assignment().dimensions(),
ElementsAreArray(expected_tile));
}
};
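// MemoryBudgetTest recomputes spmd::MemoryBudgetLowerBound from a scheduled
// module's live ranges and checks that the per-device lower bound shrinks as
// the device count grows from 64 to 512.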
TEST_F(AutoShardingTest, MemoryBudgetTest) {
auto compute_memory_budget_lower_bound =
[](const HloModule& module, int64_t num_devices,
const absl::flat_hash_map<std::string, std::vector<HloSharding>>&
preserved_shardings = {}) -> absl::StatusOr<int64_t> {
auto size_fn = [](const BufferValue& buffer) {
return spmd::GetBytes(buffer.shape());
};
TF_ASSIGN_OR_RETURN(HloSchedule schedule,
ScheduleModule(&module, size_fn,
ComputationSchedulerToModuleScheduler(
DFSMemoryScheduler),
{}));
const HloComputation* entry_computation = module.entry_computation();
std::unique_ptr<HloAliasAnalysis> alias_analysis =
HloAliasAnalysis::Run(&module).value();
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloLiveRange> hlo_live_range,
HloLiveRange::Run(schedule, *alias_analysis, entry_computation));
absl::flat_hash_map<const HloValue*, HloLiveRange::TimeBound>&
buffer_live_ranges = hlo_live_range->buffer_live_ranges();
spmd::LivenessSet liveness_set(hlo_live_range->schedule_end_time() + 1);
for (const auto& [hlo_value, live_range] : buffer_live_ranges) {
for (spmd::LivenessIdx i = live_range.start; i <= live_range.end; ++i) {
liveness_set[i].push_back(hlo_value);
}
}
absl::flat_hash_set<const HloInstruction*> instructions_to_shard(
module.entry_computation()->instructions().begin(),
module.entry_computation()->instructions().end());
return spmd::MemoryBudgetLowerBound(module, instructions_to_shard,
liveness_set, *alias_analysis,
num_devices, preserved_shardings);
};
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[16384,16384]{0,1} parameter(0)
%param1 = f32[16384,16384]{0,1} parameter(1)
%add = f32[16384,16384]{0,1} add(%param0, %param1)
ROOT %copy = f32[16384,16384]{0,1} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(HloSharding partial_sharding,
ParseSharding("{devices=[64,1]<=[64]}"));
TF_ASSERT_OK_AND_ASSIGN(
int64_t partial_mesh_64x1_budget_lower_bound,
compute_memory_budget_lower_bound(*module, 64));
for (HloInstruction* ins : module->entry_computation()->instructions()) {
ins->set_sharding(partial_sharding);
}
TF_ASSERT_OK_AND_ASSIGN(
int64_t full_mesh_64x8_budget_lower_bound,
compute_memory_budget_lower_bound(*module, 512));
CHECK_LT(full_mesh_64x8_budget_lower_bound,
partial_mesh_64x1_budget_lower_bound)
<< "The memory budget lower bound per device should be lower with a "
"larger number of devices. Instead, the bound was "
<< partial_mesh_64x1_budget_lower_bound << " bytes for 64 devices and "
<< full_mesh_64x8_budget_lower_bound << " bytes for 512 devices.";
}
TEST_F(AutoShardingTest, DISABLED_ElementWiseOperator) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[128,128]{0,1} parameter(0)
%param1 = f32[128,128]{0,1} parameter(1)
%add = f32[128,128]{0,1} add(%param0, %param1)
ROOT %copy = f32[128,128]{0,1} copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "param0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{devices=[2,2]0,2,1,3}"));
}
TEST_F(AutoShardingTest, Unsupported3DShardingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
%param0 = f32[32,32,32,32] parameter(0)
%param1 = f32[32,32,32,32] parameter(1)
%add = f32[32,32,32,32] add(%param0, %param1), sharding={devices=[2,2,1,2]<=[8]}
ROOT %copy = f32[32,32,32,32] copy(%add)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 4};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
EXPECT_DEATH(auto status = AutoSharding(option).Run(module.get()),
".*too many axes.*");
}
TEST_F(AutoShardingTest, NDIterativeSolveTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[256,1]0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23,8,9,10,11,12,13,14,15,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255}
sharding_call = s32[512,3084]{1,0} custom-call(param), custom_call_target="Sharding", sharding={devices=[256,1]<=[256]}
ROOT slice = s32[512,2048]{1,0} slice(sharding_call), slice={[0:512], [0:2048]}
})";
AutoShardingOption option;
option.enable = true;
option.solve_nd_sharding_iteratively = true;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings;
option.device_mesh_shape = {16, 16};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
HloInstruction* slice = FindInstruction(module.get(), "slice");
EXPECT_NE(slice, nullptr);
EXPECT_THAT(slice, op::Sharding("{devices=[256,1]<=[256]}"));
}
TEST_F(AutoShardingTest, SliceDeviceMeshTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0)
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed, AutoSharding( {.enable = true,
.device_mesh_shape = {2, 2},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* slice = FindInstruction(module.get(), "slice");
ASSERT_NE(slice, nullptr);
EXPECT_THAT(
slice,
AnyOf(op::Sharding("{devices=[4,1]0,1,2,3}"),
op::Sharding("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}")));
}
TEST_F(AutoShardingTest, SliceMixedUserShardingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[4,1]0,2,1,3}
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.solve_nd_sharding_iteratively = true,
.device_mesh_shape = {2, 2},
.device_mesh_ids = {0, 2, 1, 3},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
std::vector<HloInstruction*> instructions =
module->entry_computation()->MakeInstructionPostOrder();
EXPECT_THAT(instructions,
Each(ResultOf(
[](const HloInstruction* ins) { return ins->has_sharding(); },
IsTrue())));
EXPECT_THAT(instructions, Each(op::Sharding("{devices=[4,1]0,2,1,3}")));
}
TEST_F(AutoShardingTest, SlicedTensorDimensionShardedTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %slicemodule {
param = s32[512,3084]{1,0} parameter(0), sharding={devices=[1,4]0,2,1,3}
slice = s32[512,2048]{1,0} slice(param), slice={[0:512], [0:2048]}, sharding={devices=[1,4]0,2,1,3}
ROOT copy = s32[512,2048]{1,0} copy(slice)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.solve_nd_sharding_iteratively = true,
.device_mesh_shape = {2, 2},
.device_mesh_ids = {0, 2, 1, 3},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
std::vector<HloInstruction*> instructions =
module->entry_computation()->MakeInstructionPostOrder();
EXPECT_THAT(instructions,
Not(Contains(ResultOf(
[](const HloInstruction* ins) { return ins->opcode(); },
Eq(HloOpcode::kReshape)))));
}
TEST_F(AutoShardingTest, UserShardingTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
concatenate.76306 = bf16[1,4096,8,256]{3,2,1,0} parameter(0)
constant.15158 = bf16[] constant(0)
pad.70 = bf16[1,4352,8,256]{3,2,1,0} pad(concatenate.76306, constant.15158), padding=0_0x0_256x0_0x0_0, sharding={devices=[1,1,128,1]<=[128]}
ROOT copy.45 = bf16[1,4352,8,256]{3,2,1,0} copy(pad.70)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
AutoShardingOption{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.device_mesh_shape = {128, 1},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0}})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
}
TEST_F(AutoShardingTest, AllowShardingsSmallDimsAcrossManyDevicesTest) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %elementwise {
parameter.1 = bf16[8,1024]{1,0} parameter(0), sharding={devices=[16,16]<=[256]}
add.1 = bf16[8,1024]{1,0} add(parameter.1, parameter.1)
ROOT copy.45 = bf16[8,1024]{1,0} copy(add.1)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
bool changed,
AutoSharding(
AutoShardingOption{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.device_mesh_shape = {128, 1},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0},
.allow_shardings_small_dims_across_many_devices = true})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* add1 = FindInstruction(module.get(), "add.1");
EXPECT_THAT(add1, op::Sharding("{devices=[16,16]<=[256]}"));
TF_ASSERT_OK_AND_ASSIGN(module, ParseAndReturnVerifiedModule(kHloString));
TF_ASSERT_OK_AND_ASSIGN(
changed,
AutoSharding(
AutoShardingOption{
.enable = true,
.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kKeepAllShardings,
.device_mesh_shape = {128, 1},
.device_mesh_alpha = {1.0, 1.0},
.device_mesh_beta = {0.01, 1.0},
.allow_shardings_small_dims_across_many_devices = false})
.Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
add1 = FindInstruction(module.get(), "add.1");
EXPECT_THAT(add1, Not(op::Sharding("{devices=[16,16]<=[256]}")));
}
TEST_F(AutoShardingTest, RngBitGeneratorArrayInput) {
constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
ENTRY %RngBitGenerator (p0: u64[2]) -> (u64[2], u32[16,16]) {
%p0 = u64[2]{0} parameter(0)
ROOT %rand = (u64[2]{0}, u32[16,16]{1,0}) rng-bit-generator(u64[2]{0} %p0), algorithm=rng_three_fry
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {1.0, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
auto* instruction = FindInstruction(module.get(), "p0");
ASSERT_NE(instruction, nullptr);
EXPECT_THAT(instruction, op::Sharding("{replicated}"));
}
TEST_F(AutoShardingTest, SPMDShardToFullShapeWithConstantTest) {
constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
add.6.clone {
y.13 = bf16[]{:T(256)} parameter(1)
x.13 = bf16[]{:T(256)} parameter(0)
ROOT add.9011 = bf16[]{:T(256)} add(x.13, y.13)
}
ENTRY main {
input.1 = bf16[512,512]{1,0} parameter(0)
constant.1 = bf16[] constant(16.7)
broadcast.1 = bf16[128,128]{1,0} broadcast(constant.1), dimensions={}
broadcast.2 = bf16[512,512]{1,0} broadcast(constant.1), dimensions={}
custom-call.1 = bf16[512,512]{1,0} custom-call(input.1), custom_call_target="Sharding", sharding={devices=[4,4]<=[16]}
custom-call.2 = bf16[128,128]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
all-reduce.1 = bf16[128,128]{1,0} all-reduce(custom-call.2), channel_id=621, replica_groups={{0,1,2,3},{4,5,6,7},{8,9,10,11},{12,13,14,15}}, use_global_device_ids=true, to_apply=add.6.clone, frontend_attributes={from-cross-replica-sharding="true"}, backend_config={"flag_configs":[],"barrier_config":{"barrier_type":"CUSTOM","id":"9"},"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
add.1 = bf16[128,128]{1,0} add(bf16[128,128]{1,0} all-reduce.1, bf16[128,128]{1,0} broadcast.1)
custom-call.3 = bf16[512,512]{1,0} custom-call(add.1), custom_call_target="SPMDShardToFullShape", sharding={devices=[4,1,4]<=[16]last_tile_dim_replicate}
add.2 = bf16[512,512]{1,0} add(bf16[512,512]{1,0} custom-call.3, bf16[512,512]{1,0} broadcast.2)
ROOT copy.1 = bf16[512,512]{1,0} copy(add.2)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
option.enable = true;
option.device_mesh_shape = {4, 4};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {1.0, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
const HloInstruction* custom_call2 =
FindInstruction(module.get(), "custom-call.2");
ASSERT_NE(custom_call2, nullptr);
EXPECT_THAT(custom_call2, op::Sharding("{manual}"));
const HloInstruction* custom_call3 =
FindInstruction(module.get(), "custom-call.3");
ASSERT_NE(custom_call3, nullptr);
EXPECT_THAT(custom_call3,
op::Sharding("{devices=[4,1,4]<=[16]last_tile_dim_replicate}"));
const HloInstruction* custom_call1 = custom_call2->operand(0);
ASSERT_NE(custom_call1, nullptr);
EXPECT_THAT(custom_call1, op::Sharding("{devices=[4,4]<=[16]}"));
std::vector<const HloInstruction*> instructions(
module->entry_computation()->instructions().begin(),
module->entry_computation()->instructions().end());
EXPECT_THAT(
module->entry_computation()->instructions(),
Contains(ResultOf(
"opcode",
[](const HloInstruction* ins) { return ins->opcode(); },
Eq(HloOpcode::kConstant)))
.Times(2));
}
TEST_F(AutoShardingTest, SPMDShardToFullShapeMultipleValidMeshShapeTest) {
constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
add.6.clone {
y.13 = bf16[]{:T(256)} parameter(1)
x.13 = bf16[]{:T(256)} parameter(0)
ROOT add.9011 = bf16[]{:T(256)} add(x.13, y.13)
}
ENTRY main {
input.1 = bf16[512,512]{1,0} parameter(0)
custom-call.1 = bf16[512,512]{1,0} custom-call(input.1), custom_call_target="Sharding", sharding={devices=[4,4]<=[16]}
custom-call.2 = bf16[128,128]{1,0} custom-call(custom-call.1), custom_call_target="SPMDFullToShardShape", sharding={manual}
all-reduce.1 = bf16[128,128]{1,0} all-reduce(custom-call.2), channel_id=621, replica_groups={{0,1,2,3},{4,5,6,7},{8,9,10,11},{12,13,14,15}}, use_global_device_ids=true, to_apply=add.6.clone, frontend_attributes={from-cross-replica-sharding="true"}, backend_config={"flag_configs":[],"barrier_config":{"barrier_type":"CUSTOM","id":"9"},"scoped_memory_configs":[],"compute_type":"COMPUTE_TYPE_DEFAULT","device_type":"DEVICE_TYPE_INVALID","used_scoped_memory_configs":[]}
reshape.1 = bf16[64,2,128]{2,1,0} reshape(bf16[128,128]{1,0} all-reduce.1)
reshape.2 = bf16[64,256]{1,0} reshape(bf16[64,2,128]{2,1,0} reshape.1)
custom-call.3 = bf16[512,512]{1,0} custom-call(reshape.2), custom_call_target="SPMDShardToFullShape", sharding={devices=[8,2]<=[16]}
ROOT copy.1 = copy(custom-call.3)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.preserve_shardings =
AutoShardingOption::PreserveShardingsType::kRemoveAllShardings;
option.enable = true;
option.try_multiple_mesh_shapes = false;
option.device_mesh_shape = {4, 4};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {1.0, 1.0};
EXPECT_DEATH(auto status = AutoSharding(option).Run(module.get()),
"Auto-sharding cannot infer a single appropriate mesh shape for "
"this HLO, and AutoShardingption::try_multiple_mesh_shapes is "
"set to false. Please re-run with the option set to true.");
}
TEST_F(AutoShardingTest, RngBitGeneratorTupleInput) {
constexpr absl::string_view kHloString = R"(
HloModule rng_bit_generator
ENTRY %RngBitGenerator {
param.0 = u32[2]{0:T(128)} parameter(0)
param.1 = u32[2]{0:T(128)} parameter(1)
tuple.3 = (u32[2]{0:T(128)}, u32[2]{0:T(128)}) tuple(param.0, param.1)
ROOT rng-bit-generator = u32[100,100]{1,0:T(8,128)} rng-bit-generator(tuple.3), algorithm=rng_default
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(10) << module->ToString();
EXPECT_TRUE(changed);
auto* param0 = FindInstruction(module.get(), "param.0");
auto* param1 = FindInstruction(module.get(), "param.1");
ASSERT_NE(param0, nullptr);
  ASSERT_NE(param1, nullptr);
EXPECT_THAT(param0, op::Sharding("{replicated}"));
EXPECT_THAT(param1, op::Sharding("{replicated}"));
}
TEST_F(AutoShardingTest, DotLHSTwoNonContractingDims) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4,256,64]{2,1,0} parameter(0)
%param1 = f32[64,32]{0,1} parameter(1)
%dot = f32[4,256,32]{2,1,0} dot(f32[4,256,64]{2,1,0} %param0, f32[64,32]{0,1} %param1), lhs_contracting_dims={2}, rhs_contracting_dims={0}
ROOT %copy = f32[4,256,32]{2,1,0} copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_mixed_mesh_shape = false;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(2) << module->ToString();
EXPECT_TRUE(changed);
auto* param0 = FindInstruction(module.get(), "param0");
auto* param1 = FindInstruction(module.get(), "param1");
auto* dot = FindInstruction(module.get(), "dot");
ASSERT_NE(param0, nullptr);
ASSERT_NE(param1, nullptr);
ASSERT_NE(dot, nullptr);
EXPECT_THAT(
std::make_tuple(param0, param1, dot),
AnyOf(
FieldsAre(
op::Sharding(
"{devices=[1,2,1,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,1,2,3}")),
FieldsAre(
op::Sharding(
"{devices=[1,2,1,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,2,1,3}")),
FieldsAre(
op::Sharding(
"{devices=[2,1,1,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,1,2]0,1,2,3}")),
FieldsAre(
op::Sharding(
"{devices=[2,1,1,2]0,2,1,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[1,2,2]0,1,2,3 last_tile_dim_replicate}"),
op::Sharding("{devices=[2,1,2]0,2,1,3}"))));
}
TEST_F(AutoShardingTest, DotRHSTwoNonContractingDims) {
constexpr absl::string_view kHloString = R"(
HloModule module
ENTRY %entry {
%param0 = f32[4,256,32]{2,1,0} parameter(0)
%param1 = f32[4,256,4,8]{1,3,2,0} parameter(1)
%dot = f32[32,4,8]{2,1,0} dot(f32[4,256,32]{2,1,0} %param0, f32[4,256,4,8]{1,3,2,0} %param1), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}
ROOT %copy = f32[32,4,8]{2,1,0} copy(%dot)
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(kHloString));
AutoShardingOption option;
option.enable = true;
option.device_mesh_shape = {2, 2};
option.device_mesh_ids = {0, 1, 2, 3};
option.device_mesh_alpha = {1.0, 1.0};
option.device_mesh_beta = {0.01, 1.0};
option.allow_mixed_mesh_shape = false;
TF_ASSERT_OK_AND_ASSIGN(bool changed, AutoSharding(option).Run(module.get()));
VLOG(2) << module->ToString();
EXPECT_TRUE(changed);
auto* param0 = FindInstruction(module.get(), "param0");
auto* param1 = FindInstruction(module.get(), "param1");
  auto* dot = FindInstruction(module.get(), "dot");
#ifndef XLA_HLO_EXPERIMENTAL_AUTO_SHARDING_AUTO_SHARDING_MEMORY_H_
#define XLA_HLO_EXPERIMENTAL_AUTO_SHARDING_AUTO_SHARDING_MEMORY_H_
#include <cstdint>
#include <functional>
#include <limits>
#include <utility>
#include <vector>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace spmd {
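// Reduces the number of (liveness point, primitive) memory terms by merging
// overlapping live ranges into shared groups. Liveness can be supplied either
// as a per-point list of live primitives or as one [start, end] interval per
// primitive; both Reduce() overloads return {original, reduced} term counts,
// and the reduced representation is available afterwards through
// GetReducedLive(), GetReducedIntervals(), and GetReducedGroups().
//
// Rough usage sketch (names illustrative; see the accompanying unit tests for
// concrete inputs):
//
//   MemoryTermReducer reducer;
//   std::pair<int64_t, int64_t> num_terms =
//       reducer.Reduce(num_lives, num_primitives, live_intervals);
//   const auto& reduced_intervals = reducer.GetReducedIntervals();
//   const auto& reduced_groups = reducer.GetReducedGroups();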
class MemoryTermReducer {
public:
std::pair<int64_t, int64_t> Reduce(
int64_t num_lives, int64_t num_primitives,
const std::function<
tsl::protobuf::RepeatedField<int64_t>(int64_t)>&
live,
int64_t max_iterations = std::numeric_limits<int64_t>::max());
std::pair<int64_t, int64_t> Reduce(
int64_t num_lives, int64_t num_primitives,
const std::function<std::pair<int64_t, int64_t>(int64_t)>& intervals,
int64_t max_iterations = std::numeric_limits<int64_t>::max());
const std::vector<std::vector<int64_t>>& GetReducedLive() const;
const std::vector<std::pair<int64_t, int64_t>>& GetReducedIntervals() const;
const std::vector<absl::btree_set<int64_t>>& GetReducedGroups() const;
absl::flat_hash_set<int64_t> GetReducedTimes(int64_t num_primitives);
static absl::flat_hash_set<int64_t> GetReducedTimes(
int64_t num_primitives,
const std::vector<std::pair<int64_t, int64_t>>& reduced_intervals,
const std::vector<absl::btree_set<int64_t>>& reduced_groups);
private:
void Reduce(int64_t num_lives, int64_t num_primitives,
int64_t max_iterations);
std::vector<std::vector<int64_t>> reduced_live_;
std::vector<std::pair<int64_t, int64_t>> reduced_intervals_;
std::vector<absl::btree_set<int64_t>> reduced_groups_;
};
}
}
#endif
#include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include <algorithm>
#include <cstdint>
#include <functional>
#include <limits>
#include <optional>
#include <utility>
#include <vector>
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace spmd {
namespace {
using PrimIdx = int64_t;
using LiveIdx = int64_t;
using GroupIdx = int64_t;
using PrimPair = std::pair<PrimIdx, PrimIdx>;
using Interval = std::pair<LiveIdx, LiveIdx>;
using ActivePrim = std::pair<Interval, PrimIdx>;
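// An interval is non-empty iff first <= second; "inverted" intervals (for
// example {max, 0}) act as sentinels for primitives whose terms have been
// fully folded into a group.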
bool IsValid(const Interval& interval) {
return interval.first <= interval.second;
}
int64_t length(const Interval& interval) {
return interval.second - interval.first + 1;
}
}
std::pair<int64_t, int64_t> MemoryTermReducer::Reduce(
int64_t num_lives, int64_t num_primitives,
const std::function<
tsl::protobuf::RepeatedField<int64_t>(int64_t)>&
live,
int64_t max_iterations) {
LOG(INFO) << "Memory Term Reducer beginning to reduce number of terms ...";
reduced_live_.clear();
reduced_intervals_.clear();
reduced_groups_.clear();
int64_t num_terms = 0;
reduced_intervals_.reserve(num_primitives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
reduced_intervals_.push_back({std::numeric_limits<LiveIdx>::max(), 0});
}
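  // Collapse the per-point liveness lists into one [first, last] interval per
  // primitive, counting one term for every (liveness point, primitive) pair.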
for (LiveIdx live_idx = 0; live_idx < num_lives; ++live_idx) {
for (const PrimIdx prim_idx : live(live_idx)) {
Interval& interval = reduced_intervals_[prim_idx];
interval.first = std::min(interval.first, live_idx);
interval.second = std::max(interval.second, live_idx);
++num_terms;
}
}
Reduce(num_lives, num_primitives, max_iterations);
int64_t num_reduced_terms = 0;
reduced_live_.resize(num_lives);
for (PrimIdx prim_idx = 0; prim_idx < reduced_intervals_.size(); ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
for (LiveIdx live_idx = interval.first; live_idx <= interval.second;
++live_idx) {
reduced_live_[live_idx].push_back(prim_idx);
++num_reduced_terms;
}
}
for (const auto& group : reduced_groups_) num_reduced_terms += group.size();
LOG(INFO) << "Memory Term Reducer finished reducing the number of terms.";
return {num_terms, num_reduced_terms};
}
std::pair<int64_t, int64_t> MemoryTermReducer::Reduce(
int64_t num_lives, int64_t num_primitives,
const std::function<std::pair<int64_t, int64_t>(int64_t)>& intervals,
int64_t max_iterations) {
LOG(INFO) << "Memory Term Reducer beginning to reduce number of terms ...";
reduced_live_.clear();
reduced_intervals_.clear();
reduced_groups_.clear();
int64_t num_terms = 0;
reduced_intervals_.reserve(num_primitives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
reduced_intervals_.push_back(intervals(prim_idx));
const Interval& interval = reduced_intervals_.back();
if (IsValid(interval)) num_terms += length(interval);
}
Reduce(num_lives, num_primitives, max_iterations);
int64_t num_reduced_terms = 0;
for (PrimIdx prim_idx = 0; prim_idx < reduced_intervals_.size(); ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
if (IsValid(interval)) num_reduced_terms += length(interval);
}
for (const auto& group : reduced_groups_) num_reduced_terms += group.size();
LOG(INFO) << "Memory Term Reducer finished reducing the number of terms.";
return {num_terms, num_reduced_terms};
}
void MemoryTermReducer::Reduce(int64_t num_lives, int64_t num_primitives,
int64_t max_iterations) {
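  // Bucket primitives by the liveness points where their intervals begin
  // (enter) and end (evict); these buckets drive the sweep-line merge below.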
std::vector<absl::btree_set<PrimIdx>> enter(num_lives), evict(num_lives);
for (PrimIdx prim_idx = 0; prim_idx < num_primitives; ++prim_idx) {
const Interval& interval = reduced_intervals_[prim_idx];
if (!IsValid(interval)) continue;
enter[interval.first].insert(prim_idx);
evict[interval.second].insert(prim_idx);
}
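  // True if `large_idx`'s interval strictly contains `small_idx`'s; merging
  // such a pair would split the larger primitive's leftover range in two, so
  // CalcOverlap declines to pair them.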
auto Splits = [this](PrimIdx large_idx, PrimIdx small_idx) -> bool {
const Interval& large = reduced_intervals_[large_idx];
const Interval& small = reduced_intervals_[small_idx];
return large.first < small.first && large.second > small.second;
};
auto CalcOverlap = [this, Splits](
int64_t prim0_idx,
int64_t prim1_idx) -> std::optional<Interval> {
if (prim0_idx == prim1_idx) return std::nullopt;
const Interval& interval0 = reduced_intervals_[prim0_idx];
const Interval& interval1 = reduced_intervals_[prim1_idx];
if (!IsValid(interval0) || !IsValid(interval1)) return std::nullopt;
if (Splits(prim0_idx, prim1_idx)) return std::nullopt;
if (Splits(prim1_idx, prim0_idx)) return std::nullopt;
return Interval(std::max(interval0.first, interval1.first),
std::min(interval0.second, interval1.second));
};
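  // Adds a primitive to `reduced_group`, expanding indices at or beyond
  // `num_primitives` into the members of the corresponding existing group.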
auto MergeIntoGroup = [num_primitives, this](
PrimIdx prim_idx,
absl::btree_set<PrimIdx>& reduced_group) {
if (prim_idx < num_primitives) {
reduced_group.insert(prim_idx);
} else {
const auto& group = reduced_groups_[prim_idx - num_primitives];
reduced_group.insert(group.begin(), group.end());
}
};
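  // Number of terms contributed by a primitive or group, optionally minus the
  // portion covered by `overlap`; a group with a nonempty residual interval
  // also pays one term per member.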
auto CalcNumTerms = [num_primitives, this](
PrimIdx prim_idx,
std::optional<Interval> overlap = std::nullopt) {
int64_t num_terms = length(reduced_intervals_[prim_idx]);
if (overlap) num_terms -= length(*overlap);
if (prim_idx >= num_primitives && num_terms > 0) {
num_terms += reduced_groups_[prim_idx - num_primitives].size();
}
return num_terms;
};
auto UpdatePrimitive = [this, &enter, &evict](
PrimIdx prim_idx,
const Interval& overlap) mutable {
Interval& interval = reduced_intervals_[prim_idx];
enter[interval.first].erase(prim_idx);
evict[interval.second].erase(prim_idx);
if (auto& t = interval.first; t == overlap.first) t = overlap.second + 1;
if (auto& t = interval.second; t == overlap.second) t = overlap.first - 1;
if (!IsValid(interval)) return;
enter[interval.first].insert(prim_idx);
evict[interval.second].insert(prim_idx);
};
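  // A single sweep over all liveness points: each evicted primitive is paired
  // with the next active primitive ordered after it, candidate pairs are
  // ranked by descending overlap length, and a pair is merged into a new
  // group whenever that strictly reduces the total term count.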
auto SweepAndMerge = [&num_lives, &enter, &evict, &CalcOverlap, &CalcNumTerms,
&MergeIntoGroup, &UpdatePrimitive, this]() -> bool {
absl::btree_set<ActivePrim> actives;
absl::btree_multimap<int64_t, PrimPair> overlaps;
for (LiveIdx live_idx = 0; live_idx < num_lives; ++live_idx) {
for (const PrimIdx prim_idx : enter[live_idx]) {
actives.insert({reduced_intervals_[prim_idx], prim_idx});
}
for (const PrimIdx prim_idx : evict[live_idx]) {
auto active = actives.find({reduced_intervals_[prim_idx], prim_idx});
if (++active == actives.end()) continue;
std::optional<Interval> overlap = CalcOverlap(prim_idx, active->second);
if (!overlap) continue;
overlaps.insert({-length(*overlap), {prim_idx, active->second}});
}
for (const PrimIdx prim_idx : evict[live_idx]) {
actives.erase({reduced_intervals_[prim_idx], prim_idx});
}
}
bool changed = false;
for (const auto& [_, prim_pair] : overlaps) {
const PrimIdx prim0_idx = prim_pair.first, prim1_idx = prim_pair.second;
const std::optional<Interval> overlap = CalcOverlap(prim0_idx, prim1_idx);
if (!overlap) continue;
absl::btree_set<PrimIdx> reduced_group;
MergeIntoGroup(prim0_idx, reduced_group);
MergeIntoGroup(prim1_idx, reduced_group);
if (CalcNumTerms(prim0_idx) + CalcNumTerms(prim1_idx) <=
CalcNumTerms(prim0_idx, overlap) + CalcNumTerms(prim1_idx, overlap) +
length(*overlap) + reduced_group.size()) {
continue;
}
enter[overlap->first].insert(reduced_intervals_.size());
evict[overlap->second].insert(reduced_intervals_.size());
reduced_intervals_.push_back({overlap->first, overlap->second});
reduced_groups_.push_back(reduced_group);
UpdatePrimitive(prim0_idx, *overlap);
UpdatePrimitive(prim1_idx, *overlap);
changed = true;
}
return changed;
};
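  // Keep sweeping until no merge helps (or the iteration cap is reached),
  // then drop groups whose residual intervals became empty.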
for (int64_t iteration = 0; iteration < max_iterations; ++iteration) {
if (!SweepAndMerge()) break;
}
for (GroupIdx group_idx = reduced_groups_.size() - 1; group_idx >= 0;
--group_idx) {
if (IsValid(reduced_intervals_[num_primitives + group_idx])) continue;
reduced_intervals_.erase(reduced_intervals_.begin() + num_primitives +
group_idx);
reduced_groups_.erase(reduced_groups_.begin() + group_idx);
}
}
const std::vector<std::vector<int64_t>>& MemoryTermReducer::GetReducedLive()
const {
return reduced_live_;
}
const std::vector<std::pair<int64_t, int64_t>>&
MemoryTermReducer::GetReducedIntervals() const {
return reduced_intervals_;
}
const std::vector<absl::btree_set<int64_t>>&
MemoryTermReducer::GetReducedGroups() const {
return reduced_groups_;
}
absl::flat_hash_set<int64_t> MemoryTermReducer::GetReducedTimes(
int64_t num_primitives) {
return GetReducedTimes(num_primitives, reduced_intervals_, reduced_groups_);
}
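// Expands group intervals back onto their member primitives and then, during
// a left-to-right sweep, records for each interval endpoint the most recent
// interval start seen so far; the result is a small set of representative
// liveness times.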
absl::flat_hash_set<int64_t> MemoryTermReducer::GetReducedTimes(
int64_t num_primitives,
const std::vector<std::pair<int64_t, int64_t>>& reduced_intervals,
const std::vector<absl::btree_set<int64_t>>& reduced_groups) {
std::vector<std::pair<int64_t, int64_t>> intervals;
for (int64_t reduced_interval_idx = 0;
reduced_interval_idx < reduced_intervals.size();
++reduced_interval_idx) {
const Interval& reduced_interval = reduced_intervals[reduced_interval_idx];
if (reduced_interval_idx < num_primitives) {
intervals.push_back(reduced_interval);
continue;
}
const GroupIdx group_idx = reduced_interval_idx - num_primitives;
for (const PrimIdx prim_idx : reduced_groups[group_idx]) {
Interval& interval = intervals[prim_idx];
if (!IsValid(interval)) {
interval.first = reduced_interval.first;
interval.second = reduced_interval.second;
continue;
}
interval.first = std::min(interval.first, reduced_interval.first);
interval.second = std::max(interval.second, reduced_interval.second);
}
}
absl::btree_set<std::pair<int64_t, bool>> times;
for (const Interval& interval : intervals) {
if (!IsValid(interval)) continue;
times.insert({interval.first, false});
times.insert({interval.second, true});
}
int64_t last_entering_time = -1;
absl::flat_hash_set<int64_t> reduced_times;
for (const auto& time : times) {
    if (time.second) {  // Interval-end event: keep the latest start time.
reduced_times.insert(last_entering_time);
} else {
last_entering_time = time.first;
}
}
reduced_times.insert(last_entering_time);
return reduced_times;
}
}
}
#include "xla/hlo/experimental/auto_sharding/auto_sharding_memory.h"
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
namespace xla {
namespace spmd {
namespace {
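// Adapts a dense liveness matrix, or an explicit per-primitive interval list,
// into the callback form expected by MemoryTermReducer::Reduce().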
std::function<tsl::protobuf::RepeatedField<int64_t>(int64_t)>
Convert(const std::vector<std::vector<int64_t>>& live) {
return [live](int64_t live_idx) {
    return tsl::protobuf::RepeatedField<int64_t>(live[live_idx].begin(),
                                                 live[live_idx].end());
};
}
std::function<std::pair<int64_t, int64_t>(int64_t)> Convert(
const std::vector<std::pair<int64_t, int64_t>>& intervals) {
return [intervals](int64_t prim_idx) { return intervals[prim_idx]; };
}
TEST(AutoShardingMemoryTest, WithoutOverlap) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0 },
{0 },
{ 1},
{ 1},
{ 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{0 },
{0 },
{ 1},
{ 1},
{ 1}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 2}, {3, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {6, 6};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, PartialOverlap) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{ 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 2},
{ 2},
{ 2},
{ 2},
{ 1 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {5, 5}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, PartialOverlapReversed) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{ 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 1 },
{ 2},
{ 2},
{ 2},
{ 2},
{0 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, 5}, {0, 0}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, DoesNotSplitPrimitive) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 5}, {1, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {10, 10};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OnePrimitiveVanishes) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 2},
{ 2},
{ 2},
{ 2},
{ 2}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {6, 0}, {1, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {11, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, BothPrimitivesVanish) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{2},
{2},
{2},
{2},
{2},
{2}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, -1}, {6, -1}, {0, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {12, 8};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OneGroupingPreventsAnother) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2},
{ 1, 2},
{ 1, 2},
{ 1, 2},
{ 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 3},
{ 3},
{ 3},
{ 3},
{ 2, 3},
{1, 2 },
{1, 2 },
{1, 2 },
{ 2 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, -1}, {5, 7}, {4, 8}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {18, 15};
const absl::flat_hash_set<int64_t> expected_reduced_times = {4};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, TwoGroups) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 2},
{0, 2},
{0, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{3},
{3},
{3},
{4},
{4},
{4}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, 2}, {3, -1}, {6, 2}, {0, 2}, {3, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {0, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {12, 10};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, TwoGroupsMutuallyExclusive) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1 },
{0, 1 },
{0, 1 },
{ 2, 3},
{ 2, 3},
{ 2, 3},
{ 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 4},
{ 4},
{ 4},
{ 5},
{ 5},
{ 5},
{ 3 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {4, 0}, {7, 3}, {7, 7}, {1, 3}, {4, 6}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {14, 12};
const absl::flat_hash_set<int64_t> expected_reduced_times = {1, 4};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergingPrimitivesWouldNotReduceTerms) {
const int num_primitives = 2;
const std::vector<std::vector<int64_t>> live =
{{0, 1},
{0, 1}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0, 1},
{0, 1}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 1}, {0, 1}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {4, 4};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, AllPrimitivesVanish) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2},
{0, 1, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{3},
{3},
{3},
{3},
{3},
{3}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{6, -1}, {6, -1}, {6, -1}, {0, 5}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {18, 9};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergingGroupsWouldNotReduceTerms) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{ 2, 3},
{ 2, 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{4 },
{4 },
{4 },
{4, 5},
{4, 5},
{4, 5},
{4, 5},
{ 5},
{ 5}};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{7, -1}, {7, -1}, {9, 2}, {9, 2}, {0, 6}, {3, 8}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {26, 17};
const absl::flat_hash_set<int64_t> expected_reduced_times = {3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, ExampleFromDocumentation) {
const int num_primitives = 4;
const std::vector<std::vector<int64_t>> live =
{{0 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1 },
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{0, 1, 2, 3},
{ 2, 3},
{ 2, 3},
{ 2, 3},
{ 3}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{0 },
{ 4},
{ 4},
{ 4},
{ 4},
{ 6},
{ 6},
{ 6},
{ 6},
{ 6},
{ 5},
{ 5},
{ 5},
{ 3 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 4}, {10, 12}, {5, 9}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}, {0, 1, 2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 22};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, MergesWithRightmost) {
const int num_primitives = 3;
const std::vector<std::vector<int64_t>> live =
{{0, 2},
{0, 2},
{0, 2},
{ 1, 2}};
MemoryTermReducer reducer;
const auto num_terms =
reducer.Reduce(live.size(), num_primitives, Convert(live));
const std::vector<std::vector<int64_t>> expected_reduced_live =
{{ 3},
{ 3},
{ 3},
{1, 2 }};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{3, -1}, {3, 3}, {3, 3}, {0, 2}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 2}};
const std::pair<int64_t, int64_t> expected_num_terms = {8, 7};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0, 3};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, ExampleFromDocumentationUsingIntervals) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 9}, {1, 9}, {5, 12}, {5, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals));
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 4}, {10, 12}, {5, 9}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}, {0, 1, 2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 22};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, InvalidIntervals) {
const int num_primitives = 3;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 4}, {9223372036854775807, 0}, {9223372036854775807, 0}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(5, num_primitives,
Convert(intervals));
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 4}, {9223372036854775807, 0}, {9223372036854775807, 0}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups = {};
const std::pair<int64_t, int64_t> expected_num_terms = {5, 5};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, OneIterationOnly) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 9}, {1, 9}, {5, 12}, {5, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 0}, {10, 0}, {13, 4}, {13, 13}, {1, 9}, {5, 12}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {36, 23};
const absl::flat_hash_set<int64_t> expected_reduced_times = {5};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsBottomLeft) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 13}, {0, 10}, {0, 7}, {0, 4}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{11, 13}, {11, -1}, {5, 7}, {5, -1}, {0, 10}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsTopLeft) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 4}, {0, 7}, {0, 10}, {0, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{5, -1}, {5, 7}, {11, -1}, {11, 13}, {0, 10}, {0, 4}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{2, 3}, {0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {0};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsTopRight) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{9, 13}, {6, 13}, {3, 13}, {0, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{14, 8}, {6, 8}, {14, 2}, {0, 2}, {3, 13}, {9, 13}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{2, 3}, {0, 1}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {9};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
TEST(AutoShardingMemoryTest, StairsBottomRight) {
const int num_primitives = 4;
const std::vector<std::pair<int64_t, int64_t>> intervals =
{{0, 13}, {3, 13}, {6, 13}, {9, 13}};
MemoryTermReducer reducer;
const auto num_terms = reducer.Reduce(14, num_primitives,
Convert(intervals),
1);
const std::vector<std::vector<int64_t>> expected_reduced_live = {};
const std::vector<std::pair<int64_t, int64_t>> expected_reduced_intervals =
{{0, 2}, {14, 2}, {6, 8}, {14, 8}, {3, 13}, {9, 13}};
const std::vector<absl::btree_set<int64_t>> expected_reduced_groups =
{{0, 1}, {2, 3}};
const std::pair<int64_t, int64_t> expected_num_terms = {38, 26};
const absl::flat_hash_set<int64_t> expected_reduced_times = {9};
EXPECT_EQ(num_terms, expected_num_terms);
EXPECT_EQ(reducer.GetReducedLive(), expected_reduced_live);
EXPECT_EQ(reducer.GetReducedIntervals(), expected_reduced_intervals);
EXPECT_EQ(reducer.GetReducedGroups(), expected_reduced_groups);
EXPECT_EQ(reducer.GetReducedTimes(num_primitives), expected_reduced_times);
}
}
}
}
#ifndef XLA_HLO_IR_HLO_SCHEDULE_H_
#define XLA_HLO_IR_HLO_SCHEDULE_H_
#include <algorithm>
#include <ostream>
#include <string>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
class HloModule;
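// An ordered sequence of instructions within a single computation. The
// instruction pointers and their unique ids are kept in two parallel vectors
// so that a schedule can be serialized by id and later re-bound to
// HloInstruction objects.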
class HloInstructionSequence {
public:
HloInstructionSequence() = default;
explicit HloInstructionSequence(
absl::Span<HloInstruction* const> instructions) {
for (HloInstruction* instruction : instructions) {
push_back(instruction);
}
}
void push_back(HloInstruction* instruction) {
instruction_sequence_.push_back(instruction);
id_sequence_.push_back(instruction->unique_id());
}
void remove_instruction(HloInstruction* instruction) {
auto instruction_it = std::find(instruction_sequence_.begin(),
instruction_sequence_.end(), instruction);
if (instruction_it != instruction_sequence_.end()) {
auto id_it = std::find(id_sequence_.begin(), id_sequence_.end(),
instruction->unique_id());
instruction_sequence_.erase(instruction_it);
id_sequence_.erase(id_it);
}
}
void replace_instruction(HloInstruction* old_instruction,
HloInstruction* new_instruction) {
auto instruction_it =
std::find(instruction_sequence_.begin(), instruction_sequence_.end(),
old_instruction);
auto id_it = std::find(id_sequence_.begin(), id_sequence_.end(),
old_instruction->unique_id());
CHECK(instruction_it != instruction_sequence_.end())
<< "Do not find instruction id " << old_instruction->unique_id();
CHECK(id_it != id_sequence_.end());
*instruction_it = new_instruction;
*id_it = new_instruction->unique_id();
}
void insert_instruction(HloInstruction* instruction, int64_t index) {
CHECK(0 <= index && index < size()) << "Index out of bounds";
instruction_sequence_.insert(instruction_sequence_.begin() + index,
instruction);
id_sequence_.insert(id_sequence_.begin() + index, instruction->unique_id());
}
void clear() {
instruction_sequence_.clear();
id_sequence_.clear();
}
int64_t size() const { return instruction_sequence_.size(); }
const std::vector<HloInstruction*>& instructions() const {
return instruction_sequence_;
}
const std::vector<int>& ids() const { return id_sequence_; }
private:
std::vector<HloInstruction*> instruction_sequence_;
std::vector<int> id_sequence_;
};
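// A total order of instructions for each scheduled computation in an
// HloModule, keyed by computation unique id. Rough usage sketch (names
// illustrative):
//
//   HloSchedule schedule(module);
//   schedule.set_sequence(computation, {instr_a, instr_b});
//   TF_RETURN_IF_ERROR(schedule.Verify());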
class HloSchedule {
public:
explicit HloSchedule(const HloModule* module) : module_(module) {}
static absl::StatusOr<HloSchedule> CreateFromProto(
const HloModule* module, const HloScheduleProto& proto);
absl::StatusOr<HloScheduleProto> ToProto() const;
const HloInstructionSequence& sequence(
const HloComputation* computation) const;
HloInstructionSequence& GetOrCreateSequence(
const HloComputation* computation);
void set_sequence(const HloComputation* computation,
absl::Span<HloInstruction* const> sequence);
void set_sequence(const HloComputation* computation,
HloInstructionSequence sequence);
const absl::flat_hash_map<int64_t, HloInstructionSequence>& sequences()
const {
return sequences_;
}
absl::flat_hash_map<std::string, int64_t> num_sequences_by_execution_thread()
const;
bool is_computation_scheduled(const HloComputation* computation) const {
return sequences_.contains(computation->unique_id());
}
void remove_computation(const HloComputation* computation) {
auto it = sequences_.find(computation->unique_id());
if (it == sequences_.end()) return;
sequences_.erase(it);
execution_threads_.erase(computation->unique_id());
}
void remove_instruction(const HloComputation* computation,
HloInstruction* instruction) {
sequences_[computation->unique_id()].remove_instruction(instruction);
}
void replace_instruction(const HloComputation* computation,
HloInstruction* old_instruction,
HloInstruction* new_instruction) {
sequences_[computation->unique_id()].replace_instruction(old_instruction,
new_instruction);
}
absl::Status Update(
const absl::flat_hash_set<absl::string_view>& execution_threads = {});
absl::Status Verify() const;
std::string ToString() const;
bool empty() const { return sequences_.empty(); }
const HloModule* module() const { return module_; }
private:
absl::Status UpdateComputationSchedule(const HloComputation* computation);
const HloModule* module_;
absl::flat_hash_map<int64_t, HloInstructionSequence> sequences_;
absl::flat_hash_map<int64_t, std::string> execution_threads_;
};
std::ostream& operator<<(std::ostream& out, const HloSchedule& schedule);
}
#endif
#include "xla/hlo/ir/hlo_schedule.h"
#include <cstdint>
#include <ostream>
#include <queue>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/map_util.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/lib/gtl/map_util.h"
namespace xla {
absl::StatusOr<HloSchedule> HloSchedule::CreateFromProto(
const HloModule* module, const HloScheduleProto& proto) {
absl::flat_hash_map<int64_t, const HloComputation*> id_to_computation;
for (const HloComputation* computation : module->computations()) {
id_to_computation[computation->unique_id()] = computation;
}
HloSchedule schedule(module);
for (const auto& id_sequence : proto.sequences()) {
int64_t computation_id = id_sequence.first;
auto comp_it = id_to_computation.find(computation_id);
if (comp_it == id_to_computation.end()) {
continue;
}
const HloComputation* computation = comp_it->second;
absl::flat_hash_map<int64_t, HloInstruction*> id_to_instruction;
for (HloInstruction* instruction : computation->instructions()) {
id_to_instruction[instruction->unique_id()] = instruction;
}
HloInstructionSequence& sequence =
schedule.GetOrCreateSequence(computation);
for (const int64_t instruction_id : id_sequence.second.instruction_ids()) {
auto instr_it = id_to_instruction.find(instruction_id);
TF_RET_CHECK(instr_it != id_to_instruction.end())
<< "No instruction exists in HLO computation " << computation->name()
<< " with id " << instruction_id;
sequence.push_back(instr_it->second);
}
}
TF_RETURN_IF_ERROR(schedule.Verify());
return std::move(schedule);
}
absl::StatusOr<HloScheduleProto> HloSchedule::ToProto() const {
TF_RETURN_IF_ERROR(Verify());
HloScheduleProto proto;
for (const auto& id_sequence : sequences_) {
int64_t computation_id = id_sequence.first;
const HloInstructionSequence& sequence = id_sequence.second;
HloScheduleProto::InstructionSequence& proto_sequence =
(*proto.mutable_sequences())[computation_id];
proto_sequence.mutable_instruction_ids()->Reserve(sequence.size());
for (const int64_t id : sequence.ids()) {
proto_sequence.add_instruction_ids(id);
}
}
return std::move(proto);
}
void HloSchedule::set_sequence(const HloComputation* computation,
absl::Span<HloInstruction* const> sequence) {
set_sequence(computation, HloInstructionSequence(sequence));
}
void HloSchedule::set_sequence(const HloComputation* computation,
HloInstructionSequence sequence) {
CHECK(computation->parent() == module_);
sequences_[computation->unique_id()] = std::move(sequence);
execution_threads_[computation->unique_id()] =
std::string(computation->execution_thread());
}
HloInstructionSequence& HloSchedule::GetOrCreateSequence(
const HloComputation* computation) {
auto it = sequences_.find(computation->unique_id());
if (it == sequences_.end()) {
CHECK(computation->parent() == module_);
execution_threads_[computation->unique_id()] =
std::string(computation->execution_thread());
return sequences_[computation->unique_id()];
} else {
return it->second;
}
}
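// Note: sequence() below requires the computation to already be scheduled (it
// indexes the map with at()), whereas GetOrCreateSequence() above records the
// computation's execution thread and returns a fresh empty sequence on first
// use.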
const HloInstructionSequence& HloSchedule::sequence(
const HloComputation* computation) const {
return sequences_.at(computation->unique_id());
}
// Reconciles the stored sequence for `computation` with its current set of
// instructions: deleted instructions are dropped, surviving instructions keep
// their relative order, and newly added instructions are emitted as soon as
// all of their operands have been scheduled.
absl::Status HloSchedule::UpdateComputationSchedule(
    const HloComputation* computation) {
absl::flat_hash_map<int, HloInstruction*> id_to_instruction;
for (HloInstruction* instruction : computation->instructions()) {
InsertOrDie(&id_to_instruction, instruction->unique_id(), instruction);
}
absl::flat_hash_set<int> ids_in_schedule;
for (int id : sequences_.at(computation->unique_id()).ids()) {
InsertOrDie(&ids_in_schedule, id);
}
absl::flat_hash_map<const HloInstruction*, std::vector<HloInstruction*>>
new_instruction_uses;
absl::flat_hash_map<const HloInstruction*, int> unscheduled_operand_count;
std::queue<HloInstruction*> worklist;
for (HloInstruction* instruction : computation->instructions()) {
if (!ids_in_schedule.contains(instruction->unique_id())) {
if (instruction->operands().empty()) {
worklist.push(instruction);
} else {
for (const HloInstruction* operand : instruction->operands()) {
new_instruction_uses[operand].push_back(instruction);
}
unscheduled_operand_count[instruction] = instruction->operand_count();
}
}
}
HloInstructionSequence new_sequence;
auto schedule_worklist = [&]() {
while (!worklist.empty()) {
HloInstruction* instruction = worklist.front();
worklist.pop();
new_sequence.push_back(instruction);
std::vector<HloInstruction*>* new_users =
tsl::gtl::FindOrNull(new_instruction_uses, instruction);
if (new_users != nullptr) {
for (HloInstruction* new_user : *new_users) {
unscheduled_operand_count.at(new_user)--;
CHECK_GE(unscheduled_operand_count.at(new_user), 0);
if (unscheduled_operand_count.at(new_user) == 0) {
worklist.push(new_user);
}
}
}
}
};
schedule_worklist();
for (int id : sequences_.at(computation->unique_id()).ids()) {
auto it = id_to_instruction.find(id);
if (it == id_to_instruction.end()) {
continue;
}
worklist.push(it->second);
schedule_worklist();
}
set_sequence(computation, std::move(new_sequence));
return absl::OkStatus();
}
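// Worked example for UpdateComputationSchedule(): if the old sequence was
// {p0, p1, add} and a new instruction neg(add) was added to the computation,
// the updated sequence keeps the old order and emits neg right after its
// operand: {p0, p1, add, neg}. A new instruction with no operands (e.g. a
// fresh constant) is emitted at the front, before the old sequence is
// replayed.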
absl::Status HloSchedule::Update(
const absl::flat_hash_set<absl::string_view>& execution_threads) {
std::vector<HloComputation*> nonfusion_computations =
module_->MakeNonfusionComputations(execution_threads);
for (const HloComputation* computation : nonfusion_computations) {
if (!is_computation_scheduled(computation)) {
GetOrCreateSequence(computation);
TF_RETURN_IF_ERROR(UpdateComputationSchedule(computation));
}
}
auto sum_of_sequences_for_threads = [&]() -> int64_t {
if (execution_threads.empty()) {
return sequences_.size();
}
int64_t sequences_num_for_threads = 0;
for (const auto& [thread_name, sequence_num] :
num_sequences_by_execution_thread()) {
sequences_num_for_threads +=
execution_threads.contains(thread_name) ? sequence_num : 0;
}
return sequences_num_for_threads;
};
int64_t sequence_sum = sum_of_sequences_for_threads();
if (sequence_sum > nonfusion_computations.size()) {
absl::flat_hash_set<int64_t> nonfusion_computations_ids;
for (const HloComputation* computation : nonfusion_computations) {
nonfusion_computations_ids.insert(computation->unique_id());
}
for (auto it = sequences_.begin(); it != sequences_.end();) {
std::string sequence_thread_name = tsl::gtl::FindWithDefault(
execution_threads_, it->first, HloInstruction::kMainExecutionThread);
bool is_thread_included =
execution_threads.empty() ||
execution_threads.contains(sequence_thread_name);
if (!nonfusion_computations_ids.contains(it->first) &&
is_thread_included) {
execution_threads_.erase(it->first);
sequences_.erase(it++);
} else {
++it;
}
}
}
sequence_sum = sum_of_sequences_for_threads();
CHECK_EQ(sequence_sum, nonfusion_computations.size());
for (const HloComputation* computation : nonfusion_computations) {
TF_RETURN_IF_ERROR(UpdateComputationSchedule(computation));
}
TF_RETURN_IF_ERROR(Verify());
return absl::OkStatus();
}
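// Usage sketch for thread-restricted updates (mirrors the
// UpdateScheduleComputationRemovedWithMultiThreads test below): if only the
// main thread was scheduled, pass the same thread set here so sequences of
// other execution threads are neither required nor modified.
//
//   TF_RETURN_IF_ERROR(
//       schedule.Update({HloInstruction::kMainExecutionThread}));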
absl::flat_hash_map<std::string, int64_t>
HloSchedule::num_sequences_by_execution_thread() const {
absl::flat_hash_map<std::string, int64_t> sequence_num_by_execution_threads;
for (const auto& id_sequence_item : sequences_) {
++sequence_num_by_execution_threads[tsl::gtl::FindWithDefault(
execution_threads_, id_sequence_item.first,
HloInstruction::kMainExecutionThread)];
}
return sequence_num_by_execution_threads;
}
absl::Status HloSchedule::Verify() const {
VLOG(2) << "VerifySchedule()";
XLA_VLOG_LINES(2, ToString());
absl::flat_hash_map<std::string, int64_t> sequence_num_by_execution_threads =
num_sequences_by_execution_thread();
for (const auto& [thread_name, sequence_size] :
sequence_num_by_execution_threads) {
std::vector<HloComputation*> nonfusion_computations =
module_->MakeNonfusionComputations({thread_name});
TF_RET_CHECK(nonfusion_computations.size() == sequence_size)
<< "For thread " << thread_name << ", schedule has " << sequence_size
<< " sequences, but module has " << nonfusion_computations.size()
<< " non-fusion computations for thread " << thread_name;
for (const HloComputation* computation : nonfusion_computations) {
TF_RET_CHECK(sequences_.contains(computation->unique_id()))
<< "Computation " << computation->name()
<< " missing from HLO schedule.";
}
for (const HloComputation* computation : nonfusion_computations) {
absl::flat_hash_map<const HloInstruction*, int> instruction_position;
int pos = 0;
for (const HloInstruction* instruction :
sequence(computation).instructions()) {
TF_RET_CHECK(instruction_position.insert({instruction, pos}).second)
<< "Instruction " << instruction->name()
<< " appears more than once in the schedule";
pos++;
}
TF_RET_CHECK(instruction_position.size() ==
computation->instruction_count())
<< "Schedule for computation " << computation->name() << " has "
<< instruction_position.size() << " instructions, expected "
<< computation->instruction_count();
for (const HloInstruction* instruction : computation->instructions()) {
TF_RET_CHECK(instruction_position.contains(instruction))
<< "Instruction " << instruction->name() << " is not in schedule";
}
for (const HloInstruction* instruction : computation->instructions()) {
for (const HloInstruction* operand : instruction->operands()) {
TF_RET_CHECK(instruction_position.at(operand) <
instruction_position.at(instruction))
<< "Instruction " << instruction->name()
<< " is not scheduled after its operand " << operand->name();
}
for (const HloInstruction* pred : instruction->control_predecessors()) {
TF_RET_CHECK(instruction_position.at(pred) <
instruction_position.at(instruction))
<< "Instruction " << instruction->name()
<< " is not scheduled after its control predecessor "
<< pred->name();
}
}
}
}
return absl::OkStatus();
}
namespace {
const HloComputation* IdToComputation(const HloModule* module, int64_t id) {
for (const HloComputation* computation : module->computations()) {
if (computation->unique_id() == id) {
return computation;
}
}
return nullptr;
}
}
std::string HloSchedule::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("HloSchedule");
std::vector<int64_t> sorted_ids;
for (const auto& id_sequence : sequences_) {
sorted_ids.push_back(id_sequence.first);
}
absl::c_sort(sorted_ids);
for (const int64_t id : sorted_ids) {
const HloComputation* computation = IdToComputation(module_, id);
const HloInstructionSequence& sequence = sequences_.at(id);
if (computation == nullptr) {
pieces.push_back(absl::StrFormat(
"computation with id %d (no longer in HLO module):", id));
for (int id : sequence.ids()) {
pieces.push_back(absl::StrCat(" ", id));
}
} else {
pieces.push_back(absl::StrFormat("computation %s:", computation->name()));
for (const HloInstruction* instruction : sequence.instructions()) {
pieces.push_back(absl::StrCat(" ", instruction->name()));
}
}
}
return absl::StrJoin(pieces, "\n");
}
std::ostream& operator<<(std::ostream& out, const HloSchedule& schedule) {
return out << schedule.ToString();
}
} | #include "xla/hlo/ir/hlo_schedule.h"
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "absl/log/log.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloScheduleTest : public HloTestBase {};
TEST_F(HloScheduleTest, UpdateScheduleUnchangedModule) {
const std::string module_str = R"(
HloModule UpdateScheduleUnchanged
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
const auto& entry_schedule =
schedule.sequence(module->entry_computation()).instructions();
EXPECT_EQ(entry_schedule.size(), 6);
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(entry_schedule,
schedule.sequence(module->entry_computation()).instructions());
}
TEST_F(HloScheduleTest, UpdateScheduleWithNewInstructions) {
const std::string module_str = R"(
HloModule UpdateScheduleWithNewInstructions
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
const Shape shape = entry->root_instruction()->shape();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
HloInstruction* sub = entry->AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kSubtract, constant, entry->root_instruction()));
entry->set_root_instruction(sub);
auto in_schedule = [&](const HloInstruction* hlo) {
return absl::c_linear_search(schedule.sequence(entry).instructions(), hlo);
};
EXPECT_EQ(schedule.sequence(entry).size(), 6);
EXPECT_FALSE(in_schedule(constant));
EXPECT_FALSE(in_schedule(sub));
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 8);
EXPECT_TRUE(in_schedule(constant));
EXPECT_TRUE(in_schedule(sub));
}
TEST_F(HloScheduleTest, UpdateScheduleWithAddedAndDeletedInstruction) {
const std::string module_str = R"(
HloModule UpdateScheduleWithAddedAndDeletedInstruction
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
c = f32[] constant(42.0)
sum = f32[] add(a, b)
neg = f32[] negate(c)
ROOT root = f32[] multiply(sum, neg)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
HloInstruction* new_root = entry->AddInstruction(
HloInstruction::CreateBinary(constant->shape(), HloOpcode::kSubtract,
constant, entry->parameter_instruction(0)));
entry->set_root_instruction(new_root);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(entry).size(), 6);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 4);
}
TEST_F(HloScheduleTest, UpdateScheduleWithCompletelyReplacedModule) {
const std::string module_str = R"(
HloModule UpdateScheduleWithCompletelyReplacedModule
ENTRY main {
a = f32[] constant(42.0)
b = f32[] constant(123.0)
ROOT sum = f32[] add(a, b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
}));
HloComputation* entry = module->entry_computation();
HloInstruction* constant = entry->AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
HloInstruction* new_root = entry->AddInstruction(HloInstruction::CreateUnary(
constant->shape(), HloOpcode::kNegate, constant));
entry->set_root_instruction(new_root);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(entry).size(), 3);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(entry).size(), 2);
}
TEST_F(HloScheduleTest, UpdateScheduleWithMultipleComputations) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %WhileLoop () -> s32[] {
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(),
sizeof(void*));
}));
const HloInstruction* xla_while =
module->entry_computation()->root_instruction()->operand(0);
HloComputation* body = xla_while->while_body();
HloComputation* cond = xla_while->while_condition();
cond->set_root_instruction(cond->AddInstruction(
HloInstruction::CreateUnary(ShapeUtil::MakeShape(PRED, {}),
HloOpcode::kNot, cond->root_instruction())));
body->set_root_instruction(body->parameter_instruction(0));
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
EXPECT_EQ(schedule.sequence(body).size(), 7);
EXPECT_EQ(schedule.sequence(cond).size(), 4);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
EXPECT_EQ(schedule.sequence(body).size(), 1);
EXPECT_EQ(schedule.sequence(cond).size(), 5);
}
TEST_F(HloScheduleTest, UpdateScheduleComputationRemoved) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
ENTRY %WhileLoop () -> s32[] {
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
ROOT %root = s32[] get-tuple-element((s32[], token[]) %while), index=0
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(), [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape(),
sizeof(void*));
}));
HloInstruction* xla_while =
module->entry_computation()->root_instruction()->mutable_operand(0);
HloInstruction* init = xla_while->mutable_operand(0);
TF_ASSERT_OK(xla_while->ReplaceAllUsesWith(init));
HloDCE dce;
ASSERT_EQ(module->computation_count(), 3);
TF_ASSERT_OK(dce.Run(module.get()).status());
ASSERT_EQ(module->computation_count(), 1);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update());
TF_ASSERT_OK(schedule.Verify());
}
TEST_F(HloScheduleTest, UpdateScheduleComputationRemovedWithMultiThreads) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %WhileLoop () -> (s32[], f32[10]) {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
%async-done = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
%main_res = s32[] get-tuple-element((s32[], token[]) %while), index=0
ROOT %res = tuple(%main_res, %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(
buffer.shape(),
sizeof(void*));
},
{}, {HloInstruction::kMainExecutionThread}));
HloInstruction* xla_while = module->entry_computation()
->root_instruction()
->mutable_operand(0)
->mutable_operand(0);
HloInstruction* init = xla_while->mutable_operand(0);
TF_ASSERT_OK(xla_while->ReplaceAllUsesWith(init));
HloDCE dce;
ASSERT_EQ(module->computation_count(), 4);
TF_ASSERT_OK(dce.Run(module.get()).status());
ASSERT_EQ(module->computation_count(), 2);
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update({HloInstruction::kMainExecutionThread}));
TF_ASSERT_OK(schedule.Verify());
ASSERT_EQ(module->MakeNonfusionComputations({"parallel_thread"}).size(), 1);
ASSERT_FALSE(schedule.is_computation_scheduled(
module->MakeNonfusionComputations({"parallel_thread"}).front()));
}
TEST_F(HloScheduleTest, UpdateScheduleAddComputation) {
const std::string module_str = R"(
HloModule UpdateScheduleWithMultipleComputations
%Body (param.1: (s32[], token[])) -> (s32[], token[]) {
%param.1 = (s32[], token[]) parameter(0)
%get-tuple-element.1 = s32[] get-tuple-element((s32[], token[]) %param.1), index=0
%constant.1 = s32[] constant(1)
%add = s32[] add(s32[] %get-tuple-element.1, s32[] %constant.1)
%get-tuple-element.2 = token[] get-tuple-element((s32[], token[]) %param.1), index=1
%after-all = token[] after-all(token[] %get-tuple-element.2)
ROOT %tuple = (s32[], token[]) tuple(s32[] %add, token[] %after-all)
}
%Cond (param: (s32[], token[])) -> pred[] {
%param = (s32[], token[]) parameter(0)
%get-tuple-element = s32[] get-tuple-element((s32[], token[]) %param), index=0
%constant = s32[] constant(42)
ROOT %less-than = pred[] compare(s32[] %get-tuple-element, s32[] %constant), direction=LT
}
%async_builder {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
ROOT %foo = add(%p0, %p1)
}, execution_thread="parallel_thread"
ENTRY %WhileLoop () -> (s32[], f32[10]) {
%p0 = f32[10] parameter(0)
%p1 = f32[10] parameter(1)
%zero = s32[] constant(0)
%init_token = token[] after-all()
%init_tuple = (s32[], token[]) tuple(s32[] %zero, token[] %init_token)
%while = (s32[], token[]) while((s32[], token[]) %init_tuple), condition=%Cond, body=%Body
%async-start = ((f32[10], f32[10]), f32[10], s32[]) async-start(f32[10] %p0, f32[10] %p1), async_execution_thread="parallel_thread",calls=%async_builder
%async-done = f32[10]{0} async-done(((f32[10], f32[10]), f32[10], s32[]) %async-start), async_execution_thread="parallel_thread", calls=%async_builder
%main_res = s32[] get-tuple-element((s32[], token[]) %while), index=0
ROOT %res = tuple(%main_res, %async-done)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
TF_ASSERT_OK_AND_ASSIGN(
HloSchedule schedule,
ScheduleModule(module.get(),
[](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(
buffer.shape(),
sizeof(void*));
},
{}, {HloInstruction::kMainExecutionThread}));
HloComputation* entry_computation = module->entry_computation();
HloComputation::Builder comp_builder("fusion_computation");
HloInstruction* entry_comp_parameter_0 =
entry_computation->parameter_instruction(0);
HloInstruction* entry_comp_parameter_1 =
entry_computation->parameter_instruction(1);
std::vector<HloInstruction*> instructions_in_new_computation;
HloInstruction* added_instruction =
entry_computation->AddInstruction(HloInstruction::CreateBinary(
entry_comp_parameter_0->shape(), HloOpcode::kMultiply,
entry_comp_parameter_0, entry_comp_parameter_1));
instructions_in_new_computation.push_back(added_instruction);
HloInstruction* call =
entry_computation->CreateCallInstruction(instructions_in_new_computation);
Shape completion_sflag_shape = ShapeUtil::MakeScalarShape(U32);
TF_ASSERT_OK_AND_ASSIGN(
HloInstruction * async_done,
entry_computation->CreateAsyncInstructions(
call, {completion_sflag_shape}, entry_computation->execution_thread(),
true, true));
HloInstruction* result_2 =
entry_computation->root_instruction()->mutable_operand(1);
HloInstruction* modified_result_2 =
entry_computation->AddInstruction(HloInstruction::CreateBinary(
result_2->shape(), HloOpcode::kAdd, async_done, result_2));
TF_ASSERT_OK(result_2->ReplaceAllUsesWith(modified_result_2));
auto added_computation_name =
async_done->operand(0)->called_computations()[0]->name();
ASSERT_FALSE(schedule.is_computation_scheduled(
module->GetComputationWithName(added_computation_name)));
ASSERT_IS_NOT_OK(schedule.Verify());
TF_ASSERT_OK(schedule.Update({HloInstruction::kMainExecutionThread}));
TF_ASSERT_OK(schedule.Verify());
ASSERT_TRUE(schedule.is_computation_scheduled(
module->GetComputationWithName(added_computation_name)));
}
}
} | 2,171 |
#ifndef XLA_HLO_IR_TILE_ASSIGNMENT_H_
#define XLA_HLO_IR_TILE_ASSIGNMENT_H_
#include <array>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/algorithm/container.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/printer.h"
namespace xla {
class TileAssignment;
class IotaTileAssignment {
public:
static IotaTileAssignment Create(absl::Span<const int64_t> dims);
static IotaTileAssignment Create(absl::Span<const int64_t> dims,
absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm);
~IotaTileAssignment() = default;
IotaTileAssignment(const IotaTileAssignment& other);
IotaTileAssignment(IotaTileAssignment&& other) = default;
IotaTileAssignment& operator=(const IotaTileAssignment& other);
IotaTileAssignment& operator=(IotaTileAssignment&& other) = default;
bool operator==(const IotaTileAssignment& other) const {
return dims() == other.dims() && reshape_dims() == other.reshape_dims() &&
transpose_perm() == other.transpose_perm();
}
int64_t value_at(absl::Span<const int64_t> index) const;
int64_t ndims() const { return ndims_; }
absl::Span<const int64_t> dims() const {
return absl::MakeSpan(dims_ptr(), ndims_);
}
int64_t dim(int n) const { return dims_ptr()[n]; }
absl::Span<const int64_t> reshape_dims() const {
return absl::MakeSpan(reshape_dims_ptr(), reshape_ndims_);
}
absl::Span<const int> transpose_perm() const {
return absl::MakeSpan(transpose_perm_ptr(), reshape_ndims_);
}
int64_t num_elements() const {
return absl::c_accumulate(dims(), 1LL, std::multiplies<int64_t>());
}
std::optional<IotaTileAssignment> Transpose(absl::Span<const int> perm) const;
void Print(Printer* printer) const;
std::string ToString() const;
Array<int64_t> ToArray() const;
private:
friend class TileAssignment;
static constexpr int kPerDimBytes = sizeof(int64_t);
static constexpr int kPerReshapeDimBytes = sizeof(int64_t) + sizeof(int);
explicit IotaTileAssignment(absl::Span<const int64_t> dims,
absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm);
explicit IotaTileAssignment(int ndims, int reshape_ndims);
int64_t* dims_ptr() { return reinterpret_cast<int64_t*>(storage_.get()); }
const int64_t* dims_ptr() const {
return reinterpret_cast<const int64_t*>(storage_.get());
}
const int64_t* reshape_dims_ptr() const { return dims_ptr() + ndims_; }
int64_t* reshape_dims_ptr() {
return const_cast<int64_t*>(
const_cast<const IotaTileAssignment*>(this)->reshape_dims_ptr());
}
const int* transpose_perm_ptr() const {
return reinterpret_cast<const int*>(reshape_dims_ptr() + reshape_ndims_);
}
int* transpose_perm_ptr() {
return const_cast<int*>(
const_cast<const IotaTileAssignment*>(this)->transpose_perm_ptr());
}
int size_bytes() const {
return ndims_ * kPerDimBytes + reshape_ndims_ * kPerReshapeDimBytes;
}
bool next_index(absl::Span<int64_t> index) const {
DCHECK_EQ(index.size(), ndims_);
for (int64_t i = ndims_ - 1; i >= 0; --i) {
index[i]++;
if (index[i] < dims_ptr()[i]) {
return true;
}
index[i] = 0;
}
return false;
}
int32_t ndims_;
int32_t reshape_ndims_;
std::unique_ptr<char[]> storage_;
};
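// Example (taken from the accompanying unit tests):
// IotaTileAssignment::Create({4, 4, 2}, {2, 4, 4}, {2, 1, 0}) prints as
// "devices=[4,4,2]<=[2,4,4]T(2,1,0)" and is equivalent to materializing
// iota(32) with shape [2,4,4], transposing it with permutation (2,1,0) and
// reshaping the result to [4,4,2]; see ToArray().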
class TileAssignment {
public:
TileAssignment() : array_(ReplicatedArray()) {}
explicit TileAssignment(std::shared_ptr<const Array<int64_t>> array)
: shared_array_(std::move(array)), array_(shared_array_.get()) {}
explicit TileAssignment(int64_t device_id)
: TileAssignment(std::make_shared<const Array<int64_t>>(
std::initializer_list<int64_t>{1}, device_id)) {}
explicit TileAssignment(IotaTileAssignment iota) : iota_(std::move(iota)) {}
explicit TileAssignment(absl::Span<const int64_t> dims)
: iota_(IotaTileAssignment::Create(dims)) {}
explicit TileAssignment(absl::Span<const int64_t> dims,
absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm)
: iota_(IotaTileAssignment::Create(dims, reshape_dims, transpose_perm)) {}
bool operator==(const TileAssignment& other) const;
bool operator!=(const TileAssignment& other) const {
return !operator==(other);
}
template <typename... Dims>
typename std::enable_if_t<array_impl::pack_is_integral<Dims...>::value,
int64_t>
operator()(Dims... dims) const {
DCHECK_EQ(sizeof...(dims), num_dimensions());
std::array<int64_t, sizeof...(dims)> indexes{
{static_cast<int64_t>(dims)...}};
return operator()(indexes);
}
int64_t operator()(absl::Span<const int64_t> indexes) const;
absl::Span<const int64_t> dimensions() const;
int64_t num_dimensions() const;
int64_t dim(int64_t n) const;
int64_t num_elements() const;
int64_t first() const;
void Each(
absl::FunctionRef<void(absl::Span<const int64_t>, int64_t)> f) const;
absl::Status EachStatus(
absl::FunctionRef<absl::Status(absl::Span<const int64_t>, int64_t)> f)
const;
[[nodiscard]] TileAssignment Reshape(
absl::Span<const int64_t> new_dimensions) const;
[[nodiscard]] TileAssignment Transpose(absl::Span<const int> perm) const;
void Print(Printer* printer) const;
std::string ToString() const;
bool UsesDevice(int64_t device) const;
const std::optional<IotaTileAssignment>& iota() const { return iota_; }
const Array<int64_t>& array() const;
const std::shared_ptr<const Array<int64_t>>& shared_array() const;
std::shared_ptr<Array<int64_t>> shared_array_clone() const;
template <typename H>
friend H AbslHashValue(H h, const TileAssignment& tile) {
return H::combine(std::move(h), tile.array());
}
private:
friend class HloSharding;
explicit TileAssignment(IotaTileAssignment iota,
std::shared_ptr<const Array<int64_t>> shared_array)
: iota_(std::move(iota)),
shared_array_(std::move(shared_array)),
array_(shared_array_.get()) {}
void MaybeMaterializeFullArray() const;
static const Array<int64_t>* ReplicatedArray() {
static auto* array = new Array<int64_t>({0});
return array;
}
std::optional<IotaTileAssignment> iota_;
mutable std::shared_ptr<const Array<int64_t>> shared_array_;
mutable const Array<int64_t>* array_ = nullptr;
};
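// Construction sketch (mirrors the V1V2Equivalence unit test): the explicit
// array ("V1") form and the iota ("V2") form can describe the same
// assignment, and operator== compares materialized arrays whenever either
// side is not iota-backed.
//
//   TileAssignment v1(std::make_shared<const Array<int64_t>>(array));
//   TileAssignment v2({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
//   CHECK(v1 == v2);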
}
#endif
#include "xla/hlo/ir/tile_assignment.h"
#include <cstdint>
#include <cstring>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/util.h"
namespace xla {
namespace {
// Canonicalizes `dims`/`perm` in place: drops size-1 dimensions and folds
// together dimensions that are consecutive in memory and also appear
// consecutively (and in order) in the permutation, repeating until no further
// merge is possible.
void CanonicalizeIotaDims(absl::Span<int64_t>& dims, absl::Span<int>& perm) {
DCHECK_EQ(dims.size(), perm.size());
if (dims.size() <= 1) {
return;
}
absl::InlinedVector<int, 6> old_to_new_dims(dims.size());
while (true) {
bool changed = false;
int new_ndims = 0;
for (int i = 0; i < dims.size(); ++i) {
if (dims[i] == 1) {
old_to_new_dims[i] = -1;
} else {
old_to_new_dims[i] = new_ndims;
++new_ndims;
}
}
if (new_ndims != dims.size()) {
for (int i = 0, new_idx = 0; i < dims.size(); ++i) {
int new_dim = old_to_new_dims[i];
if (new_dim >= 0) {
dims[new_dim] = dims[i];
}
int new_perm_dim = old_to_new_dims[perm[i]];
if (new_perm_dim >= 0) {
perm[new_idx] = new_perm_dim;
++new_idx;
DCHECK_LE(new_idx, new_ndims);
}
}
perm = perm.subspan(0, new_ndims);
dims = dims.subspan(0, new_ndims);
}
for (int i = 1, base = 0, n = dims.size(); i < n; ++i) {
const int base_dim = perm[base];
const int dim = perm[i];
if (base_dim + (i - base) == dim) {
dims[base_dim] *= dims[dim];
dims[dim] = 1;
changed = true;
} else {
base = i;
}
}
if (!changed) {
break;
}
}
}
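// Example (consistent with the NonCanonicalTransposedIotaTile unit test):
// reshape_dims [2,4,4] with perm [1,2,0] canonicalizes to [2,16] with perm
// [1,0], because dims 1 and 2 appear consecutively and in order in the
// permutation and can be folded into a single dimension of size 16.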
enum class TransposeKind {
kNoop,
kReshape,
kTranspose,
};
// Classifies `perm` applied to `dims`: kNoop when the permutation leaves the
// non-degenerate (size > 1) dimensions untouched, kReshape when it only moves
// size-1 dimensions around them (so the linearized device order is
// preserved), and kTranspose when dimensions of size > 1 are actually
// reordered.
TransposeKind GetTransposeKind(absl::Span<const int64_t> dims,
                               absl::Span<const int> perm) {
TransposeKind kind = TransposeKind::kNoop;
int prev_non_one_dim = -1;
for (int i = 0; i < perm.size(); ++i) {
const auto& d = perm[i];
if (dims[d] == 1) {
if (d != i && dims[i] != 1) kind = TransposeKind::kReshape;
continue;
}
if (d <= prev_non_one_dim) return TransposeKind::kTranspose;
prev_non_one_dim = d;
}
return kind;
}
// Splits every reshape dimension into its prime factors and expands the
// transpose permutation to match, yielding the finest-grained equivalent
// (reshape_dims, transpose_perm) pair.
std::pair<absl::InlinedVector<int64_t, 6>, absl::InlinedVector<int, 6>>
FullyDecanonicalize(absl::Span<const int64_t> reshape_dims,
                    absl::Span<const int> transpose_perm) {
  absl::InlinedVector<int64_t, 6> new_reshape_dims;
  // old_to_new_dims[i] is the index in new_reshape_dims where the factors of
  // reshape_dims[i] start; old_to_new_dims[i + 1] is where they end.
  absl::InlinedVector<int, 6> old_to_new_dims(reshape_dims.size() + 1);
  for (int i = 0, n = reshape_dims.size(); i < n; ++i) {
    int64_t dim_size = reshape_dims[i];
    // Trial division: peel off factors of 2 first, then odd factors.
    while (dim_size % 2 == 0) {
      new_reshape_dims.push_back(2);
      dim_size /= 2;
    }
    for (int64_t factor = 3; factor * factor <= dim_size; factor += 2) {
      while (dim_size % factor == 0) {
        new_reshape_dims.push_back(factor);
        dim_size /= factor;
      }
    }
    if (dim_size > 1) {
      // The remainder is a prime factor larger than the square root.
      CHECK_GT(dim_size, 2);
      new_reshape_dims.push_back(dim_size);
    }
    old_to_new_dims[i + 1] = new_reshape_dims.size();
  }
absl::InlinedVector<int, 6> new_transpose_perm;
new_transpose_perm.reserve(new_reshape_dims.size());
for (int i = 0; i < transpose_perm.size(); ++i) {
const int old_dim = transpose_perm[i];
for (int j = old_to_new_dims[old_dim], n = old_to_new_dims[old_dim + 1];
j < n; ++j) {
new_transpose_perm.push_back(j);
}
}
return std::make_pair(std::move(new_reshape_dims),
std::move(new_transpose_perm));
}
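// Hand-computed example (author's illustration, not from the unit tests):
// FullyDecanonicalize({6, 5, 7}, {2, 0, 1}) factorizes the dims into
// {2, 3, 5, 7} and expands the permutation to {3, 0, 1, 2}: old dim 2 (=7)
// becomes factor index 3, old dim 0 (=6) becomes factor indices {0, 1}, and
// old dim 1 (=5) becomes factor index 2.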
}
IotaTileAssignment IotaTileAssignment::Create(
absl::Span<const int64_t> dims) {
return IotaTileAssignment(dims, {Product(dims)}, {0});
}
IotaTileAssignment IotaTileAssignment::Create(
absl::Span<const int64_t> dims, absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm) {
absl::InlinedVector<int64_t, 6> canonicalized_dims(reshape_dims.begin(),
reshape_dims.end());
absl::InlinedVector<int, 6> canonicalized_perm(transpose_perm.begin(),
transpose_perm.end());
auto dims_span = absl::MakeSpan(canonicalized_dims);
auto perm_span = absl::MakeSpan(canonicalized_perm);
CanonicalizeIotaDims(dims_span, perm_span);
if (dims_span.empty()) {
canonicalized_dims[0] = 1;
dims_span = absl::MakeSpan(canonicalized_dims.data(), 1);
canonicalized_perm[0] = 0;
perm_span = absl::MakeSpan(canonicalized_perm.data(), 1);
}
return IotaTileAssignment(dims, dims_span, perm_span);
}
Array<int64_t> IotaTileAssignment::ToArray() const {
Array<int64_t> array(reshape_dims());
array.FillIota(0);
array.TransposeDimensions(transpose_perm());
array.Reshape(dims());
return array;
}
IotaTileAssignment::IotaTileAssignment(const IotaTileAssignment& other)
: IotaTileAssignment(other.ndims_, other.reshape_ndims_) {
std::memcpy(storage_.get(), other.storage_.get(), size_bytes());
}
IotaTileAssignment& IotaTileAssignment::operator=(
const IotaTileAssignment& other) {
const int new_size = other.size_bytes();
if (size_bytes() != new_size) {
storage_.reset(new char[new_size]);
}
ndims_ = other.ndims_;
reshape_ndims_ = other.reshape_ndims_;
std::memcpy(storage_.get(), other.storage_.get(), new_size);
return *this;
}
IotaTileAssignment::IotaTileAssignment(absl::Span<const int64_t> dims,
absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm)
: IotaTileAssignment(dims.size(), reshape_dims.size()) {
DCHECK_EQ(reshape_dims.size(), transpose_perm.size());
std::memcpy(dims_ptr(), dims.data(), ndims_ * sizeof(int64_t));
DCHECK_EQ(num_elements(), Product(reshape_dims));
std::memcpy(reshape_dims_ptr(), reshape_dims.data(),
reshape_ndims_ * sizeof(int64_t));
std::memcpy(transpose_perm_ptr(), transpose_perm.data(),
reshape_ndims_ * sizeof(int));
}
IotaTileAssignment::IotaTileAssignment(int ndims, int reshape_ndims)
: ndims_(ndims),
reshape_ndims_(reshape_ndims),
storage_(new char[size_bytes()]) {}
std::optional<IotaTileAssignment> IotaTileAssignment::Transpose(
absl::Span<const int> perm) const {
DCHECK_EQ(ndims_, perm.size());
auto dims = this->dims();
const TransposeKind kind = GetTransposeKind(dims, perm);
if (kind == TransposeKind::kNoop) return *this;
absl::InlinedVector<int64_t, 6> new_dims(ndims_);
for (int64_t i = 0; i < ndims_; ++i) {
new_dims[i] = dims[perm[i]];
}
if (kind == TransposeKind::kReshape) {
return IotaTileAssignment::Create(new_dims, reshape_dims(),
transpose_perm());
}
if (reshape_ndims_ == 1) {
return IotaTileAssignment::Create(new_dims, dims, perm);
}
bool is_pure_transpose = true;
absl::InlinedVector<int64_t, 6> non_one_dims;
absl::InlinedVector<int, 6> one_to_non_one(ndims_);
non_one_dims.reserve(ndims_);
auto reshape_dims = this->reshape_dims();
auto transpose_perm = this->transpose_perm();
for (int i = 0; i < ndims_; ++i) {
const int64_t dim = dims[i];
if (dim == 1) {
one_to_non_one[i] = -1;
continue;
}
if (non_one_dims.size() >= reshape_ndims_ ||
reshape_dims[transpose_perm[non_one_dims.size()]] != dim) {
is_pure_transpose = false;
}
one_to_non_one[i] = non_one_dims.size();
non_one_dims.push_back(dims[i]);
}
if (is_pure_transpose) {
CHECK_EQ(reshape_ndims_, non_one_dims.size());
absl::InlinedVector<int, 6> new_perm;
new_perm.reserve(non_one_dims.size());
for (int i = 0; i < ndims_; ++i) {
if (dims[perm[i]] == 1) continue;
new_perm.push_back(transpose_perm[one_to_non_one[perm[i]]]);
}
CHECK_EQ(reshape_ndims_, new_perm.size());
return IotaTileAssignment::Create(new_dims, reshape_dims, new_perm);
}
auto [decanonicalized_reshape_dims, decanonicalized_transpose_perm] =
FullyDecanonicalize(reshape_dims, transpose_perm);
CHECK_LE(non_one_dims.size(), decanonicalized_reshape_dims.size());
absl::InlinedVector<absl::InlinedVector<int, 2>, 6> grouped_reshape_dims(
non_one_dims.size());
int transpose_perm_idx = 0;
for (int i = 0, n = non_one_dims.size(),
dn = decanonicalized_reshape_dims.size();
i < n && transpose_perm_idx < dn; ++i) {
int reshape_dim_idx = decanonicalized_transpose_perm[transpose_perm_idx];
int64_t cand = decanonicalized_reshape_dims[reshape_dim_idx];
int64_t target = non_one_dims[i];
while (target % cand == 0) {
target /= cand;
grouped_reshape_dims[i].push_back(reshape_dim_idx);
if (++transpose_perm_idx >= dn) {
break;
}
reshape_dim_idx = decanonicalized_transpose_perm[transpose_perm_idx];
cand = decanonicalized_reshape_dims[reshape_dim_idx];
}
if (target != 1) {
return std::nullopt;
}
}
absl::InlinedVector<int, 6> flattened_transpose_perm;
flattened_transpose_perm.reserve(reshape_ndims_);
for (int i = 0; i < perm.size(); ++i) {
const int dim = perm[i];
if (one_to_non_one[dim] < 0) {
continue;
}
auto& group = grouped_reshape_dims[one_to_non_one[dim]];
flattened_transpose_perm.insert(flattened_transpose_perm.end(),
group.begin(), group.end());
}
CHECK_EQ(flattened_transpose_perm.size(),
decanonicalized_transpose_perm.size());
return IotaTileAssignment::Create(new_dims, decanonicalized_reshape_dims,
flattened_transpose_perm);
}
void IotaTileAssignment::Print(Printer* printer) const {
printer->Append("devices=[");
AppendJoin(printer, dims(), ",");
printer->Append("]<=[");
AppendJoin(printer, reshape_dims(), ",");
printer->Append("]");
if (reshape_ndims_ > 1) {
printer->Append("T(");
AppendJoin(printer, transpose_perm(), ",");
printer->Append(")");
}
}
std::string IotaTileAssignment::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
int64_t IotaTileAssignment::value_at(absl::Span<const int64_t> index) const {
DCHECK_EQ(index.size(), ndims_);
int64_t linear_index = index[0];
auto dims = this->dims();
for (int64_t i = 1; i < ndims_; ++i) {
linear_index *= dims[i];
linear_index += index[i];
}
auto reshape_dims = this->reshape_dims();
auto transpose_perm = this->transpose_perm();
absl::InlinedVector<int64_t, 6> reshape_index(reshape_ndims_);
for (int64_t i = reshape_ndims_ - 1; i >= 0; --i) {
int dim = transpose_perm[i];
int dim_size = reshape_dims[dim];
reshape_index[dim] = linear_index % dim_size;
linear_index /= dim_size;
}
int64_t value = reshape_index[0];
for (int64_t i = 1; i < reshape_ndims_; ++i) {
value *= reshape_dims[i];
value += reshape_index[i];
}
return value;
}
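// Worked example for value_at() (matches the TransposedIotaTile unit test):
// with dims=[4,4,2], reshape_dims=[2,4,4], transpose_perm=(2,1,0) and
// index={3,2,1}, the row-major linear index is (3*4+2)*2+1 = 29. Peeling 29
// apart against the transposed reshape dims gives reshape_index = {1,2,3},
// which re-linearized in [2,4,4] is (1*4+2)*4+3 = 27 -- the device id.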
bool TileAssignment::operator==(const TileAssignment& other) const {
if (iota_ && other.iota_) {
return *iota_ == *other.iota_;
}
return array() == other.array();
}
int64_t TileAssignment::operator()(absl::Span<const int64_t> indexes) const {
return array_ ? (*array_)(indexes) : iota_->value_at(indexes);
}
absl::Span<const int64_t> TileAssignment::dimensions() const {
return array_ ? array_->dimensions() : iota_->dims();
}
int64_t TileAssignment::num_dimensions() const {
return array_ ? array_->num_dimensions() : iota_->ndims();
}
int64_t TileAssignment::dim(int64_t n) const {
return array_ ? array_->dim(n) : iota_->dim(n);
}
int64_t TileAssignment::num_elements() const {
return array_ ? array_->num_elements() : iota_->num_elements();
}
int64_t TileAssignment::first() const { return array_ ? *array_->begin() : 0; }
void TileAssignment::Each(
absl::FunctionRef<void(absl::Span<const int64_t>, int64_t)> f) const {
MaybeMaterializeFullArray();
array_->Each(f);
}
absl::Status TileAssignment::EachStatus(
absl::FunctionRef<absl::Status(absl::Span<const int64_t>, int64_t)> f)
const {
MaybeMaterializeFullArray();
return array_->EachStatus(f);
}
[[nodiscard]] TileAssignment TileAssignment::Reshape(
absl::Span<const int64_t> new_dimensions) const {
if (iota_) {
CHECK_EQ(Product(new_dimensions), iota_->num_elements());
return TileAssignment(
IotaTileAssignment(new_dimensions, iota_->reshape_dims(),
iota_->transpose_perm()),
nullptr);
}
auto reshaped = std::make_shared<Array<int64_t>>(*array_);
reshaped->Reshape(new_dimensions);
return TileAssignment(std::move(reshaped));
}
[[nodiscard]] TileAssignment TileAssignment::Transpose(
absl::Span<const int> perm) const {
const TransposeKind kind = GetTransposeKind(dimensions(), perm);
if (kind == TransposeKind::kNoop) {
return *this;
}
if (iota_) {
auto transposed = iota_->Transpose(perm);
if (transposed) {
return TileAssignment(std::move(*transposed));
}
}
auto cloned_array = shared_array_clone();
cloned_array->TransposeDimensions(perm);
return TileAssignment(std::move(cloned_array));
}
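// Example (matches the TransposeTransposedIotaTile unit test):
// TileAssignment({4,4,2}, {2,4,4}, {2,1,0}).Transpose({0,2,1}) stays in iota
// form and equals TileAssignment({4,2,4}, {8,4}, {1,0}). When the receiver is
// not iota-backed, or the permutation cannot be folded into the iota
// representation, the materialized array is cloned and transposed instead.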
void TileAssignment::Print(Printer* printer) const {
if (iota_) {
iota_->Print(printer);
} else {
printer->Append("devices=[");
AppendJoin(printer, array().dimensions(), ",");
printer->Append("]");
AppendJoin(printer, array(), ",");
}
}
std::string TileAssignment::ToString() const {
StringPrinter printer;
Print(&printer);
return std::move(printer).ToString();
}
bool TileAssignment::UsesDevice(int64_t device) const {
return iota_ ? device < iota_->num_elements()
: absl::c_linear_search(array(), device);
}
const Array<int64_t>& TileAssignment::array() const {
MaybeMaterializeFullArray();
return *array_;
}
const std::shared_ptr<const Array<int64_t>>& TileAssignment::shared_array()
const {
MaybeMaterializeFullArray();
return shared_array_;
}
std::shared_ptr<Array<int64_t>> TileAssignment::shared_array_clone() const {
MaybeMaterializeFullArray();
return std::make_shared<Array<int64_t>>(*array_);
}
void TileAssignment::MaybeMaterializeFullArray() const {
if (array_ == nullptr) {
DCHECK(shared_array_ == nullptr);
DCHECK(iota_.has_value());
auto full = std::make_shared<Array<int64_t>>(iota_->ToArray());
shared_array_ = std::move(full);
array_ = shared_array_.get();
}
}
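// Note on laziness: for an iota-backed assignment, dimensions(), dim(),
// num_elements(), first(), operator() and UsesDevice() never materialize the
// device array; Each(), EachStatus(), array(), shared_array() and
// shared_array_clone() do, via MaybeMaterializeFullArray() above.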
} | #include "xla/hlo/ir/tile_assignment.h"
#include <memory>
#include <vector>
#include "absl/hash/hash.h"
#include "xla/array3d.h"
#include "xla/test.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
std::vector<int64_t> ToVectorUsingEach(const TileAssignment& tile) {
std::vector<int64_t> result;
result.reserve(tile.num_elements());
tile.Each([&](absl::Span<const int64_t> index, int64_t device) {
result.push_back(device);
});
return result;
}
TEST(TileAssignmentTest, Replicated) {
TileAssignment tile;
EXPECT_EQ(tile.num_dimensions(), 1);
EXPECT_EQ(tile.dim(0), 0);
}
TEST(TileAssignmentTest, Maximal) {
TileAssignment tile(5);
EXPECT_EQ(tile.num_dimensions(), 1);
EXPECT_EQ(tile.dim(0), 1);
EXPECT_EQ(tile(0), 5);
EXPECT_EQ(tile({0}), 5);
EXPECT_FALSE(tile.iota());
EXPECT_TRUE(tile.UsesDevice(5));
EXPECT_EQ(tile.first(), 5);
EXPECT_FALSE(tile.UsesDevice(0));
EXPECT_THAT(ToVectorUsingEach(tile), ElementsAre(5));
}
TEST(TileAssignmentTest, V1V2Equivalence) {
Array3D<int64_t> array(
{{{0, 8, 4, 12}, {1, 9, 5, 13}}, {{2, 10, 6, 14}, {3, 11, 7, 15}}});
TileAssignment v1(std::make_shared<const Array<int64_t>>(array));
TileAssignment v2({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
EXPECT_EQ(v1, v2);
EXPECT_EQ(v2, v1);
EXPECT_EQ(v1.first(), 0);
EXPECT_EQ(v2.first(), 0);
EXPECT_NE(v1.iota().has_value(), v2.iota().has_value());
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
TEST(TileAssignmentTest, CopyConstruction) {
TileAssignment tile({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
TileAssignment copied(tile);
EXPECT_EQ(tile, copied);
EXPECT_EQ(tile.iota().has_value(), copied.iota().has_value());
EXPECT_EQ(absl::HashOf(tile), absl::HashOf(copied));
}
TEST(TileAssignmentTest, CopyAssignment) {
TileAssignment tile({2, 2, 4}, {2, 2, 4}, {2, 1, 0});
TileAssignment copied = tile;
EXPECT_EQ(tile, copied);
EXPECT_EQ(tile.iota().has_value(), copied.iota().has_value());
EXPECT_EQ(absl::HashOf(tile), absl::HashOf(copied));
}
class FormattedTileAssignmentTest : public ::testing::TestWithParam<bool> {
protected:
bool ShouldConvertToV1() { return GetParam(); }
};
TEST_P(FormattedTileAssignmentTest, TrivialIotaTile) {
TileAssignment tile({4, 4, 2});
EXPECT_EQ(tile.ToString(), "devices=[4,4,2]<=[32]");
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
EXPECT_EQ(tile, TileAssignment({4, 4, 2}));
EXPECT_EQ(tile.num_dimensions(), 3);
EXPECT_EQ(tile.dim(0), 4);
EXPECT_EQ(tile.dim(1), 4);
EXPECT_EQ(tile.dim(2), 2);
EXPECT_EQ(tile(0, 0, 0), 0);
EXPECT_EQ(tile({3, 2, 1}), 29);
EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(tile.UsesDevice(0));
EXPECT_TRUE(tile.UsesDevice(31));
EXPECT_FALSE(tile.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(tile),
ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
TEST_P(FormattedTileAssignmentTest, TransposedIotaTile) {
TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
EXPECT_EQ(tile.ToString(), "devices=[4,4,2]<=[2,4,4]T(2,1,0)");
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
EXPECT_EQ(tile, TileAssignment({4, 4, 2}, {2, 4, 4}, {2, 1, 0}));
EXPECT_EQ(tile.num_dimensions(), 3);
EXPECT_EQ(tile.dim(0), 4);
EXPECT_EQ(tile.dim(1), 4);
EXPECT_EQ(tile.dim(2), 2);
EXPECT_EQ(tile(0, 0, 0), 0);
EXPECT_EQ(tile({3, 2, 1}), 27);
EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(tile.UsesDevice(0));
EXPECT_TRUE(tile.UsesDevice(31));
EXPECT_FALSE(tile.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(tile),
ElementsAre(0, 16, 4, 20, 8, 24, 12, 28, 1, 17, 5, 21, 9, 25, 13, 29, 2,
18, 6, 22, 10, 26, 14, 30, 3, 19, 7, 23, 11, 27, 15, 31));
}
TEST_P(FormattedTileAssignmentTest, NonCanonicalTransposedIotaTile) {
TileAssignment tile({4, 8}, {2, 4, 4}, {1, 2, 0});
EXPECT_EQ(tile.ToString(), "devices=[4,8]<=[2,16]T(1,0)");
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
EXPECT_EQ(tile, TileAssignment({4, 8}, {2, 16}, {1, 0}));
EXPECT_EQ(tile.num_dimensions(), 2);
EXPECT_EQ(tile.dim(0), 4);
EXPECT_EQ(tile.dim(1), 8);
EXPECT_EQ(tile(0, 0), 0);
EXPECT_EQ(tile({3, 2}), 13);
EXPECT_EQ(tile.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(tile.UsesDevice(0));
EXPECT_TRUE(tile.UsesDevice(31));
EXPECT_FALSE(tile.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(tile),
ElementsAre(0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23, 8, 24,
9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31));
}
TEST_P(FormattedTileAssignmentTest, ReshapeTrivialIotaTile) {
TileAssignment tile({4, 4, 2});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment reshaped = tile.Reshape({2, 8, 2});
EXPECT_NE(reshaped, tile);
EXPECT_EQ(reshaped, TileAssignment({2, 8, 2}));
EXPECT_EQ(reshaped.num_dimensions(), 3);
EXPECT_EQ(reshaped.dim(0), 2);
EXPECT_EQ(reshaped.dim(1), 8);
EXPECT_EQ(reshaped.dim(2), 2);
EXPECT_EQ(reshaped(0, 0, 0), 0);
EXPECT_EQ(reshaped({1, 3, 1}), 23);
EXPECT_EQ(reshaped.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(reshaped.UsesDevice(0));
EXPECT_TRUE(reshaped.UsesDevice(31));
EXPECT_FALSE(reshaped.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(reshaped),
ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31));
}
TEST_P(FormattedTileAssignmentTest, ReshapeTransposedIotaTile) {
TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment reshaped = tile.Reshape({2, 2, 4, 2});
EXPECT_NE(reshaped, tile);
EXPECT_EQ(reshaped, TileAssignment({2, 2, 4, 2}, {2, 4, 4}, {2, 1, 0}));
EXPECT_EQ(reshaped.num_dimensions(), 4);
EXPECT_EQ(reshaped.dim(0), 2);
EXPECT_EQ(reshaped.dim(1), 2);
EXPECT_EQ(reshaped.dim(2), 4);
EXPECT_EQ(reshaped.dim(3), 2);
EXPECT_EQ(reshaped(0, 0, 0, 0), 0);
EXPECT_EQ(reshaped({1, 1, 2, 1}), 27);
EXPECT_EQ(reshaped.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(reshaped.UsesDevice(0));
EXPECT_TRUE(reshaped.UsesDevice(31));
EXPECT_FALSE(reshaped.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(reshaped),
ElementsAre(0, 16, 4, 20, 8, 24, 12, 28, 1, 17, 5, 21, 9, 25, 13, 29, 2,
18, 6, 22, 10, 26, 14, 30, 3, 19, 7, 23, 11, 27, 15, 31));
}
TEST_P(FormattedTileAssignmentTest, TransposeTrivialIotaTile) {
TileAssignment tile({4, 4, 2});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({2, 0, 1});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({2, 4, 4}, {16, 2}, {1, 0}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 2);
EXPECT_EQ(xposed.dim(1), 4);
EXPECT_EQ(xposed.dim(2), 4);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({1, 3, 1}), 27);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(31));
EXPECT_FALSE(xposed.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 1,
3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31));
}
TEST_P(FormattedTileAssignmentTest, TransposeTransposedIotaTile) {
TileAssignment tile({4, 4, 2}, {2, 4, 4}, {2, 1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({0, 2, 1});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({4, 2, 4}, {8, 4}, {1, 0}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 4);
EXPECT_EQ(xposed.dim(1), 2);
EXPECT_EQ(xposed.dim(2), 4);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({3, 0, 3}), 15);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(31));
EXPECT_FALSE(xposed.UsesDevice(32));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 4, 8, 12, 16, 20, 24, 28, 1, 5, 9, 13, 17, 21, 25, 29, 2,
6, 10, 14, 18, 22, 26, 30, 3, 7, 11, 15, 19, 23, 27, 31));
}
TEST_P(FormattedTileAssignmentTest, TransposeIotaTileWithDegenerateDims) {
TileAssignment tile({4, 4, 1}, {4, 4}, {1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({1, 2, 0});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({4, 1, 4}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 4);
EXPECT_EQ(xposed.dim(1), 1);
EXPECT_EQ(xposed.dim(2), 4);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({2, 0, 3}), 11);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(15));
EXPECT_FALSE(xposed.UsesDevice(16));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15));
}
TEST_P(FormattedTileAssignmentTest,
TransposeIotaTileSplittingCanonicalizedReshapeDims) {
TileAssignment tile({8, 2, 16}, {16, 16}, {1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({0, 2, 1});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({8, 16, 2}, {16, 8, 2}, {1, 0, 2}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 8);
EXPECT_EQ(xposed.dim(1), 16);
EXPECT_EQ(xposed.dim(2), 2);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({2, 7, 1}), 117);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(255));
EXPECT_FALSE(xposed.UsesDevice(256));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(
0, 1, 16, 17, 32, 33, 48, 49, 64, 65, 80, 81, 96, 97, 112, 113, 128,
129, 144, 145, 160, 161, 176, 177, 192, 193, 208, 209, 224, 225, 240,
241, 2, 3, 18, 19, 34, 35, 50, 51, 66, 67, 82, 83, 98, 99, 114, 115,
130, 131, 146, 147, 162, 163, 178, 179, 194, 195, 210, 211, 226, 227,
242, 243, 4, 5, 20, 21, 36, 37, 52, 53, 68, 69, 84, 85, 100, 101, 116,
117, 132, 133, 148, 149, 164, 165, 180, 181, 196, 197, 212, 213, 228,
229, 244, 245, 6, 7, 22, 23, 38, 39, 54, 55, 70, 71, 86, 87, 102, 103,
118, 119, 134, 135, 150, 151, 166, 167, 182, 183, 198, 199, 214, 215,
230, 231, 246, 247, 8, 9, 24, 25, 40, 41, 56, 57, 72, 73, 88, 89, 104,
105, 120, 121, 136, 137, 152, 153, 168, 169, 184, 185, 200, 201, 216,
217, 232, 233, 248, 249, 10, 11, 26, 27, 42, 43, 58, 59, 74, 75, 90,
91, 106, 107, 122, 123, 138, 139, 154, 155, 170, 171, 186, 187, 202,
203, 218, 219, 234, 235, 250, 251, 12, 13, 28, 29, 44, 45, 60, 61, 76,
77, 92, 93, 108, 109, 124, 125, 140, 141, 156, 157, 172, 173, 188,
189, 204, 205, 220, 221, 236, 237, 252, 253, 14, 15, 30, 31, 46, 47,
62, 63, 78, 79, 94, 95, 110, 111, 126, 127, 142, 143, 158, 159, 174,
175, 190, 191, 206, 207, 222, 223, 238, 239, 254, 255));
}
TEST_P(FormattedTileAssignmentTest,
TransposeIotaTileSplittingBothCanonicalizedReshapeDimsAndTileDims) {
TileAssignment tile({14, 3, 5}, {6, 5, 7}, {2, 0, 1});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({1, 0, 2});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({3, 14, 5}, {2, 3, 5, 7}, {1, 3, 0, 2}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 3);
EXPECT_EQ(xposed.dim(1), 14);
EXPECT_EQ(xposed.dim(2), 5);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({2, 11, 3}), 201);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(209));
EXPECT_FALSE(xposed.UsesDevice(210));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(
0, 7, 14, 21, 28, 105, 112, 119, 126, 133, 1, 8, 15, 22, 29, 106, 113,
120, 127, 134, 2, 9, 16, 23, 30, 107, 114, 121, 128, 135, 3, 10, 17,
24, 31, 108, 115, 122, 129, 136, 4, 11, 18, 25, 32, 109, 116, 123,
130, 137, 5, 12, 19, 26, 33, 110, 117, 124, 131, 138, 6, 13, 20, 27,
34, 111, 118, 125, 132, 139, 35, 42, 49, 56, 63, 140, 147, 154, 161,
168, 36, 43, 50, 57, 64, 141, 148, 155, 162, 169, 37, 44, 51, 58, 65,
142, 149, 156, 163, 170, 38, 45, 52, 59, 66, 143, 150, 157, 164, 171,
39, 46, 53, 60, 67, 144, 151, 158, 165, 172, 40, 47, 54, 61, 68, 145,
152, 159, 166, 173, 41, 48, 55, 62, 69, 146, 153, 160, 167, 174, 70,
77, 84, 91, 98, 175, 182, 189, 196, 203, 71, 78, 85, 92, 99, 176, 183,
190, 197, 204, 72, 79, 86, 93, 100, 177, 184, 191, 198, 205, 73, 80,
87, 94, 101, 178, 185, 192, 199, 206, 74, 81, 88, 95, 102, 179, 186,
193, 200, 207, 75, 82, 89, 96, 103, 180, 187, 194, 201, 208, 76, 83,
90, 97, 104, 181, 188, 195, 202, 209));
}
TEST_P(FormattedTileAssignmentTest,
TransposeIotaTileGroupingCanonicalizedReshapeDims) {
TileAssignment tile({1, 4, 16}, {4, 4, 4}, {1, 0, 2});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({2, 0, 1});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed, TileAssignment({16, 1, 4}, {4, 4, 4}, {0, 2, 1}));
EXPECT_EQ(xposed.num_dimensions(), 3);
EXPECT_EQ(xposed.dim(0), 16);
EXPECT_EQ(xposed.dim(1), 1);
EXPECT_EQ(xposed.dim(2), 4);
EXPECT_EQ(xposed(0, 0, 0), 0);
EXPECT_EQ(xposed({7, 0, 3}), 31);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(63));
EXPECT_FALSE(xposed.UsesDevice(64));
EXPECT_THAT(ToVectorUsingEach(xposed),
ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15,
16, 20, 24, 28, 17, 21, 25, 29, 18, 22, 26, 30, 19,
23, 27, 31, 32, 36, 40, 44, 33, 37, 41, 45, 34, 38,
42, 46, 35, 39, 43, 47, 48, 52, 56, 60, 49, 53, 57,
61, 50, 54, 58, 62, 51, 55, 59, 63));
}
TEST_P(FormattedTileAssignmentTest, TransposeNoopIotaTile) {
TileAssignment tile({4, 4}, {4, 4}, {1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({0, 1});
EXPECT_EQ(xposed, tile);
EXPECT_EQ(xposed.num_dimensions(), 2);
EXPECT_EQ(xposed.dim(0), 4);
EXPECT_EQ(xposed.dim(1), 4);
EXPECT_EQ(xposed(0, 0), 0);
EXPECT_EQ(xposed({2, 3}), 14);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(15));
EXPECT_FALSE(xposed.UsesDevice(16));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15));
}
TEST_P(FormattedTileAssignmentTest, TransposeNoopIotaTileWithDegenerateDims) {
TileAssignment tile({1, 4, 1, 1, 4, 1}, {4, 4}, {1, 0});
if (ShouldConvertToV1()) {
tile = TileAssignment(tile.shared_array());
}
TileAssignment xposed = tile.Transpose({1, 5, 0, 4, 3, 2});
EXPECT_NE(xposed, tile);
EXPECT_EQ(xposed.num_dimensions(), 6);
EXPECT_EQ(xposed.dim(0), 4);
EXPECT_EQ(xposed.dim(1), 1);
EXPECT_EQ(xposed.dim(2), 1);
EXPECT_EQ(xposed.dim(3), 4);
EXPECT_EQ(xposed.dim(4), 1);
EXPECT_EQ(xposed.dim(5), 1);
EXPECT_EQ(xposed(0, 0, 0, 0, 0, 0), 0);
EXPECT_EQ(xposed({2, 0, 0, 3, 0, 0}), 14);
EXPECT_EQ(xposed.iota().has_value(), !ShouldConvertToV1());
EXPECT_TRUE(xposed.UsesDevice(0));
EXPECT_TRUE(xposed.UsesDevice(15));
EXPECT_FALSE(xposed.UsesDevice(16));
EXPECT_THAT(
ToVectorUsingEach(xposed),
ElementsAre(0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15));
}
INSTANTIATE_TEST_SUITE_P(All, FormattedTileAssignmentTest, ::testing::Bool());
}
} | 2,172 |
#ifndef XLA_HLO_IR_HLO_REACHABILITY_H_
#define XLA_HLO_IR_HLO_REACHABILITY_H_
#include <cstdint>
#include <memory>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_map.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/types.h"
namespace xla {
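// Maps reachability between the instructions of a single computation: for
// every instruction B it stores the set of instructions A such that B is
// reachable from A through operand and control-dependency edges.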
class HloReachabilityMap {
public:
using Index = size_t;
explicit HloReachabilityMap(
absl::Span<const HloInstruction* const> instructions);
static std::unique_ptr<HloReachabilityMap> Build(
const HloComputation* computation);
static std::unique_ptr<HloReachabilityMap> BuildWithRestrictions(
const HloComputation* computation,
absl::FunctionRef<void(const HloInstruction*,
std::vector<HloInstruction*>*)>
add_dependencies);
bool SetReachabilityToUnion(absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction);
void FastSetReachabilityToUnion(
absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction);
void FastSetReachabilityToUnion(absl::Span<const Index> input_indices,
Index index);
Index GetIndex(const HloInstruction* instruction) const {
return indices_.at(GetKey(instruction));
}
void SetReachable(const HloInstruction* a, const HloInstruction* b) {
SetReachable(GetIndex(a), GetIndex(b));
}
void SetReachable(Index a, Index b) { bit_sets_[b].Set(a); }
void UpdateReachabilityThroughInstruction(const HloInstruction* instruction);
bool IsReachable(const HloInstruction* a, const HloInstruction* b) const {
return IsReachable(GetIndex(a), GetIndex(b));
}
bool IsReachable(Index a, Index b) const { return bit_sets_[b].Get(a); }
bool IsConnected(const HloInstruction* a, const HloInstruction* b) const {
return IsConnected(GetIndex(a), GetIndex(b));
}
bool IsConnected(Index a, Index b) const {
return IsReachable(a, b) || IsReachable(b, a);
}
bool IsPresent(const HloInstruction* instruction) const {
return indices_.contains(GetKey(instruction));
}
void Replace(const HloInstruction* original,
const HloInstruction* replacement);
private:
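  // A simple dynamically-sized bit set, one bit per instruction index; bit `a`
  // of the set owned by instruction index `b` is 1 iff `b` is reachable
  // from `a`.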
class BitSet {
public:
BitSet() = default;
explicit BitSet(size_t size)
: size_(size), vector_((size + kBits - 1) / kBits, 0) {}
bool Get(Index index) const {
DCHECK(index >= 0 && index < size_);
return vector_[index / kBits] & (1ull << (index % kBits));
}
void Set(Index index) {
DCHECK(index >= 0 && index < size_);
vector_[index / kBits] |= 1ull << (index % kBits);
}
void operator|=(const BitSet& other) {
if (this == &other) return;
DCHECK(size_ == other.size_);
const Word* a = vector_.data();
const Word* b = other.vector_.data();
Word* __restrict out = vector_.data();
size_t num_words = vector_.size();
for (size_t i = 0; i < num_words; ++i) {
out[i] = a[i] | b[i];
}
}
void SetToZero() { absl::c_fill(vector_, 0); }
bool operator==(const BitSet& other) const {
return vector_ == other.vector_;
}
bool operator!=(const BitSet& other) const { return !(*this == other); }
private:
using Word = uint64_t;
static constexpr size_t kBits = 64;
size_t size_;
std::vector<Word> vector_;
};
friend class HloReachabilityMapBitSetBenchmark;
using Key = std::pair<int, int>;
static Key GetKey(const HloInstruction* instruction) {
return {instruction->GetModule()->unique_id(), instruction->unique_id()};
}
void SetReachabilityToUnionHelper(
absl::Span<const HloInstruction* const> inputs, Index index);
void SetReachabilityToUnionHelper(absl::Span<const Index> input_indices,
Index index);
absl::flat_hash_map<Key, Index> indices_;
std::vector<BitSet> bit_sets_;
BitSet tmp_bit_set_;
};
}
#endif
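// Illustrative usage sketch (not part of the original sources): given an
// HloComputation* `computation` obtained elsewhere, the map can answer
// producer/consumer queries along operand and control edges. All functions
// named here appear in the interface above; only the surrounding setup is
// assumed.
//
//   std::unique_ptr<HloReachabilityMap> reachability =
//       HloReachabilityMap::Build(computation);
//   HloInstruction* root = computation->root_instruction();
//   for (const HloInstruction* instr : computation->instructions()) {
//     // True iff `root` transitively depends on `instr` (or instr == root).
//     bool feeds_root = reachability->IsReachable(instr, root);
//     (void)feeds_root;
//   }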
#include "xla/hlo/ir/hlo_reachability.h"
#include <memory>
#include <queue>
#include <vector>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
HloReachabilityMap::HloReachabilityMap(
absl::Span<const HloInstruction* const> instructions)
: bit_sets_(instructions.size(), BitSet(instructions.size())) {
indices_.reserve(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
bit_sets_[i].Set(i);
indices_[GetKey(instructions[i])] = i;
}
}
bool HloReachabilityMap::SetReachabilityToUnion(
absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction) {
Index index = GetIndex(instruction);
BitSet& bit_set = bit_sets_[index];
tmp_bit_set_ = bit_set;
SetReachabilityToUnionHelper(inputs, index);
return bit_set != tmp_bit_set_;
}
void HloReachabilityMap::FastSetReachabilityToUnion(
absl::Span<const HloInstruction* const> inputs,
const HloInstruction* instruction) {
SetReachabilityToUnionHelper(inputs, GetIndex(instruction));
}
void HloReachabilityMap::FastSetReachabilityToUnion(
absl::Span<const Index> input_indices, Index index) {
SetReachabilityToUnionHelper(input_indices, index);
}
void HloReachabilityMap::SetReachabilityToUnionHelper(
absl::Span<const HloInstruction* const> inputs, Index index) {
absl::InlinedVector<Index, 16> input_indices;
input_indices.reserve(inputs.size());
for (const HloInstruction* input : inputs) {
input_indices.push_back(GetIndex(input));
}
SetReachabilityToUnionHelper(input_indices, index);
}
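// Rebuilds the reachability set of `index`: unless `index` is itself among the
// inputs (in which case its previous set is kept), the set is cleared first;
// then `index` itself and the union of all input sets are OR-ed in.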
void HloReachabilityMap::SetReachabilityToUnionHelper(
absl::Span<const Index> input_indices, Index index) {
BitSet& bit_set = bit_sets_[index];
if (!absl::c_linear_search(input_indices, index)) {
bit_set.SetToZero();
}
bit_set.Set(index);
for (Index input_index : input_indices) {
if (input_index != index) {
bit_set |= bit_sets_[input_index];
}
}
}
void HloReachabilityMap::Replace(const HloInstruction* original,
const HloInstruction* replacement) {
if (GetKey(original) != GetKey(replacement)) {
indices_[GetKey(replacement)] = GetIndex(original);
indices_.erase(GetKey(original));
}
}
std::unique_ptr<HloReachabilityMap> HloReachabilityMap::BuildWithRestrictions(
const HloComputation* computation,
absl::FunctionRef<void(const HloInstruction*,
std::vector<HloInstruction*>*)>
add_dependencies) {
const auto& all = computation->MakeInstructionPostOrder();
auto result = std::make_unique<HloReachabilityMap>(all);
std::vector<HloInstruction*> inputs;
for (const HloInstruction* hlo : all) {
inputs.clear();
add_dependencies(hlo, &inputs);
result->FastSetReachabilityToUnion(inputs, hlo);
}
return result;
}
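// Builds the full map: instructions are visited in a channel-aware post order,
// and each instruction's set accumulates the sets of its operands, its control
// predecessors, and the operands/predecessors of any instructions it shares a
// channel dependency with.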
std::unique_ptr<HloReachabilityMap> HloReachabilityMap::Build(
const HloComputation* computation) {
HloComputation::ChannelDependencies channel_dependencies =
computation->ComputeChannelDependencies();
std::vector<HloInstruction*> instructions =
computation->MakeInstructionPostOrder(channel_dependencies);
auto result = std::make_unique<HloReachabilityMap>(instructions);
auto get_bit_set = [&](const HloInstruction* instruction) -> BitSet& {
return result->bit_sets_[result->GetIndex(instruction)];
};
for (const HloInstruction* instruction : instructions) {
BitSet& bit_set = get_bit_set(instruction);
auto add_dependencies = [&](const HloInstruction* instruction) {
for (const HloInstruction* operand : instruction->operands()) {
bit_set |= get_bit_set(operand);
}
for (const HloInstruction* predecessor :
instruction->control_predecessors()) {
bit_set |= get_bit_set(predecessor);
}
};
add_dependencies(instruction);
auto it = channel_dependencies.find(instruction);
if (it != channel_dependencies.end()) {
absl::c_for_each(it->second, add_dependencies);
}
}
return result;
}
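// Re-propagates reachability forward from `instruction` after a graph edit:
// each worklist item recomputes its set from its operands and control
// predecessors, and its users and control successors are enqueued whenever the
// set actually changed.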
void HloReachabilityMap::UpdateReachabilityThroughInstruction(
const HloInstruction* instruction) {
std::queue<const HloInstruction*> worklist;
worklist.push(instruction);
std::vector<HloInstruction*> inputs;
while (!worklist.empty()) {
const HloInstruction* item = worklist.front();
worklist.pop();
inputs.assign(item->operands().begin(), item->operands().end());
inputs.insert(inputs.end(), item->control_predecessors().begin(),
item->control_predecessors().end());
if (SetReachabilityToUnion(inputs, item)) {
for (const HloInstruction* user : item->users()) {
worklist.push(user);
}
for (const HloInstruction* succ : item->control_successors()) {
worklist.push(succ);
}
}
}
}
} | #include "xla/hlo/ir/hlo_reachability.h"
#include <memory>
#include <set>
#include <string_view>
#include "absl/random/random.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/computation_placer.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class HloReachabilityTest : public HloTestBase {};
TEST_F(HloReachabilityTest, Reachability) {
auto builder = HloComputation::Builder(TestName());
auto a = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto b = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto c = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto d = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto e = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
HloReachabilityMap reachability({a, b, c, d, e});
reachability.SetReachable(a, a);
EXPECT_TRUE(reachability.SetReachabilityToUnion({a}, b));
EXPECT_TRUE(reachability.SetReachabilityToUnion({a}, c));
EXPECT_TRUE(reachability.SetReachabilityToUnion({b, c}, d));
EXPECT_TRUE(reachability.SetReachabilityToUnion({c}, e));
EXPECT_TRUE(reachability.IsReachable(a, a));
EXPECT_TRUE(reachability.IsReachable(a, b));
EXPECT_TRUE(reachability.IsReachable(a, c));
EXPECT_TRUE(reachability.IsReachable(a, d));
EXPECT_TRUE(reachability.IsReachable(a, e));
EXPECT_FALSE(reachability.IsReachable(b, a));
EXPECT_TRUE(reachability.IsReachable(b, b));
EXPECT_FALSE(reachability.IsReachable(b, c));
EXPECT_TRUE(reachability.IsReachable(b, d));
EXPECT_FALSE(reachability.IsReachable(b, e));
EXPECT_FALSE(reachability.IsReachable(e, a));
EXPECT_FALSE(reachability.IsReachable(e, b));
EXPECT_FALSE(reachability.IsReachable(e, c));
EXPECT_FALSE(reachability.IsReachable(e, d));
EXPECT_TRUE(reachability.IsReachable(e, e));
EXPECT_FALSE(reachability.SetReachabilityToUnion({a}, b));
EXPECT_FALSE(reachability.SetReachabilityToUnion({b, c}, d));
}
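// Covers Build() on a computation with a control dependency, then incremental
// maintenance via UpdateReachabilityThroughInstruction after that dependency
// is removed and an operand is rewired.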
TEST_F(HloReachabilityTest, NonTrivialReachability) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32, HloOpcode::kAdd, constant1, constant2));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kNegate, constant2));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kMultiply, add, exp));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kCopy, exp));
auto module = CreateNewVerifiedModule();
auto computation =
module->AddEntryComputation(builder.Build(mul));
TF_CHECK_OK(add->AddControlDependencyTo(exp));
auto reachability = HloReachabilityMap::Build(computation);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_TRUE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_TRUE(reachability->IsReachable(constant1, copy));
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_TRUE(reachability->IsReachable(constant2, negate));
EXPECT_TRUE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_TRUE(reachability->IsReachable(constant2, copy));
EXPECT_FALSE(reachability->IsReachable(exp, constant1));
EXPECT_FALSE(reachability->IsReachable(exp, constant2));
EXPECT_FALSE(reachability->IsReachable(exp, add));
EXPECT_FALSE(reachability->IsReachable(exp, negate));
EXPECT_TRUE(reachability->IsReachable(exp, exp));
EXPECT_TRUE(reachability->IsReachable(exp, mul));
EXPECT_TRUE(reachability->IsReachable(exp, copy));
EXPECT_FALSE(reachability->IsReachable(mul, constant1));
EXPECT_FALSE(reachability->IsReachable(mul, constant2));
EXPECT_FALSE(reachability->IsReachable(mul, add));
EXPECT_FALSE(reachability->IsReachable(mul, negate));
EXPECT_FALSE(reachability->IsReachable(mul, exp));
EXPECT_TRUE(reachability->IsReachable(mul, mul));
EXPECT_FALSE(reachability->IsReachable(mul, copy));
EXPECT_TRUE(reachability->IsConnected(constant1, copy));
EXPECT_TRUE(reachability->IsConnected(copy, constant1));
EXPECT_FALSE(reachability->IsConnected(negate, add));
EXPECT_FALSE(reachability->IsConnected(add, negate));
ASSERT_IS_OK(add->RemoveControlDependencyTo(exp));
reachability->UpdateReachabilityThroughInstruction(exp);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_FALSE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_FALSE(reachability->IsReachable(constant1, copy));
ASSERT_IS_OK(constant2->ReplaceUseWith(negate, constant1));
reachability->UpdateReachabilityThroughInstruction(negate);
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_FALSE(reachability->IsReachable(constant2, negate));
EXPECT_FALSE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_FALSE(reachability->IsReachable(constant2, copy));
}
TEST_F(HloReachabilityTest, ChannelReachability) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
HloComputation::Builder builder("ChannelReachability");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto token0 = builder.AddInstruction(HloInstruction::CreateToken());
auto send =
builder.AddInstruction(HloInstruction::CreateSend(param, token0, 1));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
auto token1 = builder.AddInstruction(HloInstruction::CreateToken());
auto recv =
builder.AddInstruction(HloInstruction::CreateRecv(shape, token1, 1));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
auto module = CreateNewVerifiedModule();
module->mutable_config().set_use_spmd_partitioning(false);
module->mutable_config().set_static_device_assignment(DeviceAssignment(1, 2));
auto computation = module->AddEntryComputation(builder.Build(recv_done));
auto reachability = HloReachabilityMap::Build(computation);
EXPECT_FALSE(reachability->IsReachable(param, recv_done));
EXPECT_FALSE(reachability->IsReachable(send, recv));
EXPECT_FALSE(reachability->IsReachable(send_done, recv));
}
TEST_F(HloReachabilityTest, ReplaceInstructions) {
auto module = ParseAndReturnVerifiedModule(R"(
HloModule test
ENTRY entry {
p0 = f32[28,28]{1,0} parameter(0)
ROOT add = f32[28,28]{1,0} add(p0, p0)
})")
.value();
auto computation = module->entry_computation();
auto reachability = HloReachabilityMap::Build(computation);
auto* add = module->entry_computation()->root_instruction();
auto* p0 = add->operand(0);
EXPECT_TRUE(reachability->IsReachable(p0, add));
reachability->Replace(add, add);
EXPECT_TRUE(reachability->IsReachable(p0, add));
auto* fusion = computation->AddInstruction(HloInstruction::CreateFusion(
add->shape(), HloInstruction::FusionKind::kLoop, add));
EXPECT_FALSE(reachability->IsPresent(fusion));
EXPECT_TRUE(reachability->IsReachable(p0, add));
reachability->Replace(add, fusion);
EXPECT_FALSE(reachability->IsPresent(add));
EXPECT_TRUE(reachability->IsReachable(p0, fusion));
}
}
class HloReachabilityMapBitSetBenchmark {
public:
explicit HloReachabilityMapBitSetBenchmark(int size) : a_(size), b_(size) {
absl::BitGen gen;
for (int i = 0; i < size; ++i) {
if (absl::Bernoulli(gen, 0.5)) a_.Set(i);
if (absl::Bernoulli(gen, 0.5)) b_.Set(i);
}
}
void Union() { a_ |= b_; }
private:
HloReachabilityMap::BitSet a_;
HloReachabilityMap::BitSet b_;
};
namespace {
void BM_HloReachabilityBitSetUnion(benchmark::State& state) {
HloReachabilityMapBitSetBenchmark bm(state.range(0));
for (auto s : state) {
bm.Union();
}
}
#define BM_ARGS Arg(1)->Arg(64)->Arg(128)->Arg(256)->Range(512, 256 * 1024)
BENCHMARK(BM_HloReachabilityBitSetUnion)->BM_ARGS;
class HloReachabilityBenchmark {
public:
HloReachabilityBenchmark(int size, std::string_view name) : name_(name) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(name);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
HloInstruction* prev = constant;
for (int i = 1; i < size; ++i) {
prev = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, prev));
}
HloModuleConfig hlo_config;
module_ = std::make_unique<HloModule>(name_, hlo_config);
computation_ =
module_->AddEntryComputation(builder.Build(prev));
}
std::unique_ptr<HloReachabilityMap> Build() {
return HloReachabilityMap::Build(computation_);
}
private:
std::unique_ptr<HloModule> module_;
HloComputation* computation_;
const std::string name_;
};
void BM_HloReachabilityBuild(benchmark::State& state) {
HloReachabilityBenchmark bm(state.range(0), state.name());
for (auto s : state) {
benchmark::DoNotOptimize(bm.Build());
}
}
BENCHMARK(BM_HloReachabilityBuild)->BM_ARGS;
}
} | 2,173 |
#ifndef XLA_HLO_IR_HLO_COMPUTATION_H_
#define XLA_HLO_IR_HLO_COMPUTATION_H_
#include <cstdint>
#include <list>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/ptrvec.h"
#include "xla/iterator_util.h"
#include "xla/printer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape_tree.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
class HloModule;
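// A directed acyclic graph of HloInstructions with a designated root
// instruction and numbered parameters, owned by an HloModule.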
class HloComputation {
public:
using InstructionList = std::vector<HloInstructionInfo>;
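  // Incrementally assembles a computation: instructions are accumulated in
  // insertion order and ownership is handed to the HloComputation returned by
  // Build(), whose root defaults to the last instruction added.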
class Builder {
public:
explicit Builder(absl::string_view name) : name_(name) {}
Builder(Builder&& b) = default;
virtual ~Builder() = default;
std::unique_ptr<HloComputation> Build(
HloInstruction* root_instruction = nullptr);
virtual HloInstruction* AddInstruction(
std::unique_ptr<HloInstruction> instruction) {
auto* added_instruction = instruction.get();
instructions_.push_back(std::move(instruction));
return added_instruction;
}
HloInstruction* AddInstruction(std::unique_ptr<HloInstruction> instruction,
std::optional<absl::string_view> new_name) {
instruction->SetAndSanitizeName(new_name.value());
return AddInstruction(std::move(instruction));
}
absl::StatusOr<HloInstruction*> AddParameter(
std::unique_ptr<HloInstruction> parameter) {
if (!parameter_numbers_.insert(parameter->parameter_number()).second) {
return Internal("Duplicate parameter number %d",
parameter->parameter_number());
}
return AddInstruction(std::move(parameter));
}
absl::Status ForEachInstruction(
absl::FunctionRef<absl::Status(const HloInstruction*)> func) const {
for (const auto& instruction : instructions_) {
TF_RETURN_IF_ERROR(func(instruction.get()));
}
return absl::OkStatus();
}
HloInstruction* last_added_instruction() const {
return instructions_.empty() ? nullptr : instructions_.back().get();
}
private:
const std::string name_;
std::vector<std::unique_ptr<HloInstruction>> instructions_;
absl::flat_hash_set<int> parameter_numbers_;
Builder(const Builder&) = delete;
Builder& operator=(const Builder&) = delete;
};
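  // Illustrative usage sketch (not part of the original header); the same
  // pattern appears in the unit tests further below. `r0f32` is an assumed
  // scalar shape:
  //
  //   Shape r0f32 = ShapeUtil::MakeShape(F32, {});
  //   HloComputation::Builder builder("Negate");
  //   HloInstruction* param = builder.AddInstruction(
  //       HloInstruction::CreateParameter(0, r0f32, "param0"));
  //   builder.AddInstruction(
  //       HloInstruction::CreateUnary(r0f32, HloOpcode::kNegate, param));
  //   std::unique_ptr<HloComputation> negate = builder.Build();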
class MetadataBuilder {
public:
MetadataBuilder(HloComputation* computation, const OpMetadata& metadata)
: computation_(computation), metadata_(metadata) {}
HloInstruction* AddInstruction(
std::unique_ptr<HloInstruction> instruction) {
instruction->set_metadata(metadata_);
return computation_->AddInstruction(std::move(instruction));
}
private:
HloComputation* computation_;
OpMetadata metadata_;
};
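  // Memoizes MakeInstructionPostOrder(); RecordChange(true) marks the cached
  // order stale so the next PostOrder() call recomputes it.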
class CachingPostOrder {
public:
explicit CachingPostOrder(const HloComputation* computation)
: computation_(computation), recompute_(true) {}
const std::vector<HloInstruction*>& PostOrder() {
if (recompute_) {
cached_post_order_ = computation_->MakeInstructionPostOrder();
recompute_ = false;
}
return cached_post_order_;
}
void RecordChange(bool changed) { recompute_ |= changed; }
private:
const HloComputation* computation_;
bool recompute_;
std::vector<HloInstruction*> cached_post_order_;
};
~HloComputation();
HloInstruction* AddInstruction(std::unique_ptr<HloInstruction> instruction,
absl::string_view new_name = "");
HloInstruction* AddInstruction(std::unique_ptr<HloInstruction> instruction,
const OpMetadata* metadata);
HloInstruction* AddInstruction(std::unique_ptr<HloInstruction> instruction,
const OpMetadata* metadata,
const FrontendAttributes* frontend_attributes);
HloInstruction* ReplaceParameter(int64_t param_no,
std::unique_ptr<HloInstruction> instruction);
absl::Status RemoveParameter(int64_t param_no);
absl::Status RemoveUnusedParametersFromFusedComputation();
absl::Status RemoveUnusedParametersFromAnyComputation();
HloInstruction* AddParameter(std::unique_ptr<HloInstruction> instruction);
HloInstruction* AddEntryComputationParameter(
std::unique_ptr<HloInstruction> instruction);
absl::Status ReplaceEntryComputationParameter(
int64_t param_no, HloInstruction* old_instruction,
std::unique_ptr<HloInstruction> instruction);
absl::Status RemoveInstruction(HloInstruction* instruction);
absl::Status ForceRemoveInstruction(HloInstruction* instruction);
absl::Status RemoveInstructionAndUnusedOperands(
HloInstruction* instruction,
std::optional<absl::FunctionRef<void(HloInstruction*)>> cleanup =
std::nullopt,
bool ignore_control_dependencies = false);
void set_root_instruction(HloInstruction* new_root_instruction,
bool accept_different_shape = false);
HloInstruction* root_instruction() const { return root_instruction_; }
int64_t num_parameters() const { return param_instructions_.size(); }
HloInstruction* parameter_instruction(int64_t param_no) const {
CHECK_GE(param_no, 0);
CHECK_LT(param_no, static_cast<int64_t>(param_instructions_.size()))
<< "Computation " << name() << " has no parameter number " << param_no;
return param_instructions_[param_no];
}
const HloInstruction::InstructionVector& parameter_instructions() const {
return param_instructions_;
}
absl::string_view name() const { return name_; }
void SetAndSanitizeName(absl::string_view name) {
name_ = NameUniquer::GetSanitizedName(name);
}
void UniquifyName(NameUniquer* name_uniquer);
void Print(Printer* printer) const {
return Print(printer, HloPrintOptions::Default());
}
void Print(Printer* printer, const HloPrintOptions& options) const;
void Print(Printer* printer, const HloPrintOptions& options,
absl::Span<const HloInstruction* const> instruction_order) const;
std::string ToString() const;
std::string ToString(const HloPrintOptions& options) const;
std::string ToString(
const HloPrintOptions& options,
absl::Span<const HloInstruction* const> instruction_order) const;
absl::Cord ToCord() const { return ToCord(HloPrintOptions::Default()); }
absl::Cord ToCord(const HloPrintOptions& options) const;
absl::Cord ToCord(
const HloPrintOptions& options,
absl::Span<const HloInstruction* const> instruction_order) const;
HloComputationProto ToProto() const;
static absl::StatusOr<std::unique_ptr<HloComputation>> CreateFromProto(
const HloComputationProto& proto,
const absl::flat_hash_map<int64_t, HloComputation*>& computation_map,
bool prohibit_empty_literal = true);
template <typename H>
friend H AbslHashValue(H h, const HloComputation& computation) {
auto instructions = computation.MakeInstructionPostOrder();
for (auto* instruction : instructions) {
h = H::combine(std::move(h), *instruction);
}
return H::combine(std::move(h), instructions.size());
}
using InstructionSequence = tsl::gtl::iterator_range<
UnwrappingIterator<HloInstructionList::iterator>>;
using ConstInstructionSequence = tsl::gtl::iterator_range<
UnwrappingIterator<HloInstructionList::const_iterator>>;
tsl::gtl::iterator_range<xla::HloInstructionUnwrappingConstIterator>
instructions() const {
const int end = instructions_.size();
return {HloInstructionUnwrappingConstIterator(
HloInstructionConstIterator(&instructions_, 0, end)),
HloInstructionUnwrappingConstIterator(
HloInstructionConstIterator(&instructions_, end, end))};
}
tsl::gtl::iterator_range<xla::HloInstructionUnwrappingIterator>
instructions() {
const int end = instructions_.size();
return {HloInstructionUnwrappingIterator(
HloInstructionIterator(&instructions_, 0, end)),
HloInstructionUnwrappingIterator(
HloInstructionIterator(&instructions_, end, end))};
}
tsl::gtl::iterator_range<HloInstructionIterator> instructions_with_info() {
const int end = instructions_.size();
return {HloInstructionIterator(&instructions_, 0, end),
HloInstructionIterator(&instructions_, end, end)};
}
tsl::gtl::iterator_range<HloInstructionConstIterator> instructions_with_info()
const {
const int end = instructions_.size();
return {HloInstructionConstIterator(&instructions_, 0, end),
HloInstructionConstIterator(&instructions_, end, end)};
}
using ChannelDependencies =
absl::flat_hash_map<const HloInstruction*,
absl::InlinedVector<HloInstruction*, 1>>;
std::vector<HloInstruction*> MakeInstructionPostOrder() const;
std::vector<HloInstruction*> MakeInstructionPostOrderFrom(
HloInstruction&) const;
std::vector<HloInstruction*> MakeInstructionPostOrder(
const ChannelDependencies& channel_dependencies) const;
std::vector<HloInstruction*> MakeInstructionPostOrderWithReshapeFirst() const;
void ForEachInstructionPostOrder(
absl::FunctionRef<void(HloInstruction*)> func) const;
int64_t instruction_count() const { return instruction_count_; }
std::vector<HloComputation*> MakeEmbeddedComputationsList() const;
HloInstruction* CreateFusionInstruction(
absl::Span<HloInstruction* const> instructions_to_fuse,
HloInstruction::FusionKind fusion_kind);
HloInstruction* CreateCallInstruction(
absl::Span<HloInstruction* const> instructions_to_call);
absl::StatusOr<HloInstruction*> CreateAsyncInstructions(
HloInstruction* instruction, absl::Span<const Shape> context_shapes,
absl::string_view async_execution_thread =
HloInstruction::kMainExecutionThread,
bool replace = true, bool override_names = false);
absl::StatusOr<HloInstruction*> DeepCopyInstruction(
HloInstruction* instruction,
const ShapeTree<bool>* indices_to_copy = nullptr,
ShapeTree<HloInstruction*>* copies_added = nullptr);
absl::StatusOr<HloInstruction*> DeepCopyInstructionWithCustomCopier(
HloInstruction* instruction,
absl::FunctionRef<HloInstruction*(HloInstruction* leaf,
const ShapeIndex& leaf_index,
HloComputation* computation)>
copy_leaf);
ProgramShape ComputeProgramShape(bool include_ids = true) const;
bool Equal(
const HloComputation& other, bool is_layout_sensitive,
std::optional<
absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>>
computations_comparator = std::nullopt) const {
return EqualInternal(other, is_layout_sensitive, computations_comparator,
false,
false);
}
bool EqualIgnoringChannelIdValues(
const HloComputation& other, bool is_layout_sensitive,
std::optional<
absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>>
computations_comparator = std::nullopt) const {
return EqualInternal(other, is_layout_sensitive, computations_comparator,
true,
false);
}
bool EqualIgnoringExecutionThread(
const HloComputation& other, bool is_layout_sensitive,
bool ignore_channel_id_values,
std::optional<
absl::FunctionRef<bool(const HloComputation*, const HloComputation*)>>
computations_comparator = std::nullopt) const {
return EqualInternal(other, is_layout_sensitive, computations_comparator,
ignore_channel_id_values,
true);
}
bool operator==(const HloComputation& other) const {
return Equal(other, true);
}
bool operator!=(const HloComputation& other) const {
return !(*this == other);
}
absl::Status ReplaceWithNewInstruction(
HloInstruction* old_instruction,
std::unique_ptr<HloInstruction> new_instruction);
absl::Status ReplaceWithNewEntryComputationParameter(
HloInstruction* old_instruction,
std::unique_ptr<HloInstruction> new_instruction);
absl::StatusOr<bool> ReplaceInstruction(HloInstruction* old_instruction,
HloInstruction* new_instruction,
bool preserve_sharding,
bool relay_control_dependency = false,
bool remove_unused_operands = true);
absl::Status ReplaceInstruction(HloInstruction* old_instruction,
HloInstruction* new_instruction);
absl::StatusOr<bool> ReplaceInstructionWithDifferentShape(
HloInstruction* old_instruction, HloInstruction* new_instruction,
bool preserve_sharding, bool relay_control_dependency = false,
bool remove_unused_operands = true);
absl::Status ReplaceInstructionWithDifferentShape(
HloInstruction* old_instruction, HloInstruction* new_instruction);
void set_parent(HloModule* module) { parent_ = module; }
const HloModule* parent() const { return parent_; }
HloModule* parent() { return parent_; }
template <typename HloInstructionPtr>
absl::Status Accept(DfsHloVisitorBase<HloInstructionPtr>* visitor) const;
absl::Status AcceptWithOperandOrder(
DfsHloVisitor* visitor,
const HloInstruction::CompareFunction& operand_order) const;
template <typename HloInstructionPtr>
absl::Status AcceptOrdered(DfsHloVisitorBase<HloInstructionPtr>* visitor,
absl::Span<HloInstruction* const> order) const;
std::unique_ptr<HloComputation> Clone(const std::string& suffix = "clone",
HloCloneContext* context = nullptr);
std::unique_ptr<HloComputation> CloneWithReplacements(
const absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<HloInstruction>>* replacements,
absl::Span<const HloInstruction* const> extra_parameters = {},
HloCloneContext* context = nullptr, const std::string& suffix = "clone",
const HloInstruction* new_root = nullptr);
std::unique_ptr<HloComputation> CloneInContext(
HloCloneContext& context,
const absl::flat_hash_map<const HloInstruction*,
std::unique_ptr<HloInstruction>>* replacements =
nullptr,
absl::Span<const HloInstruction* const> extra_parameters = {},
const std::string& suffix = "clone",
const HloInstruction* new_root = nullptr) const;
std::unique_ptr<HloComputation> CloneWithReplacementPairs(
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
HloCloneContext* context = nullptr, const std::string& suffix = "clone");
std::unique_ptr<HloComputation> CloneWithReplacementPairs(
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r2,
HloCloneContext* context = nullptr, const std::string& suffix = "clone");
std::unique_ptr<HloComputation> CloneWithReplacementPairs(
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r1,
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r2,
std::pair<const HloInstruction*, std::unique_ptr<HloInstruction>> r3,
HloCloneContext* context = nullptr, const std::string& suffix = "clone");
bool IsSafelyRemovable(const HloInstruction* instruction,
bool ignore_control_dependency = | #include "xla/hlo/ir/hlo_computation.h"
#include <memory>
#include <set>
#include <string>
#include <string_view>
#include <vector>
#include <gmock/gmock.h>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/hlo_parser.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
namespace m = match;
namespace op = xla::testing::opcode_matchers;
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
class HloComputationTest : public HloTestBase {
protected:
HloComputationTest() {}
std::unique_ptr<HloComputation> CreateNegateComputation() {
auto builder = HloComputation::Builder("Negate");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, param));
return builder.Build();
}
std::unique_ptr<HloComputation> CreateMapComputation(
HloComputation* map_computation) {
auto builder = HloComputation::Builder("Map");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
builder.AddInstruction(
HloInstruction::CreateMap(r0f32_, {param}, map_computation));
return builder.Build();
}
Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
};
TEST_F(HloComputationTest, GetEmbeddedComputationsEmpty) {
auto module = CreateNewVerifiedModule();
auto negate_computation =
module->AddEntryComputation(CreateNegateComputation());
EXPECT_TRUE(negate_computation->MakeEmbeddedComputationsList().empty());
}
TEST_F(HloComputationTest, GetEmbeddedComputationsOneComputation) {
auto module = CreateNewVerifiedModule();
auto negate_computation =
module->AddEmbeddedComputation(CreateNegateComputation());
auto map_computation =
module->AddEntryComputation(CreateMapComputation(negate_computation));
EXPECT_TRUE(negate_computation->MakeEmbeddedComputationsList().empty());
EXPECT_THAT(map_computation->MakeEmbeddedComputationsList(),
ElementsAre(negate_computation));
}
TEST_F(HloComputationTest, GetEmbeddedComputationsDiamond) {
auto module = CreateNewVerifiedModule();
auto negate_computation =
module->AddEmbeddedComputation(CreateNegateComputation());
auto map1_computation =
module->AddEmbeddedComputation(CreateMapComputation(negate_computation));
auto map2_computation =
module->AddEmbeddedComputation(CreateMapComputation(negate_computation));
auto builder = HloComputation::Builder(TestName());
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto map1 = builder.AddInstruction(
HloInstruction::CreateMap(r0f32_, {param}, map1_computation));
auto map2 = builder.AddInstruction(
HloInstruction::CreateMap(r0f32_, {param}, map2_computation));
builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, map1, map2));
auto computation = module->AddEntryComputation(builder.Build());
auto embedded_computations = computation->MakeEmbeddedComputationsList();
EXPECT_EQ(3, embedded_computations.size());
EXPECT_EQ(negate_computation, *embedded_computations.begin());
EXPECT_THAT(embedded_computations,
UnorderedElementsAre(negate_computation, map1_computation,
map2_computation));
}
TEST_F(HloComputationTest, PostOrderSingleton) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->MakeInstructionPostOrder(), ElementsAre(constant));
}
TEST_F(HloComputationTest, PostOrderSimple) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto negate1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto negate2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, negate1));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->MakeInstructionPostOrder(),
ElementsAre(constant, negate1, negate2));
}
TEST_F(HloComputationTest, PostOrderDisconnectedInstructions) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant4 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_THAT(computation->MakeInstructionPostOrder(),
UnorderedElementsAre(constant1, constant2, constant3, constant4));
}
TEST_F(HloComputationTest, PostOrderWithReshapeFirst) {
const std::string& hlo_string = R"(
HloModule test
ENTRY %entry {
parameter.0 = f32[3] parameter(0)
broadcast.0 = f32[1, 3] broadcast(f32[3] parameter.0), dimensions={1}
reshape.0 = f32[3, 1] reshape(f32[3] parameter.0)
ROOT tuple.0 = (f32[1, 3], f32[3, 1]) tuple(f32[1, 3] broadcast.0, f32[3, 1] reshape.0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> hlo_module,
ParseAndReturnVerifiedModule(hlo_string));
HloComputation* entry_computation =
FindComputation(hlo_module.get(), "entry");
HloInstruction* parameter_0 =
FindInstruction(hlo_module.get(), "parameter.0");
HloInstruction* broadcast_0 =
FindInstruction(hlo_module.get(), "broadcast.0");
HloInstruction* reshape_0 = FindInstruction(hlo_module.get(), "reshape.0");
HloInstruction* tuple_0 = FindInstruction(hlo_module.get(), "tuple.0");
EXPECT_THAT(entry_computation->MakeInstructionPostOrder(),
ElementsAre(parameter_0, broadcast_0, reshape_0, tuple_0));
EXPECT_THAT(entry_computation->MakeInstructionPostOrderWithReshapeFirst(),
ElementsAre(parameter_0, reshape_0, broadcast_0, tuple_0));
}
TEST_F(HloComputationTest, PostOrderWithMultipleRoots) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto add1 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto add2 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant2, constant3));
auto add3 = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant3));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto post_order = computation->MakeInstructionPostOrder();
EXPECT_EQ(6, post_order.size());
EXPECT_THAT(post_order, UnorderedElementsAre(constant1, constant2, constant3,
add1, add2, add3));
}
TEST_F(HloComputationTest, VisitWithMultipleRoots) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto constant3 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
constant1, constant2));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
constant2, constant3));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
constant1, constant3));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
class TestVisitor : public DfsHloVisitorWithDefault {
public:
explicit TestVisitor(HloComputation* computation)
: computation_(computation) {}
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
EXPECT_FALSE(visited_set_.contains(hlo_instruction));
visited_set_.insert(hlo_instruction);
last_visited_ = hlo_instruction;
return absl::OkStatus();
}
absl::Status FinishVisit(HloInstruction* root) override {
EXPECT_EQ(computation_->root_instruction(), root);
++finish_visit_calls_;
return absl::OkStatus();
}
HloComputation* computation_;
absl::flat_hash_set<HloInstruction*> visited_set_;
int64_t finish_visit_calls_ = 0;
HloInstruction* last_visited_ = nullptr;
};
TestVisitor visitor(computation);
EXPECT_IS_OK(computation->Accept(&visitor));
EXPECT_EQ(6, visitor.visited_set_.size());
EXPECT_EQ(1, visitor.finish_visit_calls_);
EXPECT_EQ(computation->root_instruction(), visitor.last_visited_);
}
TEST_F(HloComputationTest, DeepCopyArray) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto copy = computation->DeepCopyInstruction(constant).value();
EXPECT_THAT(copy, GmockMatch(m::Copy(m::Op().Is(constant))));
}
TEST_F(HloComputationTest, DeepCopyTuple) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto tuple_copy = computation->DeepCopyInstruction(tuple).value();
EXPECT_THAT(tuple_copy, GmockMatch(m::Tuple(
m::Copy(m::GetTupleElement(m::Op().Is(tuple))),
m::Copy(m::GetTupleElement(m::Op().Is(tuple))))));
EXPECT_EQ(0, tuple_copy->operand(0)->operand(0)->tuple_index());
EXPECT_EQ(1, tuple_copy->operand(1)->operand(0)->tuple_index());
}
TEST_F(HloComputationTest, DeepCopyArrayAtIndices) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto computation = builder.Build();
{
ShapeTree<bool> indices_to_copy(constant->shape(), true);
EXPECT_THAT(
computation->DeepCopyInstruction(constant, &indices_to_copy).value(),
GmockMatch(m::Copy(m::Op().Is(constant))));
}
{
ShapeTree<bool> indices_to_copy(constant->shape(), false);
EXPECT_EQ(
computation->DeepCopyInstruction(constant, &indices_to_copy).value(),
constant);
}
}
TEST_F(HloComputationTest, DeepCopyTupleAtIndices) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<float>({1.0, 2.0, 3.0})));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple = builder.AddInstruction(
HloInstruction::CreateTuple({constant1, constant2}));
auto computation = builder.Build();
{
ShapeTree<bool> indices_to_copy(tuple->shape(), true);
ShapeTree<HloInstruction*> copies_added(tuple->shape(),
nullptr);
HloInstruction* deep_copy =
computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
.value();
EXPECT_THAT(deep_copy, GmockMatch(m::Tuple(
m::Copy(m::GetTupleElement(m::Op().Is(tuple)))
.Is(copies_added.element({0})),
m::Copy(m::GetTupleElement(m::Op().Is(tuple)))
.Is(copies_added.element({1})))));
}
{
ShapeTree<bool> indices_to_copy(tuple->shape(), false);
ShapeTree<HloInstruction*> copies_added(tuple->shape(),
nullptr);
HloInstruction* deep_copy =
computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
.value();
EXPECT_THAT(deep_copy,
GmockMatch(m::Tuple(m::GetTupleElement(m::Op().Is(tuple)),
m::GetTupleElement(m::Op().Is(tuple)))));
EXPECT_TRUE(copies_added.element({}) == nullptr);
EXPECT_TRUE(copies_added.element({0}) == nullptr);
EXPECT_TRUE(copies_added.element({1}) == nullptr);
}
{
ShapeTree<bool> indices_to_copy(tuple->shape(), false);
*indices_to_copy.mutable_element({0}) = true;
ShapeTree<HloInstruction*> copies_added(tuple->shape(),
nullptr);
HloInstruction* deep_copy =
computation->DeepCopyInstruction(tuple, &indices_to_copy, &copies_added)
.value();
EXPECT_THAT(deep_copy, GmockMatch(m::Tuple(
m::Copy(m::GetTupleElement(m::Op().Is(tuple))),
m::GetTupleElement(m::Op().Is(tuple)))));
EXPECT_TRUE(copies_added.element({}) == nullptr);
EXPECT_TRUE(copies_added.element({0}) != nullptr);
EXPECT_TRUE(copies_added.element({1}) == nullptr);
}
}
TEST_F(HloComputationTest, DeepCopyToken) {
auto builder = HloComputation::Builder(TestName());
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto copy = computation->DeepCopyInstruction(token).value();
EXPECT_THAT(copy, GmockMatch(m::AfterAll()));
}
TEST_F(HloComputationTest, DeepCopyTokenTuple) {
auto builder = HloComputation::Builder(TestName());
auto token = builder.AddInstruction(HloInstruction::CreateToken());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0)));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({token, constant}));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
auto copy = computation->DeepCopyInstruction(tuple).value();
EXPECT_THAT(copy, GmockMatch(m::Tuple(
m::GetTupleElement(m::Op().Is(tuple)),
m::Copy(m::GetTupleElement(m::Op().Is(tuple))))));
}
TEST_F(HloComputationTest, CycleDetection) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, negate, negate));
auto module = CreateNewUnverifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
ASSERT_IS_OK(add->AddControlDependencyTo(negate));
auto instructions = computation->MakeInstructionPostOrder();
EXPECT_EQ(3, instructions.size());
FunctionVisitor visitor(
[](HloInstruction* instruction) { return absl::OkStatus(); });
auto visit_status = computation->Accept(&visitor);
ASSERT_FALSE(visit_status.ok());
ASSERT_THAT(visit_status.message(),
::testing::ContainsRegex("cycle is detecte"));
}
TEST_F(HloComputationTest, RemoveInstructionWithDuplicateOperand) {
auto builder = HloComputation::Builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
auto dead_negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto dead_add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, dead_negate, dead_negate));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, constant));
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(builder.Build());
EXPECT_EQ(4, computation->instruction_count());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Negate(m::Op().Is(constant))));
EXPECT_EQ(negate, computation->root_instruction());
ASSERT_IS_OK(computation->RemoveInstructionAndUnusedOperands(dead_add));
EXPECT_EQ(2, computation->instruction_count());
EXPECT_THAT(computation->root_instruction(),
GmockMatch(m::Negate(m::Op().Is(constant))));
EXPECT_EQ(negate, computation->root_instruction());
}
TEST_F(HloComputationTest, RemoveSeveralUnusedFusionParameters) {
const char* const kHloModule = R"(
HloModule test
f {
p0 = f32[] parameter(0)
p1 = f32[] parameter(1)
p2 = f32[] parameter(2)
add = f32[] add(p0, p2)
ROOT neg = f32[] negate(p1)
}
ENTRY main {
param0 = f32[] parameter(0)
param1 = f32[] parameter(1)
param2 = f32[] parameter(2)
ROOT res = f32[] fusion(param0, param1, param2), kind=kLoop, calls=f
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloModule));
auto root = module->entry_computation()->root_instruction();
auto dead_add = FindInstruction(module.get(), "add");
ASSERT_IS_OK(root->fused_instructions_computation()
->RemoveInstructionAndUnusedOperands(dead_add));
root = module->entry_computation()->root_instruction();
EXPECT_THAT(root, GmockMatch(m::Fusion(m::Parameter(1))));
EXPECT_THAT(root->fused_expression_root(),
GmockMatch(m::Negate(m::Parameter(0))));
}
TEST_F(HloComputationTest, ReplaceParameter) {
const char* const kHloModule = R"(
HloModule ModuleWithWhile
body {
p_body = (f32[2], s32[]) parameter(0)
val = f32[2] get-tuple-element(p_body), index=0
const = s32[] constant(-1)
ROOT root = (f32[2], s32[]) tuple(val, const)
}
condition {
p_cond = (f32[2], s32[]) parameter(0)
gte = s32[] get-tuple-element(p_cond), index=1
const = s32[] constant(42)
ROOT result = pred[] compare(gte, const), direction=EQ
}
ENTRY entry {
param.1 = s32[] parameter(0)
const = f32[2] constant({0,1})
while_init = (f32[2], s32[]) tuple(const, param.1)
while = (f32[2], s32[]) while(while_init), condition=condition, body=body
ROOT out = s32[] get-tuple-element(while), index=1
})";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnUnverifiedModule(kHloModule));
HloComputation* body = module->GetComputationWithName("body");
Shape new_param_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(S32, {2}), ShapeUtil::MakeShape(S32, {})});
body->ReplaceParameter(
0, HloInstruction::CreateParameter(0, new_param_shape, "new_p_body"));
EXPECT_TRUE(ShapeUtil::Equal(body->parameter_instruction(0)->shape(),
new_param_shape));
}
TEST_F(HloComputationTest, CloneWithControlDependency) {
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, param));
auto module = CreateNewVerifiedModule();
auto computation =
module->AddEntryComputation(builder.Build(add));
TF_CHECK_OK(negate->AddControlDependencyTo(add));
auto clone = computation->Clone();
auto cloned_add = clone->root_instruction();
EXPECT_EQ(cloned_add->opcode(), HloOpcode::kAdd);
auto predecessors = cloned_add->control_predecessors();
EXPECT_EQ(1, predecessors.size());
EXPECT_EQ(HloOpcode::kNegate, predecessors[0]->opcode());
auto successors = predecessors[0]->control_successors();
EXPECT_THAT(successors, ::testing::ElementsAre(cloned_add));
}
TEST_F(HloComputationTest, CloneWithReplacements) {
auto builder = HloComputation::Builder(TestName());
Shape r0s64 = ShapeUtil::MakeShape(S64, {});
Shape r0s32 = ShapeUtil::MakeShape(S32, {});
Shape r0u32 = ShapeUtil::MakeShape(U32, {});
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "p.0.lhs"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "p.0.rhs"));
auto param2 =
builder.AddInstruction(HloInstruction::CreateParameter(2, r0s64, "p.1"));
auto lt = builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
param1, ComparisonDirection::kLt));
auto module = CreateNewVerifiedModule();
auto computation =
module->AddEntryComputation(builder.Build(lt));
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
replacements.emplace(param2,
HloInstruction::CreateParameter(2, r0s32, "p.1"));
auto param3 = HloInstruction::CreateParameter(3, r0u32, "p.2");
std::vector<const HloInstruction*> extra_parameters{param3.get()};
auto clone =
computation->CloneWithReplacements(&replacements, extra_parameters);
ASSERT_EQ(clone->num_parameters(), 4);
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(0)->shape(), r0f32_));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(1)->shape(), r0f32_));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(2)->shape(), r0s32));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(3)->shape(), r0u32));
}
TEST_F(HloComputationTest, CloneInContext) {
HloComputation::Builder builder(TestName());
Shape r0s64 = ShapeUtil::MakeShape(S64, {});
Shape r0s32 = ShapeUtil::MakeShape(S32, {});
Shape r0u32 = ShapeUtil::MakeShape(U32, {});
HloInstruction* param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "p.0.lhs"));
HloInstruction* param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "p.0.rhs"));
HloInstruction* param2 =
builder.AddInstruction(HloInstruction::CreateParameter(2, r0s64, "p.1"));
HloInstruction* lt = builder.AddInstruction(
HloInstruction::CreateCompare(ShapeUtil::MakeShape(PRED, {}), param0,
param1, ComparisonDirection::kLt));
std::unique_ptr<VerifiedHloModule> module = CreateNewVerifiedModule();
const HloComputation& computation =
*module->AddEntryComputation(builder.Build(lt));
absl::flat_hash_map<const HloInstruction*, std::unique_ptr<HloInstruction>>
replacements;
replacements.emplace(param2,
HloInstruction::CreateParameter(2, r0s32, "p.1"));
std::unique_ptr<HloInstruction> param3 =
HloInstruction::CreateParameter(3, r0u32, "p.2");
std::vector<const HloInstruction*> extra_parameters = {param3.get()};
HloCloneContext clone_context(module.get());
std::unique_ptr<HloComputation> clone = computation.CloneInContext(
clone_context, &replacements, extra_parameters);
ASSERT_EQ(clone->num_parameters(), 4);
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(0)->shape(), r0f32_));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(1)->shape(), r0f32_));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(2)->shape(), r0s32));
EXPECT_TRUE(
ShapeUtil::Equal(clone->parameter_instruction(3)->shape(), r0u32));
}
TEST_F(HloComputationTest, Stringification) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
builder.AddInstruction(
HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
computation->SetExecutionThread("MainThread");
auto options = HloPrintOptions().set_print_metadata(false);
const std::string expected_computation =
R"(%TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
%x = f32[5,10]{1,0} parameter(0)
%y = f32[20,10]{1,0} parameter(1)
%transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
EXPECT_EQ(computation->ToString(options), expected_computation);
}
TEST_F(HloComputationTest, StringificationIndent) {
const Shape s1 = ShapeUtil::MakeShape(F32, {5, 10});
const Shape s2 = ShapeUtil::MakeShape(F32, {20, 10});
const Shape s2t = ShapeUtil::MakeShape(F32, {10, 20});
const Shape sout = ShapeUtil::MakeShape(F32, {5, 20});
HloComputation::Builder builder("TransposeDot");
HloInstruction* x =
builder.AddInstruction(HloInstruction::CreateParameter(0, s1, "x"));
HloInstruction* y =
builder.AddInstruction(HloInstruction::CreateParameter(1, s2, "y"));
HloInstruction* reshape =
builder.AddInstruction(HloInstruction::CreateTranspose(s2t, y, {1, 0}));
DotDimensionNumbers dot_dnums;
dot_dnums.add_lhs_contracting_dimensions(1);
dot_dnums.add_rhs_contracting_dimensions(0);
PrecisionConfig precision_config;
precision_config.mutable_operand_precision()->Resize(
2, PrecisionConfig::DEFAULT);
builder.AddInstruction(
HloInstruction::CreateDot(sout, x, reshape, dot_dnums, precision_config));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
computation->SetExecutionThread("MainThread");
auto options =
HloPrintOptions().set_print_metadata(false).set_indent_amount(2);
const std::string expected_computation =
R"( %TransposeDot (x: f32[5,10], y: f32[20,10]) -> f32[5,20] {
%x = f32[5,10]{1,0} parameter(0)
%y = f32[20,10]{1,0} parameter(1)
%transpose = f32[10,20]{1,0} transpose(f32[20,10]{1,0} %y), dimensions={1,0}
ROOT %dot = f32[5,20]{1,0} dot(f32[5,10]{1,0} %x, f32[10,20]{1,0} %transpose), lhs_contracting_dims={1}, rhs_contracting_dims={0}
}, execution_thread="MainThread")";
EXPECT_EQ(computation->ToString(options), expected_computation);
}
TEST_F(HloComputationTest, StringificationCanonical) {
const Shape s1 = ShapeUtil::Mak | 2,174 |
#ifndef XLA_HLO_IR_HLO_INPUT_OUTPUT_ALIAS_CONFIG_H_
#define XLA_HLO_IR_HLO_INPUT_OUTPUT_ALIAS_CONFIG_H_
#include <cstdint>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include "absl/container/btree_set.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_tree.h"
#include "xla/shape_util.h"
namespace xla {
class HloModule;
class HloInputOutputAliasConfig {
public:
enum AliasKind {
kMayAlias,
kMustAlias,
};
struct Alias {
Alias(int64_t parameter_number, ShapeIndex parameter_index,
AliasKind kind = kMayAlias)
: parameter_number(parameter_number),
parameter_index(std::move(parameter_index)),
kind(kind) {}
int64_t parameter_number;
ShapeIndex parameter_index;
AliasKind kind;
bool must_alias() const { return kind == kMustAlias; }
std::string ToString() const {
return absl::StrFormat("(%lld, %s, %s)", parameter_number,
parameter_index.ToString(),
kind == kMustAlias ? "must-alias" : "may-alias");
}
};
HloInputOutputAliasConfig() = default;
explicit HloInputOutputAliasConfig(Shape output_shape)
: alias_(std::move(output_shape)) {}
absl::Status SetUpAlias(const ShapeIndex& output_index, int64_t param_number,
const ShapeIndex& param_index,
AliasKind must_alias = kMayAlias);
bool ParameterHasAlias(int64_t param_number,
const ShapeIndex& param_index) const {
return GetAliasedOutput(param_number, param_index).has_value();
}
bool OutputHasAlias(const ShapeIndex& output_index) const;
HloInputOutputAliasProto ToProto() const;
static absl::StatusOr<HloInputOutputAliasConfig> CreateFromProto(
Shape output_shape, const HloInputOutputAliasProto& proto);
std::optional<ShapeIndex> GetAliasedOutput(
int64_t param_number, const ShapeIndex& param_index) const;
std::optional<Alias> GetAliasedParameter(
const ShapeIndex& output_index) const;
bool ParameterMustAlias(int64_t param_number,
const ShapeIndex& param_index) const;
using AliasFn =
absl::FunctionRef<void(const ShapeIndex& output_index, const Alias&)>;
void ForEachAlias(AliasFn fn) const;
using AliasFnWithStatus = absl::FunctionRef<absl::Status(
const ShapeIndex& output_index, const Alias&)>;
absl::Status Verify(const HloModule& module,
absl::FunctionRef<int64_t(const Shape&)> size_func) const;
absl::Status ForEachAliasWithStatus(AliasFnWithStatus fn) const;
const Shape& shape() const;
std::string ToString() const;
std::string ToShortString() const;
private:
ShapeTree<std::optional<Alias>> alias_;
};
class HloBufferDonorConfig {
public:
struct BufferDonor {
BufferDonor(int64_t param_number, ShapeIndex param_index)
: param_number(param_number), param_index(std::move(param_index)) {}
int64_t param_number;
ShapeIndex param_index;
bool operator==(const BufferDonor& other) const {
return param_number == other.param_number &&
param_index == other.param_index;
}
bool operator<(const BufferDonor& other) const {
return std::forward_as_tuple(param_number, param_index) <
std::forward_as_tuple(other.param_number, other.param_index);
}
bool operator>(const BufferDonor& other) const { return other < *this; }
bool operator<=(const BufferDonor& other) const { return !(*this > other); }
bool operator>=(const BufferDonor& other) const { return !(*this < other); }
template <typename H>
friend H AbslHashValue(H h, const BufferDonor& donor) {
return H::combine(std::move(h), donor.param_number, donor.param_index);
}
};
HloBufferDonorConfig() = default;
absl::Status AddBufferDonor(int64_t param_number,
const ShapeIndex& param_index);
absl::Status RemoveBufferDonor(int64_t param_number,
const ShapeIndex& param_index);
bool ParameterIsBufferDonor(int64_t param_number,
const ShapeIndex& param_index) const;
HloBufferDonorProto ToProto() const;
static absl::StatusOr<HloBufferDonorConfig> CreateFromProto(
const HloBufferDonorProto& proto);
absl::Status Verify(const HloModule& module) const;
const absl::btree_set<BufferDonor>& buffer_donor() const {
return buffer_donor_;
}
std::string ToString() const;
std::string ToShortString() const;
private:
absl::btree_set<BufferDonor> buffer_donor_;
};
std::ostream& operator<<(std::ostream& out,
const HloInputOutputAliasConfig& config);
std::ostream& operator<<(std::ostream& out, const HloBufferDonorConfig& config);
}
#endif
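// Editorial usage sketch (not part of the original XLA sources): it shows the
// intended flow of the config declared above, assuming ShapeUtil and the tsl
// CHECK macros are available; the tuple shape and parameter numbers are
// illustrative only.
namespace xla {
namespace alias_config_usage_sketch {
inline void Run() {
  // A (f32[], f32[]) tuple result, matching the shapes used by the tests
  // later in this document.
  const Shape output_shape = ShapeUtil::MakeTupleShape(
      {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})});
  HloInputOutputAliasConfig config(output_shape);
  // Output leaf {0} must reuse the buffer of parameter 1 (root index {}).
  CHECK(config.SetUpAlias({0}, 1, {},
                          HloInputOutputAliasConfig::kMustAlias)
            .ok());
  CHECK(config.ParameterHasAlias(1, {}));
  CHECK(config.ParameterMustAlias(1, {}));
  CHECK(config.GetAliasedOutput(1, {}).has_value());
}
}  // namespace alias_config_usage_sketch
}  // namespace xla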
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include <cstdint>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include <vector>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/layout_util.h"
#include "xla/service/hlo.pb.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
namespace xla {
bool HloInputOutputAliasConfig::OutputHasAlias(
const ShapeIndex& output_index) const {
return alias_.element(output_index).has_value();
}
absl::Status HloInputOutputAliasConfig::SetUpAlias(
const ShapeIndex& output_index, int64_t param_number,
const ShapeIndex& param_index,
HloInputOutputAliasConfig::AliasKind must_alias) {
TF_RET_CHECK(ShapeUtil::IndexIsValid(alias_.shape(), output_index))
<< "Trying to set up alias at " << output_index.ToString()
<< " which is an invalid index for shape "
<< ShapeUtil::HumanString(alias_.shape());
TF_RET_CHECK(param_number >= 0) << param_number;
TF_RET_CHECK(!alias_.element(output_index)) << absl::StrFormat(
"Trying to set up output alias for param %lld at %s but failed: output "
"index %s is already aliased with param %lld at %s",
param_number, param_index.ToString(), output_index.ToString(),
alias_.element(output_index)->parameter_number,
alias_.element(output_index)->parameter_index.ToString());
(*alias_.mutable_element(output_index)) =
Alias(param_number, param_index, must_alias);
VLOG(4) << "Set up alias between output index " << output_index.ToString()
<< " and parameter " << param_number << " at index "
<< param_index.ToString();
return absl::OkStatus();
}
HloInputOutputAliasProto HloInputOutputAliasConfig::ToProto() const {
HloInputOutputAliasProto result;
alias_.ForEachElement(
[&](const ShapeIndex& index, const std::optional<Alias>& data) {
if (data) {
HloInputOutputAliasProto::AliasEntryProto entry;
for (int64_t i : index) {
entry.add_output_shape_index(i);
}
entry.set_parameter_number(data->parameter_number);
for (int64_t i : data->parameter_index) {
entry.add_parameter_shape_index(i);
}
if (data->must_alias()) {
entry.set_kind(Kind::MUST_ALIAS);
} else {
entry.set_kind(Kind::MAY_ALIAS);
}
result.add_entries()->Swap(&entry);
}
});
return result;
}
absl::StatusOr<HloInputOutputAliasConfig>
HloInputOutputAliasConfig::CreateFromProto(
Shape output_shape, const HloInputOutputAliasProto& proto) {
HloInputOutputAliasConfig result(std::move(output_shape));
for (const HloInputOutputAliasProto::AliasEntryProto& entry :
proto.entries()) {
ShapeIndex output_index(entry.output_shape_index().begin(),
entry.output_shape_index().end());
int64_t param_number = entry.parameter_number();
ShapeIndex param_index(entry.parameter_shape_index().begin(),
entry.parameter_shape_index().end());
AliasKind kind = entry.kind() == Kind::MAY_ALIAS ? kMayAlias : kMustAlias;
TF_RETURN_IF_ERROR(
result.SetUpAlias(output_index, param_number, param_index, kind));
}
return result;
}
const Shape& HloInputOutputAliasConfig::shape() const { return alias_.shape(); }
std::string HloInputOutputAliasConfig::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("HloInputOutputAliasConfig");
pieces.push_back(
absl::StrFormat(" Output shape: %s", alias_.shape().ToString()));
ForEachAlias([&](const ShapeIndex& output_index, const Alias& alias) {
pieces.push_back(absl::StrFormat(
" OutputIndex %s is %saliased with parameter %lld at %s:",
output_index.ToString(), alias.kind == kMustAlias ? "must-" : "may-",
alias.parameter_number, alias.parameter_index.ToString()));
});
return absl::StrJoin(pieces, "\n");
}
std::string HloInputOutputAliasConfig::ToShortString() const {
std::vector<std::string> pieces;
for (const auto& p : alias_) {
const ShapeIndex& index = p.first;
if (std::optional<Alias> alias = p.second) {
pieces.push_back(
absl::StrFormat("%s: %s", index.ToString(), alias->ToString()));
}
}
return absl::StrJoin(pieces, ", ");
}
bool HloInputOutputAliasConfig::ParameterMustAlias(
int64_t param_number, const ShapeIndex& param_index) const {
bool result = false;
alias_.ForEachElement(
[&](const xla::ShapeIndex&, std::optional<Alias> alias) {
if (alias && alias->parameter_number == param_number &&
alias->parameter_index == param_index && alias->must_alias()) {
result = true;
}
});
return result;
}
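// Returns the output index whose alias entry refers to (param_number,
// param_index), scanning the shape tree in reverse iteration order, or
// std::nullopt if no such entry exists.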
std::optional<ShapeIndex> HloInputOutputAliasConfig::GetAliasedOutput(
int64_t param_number, const ShapeIndex& param_index) const {
for (auto it = alias_.rbegin(); it != alias_.rend(); ++it) {
if (it->second.has_value() &&
it->second->parameter_number == param_number &&
it->second->parameter_index == param_index) {
return it->first;
}
}
return std::nullopt;
}
std::optional<HloInputOutputAliasConfig::Alias>
HloInputOutputAliasConfig::GetAliasedParameter(
const ShapeIndex& output_index) const {
CHECK(ShapeUtil::IndexIsValid(alias_.shape(), output_index))
<< ToString() << " " << alias_.shape().ToString() << " " << output_index;
return alias_.element(output_index);
}
void HloInputOutputAliasConfig::ForEachAlias(AliasFn fn) const {
alias_.ForEachElement(
[&](const ShapeIndex& output_index, std::optional<Alias> aliased) {
if (aliased) {
fn(output_index, *aliased);
}
});
}
absl::Status HloInputOutputAliasConfig::ForEachAliasWithStatus(
AliasFnWithStatus fn) const {
return alias_.ForEachElementWithStatus(
[&](const ShapeIndex& output_index, std::optional<Alias> aliased) {
if (aliased) {
TF_RETURN_IF_ERROR(fn(output_index, *aliased));
}
return absl::OkStatus();
});
}
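// Checks every alias against the module's entry computation layout: parameter
// and output indices must be valid, the aliased sub-shapes must be dense
// arrays of equal size under `size_func`, and no parameter index may be
// aliased by more than one output index.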
absl::Status HloInputOutputAliasConfig::Verify(
const HloModule& module,
absl::FunctionRef<int64_t(const Shape&)> size_func) const {
std::vector<ShapeTree<bool>> param_has_seen;
const HloComputation* entry = module.entry_computation();
for (int64_t i = 0; i < entry->num_parameters(); ++i) {
HloInstruction* param = entry->parameter_instruction(i);
param_has_seen.emplace_back(param->shape());
}
return ForEachAliasWithStatus([&](const ShapeIndex& output_index,
const Alias& alias) -> absl::Status {
TF_RET_CHECK(0 <= alias.parameter_number);
TF_RET_CHECK(entry->num_parameters() > alias.parameter_number);
const Shape& param_shape =
module.entry_computation_layout().parameter_shape(
alias.parameter_number);
const Shape& output_shape =
module.entry_computation_layout().result_shape();
TF_RET_CHECK(ShapeUtil::IndexIsValid(param_shape, alias.parameter_index));
TF_RET_CHECK(ShapeUtil::IndexIsValid(output_shape, output_index));
const Shape& param_subshape =
ShapeUtil::GetSubshape(param_shape, alias.parameter_index);
const Shape& output_subshape =
ShapeUtil::GetSubshape(output_shape, output_index);
TF_RET_CHECK(LayoutUtil::IsDenseArray(param_subshape));
TF_RET_CHECK(LayoutUtil::IsDenseArray(output_subshape));
if (size_func(param_subshape) != size_func(output_subshape)) {
return Internal(
"Expected aliased input %lld at index %s and output at index %s to "
"have the same size. Input sub-shape is %s with size %lld, output "
"sub-shape is %s with size %lld",
alias.parameter_number, alias.parameter_index.ToString(),
output_index.ToString(),
ShapeUtil::HumanStringWithLayout(param_subshape),
size_func(param_subshape),
ShapeUtil::HumanStringWithLayout(output_subshape),
size_func(output_subshape));
}
TF_RET_CHECK(param_has_seen[alias.parameter_number].element(
alias.parameter_index) == false);
*(param_has_seen[alias.parameter_number].mutable_element(
alias.parameter_index)) = true;
return absl::OkStatus();
});
}
std::ostream& operator<<(std::ostream& out,
const HloInputOutputAliasConfig& config) {
out << config.ToString();
return out;
}
absl::Status HloBufferDonorConfig::AddBufferDonor(
int64_t param_number, const ShapeIndex& param_index) {
TF_RET_CHECK(param_number >= 0) << param_number;
VLOG(4) << "Register the parameter " << param_number << " at index "
<< param_index.ToString() << " as a buffer donor.";
buffer_donor_.emplace(BufferDonor(param_number, param_index));
return absl::OkStatus();
}
absl::Status HloBufferDonorConfig::RemoveBufferDonor(
int64_t param_number, const ShapeIndex& param_index) {
TF_RET_CHECK(param_number >= 0) << param_number;
buffer_donor_.erase(BufferDonor(param_number, param_index));
return absl::OkStatus();
}
HloBufferDonorProto HloBufferDonorConfig::ToProto() const {
HloBufferDonorProto result;
for (const auto& donor : buffer_donor_) {
HloBufferDonorProto::BufferDonorEntryProto entry;
entry.set_parameter_number(donor.param_number);
for (int64_t i : donor.param_index) {
entry.add_parameter_shape_index(i);
}
result.add_entries()->Swap(&entry);
}
return result;
}
absl::StatusOr<HloBufferDonorConfig> HloBufferDonorConfig::CreateFromProto(
const HloBufferDonorProto& proto) {
HloBufferDonorConfig result;
for (const HloBufferDonorProto::BufferDonorEntryProto& entry :
proto.entries()) {
int64_t param_number = entry.parameter_number();
ShapeIndex param_index(entry.parameter_shape_index().begin(),
entry.parameter_shape_index().end());
TF_RETURN_IF_ERROR(result.AddBufferDonor(param_number, param_index));
}
return result;
}
std::string HloBufferDonorConfig::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("HloBufferDonorConfig");
for (const auto& donor : buffer_donor_) {
pieces.push_back(absl::StrFormat(
" Parameter %lld at %s is registered as a buffer donor.",
donor.param_number, donor.param_index.ToString()));
}
return absl::StrJoin(pieces, "\n");
}
std::string HloBufferDonorConfig::ToShortString() const {
std::vector<std::string> pieces;
pieces.reserve(buffer_donor_.size());
for (const auto& donor : buffer_donor_) {
pieces.push_back(absl::StrFormat("(%lld, %s)", donor.param_number,
donor.param_index.ToString()));
}
return absl::StrJoin(pieces, ", ");
}
bool HloBufferDonorConfig::ParameterIsBufferDonor(
int64_t param_number, const ShapeIndex& param_index) const {
auto it = buffer_donor_.find(BufferDonor(param_number, param_index));
return it != buffer_donor_.end();
}
absl::Status HloBufferDonorConfig::Verify(const HloModule& module) const {
const HloComputation* entry = module.entry_computation();
const auto& alias_config = module.input_output_alias_config();
for (const auto& donor : buffer_donor_) {
TF_RET_CHECK(donor.param_number >= 0);
TF_RET_CHECK(donor.param_number < entry->num_parameters());
const Shape& param_shape =
module.entry_computation_layout().parameter_shape(donor.param_number);
TF_RET_CHECK(ShapeUtil::IndexIsValid(param_shape, donor.param_index));
const Shape& param_subshape =
ShapeUtil::GetSubshape(param_shape, donor.param_index);
TF_RET_CHECK(LayoutUtil::IsDenseArray(param_subshape));
if (alias_config.ParameterHasAlias(donor.param_number, donor.param_index)) {
return Internal(
"Input %lld at index %s is registered as a buffer donor. However, it "
"is also in the input output alias config.",
donor.param_number, donor.param_index.ToString());
}
}
return absl::OkStatus();
}
std::ostream& operator<<(std::ostream& out,
const HloBufferDonorConfig& config) {
out << config.ToString();
return out;
}
} | #include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/hlo_ordering.h"
#include "xla/shape_util.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/types.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
class HloInputOutputAliasConfigTest : public HloTestBase {
protected:
void expect_aliased(const ShapeIndex& output_index, int64_t param_number,
const ShapeIndex& param_index,
const HloInputOutputAliasConfig& config) {
std::optional<ShapeIndex> aliased_output =
config.GetAliasedOutput(param_number, param_index);
EXPECT_TRUE(aliased_output);
EXPECT_EQ(aliased_output.value(), output_index);
std::optional<HloInputOutputAliasConfig::Alias> aliased_param =
config.GetAliasedParameter(output_index);
EXPECT_TRUE(aliased_param);
EXPECT_EQ(aliased_param->parameter_number, param_number);
EXPECT_EQ(aliased_param->parameter_index, param_index);
}
void expect_not_aliased(const ShapeIndex& output_index, int64_t param_number,
const ShapeIndex& param_index,
const HloInputOutputAliasConfig& config) {
std::optional<ShapeIndex> aliased_output =
config.GetAliasedOutput(param_number, param_index);
EXPECT_FALSE(aliased_output && aliased_output == output_index);
std::optional<HloInputOutputAliasConfig::Alias> aliased_param =
config.GetAliasedParameter(output_index);
EXPECT_FALSE(aliased_param &&
aliased_param->parameter_number == param_number &&
aliased_param->parameter_index == param_index);
}
};
TEST_F(HloInputOutputAliasConfigTest, SimpleAliasing) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT root = (f32[], f32[]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
TF_ASSERT_OK(config.SetUpAlias(
{0}, 1,
{}));
expect_aliased({0}, 1,
{}, config);
expect_not_aliased({1}, 1,
{}, config);
expect_not_aliased({0}, 0,
{}, config);
}
TEST_F(HloInputOutputAliasConfigTest, SimpleAliasingWithTupleInput) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
TF_ASSERT_OK(config.SetUpAlias(
{0}, 0,
{0}));
TF_ASSERT_OK(config.SetUpAlias(
{1}, 0,
{1}));
expect_aliased({0}, 0,
{0}, config);
expect_aliased({1}, 0,
{1}, config);
expect_not_aliased({1}, 1,
{}, config);
expect_not_aliased({0}, 0,
{}, config);
}
TEST_F(HloInputOutputAliasConfigTest, InputDoNotAliasTwice) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT root = (f32[], f32[]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
TF_ASSERT_OK(config.SetUpAlias(
{0}, 0,
{}));
TF_ASSERT_OK(config.SetUpAlias(
{1}, 0,
{}));
ASSERT_IS_NOT_OK(config.Verify(*module, [](const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape);
}));
}
TEST_F(HloInputOutputAliasConfigTest, SizesMustMatch) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[4096] parameter(1)
ROOT root = (f32[], f32[4096]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
TF_ASSERT_OK(config.SetUpAlias(
{1}, 0,
{}));
ASSERT_IS_NOT_OK(config.Verify(*module, [](const Shape& shape) {
return ShapeUtil::ByteSizeOf(shape);
}));
}
TEST_F(HloInputOutputAliasConfigTest, OutputDoNotAliasTwice) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT root = (f32[], f32[]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloInputOutputAliasConfig config(
module->entry_computation()->root_instruction()->shape());
TF_ASSERT_OK(config.SetUpAlias(
{0}, 0,
{}));
ASSERT_IS_NOT_OK(config.SetUpAlias(
{0}, 1,
{}));
}
class HloBufferDonorConfigTest : public HloTestBase {};
TEST_F(HloBufferDonorConfigTest, SimpleBufferDonor) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT root = (f32[], f32[]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloBufferDonorConfig config;
TF_ASSERT_OK(config.AddBufferDonor(0, {}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.AddBufferDonor(1, {}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {}));
EXPECT_TRUE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.RemoveBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(0, {}));
EXPECT_TRUE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.Verify(*module));
TF_ASSERT_OK(config.AddBufferDonor(2, {}));
ASSERT_IS_NOT_OK(config.Verify(*module));
}
TEST_F(HloBufferDonorConfigTest, SimpleBufferDonorWithTupleInput) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloBufferDonorConfig config;
TF_ASSERT_OK(config.AddBufferDonor(0, {0}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {0}));
EXPECT_FALSE(config.ParameterIsBufferDonor(0, {1}));
EXPECT_FALSE(config.ParameterIsBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.AddBufferDonor(0, {1}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {0}));
EXPECT_TRUE(config.ParameterIsBufferDonor(0, {1}));
EXPECT_FALSE(config.ParameterIsBufferDonor(0, {}));
EXPECT_FALSE(config.ParameterIsBufferDonor(1, {}));
TF_ASSERT_OK(config.Verify(*module));
TF_ASSERT_OK(config.AddBufferDonor(0, {2}));
ASSERT_IS_NOT_OK(config.Verify(*module));
}
TEST_F(HloBufferDonorConfigTest, BufferDonorInputOutputAliasOverlap) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
HloBufferDonorConfig config;
TF_ASSERT_OK(config.AddBufferDonor(0, {0}));
TF_ASSERT_OK(config.Verify(*module));
TF_ASSERT_OK(module->input_output_alias_config().SetUpAlias({0}, 0, {0}));
ASSERT_IS_NOT_OK(config.Verify(*module));
}
}
} | 2,175 |
#ifndef XLA_HLO_IR_BACKEND_CONFIG_H_
#define XLA_HLO_IR_BACKEND_CONFIG_H_
#include <memory>
#include <string>
#include <utility>
#include "absl/base/thread_annotations.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "tsl/platform/protobuf.h"
namespace xla {
absl::StatusOr<std::string> BackendConfigToRawString(
const tsl::protobuf::Message& proto);
std::unique_ptr<tsl::protobuf::Message> CloneBackendConfigProto(
const tsl::protobuf::Message* proto);
class BackendConfigWrapper {
public:
BackendConfigWrapper() = default;
explicit BackendConfigWrapper(std::string raw_string)
: raw_string_(std::move(raw_string)) {}
explicit BackendConfigWrapper(const tsl::protobuf::Message& proto)
: proto_(CloneBackendConfigProto(&proto)) {}
BackendConfigWrapper(const BackendConfigWrapper& other) {
absl::MutexLock other_lock{&other.mutex_};
proto_ = CloneBackendConfigProto(other.proto_.get());
raw_string_ = other.raw_string_;
}
BackendConfigWrapper& operator=(BackendConfigWrapper&& other);
bool operator==(const BackendConfigWrapper& other) const;
bool operator!=(const BackendConfigWrapper& other) const {
return !(*this == other);
}
const std::string& GetRawString() const {
absl::WriterMutexLock lock{&mutex_};
return GetRawStringWithoutMutex();
}
absl::Status GetProto(tsl::protobuf::Message* output_proto) const;
bool empty() const {
absl::MutexLock lock{&mutex_};
return proto_ == nullptr && raw_string_.empty();
}
private:
const std::string& GetRawStringWithoutMutex() const
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
mutable absl::Mutex mutex_;
mutable std::unique_ptr<tsl::protobuf::Message> proto_
ABSL_GUARDED_BY(mutex_);
mutable std::string raw_string_ ABSL_GUARDED_BY(mutex_);
};
}
#endif
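// Editorial usage sketch (not part of the original XLA sources): a minimal
// round trip through the wrapper declared above. The concrete proto instance
// is supplied by the caller, and the CHECK macro is assumed to come from tsl
// logging.
namespace xla {
namespace backend_config_usage_sketch {
inline void Run(const tsl::protobuf::Message& config_proto) {
  // Wrap a typed proto; the JSON string form is produced lazily on request.
  BackendConfigWrapper wrapper(config_proto);
  const std::string& json = wrapper.GetRawString();
  (void)json;  // e.g. attached to a textual HLO dump.
  // Round-trip back into a fresh proto with the same descriptor.
  std::unique_ptr<tsl::protobuf::Message> round_trip(config_proto.New());
  CHECK(wrapper.GetProto(round_trip.get()).ok());
  CHECK(!wrapper.empty());
}
}  // namespace backend_config_usage_sketch
}  // namespace xla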
#include "xla/hlo/ir/backend_config.h"
#include <memory>
#include <string>
#include <utility>
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/synchronization/mutex.h"
#include "xla/util.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/human_readable_json.h"
#include "tsl/platform/protobuf.h"
namespace xla {
std::unique_ptr<tsl::protobuf::Message> CloneBackendConfigProto(
const tsl::protobuf::Message* proto) {
if (proto == nullptr) {
return nullptr;
}
std::unique_ptr<tsl::protobuf::Message> result(proto->New());
result->CopyFrom(*proto);
return result;
}
absl::StatusOr<std::string> BackendConfigToRawString(
const tsl::protobuf::Message& proto) {
return tsl::ProtoToHumanReadableJson(proto, true);
}
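// Lazily materializes the JSON form: when only a proto is stored, it is
// serialized into raw_string_ on first access; an empty wrapper yields a
// shared static empty string.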
const std::string& BackendConfigWrapper::GetRawStringWithoutMutex() const {
if (proto_ && raw_string_.empty()) {
raw_string_ = BackendConfigToRawString(*proto_).value();
}
static const std::string* kEmptyString = new std::string();
return raw_string_.empty() ? *kEmptyString : raw_string_;
}
absl::Status BackendConfigWrapper::GetProto(
tsl::protobuf::Message* output_proto) const {
output_proto->Clear();
absl::WriterMutexLock lock{&mutex_};
if (proto_ != nullptr) {
if (proto_->GetDescriptor() != output_proto->GetDescriptor()) {
return Internal("Mismatched backend config descriptors.");
}
output_proto->CopyFrom(*proto_);
return absl::OkStatus();
}
if (raw_string_.empty()) {
return absl::OkStatus();
}
TF_RETURN_IF_ERROR(tsl::HumanReadableJsonToProto(raw_string_, output_proto));
proto_ = CloneBackendConfigProto(output_proto);
return absl::OkStatus();
}
BackendConfigWrapper& BackendConfigWrapper::operator=(
BackendConfigWrapper&& other) {
std::unique_ptr<tsl::protobuf::Message> temp_proto;
std::string temp_string;
{
absl::MutexLock other_lock{&other.mutex_};
temp_proto = std::move(other.proto_);
temp_string = std::move(other.raw_string_);
}
absl::MutexLock this_lock{&mutex_};
proto_ = std::move(temp_proto);
raw_string_ = std::move(temp_string);
return *this;
}
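// Equality never holds both mutexes at once: this object's proto pointer is
// read under its own lock, then the other object's state is examined under
// its lock, which avoids lock-order deadlocks (see ComparisonDoesNotDeadlock
// in the tests below).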
bool BackendConfigWrapper::operator==(const BackendConfigWrapper& other) const {
tsl::protobuf::Message* this_proto = nullptr;
{
absl::MutexLock this_lock{&mutex_};
this_proto = proto_.get();
}
const std::string* other_raw_string = nullptr;
{
absl::MutexLock other_lock{&other.mutex_};
if (this_proto != nullptr && other.proto_ != nullptr) {
using ::tsl::protobuf::util::MessageDifferencer;
return MessageDifferencer::Equals(*this_proto, *other.proto_);
}
other_raw_string = &other.GetRawStringWithoutMutex();
}
return GetRawString() == *other_raw_string;
}
} | #include "xla/hlo/ir/backend_config.h"
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/synchronization/notification.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/test.h"
namespace xla {
namespace {
const int kNumThreads = 100;
const int kNumRepetitions = 100;
constexpr absl::string_view kRawString =
R"({"operation_queue_id":"0","wait_on_operation_queues":[],"fusion_backend_config":{"kind":"__triton_gemm","triton_gemm_config":{"block_m":"256","block_n":"256","block_k":"32","split_k":"1","num_stages":"1","num_warps":"16","num_ctas":"1"}},"force_earliest_schedule":false})";
template <typename Input, typename CheckFn>
void RunThreaded(Input input, CheckFn check_fn) {
for (int i = 0; i < kNumRepetitions; ++i) {
BackendConfigWrapper source(input);
absl::Notification all_threads_created;
std::vector<std::unique_ptr<std::thread>> threads;
for (int i = 0; i < kNumThreads; ++i) {
threads.emplace_back(std::make_unique<std::thread>([&] {
all_threads_created.WaitForNotification();
check_fn(source);
}));
}
all_threads_created.Notify();
for (int i = 0; i < kNumThreads; ++i) {
threads[i]->join();
}
}
}
TEST(BackendConfigWrapperTest, ConcurrentGetProto) {
RunThreaded(std::string{kRawString}, [](BackendConfigWrapper& source) {
gpu::GpuBackendConfig proto;
TF_EXPECT_OK(source.GetProto(&proto));
EXPECT_TRUE(proto.has_fusion_backend_config());
BackendConfigWrapper wrapped(proto);
EXPECT_TRUE(wrapped == source);
});
}
TEST(BackendConfigWrapperTest, ConcurrentGetRawString) {
BackendConfigWrapper source_json(std::string{kRawString});
gpu::GpuBackendConfig proto;
TF_EXPECT_OK(source_json.GetProto(&proto));
RunThreaded(proto, [](BackendConfigWrapper& source) {
std::string raw_string = source.GetRawString();
EXPECT_EQ(raw_string, kRawString);
BackendConfigWrapper wrapped(raw_string);
EXPECT_TRUE(wrapped == source);
});
}
TEST(BackendConfigWrapperTest, AssignmentToNonEmptyIsOK) {
BackendConfigWrapper a(std::string{kRawString});
BackendConfigWrapper b(std::string{kRawString});
a = std::move(b);
EXPECT_TRUE(a == BackendConfigWrapper(std::string{kRawString}));
}
TEST(BackendConfigWrapperTest, AssignmentDoesNotDeadlock) {
BackendConfigWrapper source;
BackendConfigWrapper& ref = source;
source = std::move(ref);
}
TEST(BackendConfigWrapperTest, SelfComparisonDoesNotDeadlock) {
BackendConfigWrapper source(std::string{kRawString});
EXPECT_TRUE(source == source);
}
TEST(BackendConfigWrapperTest, ComparisonDoesNotDeadlock) {
BackendConfigWrapper source_json(std::string{kRawString});
gpu::GpuBackendConfig proto;
TF_EXPECT_OK(source_json.GetProto(&proto));
RunThreaded(std::string{kRawString}, [&proto](BackendConfigWrapper& source) {
BackendConfigWrapper other_first(proto);
EXPECT_TRUE(other_first == source);
BackendConfigWrapper other_second(proto);
EXPECT_TRUE(source == other_second);
});
}
}
} | 2,176 |
#ifndef XLA_HLO_IR_HLO_OPCODE_H_
#define XLA_HLO_IR_HLO_OPCODE_H_
#include <cstdint>
#include <iosfwd>
#include <optional>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
namespace xla {
#define HLO_OPCODE_LIST(V) \
V(kAbs, "abs", 1) \
V(kAdd, "add", 2) \
V(kAddDependency, "add-dependency", 2) \
V(kAfterAll, "after-all", kHloOpcodeIsVariadic) \
V(kAllGather, "all-gather", kHloOpcodeIsVariadic) \
V(kAllGatherDone, "all-gather-done", 1) \
V(kAllGatherStart, "all-gather-start", kHloOpcodeIsVariadic) \
V(kAllReduce, "all-reduce", kHloOpcodeIsVariadic) \
V(kAllReduceDone, "all-reduce-done", 1) \
V(kAllReduceStart, "all-reduce-start", kHloOpcodeIsVariadic) \
V(kAllToAll, "all-to-all", kHloOpcodeIsVariadic) \
V(kAnd, "and", 2) \
V(kAsyncDone, "async-done", 1) \
V(kAsyncStart, "async-start", kHloOpcodeIsVariadic) \
V(kAsyncUpdate, "async-update", 1) \
V(kAtan2, "atan2", 2) \
V(kBatchNormGrad, "batch-norm-grad", 5) \
V(kBatchNormInference, "batch-norm-inference", 5) \
V(kBatchNormTraining, "batch-norm-training", 3) \
V(kBitcast, "bitcast", 1) \
V(kBitcastConvert, "bitcast-convert", 1) \
V(kBroadcast, "broadcast", 1) \
V(kCall, "call", kHloOpcodeIsVariadic) \
V(kCbrt, "cbrt", 1) \
V(kCeil, "ceil", 1) \
V(kCholesky, "cholesky", 1) \
V(kClamp, "clamp", 3) \
V(kClz, "count-leading-zeros", 1) \
V(kCollectiveBroadcast, "collective-broadcast", kHloOpcodeIsVariadic) \
V(kCollectivePermute, "collective-permute", kHloOpcodeIsVariadic) \
V(kCollectivePermuteDone, "collective-permute-done", 1) \
V(kCollectivePermuteStart, "collective-permute-start", kHloOpcodeIsVariadic) \
V(kCompare, "compare", 2) \
V(kComplex, "complex", 2) \
V(kConcatenate, "concatenate", kHloOpcodeIsVariadic) \
V(kConditional, "conditional", kHloOpcodeIsVariadic) \
V(kConstant, "constant", 0) \
V(kConvert, "convert", 1) \
V(kConvolution, "convolution", 2) \
V(kCopy, "copy", 1) \
V(kCopyDone, "copy-done", 1) \
V(kCopyStart, "copy-start", 1) \
V(kCos, "cosine", 1) \
V(kCustomCall, "custom-call", kHloOpcodeIsVariadic) \
V(kDivide, "divide", 2) \
V(kDomain, "domain", 1) \
V(kDot, "dot", kHloOpcodeIsVariadic) \
V(kDynamicReshape, "dynamic-reshape", kHloOpcodeIsVariadic) \
V(kDynamicSlice, "dynamic-slice", kHloOpcodeIsVariadic) \
V(kDynamicUpdateSlice, "dynamic-update-slice", kHloOpcodeIsVariadic) \
V(kErf, "erf", 1) \
V(kExp, "exponential", 1) \
V(kExpm1, "exponential-minus-one", 1) \
V(kFft, "fft", 1) \
V(kFloor, "floor", 1) \
V(kFusion, "fusion", kHloOpcodeIsVariadic) \
V(kGather, "gather", 2) \
V(kGetDimensionSize, "get-dimension-size", 1) \
V(kGetTupleElement, "get-tuple-element", 1) \
V(kImag, "imag", 1) \
V(kInfeed, "infeed", 1) \
V(kIota, "iota", 0) \
V(kIsFinite, "is-finite", 1) \
V(kLog, "log", 1) \
V(kLog1p, "log-plus-one", 1) \
V(kLogistic, "logistic", 1) \
V(kMap, "map", kHloOpcodeIsVariadic) \
V(kMaximum, "maximum", 2) \
V(kMinimum, "minimum", 2) \
V(kMultiply, "multiply", 2) \
V(kNegate, "negate", 1) \
V(kNot, "not", 1) \
V(kOptimizationBarrier, "opt-barrier", 1) \
V(kOr, "or", 2) \
V(kOutfeed, "outfeed", 2) \
V(kPad, "pad", 2) \
V(kParameter, "parameter", 0) \
V(kPartitionId, "partition-id", 0) \
V(kPopulationCount, "popcnt", 1) \
V(kPower, "power", 2) \
V(kReal, "real", 1) \
V(kRecv, "recv", 1) \
V(kRecvDone, "recv-done", 1) \
V(kReduce, "reduce", kHloOpcodeIsVariadic) \
V(kReducePrecision, "reduce-precision", 1) \
V(kReduceScatter, "reduce-scatter", kHloOpcodeIsVariadic) \
V(kReduceWindow, "reduce-window", kHloOpcodeIsVariadic) \
V(kRemainder, "remainder", 2) \
V(kReplicaId, "replica-id", 0) \
V(kReshape, "reshape", 1) \
V(kReverse, "reverse", 1) \
V(kRng, "rng", kHloOpcodeIsVariadic) \
V(kRngBitGenerator, "rng-bit-generator", 1) \
V(kRngGetAndUpdateState, "rng-get-and-update-state", 0) \
V(kRoundNearestAfz, "round-nearest-afz", 1) \
V(kRoundNearestEven, "round-nearest-even", 1) \
V(kRsqrt, "rsqrt", 1) \
V(kScatter, "scatter", kHloOpcodeIsVariadic) \
V(kSelect, "select", 3) \
V(kSelectAndScatter, "select-and-scatter", 3) \
V(kSend, "send", 2) \
V(kSendDone, "send-done", 1) \
V(kSetDimensionSize, "set-dimension-size", 2) \
V(kShiftLeft, "shift-left", 2) \
V(kShiftRightArithmetic, "shift-right-arithmetic", 2) \
V(kShiftRightLogical, "shift-right-logical", 2) \
V(kSign, "sign", 1) \
V(kSin, "sine", 1) \
V(kSlice, "slice", 1) \
V(kSort, "sort", kHloOpcodeIsVariadic) \
V(kSqrt, "sqrt", 1) \
V(kStochasticConvert, "stochastic-convert", 2) \
V(kSubtract, "subtract", 2) \
V(kTan, "tan", 1) \
V(kTanh, "tanh", 1) \
V(kTopK, "topk", 1) \
V(kTranspose, "transpose", 1) \
V(kTriangularSolve, "triangular-solve", 2) \
V(kTuple, "tuple", kHloOpcodeIsVariadic) \
V(kWhile, "while", 1) \
V(kXor, "xor", 2)
enum class HloOpcode : uint8_t {
#define DECLARE_ENUM(enum_name, opcode_name, ...) enum_name,
HLO_OPCODE_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};
enum {
kHloOpcodeIsVariadic = -1,
};
absl::string_view HloOpcodeString(HloOpcode opcode);
absl::StatusOr<HloOpcode> StringToHloOpcode(absl::string_view opcode_name);
inline std::ostream& operator<<(std::ostream& os, HloOpcode opcode) {
return os << HloOpcodeString(opcode);
}
bool HloOpcodeIsComparison(HloOpcode opcode);
bool HloOpcodeIsVariadic(HloOpcode opcode);
std::optional<int> HloOpcodeArity(HloOpcode opcode);
bool HloOpcodeIsAsync(HloOpcode opcode);
inline bool HloOpcodeIsBinaryCommutative(HloOpcode opcode) {
switch (opcode) {
case HloOpcode::kAdd:
case HloOpcode::kMultiply:
case HloOpcode::kMaximum:
case HloOpcode::kMinimum:
case HloOpcode::kAnd:
case HloOpcode::kOr:
case HloOpcode::kXor:
return true;
default:
return false;
}
}
inline constexpr uint32_t HloOpcodeCount() {
#define HLO_COUNT_ONE(...) +1
#define HLO_XLIST_LENGTH(list) list(HLO_COUNT_ONE)
return HLO_XLIST_LENGTH(HLO_OPCODE_LIST);
}
static_assert(HloOpcodeCount() < 256,
"HloOpcode is a uint8_t. You need to increase its size before "
"adding new op codes.");
}
#endif
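// Editorial usage sketch (not part of the original XLA sources): exercises the
// helpers declared above; CHECK/CHECK_EQ are assumed to come from tsl logging.
namespace xla {
namespace hlo_opcode_usage_sketch {
inline void Run() {
  // Opcode <-> string name round trip.
  CHECK_EQ(StringToHloOpcode(HloOpcodeString(HloOpcode::kAdd)).value(),
           HloOpcode::kAdd);
  // kAdd is a fixed-arity, commutative binary op.
  CHECK_EQ(HloOpcodeArity(HloOpcode::kAdd).value(), 2);
  CHECK(HloOpcodeIsBinaryCommutative(HloOpcode::kAdd));
  // Variadic ops such as kTuple report no fixed arity.
  CHECK(HloOpcodeIsVariadic(HloOpcode::kTuple));
  CHECK(!HloOpcodeArity(HloOpcode::kTuple).has_value());
}
}  // namespace hlo_opcode_usage_sketch
}  // namespace xla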
#include "xla/hlo/ir/hlo_opcode.h"
#include <optional>
#include <string>
#include "absl/container/flat_hash_map.h"
#include "xla/util.h"
namespace xla {
absl::string_view HloOpcodeString(HloOpcode opcode) {
switch (opcode) {
#define CASE_OPCODE_STRING(enum_name, opcode_name, ...) \
case HloOpcode::enum_name: \
return opcode_name;
HLO_OPCODE_LIST(CASE_OPCODE_STRING)
#undef CASE_OPCODE_STRING
}
}
absl::StatusOr<HloOpcode> StringToHloOpcode(absl::string_view opcode_name) {
static auto* opcode_map = new absl::flat_hash_map<std::string, HloOpcode>({
#define STRING_TO_OPCODE_ENTRY(enum_name, opcode_name, ...) \
{opcode_name, HloOpcode::enum_name},
HLO_OPCODE_LIST(STRING_TO_OPCODE_ENTRY)
#undef STRING_TO_OPCODE_ENTRY
});
auto it = opcode_map->find(opcode_name);
if (it == opcode_map->end()) {
return InvalidArgument("Unknown opcode: %s", opcode_name);
}
return it->second;
}
bool HloOpcodeIsComparison(HloOpcode opcode) {
return opcode == HloOpcode::kCompare;
}
bool HloOpcodeIsVariadic(HloOpcode opcode) {
switch (opcode) {
#define CASE_IS_VARIADIC(enum_name, opcode_name, arity, ...) \
case HloOpcode::enum_name: \
return arity == kHloOpcodeIsVariadic;
HLO_OPCODE_LIST(CASE_IS_VARIADIC)
#undef CASE_IS_VARIADIC
}
}
std::optional<int> HloOpcodeArity(HloOpcode opcode) {
switch (opcode) {
#define CASE_ARITY(enum_name, opcode_name, arity, ...) \
case HloOpcode::enum_name: \
return arity == kHloOpcodeIsVariadic ? std::nullopt \
: std::make_optional(arity);
HLO_OPCODE_LIST(CASE_ARITY)
#undef CASE_ARITY
}
}
bool HloOpcodeIsAsync(HloOpcode opcode) {
return opcode == HloOpcode::kAsyncStart ||
opcode == HloOpcode::kAsyncUpdate || opcode == HloOpcode::kAsyncDone;
}
} | #include "xla/hlo/ir/hlo_opcode.h"
#include "xla/test.h"
#include "xla/types.h"
namespace xla {
namespace {
TEST(HloOpcodeTest, StringifyMultiply) {
ASSERT_EQ("multiply", HloOpcodeString(HloOpcode::kMultiply));
}
TEST(HloOpcodeTest, OpcodeProperties) {
#define SOME_LIST(X) \
X(One) \
X(Two) \
X(Three)
EXPECT_EQ(3, HLO_XLIST_LENGTH(SOME_LIST));
#undef SOME_LIST
for (int i = 0; i < HloOpcodeCount(); ++i) {
auto opcode = static_cast<HloOpcode>(i);
EXPECT_EQ(opcode, StringToHloOpcode(HloOpcodeString(opcode)).value());
switch (opcode) {
case HloOpcode::kCompare:
EXPECT_TRUE(HloOpcodeIsComparison(opcode));
break;
default:
EXPECT_FALSE(HloOpcodeIsComparison(opcode));
}
switch (opcode) {
case HloOpcode::kAfterAll:
case HloOpcode::kAllGather:
case HloOpcode::kAllGatherStart:
case HloOpcode::kAllReduce:
case HloOpcode::kAsyncStart:
case HloOpcode::kReduceScatter:
case HloOpcode::kAllReduceStart:
case HloOpcode::kAllToAll:
case HloOpcode::kCall:
case HloOpcode::kCollectiveBroadcast:
case HloOpcode::kCollectivePermute:
case HloOpcode::kCollectivePermuteStart:
case HloOpcode::kConcatenate:
case HloOpcode::kConditional:
case HloOpcode::kCustomCall:
case HloOpcode::kDot:
case HloOpcode::kDynamicSlice:
case HloOpcode::kDynamicUpdateSlice:
case HloOpcode::kDynamicReshape:
case HloOpcode::kFusion:
case HloOpcode::kMap:
case HloOpcode::kReduce:
case HloOpcode::kRng:
case HloOpcode::kScatter:
case HloOpcode::kSort:
case HloOpcode::kTuple:
case HloOpcode::kReduceWindow:
EXPECT_TRUE(HloOpcodeIsVariadic(opcode));
break;
default:
EXPECT_FALSE(HloOpcodeIsVariadic(opcode));
}
}
}
}
} | 2,177 |
#ifndef XLA_HLO_IR_HLO_MODULE_METADATA_H_
#define XLA_HLO_IR_HLO_MODULE_METADATA_H_
#include <functional>
#include <optional>
#include <string>
#include <vector>
#include "xla/service/hlo.pb.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
#include "tsl/platform/statusor.h"
namespace xla {
class HloModuleMetadata {
public:
explicit HloModuleMetadata(tsl::Env* env) : env_(env) {}
const HloModuleMetadataProto& proto() const { return module_metadata_; }
void RecordPassStart();
absl::Status RecordPassEnd();
const std::optional<HloModuleMetadataProto>& prepartitioning_metadata()
const {
return prepartitioning_metadata_;
}
void set_prepartitioning_metadata(
const HloModuleMetadata& prepartitioning_metadata);
void set_module_group_name(const std::string& name) {
module_metadata_.set_module_group_name(name);
}
void set_canonical_module_id(int64_t id) {
module_metadata_.set_canonical_module_id(id);
}
void add_partitioned_module_id(int64_t id) {
module_metadata_.add_partitioned_module_ids(id);
}
absl::Status set_custom_metadata(const ::tsl::protobuf::Message& message);
absl::StatusOr<int64_t> current_pass_id() {
TF_ASSIGN_OR_RETURN(HloPassMetadata * pass_metadata,
GetCurrentHloPassMetadata());
return pass_metadata->pass_id();
}
absl::Status set_current_pass_name(const std::string& pass_name) {
return MutateCurrentHloPassMetadata(
[&pass_name](HloPassMetadata* pass_metadata) {
pass_metadata->set_pass_name(pass_name);
});
}
absl::Status set_current_pass_pipeline_name(
const std::string& pipeline_name) {
return MutateCurrentHloPassMetadata(
[&pipeline_name](HloPassMetadata* pass_metadata) {
pass_metadata->set_pipeline_name(pipeline_name);
});
}
absl::Status add_current_pass_dump_filename(
const std::string& dump_filename) {
return MutateCurrentHloPassMetadata(
[&dump_filename](HloPassMetadata* pass_metadata) {
pass_metadata->add_dump_filenames(dump_filename);
});
}
absl::Status set_current_pass_module_changed(bool module_changed) {
return MutateCurrentHloPassMetadata(
[&module_changed](HloPassMetadata* pass_metadata) {
pass_metadata->set_module_changed(module_changed);
});
}
absl::Status set_current_pass_module_id(int64_t module_id) {
return MutateCurrentHloPassMetadata(
[&module_id](HloPassMetadata* pass_metadata) {
pass_metadata->set_module_id(module_id);
});
}
absl::Status add_current_pass_module_group_module_id(int64_t module_id) {
return MutateCurrentHloPassMetadata(
[&module_id](HloPassMetadata* pass_metadata) {
pass_metadata->add_module_group_module_ids(module_id);
});
}
private:
absl::StatusOr<HloPassMetadata*> GetCurrentHloPassMetadata();
absl::Status MutateCurrentHloPassMetadata(
absl::FunctionRef<void(HloPassMetadata*)> mutator);
HloModuleMetadataProto module_metadata_;
tsl::Env* env_;
int64_t next_pass_id_ = 1;
std::vector<HloPassMetadata*> running_passes_;
std::optional<HloModuleMetadataProto> prepartitioning_metadata_ =
std::nullopt;
};
}
#endif
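// Editorial usage sketch (not part of the original XLA sources): the expected
// start/mutate/end lifecycle for one pass, using the real clock from
// tsl::Env::Default(); CHECK is assumed to come from tsl logging.
namespace xla {
namespace module_metadata_usage_sketch {
inline void Run() {
  HloModuleMetadata metadata(tsl::Env::Default());
  metadata.RecordPassStart();
  // Setters only succeed while a pass is recorded as running.
  CHECK(metadata.set_current_pass_name("example-pass").ok());
  CHECK(metadata.set_current_pass_module_changed(true).ok());
  CHECK(metadata.RecordPassEnd().ok());
  // With no running pass, the same setter reports a NOT_FOUND error.
  CHECK(!metadata.set_current_pass_name("too-late").ok());
}
}  // namespace module_metadata_usage_sketch
}  // namespace xla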
#include "xla/hlo/ir/hlo_module_metadata.h"
#include <algorithm>
#include "absl/container/flat_hash_set.h"
#include "absl/log/log.h"
#include "xla/util.h"
#include "tsl/platform/env.h"
#include "tsl/platform/protobuf.h"
namespace xla {
absl::StatusOr<HloPassMetadata*>
HloModuleMetadata::GetCurrentHloPassMetadata() {
if (running_passes_.empty()) {
return NotFound(
"HloPassMetadata for currently running pass not found, either because "
"the pass did not call RecordPassStart or because a pass is "
"creating/switching modules without using "
"HloModuleGroup::ReplaceModule.");
}
return running_passes_.back();
}
absl::Status HloModuleMetadata::MutateCurrentHloPassMetadata(
absl::FunctionRef<void(HloPassMetadata*)> mutator) {
TF_ASSIGN_OR_RETURN(HloPassMetadata * pass_metadata,
GetCurrentHloPassMetadata());
mutator(pass_metadata);
return absl::OkStatus();
}
void HloModuleMetadata::RecordPassStart() {
HloPassMetadata* pass_metadata = module_metadata_.add_pass_metadata();
pass_metadata->set_pass_id(next_pass_id_++);
pass_metadata->set_start_timestamp_usec(env_->NowMicros());
running_passes_.push_back(pass_metadata);
}
absl::Status HloModuleMetadata::RecordPassEnd() {
TF_ASSIGN_OR_RETURN(HloPassMetadata * pass_metadata,
GetCurrentHloPassMetadata());
pass_metadata->set_end_timestamp_usec(env_->NowMicros());
running_passes_.pop_back();
return absl::OkStatus();
}
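// Imports metadata from the pre-partitioning module: passes still running at
// partitioning time are copied into this module's metadata so they can be
// finished here, while already-completed passes remain only in the stored
// prepartitioning snapshot.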
void HloModuleMetadata::set_prepartitioning_metadata(
const HloModuleMetadata& prepartitioning_metadata) {
module_metadata_.set_original_module_id(
prepartitioning_metadata.proto().canonical_module_id());
prepartitioning_metadata_ = prepartitioning_metadata.proto();
prepartitioning_metadata_->clear_pass_metadata();
absl::flat_hash_set<HloPassMetadata*> running_passes(
prepartitioning_metadata.running_passes_.begin(),
prepartitioning_metadata.running_passes_.end());
for (const HloPassMetadata& pass_metadata :
prepartitioning_metadata.proto().pass_metadata()) {
if (running_passes.contains(&pass_metadata)) {
HloPassMetadata* added_pass_metadata =
module_metadata_.add_pass_metadata();
*added_pass_metadata = pass_metadata;
running_passes_.push_back(added_pass_metadata);
next_pass_id_ =
std::max(next_pass_id_,
static_cast<int64_t>(added_pass_metadata->pass_id()) + 1);
} else {
*prepartitioning_metadata_->add_pass_metadata() = pass_metadata;
}
}
}
absl::Status HloModuleMetadata::set_custom_metadata(
const ::tsl::protobuf::Message& message) {
TF_ASSIGN_OR_RETURN(HloPassMetadata * pass_metadata,
GetCurrentHloPassMetadata());
if (!pass_metadata->mutable_custom_metadata()->PackFrom(message)) {
LOG(WARNING) << "failed to pack custom metadata for "
<< pass_metadata->pass_id();
return Internal("failed to pack custom metadata");
};
return absl::OkStatus();
}
} | #include "xla/hlo/ir/hlo_module_metadata.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
namespace xla {
namespace {
using ::testing::ElementsAre;
using ::testing::Property;
using ::testing::StrEq;
class TestEnv : public tsl::EnvWrapper {
public:
TestEnv() : EnvWrapper(Env::Default()) {}
uint64_t NowMicros() const override { return current_micros_; }
void SetCurrentMicros(uint64_t micros) { current_micros_ = micros; }
private:
uint64_t current_micros_ = 1;
};
TEST(HloModuleMetadata, RecordsPassStart) {
TestEnv env;
HloModuleMetadata module_metadata(&env);
env.SetCurrentMicros(1234);
module_metadata.RecordPassStart();
EXPECT_THAT(
module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::start_timestamp_usec, 1234)));
}
TEST(HloModuleMetadata, RecordsPassEnd) {
TestEnv env;
HloModuleMetadata module_metadata(&env);
module_metadata.RecordPassStart();
env.SetCurrentMicros(4321);
EXPECT_IS_OK(module_metadata.RecordPassEnd());
EXPECT_THAT(
module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::end_timestamp_usec, 4321)));
}
TEST(HloModuleMetadata, RecordsPassEndInNestedMetadata) {
TestEnv env;
HloModuleMetadata module_metadata(&env);
module_metadata.RecordPassStart();
module_metadata.RecordPassStart();
env.SetCurrentMicros(111);
EXPECT_IS_OK(module_metadata.RecordPassEnd());
EXPECT_THAT(module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::end_timestamp_usec, 0),
Property(&HloPassMetadata::end_timestamp_usec, 111)));
env.SetCurrentMicros(222);
EXPECT_IS_OK(module_metadata.RecordPassEnd());
EXPECT_THAT(module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::end_timestamp_usec, 222),
Property(&HloPassMetadata::end_timestamp_usec, 111)));
}
TEST(HloModuleMetadata, RecordPassEndReturnsNotFound) {
HloModuleMetadata module_metadata(tsl::Env::Default());
EXPECT_EQ(module_metadata.RecordPassEnd().code(), tsl::error::NOT_FOUND);
module_metadata.RecordPassStart();
EXPECT_IS_OK(module_metadata.RecordPassEnd());
EXPECT_EQ(module_metadata.RecordPassEnd().code(), tsl::error::NOT_FOUND);
}
TEST(HloModuleMetadata, SetsHloPassMetadataFields) {
HloModuleMetadata module_metadata(tsl::Env::Default());
module_metadata.RecordPassStart();
EXPECT_IS_OK(module_metadata.set_current_pass_name("fake name"));
EXPECT_THAT(
module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::pass_name, StrEq("fake name"))));
}
TEST(HloModuleMetadata, SetsHloPassMetadataFieldsInNestedMetadata) {
HloModuleMetadata module_metadata(tsl::Env::Default());
module_metadata.RecordPassStart();
module_metadata.RecordPassStart();
EXPECT_IS_OK(module_metadata.set_current_pass_name("fake name"));
EXPECT_THAT(
module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::pass_name, StrEq("")),
Property(&HloPassMetadata::pass_name, StrEq("fake name"))));
}
TEST(HloModuleMetadata, SetterReturnsNotFound) {
HloModuleMetadata module_metadata(tsl::Env::Default());
EXPECT_EQ(module_metadata.set_current_pass_name("fake name").code(),
tsl::error::NOT_FOUND);
}
TEST(HloModuleMetadata, CopiesRunningPrepartitioningPasses) {
HloModuleMetadata old_module_metadata(tsl::Env::Default());
old_module_metadata.RecordPassStart();
EXPECT_IS_OK(old_module_metadata.set_current_pass_name("outer pass"));
old_module_metadata.RecordPassStart();
EXPECT_IS_OK(old_module_metadata.set_current_pass_name("finished pass"));
EXPECT_IS_OK(old_module_metadata.RecordPassEnd());
old_module_metadata.RecordPassStart();
EXPECT_IS_OK(old_module_metadata.set_current_pass_name("inner pass"));
HloModuleMetadata new_module_metadata(tsl::Env::Default());
new_module_metadata.set_prepartitioning_metadata(old_module_metadata);
EXPECT_THAT(
new_module_metadata.proto().pass_metadata(),
ElementsAre(Property(&HloPassMetadata::pass_name, StrEq("outer pass")),
Property(&HloPassMetadata::pass_name, StrEq("inner pass"))));
EXPECT_THAT(new_module_metadata.prepartitioning_metadata()->pass_metadata(),
ElementsAre(Property(&HloPassMetadata::pass_name,
StrEq("finished pass"))));
}
}
} | 2,178 |
#ifndef XLA_HLO_IR_HLO_MODULE_GROUP_H_
#define XLA_HLO_IR_HLO_MODULE_GROUP_H_
#include <iosfwd>
#include <memory>
#include <string>
#include <vector>
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo.pb.h"
namespace xla {
class HloModuleGroup {
public:
explicit HloModuleGroup(absl::string_view name) : name_(name) {}
explicit HloModuleGroup(std::unique_ptr<HloModule> module);
HloModuleGroup(absl::string_view name,
absl::Span<std::unique_ptr<HloModule>> modules);
HloModuleGroup(absl::string_view name,
std::vector<std::unique_ptr<HloModule>>&& modules);
HloModuleGroup(const HloModuleGroup& other) = delete;
HloModuleGroup(HloModuleGroup&& other) = default;
HloModuleGroup& operator=(const HloModuleGroup& other) = delete;
HloModuleGroup& operator=(HloModuleGroup&& other) = default;
const std::vector<HloModule*>& modules() const { return module_ptrs_; }
HloModule& module(int index) const { return *module_ptrs_.at(index); }
void push_back(std::unique_ptr<HloModule> module);
void ReplaceModule(int index, std::unique_ptr<HloModule> module);
std::vector<std::unique_ptr<HloModule>> ConsumeModules();
std::string name() const { return name_; }
std::string ToString() const;
void Cleanup() {
for (auto& module : modules_) {
module->Cleanup();
}
}
template <typename H>
friend H AbslHashValue(H h, const HloModuleGroup& group) {
for (auto& module : group.modules_) {
h = H::combine(std::move(h), *module);
}
return H::combine(std::move(h), group.modules_.size());
}
HloModuleGroupProto ToProto() const;
static absl::StatusOr<HloModuleGroup> CreateFromProto(
const HloModuleGroupProto& proto,
absl::Span<const HloModuleConfig> module_configs);
int size() const { return modules_.size(); }
bool empty() const { return modules_.empty(); }
absl::string_view cache_key() const { return cache_key_; }
void set_cache_key(absl::string_view cache_key) {
cache_key_ = std::string(cache_key);
}
private:
std::string name_;
std::vector<std::unique_ptr<HloModule>> modules_;
std::vector<HloModule*> module_ptrs_;
std::string cache_key_;
};
std::ostream& operator<<(std::ostream& out, const HloModuleGroup& group);
}
#endif
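// Editorial usage sketch (not part of the original XLA sources): ownership
// transfer through the group declared above. The caller provides
// already-constructed modules; CHECK/CHECK_EQ are assumed from tsl logging.
namespace xla {
namespace module_group_usage_sketch {
inline std::vector<std::unique_ptr<HloModule>> Run(
    std::vector<std::unique_ptr<HloModule>> modules) {
  HloModuleGroup group("example_group", std::move(modules));
  // The group owns its modules; modules() exposes raw pointers in order.
  CHECK_EQ(group.size(), static_cast<int>(group.modules().size()));
  // Hand ownership back to the caller; the group is left empty.
  std::vector<std::unique_ptr<HloModule>> consumed = group.ConsumeModules();
  CHECK(group.empty());
  return consumed;
}
}  // namespace module_group_usage_sketch
}  // namespace xla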
#include "xla/hlo/ir/hlo_module_group.h"
#include <memory>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
namespace xla {
HloModuleGroup::HloModuleGroup(std::unique_ptr<HloModule> module)
: name_(module->name()) {
push_back(std::move(module));
}
HloModuleGroup::HloModuleGroup(absl::string_view name,
absl::Span<std::unique_ptr<HloModule>> modules)
: name_(name) {
for (auto& module : modules) {
push_back(std::move(module));
}
}
HloModuleGroup::HloModuleGroup(
absl::string_view name, std::vector<std::unique_ptr<HloModule>>&& modules)
: name_(name) {
for (auto& module : modules) {
push_back(std::move(module));
}
}
std::vector<std::unique_ptr<HloModule>> HloModuleGroup::ConsumeModules() {
std::vector<std::unique_ptr<HloModule>> ret_modules = std::move(modules_);
modules_.clear();
module_ptrs_.clear();
return ret_modules;
}
std::string HloModuleGroup::ToString() const {
std::ostringstream s;
s << "HloModuleGroup " << name() << "\n\n";
for (const HloModule* module : modules()) {
s << module->ToString() << "\n";
}
return s.str();
}
HloModuleGroupProto HloModuleGroup::ToProto() const {
HloModuleGroupProto proto;
proto.set_name(name());
for (const HloModule* module : modules()) {
*proto.add_hlo_modules() = module->ToProto();
}
return proto;
}
absl::StatusOr<HloModuleGroup> HloModuleGroup::CreateFromProto(
const HloModuleGroupProto& proto,
absl::Span<const HloModuleConfig> module_configs) {
TF_RET_CHECK(!proto.name().empty()) << "Module group name cannot be empty";
TF_RET_CHECK(proto.hlo_modules_size() > 0)
<< "Module group must have at least one HLO module";
TF_RET_CHECK(proto.hlo_modules_size() == module_configs.size());
std::vector<std::unique_ptr<HloModule>> modules;
for (int i = 0; i < proto.hlo_modules_size(); ++i) {
const HloModuleProto& module_proto = proto.hlo_modules(i);
TF_ASSIGN_OR_RETURN(
std::unique_ptr<HloModule> module,
HloModule::CreateFromProto(module_proto, module_configs[i]));
modules.push_back(std::move(module));
}
return HloModuleGroup(proto.name(), absl::MakeSpan(modules));
}
void HloModuleGroup::push_back(std::unique_ptr<HloModule> module) {
module->metadata()->set_module_group_name(name());
modules_.push_back(std::move(module));
module_ptrs_.push_back(modules_.back().get());
}
void HloModuleGroup::ReplaceModule(int index,
std::unique_ptr<HloModule> module) {
modules_.at(index)->MoveMetadataToModule(module.get());
modules_.at(index) = std::move(module);
module_ptrs_.at(index) = modules_.at(index).get();
}
std::ostream& operator<<(std::ostream& out, const HloModuleGroup& group) {
out << group.ToString();
return out;
}
}
#include "xla/hlo/ir/hlo_module_group.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_group_metadata.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace op = ::xla::testing::opcode_matchers;
using ::testing::Property;
using ::testing::StrEq;
class HloModuleGroupTest : public HloTestBase {
protected:
HloModuleGroupTest() = default;
};
TEST_F(HloModuleGroupTest, SingleModule) {
const std::string text = R"(
HloModule simple_module
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(text));
HloModuleGroup group(std::move(module));
EXPECT_EQ(group.modules().size(), 1);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
TF_ASSERT_OK_AND_ASSIGN(HloModuleGroup group_copy,
HloModuleGroup::CreateFromProto(
group.ToProto(), {group.module(0).config()}));
EXPECT_EQ(group_copy.modules().size(), 1);
EXPECT_THAT(
group_copy.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
std::vector<std::unique_ptr<HloModule>> modules = group.ConsumeModules();
EXPECT_EQ(modules.size(), 1);
EXPECT_EQ(group.modules().size(), 0);
}
TEST_F(HloModuleGroupTest, MultipleModules) {
const std::string text_0 = R"(
HloModule module0
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
const std::string text_1 = R"(
HloModule module1
ENTRY %entry (a: f32[]) -> f32[] {
ROOT %a = f32[] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_0,
ParseAndReturnVerifiedModule(text_0));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_1,
ParseAndReturnVerifiedModule(text_1));
std::vector<std::unique_ptr<HloModule>> modules;
modules.push_back(std::move(module_0));
modules.push_back(std::move(module_1));
HloModuleGroup group(TestName(), absl::MakeSpan(modules));
EXPECT_EQ(group.modules().size(), 2);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
EXPECT_THAT(group.module(1).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter()));
TF_ASSERT_OK_AND_ASSIGN(HloModuleGroup group_copy,
HloModuleGroup::CreateFromProto(
group.ToProto(), {group.module(0).config(),
group.module(1).config()}));
EXPECT_EQ(group_copy.modules().size(), 2);
}
TEST_F(HloModuleGroupTest, BuildModuleGroupByPushBack) {
const std::string text_0 = R"(
HloModule module0
ENTRY %entry (x: f32[], y: f32[]) -> f32[] {
%x = f32[] parameter(0)
%y = f32[] parameter(1)
ROOT %add = f32[] add(%x, %y)
}
)";
const std::string text_1 = R"(
HloModule module1
ENTRY %entry (a: f32[]) -> f32[] {
ROOT %a = f32[] parameter(0)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_0,
ParseAndReturnVerifiedModule(text_0));
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module_1,
ParseAndReturnVerifiedModule(text_1));
HloModuleGroup group(TestName());
group.push_back(std::move(module_0));
group.push_back(std::move(module_1));
EXPECT_EQ(group.modules().size(), 2);
EXPECT_THAT(
group.module(0).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Add()));
EXPECT_THAT(group.module(1).entry_computation()->instructions(),
::testing::ElementsAre(op::Parameter()));
}
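// Builds a ring of send/recv modules and checks that the companion-instruction
// order reported by HloModuleGroupMetadata is stable across repeated builds.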
TEST_F(HloModuleGroupTest, ModuleGroupCompanionOrder) {
constexpr char text[] = R"(
HloModule module_%d
while_cond {
param = s32[] parameter(0)
ROOT p = pred[] constant(true)
}
while_body {
param = s32[] parameter(0)
token.s = token[] after-all()
token.r = token[] after-all()
send = (s32[], u32[], token[]) send(param, token.s), channel_id=%d
send-done = token[] send-done(send), channel_id=%d
recv = (s32[], u32[], token[]) recv(token.r), channel_id=%d
recv-done = (s32[], token[]) recv-done(recv), channel_id=%d
ROOT data = s32[] get-tuple-element(recv-done), index=0
}
ENTRY entry {
while_init = s32[] constant(1)
ROOT while = s32[] while(while_init), condition=while_cond, body=while_body
}
)";
const int64_t kTrialCount = 5;
const int64_t kDeviceCount = 10;
std::vector<int64_t> companion_order;
for (int64_t t = 0; t < kTrialCount; ++t) {
HloModuleGroup group(TestName());
for (int64_t i = 0; i < kDeviceCount; ++i) {
const int64_t send_channel = i;
const int64_t recv_channel = i == 0 ? kDeviceCount - 1 : i - 1;
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(absl::StrFormat(
text, i, send_channel, send_channel,
recv_channel, recv_channel)));
group.push_back(std::move(module));
}
ASSERT_EQ(group.modules().size(), kDeviceCount);
TF_ASSERT_OK_AND_ASSIGN(auto metadata,
HloModuleGroupMetadata::Build(group.modules()));
ASSERT_EQ(metadata->companion_sets().size(), 1);
std::vector<int64_t> module_ids;
const auto& companion_sets = *metadata->companion_sets()[0];
module_ids.reserve(companion_sets.size());
for (HloInstruction* companion : companion_sets) {
module_ids.push_back(metadata->GetModuleId(companion->GetModule()));
}
if (t == 0) {
companion_order = module_ids;
} else {
EXPECT_TRUE(absl::c_equal(companion_order, module_ids));
}
}
}
TEST_F(HloModuleGroupTest, ReplaceModuleMetadata) {
auto old_module = CreateNewVerifiedModule();
int old_module_id = old_module->unique_id();
old_module->metadata()->RecordPassStart();
TF_EXPECT_OK(old_module->metadata()->set_current_pass_name("fake pass"));
HloModuleGroup group(std::move(old_module));
EXPECT_EQ(group.module(0).metadata()->proto().module_group_name(),
group.name());
auto new_module = CreateNewVerifiedModule();
group.ReplaceModule(0, std::move(new_module));
EXPECT_NE(group.module(0).unique_id(), old_module_id);
const HloModuleMetadataProto& module_metadata =
group.module(0).metadata()->proto();
EXPECT_EQ(module_metadata.canonical_module_id(), old_module_id);
const HloPassMetadata& pass_metadata =
*module_metadata.pass_metadata().rbegin();
EXPECT_THAT(pass_metadata,
Property(&HloPassMetadata::pass_name, StrEq("fake pass")));
}
}
}
#ifndef XLA_HLO_IR_HLO_SHARDING_H_
#define XLA_HLO_IR_HLO_SHARDING_H_
#include <cstdint>
#include <map>
#include <memory>
#include <optional>
#include <ostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "xla/array.h"
#include "xla/hlo/ir/tile_assignment.h"
#include "xla/shape_tree.h"
#include "xla/xla_data.pb.h"
namespace xla {
class HloSharding {
public:
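// Leaf sharding factories: Replicate() copies the full data to every device,
// Manual() marks the operand as manually partitioned, and Unknown() leaves the
// sharding unspecified.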
static HloSharding Replicate(absl::Span<const OpMetadata> metadata = {}) {
return HloSharding(false, true, false,
metadata);
}
static HloSharding Manual(absl::Span<const OpMetadata> metadata = {}) {
return HloSharding(true, false, false,
metadata);
}
static HloSharding Unknown(absl::Span<const OpMetadata> metadata = {}) {
return HloSharding(false, false, true,
metadata);
}
static HloSharding AssignDevice(int64_t device_id,
absl::Span<const OpMetadata> metadata = {});
static HloSharding Tile(TileAssignment tile_assignment,
absl::Span<const OpMetadata> metadata = {}) {
return HloSharding(tile_assignment, false,
metadata);
}
static HloSharding Tile(Array<int64_t> tile_assignment,
absl::Span<const OpMetadata> metadata = {}) {
return HloSharding(TileAssignment(std::make_shared<const Array<int64_t>>(
std::move(tile_assignment))),
false, metadata);
}
static HloSharding IotaTile(absl::Span<const int64_t> tile_assignment_dims,
absl::Span<const OpMetadata> metadata = {}) {
return HloSharding(TileAssignment(tile_assignment_dims),
false, metadata);
}
static HloSharding IotaTile(absl::Span<const int64_t> tile_assignment_dims,
absl::Span<const int64_t> reshape_dims,
absl::Span<const int> transpose_perm,
absl::Span<const OpMetadata> metadata = {}) {
return HloSharding(
TileAssignment(tile_assignment_dims, reshape_dims, transpose_perm),
false, metadata);
}
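// Creates a partially replicated tiling: the last dimension of the tile
// assignment enumerates the devices that jointly hold (replicate) each tile.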
static HloSharding PartialTile(
const TileAssignment& tile_assignment_last_dim_replicate,
absl::Span<const OpMetadata> metadata = {});
static HloSharding PartialTile(
Array<int64_t> tile_assignment_last_dim_replicate,
absl::Span<const OpMetadata> metadata = {}) {
return PartialTile(TileAssignment(std::make_shared<const Array<int64_t>>(
std::move(tile_assignment_last_dim_replicate))),
metadata);
}
static HloSharding Subgroup(const TileAssignment& tile_assignment,
absl::Span<const OpSharding::Type> subgroup_types,
absl::Span<const OpMetadata> metadata = {});
static HloSharding Subgroup(Array<int64_t> tile_assignment,
absl::Span<const OpSharding::Type> subgroup_types,
absl::Span<const OpMetadata> metadata = {}) {
return Subgroup(
TileAssignment(std::make_shared<const Array<int64_t>>(tile_assignment)),
subgroup_types, metadata);
}
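// Shards the single dimension of a rank-1 shape across `num_tiles` devices.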
static HloSharding Tile1D(const Shape& input_shape, int64_t num_tiles,
absl::Span<const OpMetadata> metadata = {});
static HloSharding Tuple(const ShapeTree<HloSharding>& sub_shardings);
static HloSharding Tuple(const Shape& tuple_shape,
absl::Span<const HloSharding> shardings);
static HloSharding SingleTuple(const Shape& tuple_shape,
const HloSharding& sharding);
static HloSharding Single(const Shape& shape, const HloSharding& sharding);
static absl::StatusOr<HloSharding> FromProto(const OpSharding& proto);
static bool IsReservedDevice(int64_t device) { return device < 0; }
OpSharding ToProto() const;
void Print(Printer* printer, bool include_metadata = false) const;
std::string ToString(bool include_metadata = false) const;
absl::Status Validate(const Shape& shape,
std::optional<int64_t> num_devices = {}) const;
bool IsTuple() const { return tuple_; }
bool IsReplicated() const {
if (!IsTuple()) {
return replicated_;
}
return absl::c_all_of(
tuple_elements_, [](const HloSharding& s) { return s.IsReplicated(); });
}
bool IsReplicatedLeaf() const {
DCHECK(!IsTuple());
return replicated_;
}
bool IsTileMaximal() const {
if (!IsTuple()) {
return maximal_;
}
return absl::c_all_of(tuple_elements_, [](const HloSharding& s) {
return s.IsTileMaximal();
});
}
bool IsTileMaximalLeaf() const {
DCHECK(!IsTuple());
return maximal_;
}
bool IsManual() const {
if (!IsTuple()) {
return manual_;
}
return absl::c_all_of(tuple_elements_,
[](const HloSharding& s) { return s.IsManual(); });
}
bool IsManualLeaf() const {
DCHECK(!IsTuple());
return manual_;
}
bool IsUnknown() const {
if (!IsTuple()) {
return unknown_;
}
return absl::c_all_of(tuple_elements_,
[](const HloSharding& s) { return s.IsUnknown(); });
}
bool IsUnknownLeaf() const {
DCHECK(!IsTuple());
return unknown_;
}
bool IsShardGroup() const {
if (!IsTuple()) {
return shard_group_.shard_group_id != -1 &&
(shard_group_.shard_like || shard_group_.shard_as);
}
return !tuple_elements_.empty() &&
absl::c_all_of(tuple_elements_, [](const HloSharding& s) {
return s.IsShardGroup();
});
}
bool IsShardAs() const {
if (!IsTuple()) {
return shard_group_.shard_group_id != -1 && shard_group_.shard_as;
}
return !tuple_elements_.empty() &&
absl::c_all_of(tuple_elements_,
[](const HloSharding& s) { return s.IsShardAs(); });
}
bool IsShardLike() const {
if (!IsTuple()) {
return shard_group_.shard_group_id != -1 && shard_group_.shard_like;
}
return !tuple_elements_.empty() &&
absl::c_all_of(tuple_elements_,
[](const HloSharding& s) { return s.IsShardLike(); });
}
bool IsManualSubgroup() const {
if (!IsTuple()) {
return absl::c_linear_search(subgroup_types_, OpSharding::MANUAL);
}
return absl::c_all_of(tuple_elements_, [](const HloSharding& s) {
return s.IsManualSubgroup();
});
}
bool IsTiled() const {
return !IsTileMaximal() && !IsManual() && !IsUnknown();
}
bool IsTiledLeaf() const {
return !IsTileMaximalLeaf() && !IsManualLeaf() && !IsUnknownLeaf();
}
bool ReplicateOnLastTileDim() const { return replicate_on_last_tile_dim_; }
bool HasPartialReplication() const {
return replicate_on_last_tile_dim_ ||
absl::c_linear_search(subgroup_types_, OpSharding::REPLICATED);
}
bool UsesDevice(int64_t device) const;
std::map<int64_t, int64_t> UsedDevices(int64_t* count) const;
std::vector<int64_t> TileIndexForDevice(int64_t device) const;
int64_t DeviceForTileIndex(absl::Span<const int64_t> index) const;
std::vector<int64_t> TileOffsetForDevice(const Shape& shape,
int64_t device) const;
std::vector<int64_t> TileLimitForDevice(const Shape& shape,
int64_t device) const;
std::optional<int64_t> UniqueDevice() const;
int64_t GetUniqueDevice() const;
bool HasUniqueDevice() const { return UniqueDevice().has_value(); }
absl::StatusOr<ShapeTree<HloSharding>> AsShapeTree(const Shape& shape) const;
ShapeTree<HloSharding> GetAsShapeTree(const Shape& shape) const {
return AsShapeTree(shape).value();
}
HloSharding GetSubSharding(const Shape& shape, const ShapeIndex& index) const;
absl::StatusOr<HloSharding> GetTupleSharding(const Shape& shape) const;
HloSharding NormalizeTupleSharding(const Shape& shape) const;
std::optional<HloSharding> ExtractSingleSharding() const;
HloSharding WithoutMetadata() const;
HloSharding WithMetadata(absl::Span<const OpMetadata> metadata,
bool overwrite) const;
bool operator==(const HloSharding& other) const {
return replicated_ == other.replicated_ && maximal_ == other.maximal_ &&
manual_ == other.manual_ && unknown_ == other.unknown_ &&
tile_assignment_ == other.tile_assignment_ &&
tuple_elements_ == other.tuple_elements_ &&
replicate_on_last_tile_dim_ == other.replicate_on_last_tile_dim_ &&
subgroup_types_ == other.subgroup_types_ &&
shard_group_ == other.shard_group_;
}
bool operator!=(const HloSharding& other) const { return !(*this == other); }
template <typename H>
friend H AbslHashValue(H h, const HloSharding& sharding) {
if (sharding.tuple_) {
return H::combine(std::move(h), sharding.tuple_elements_);
}
return H::combine(std::move(h), sharding.replicated_, sharding.manual_,
sharding.unknown_, sharding.tile_assignment_.array(),
sharding.replicate_on_last_tile_dim_,
sharding.shard_group_.ToString());
}
const TileAssignment& tile_assignment() const { return tile_assignment_; }
const std::vector<OpSharding::Type>& subgroup_types() const {
return subgroup_types_;
}
std::vector<HloSharding>& tuple_elements() { return tuple_elements_; }
const std::vector<HloSharding>& tuple_elements() const {
return tuple_elements_;
}
Shape TileShape(const Shape& shape) const;
Shape TileShape(const Shape& shape, int64_t device) const;
int64_t TotalNumTiles() const;
int64_t NumTiles() const;
int64_t NumTilesLeaf() const;
int64_t NumTiles(absl::Span<const int64_t> dims) const;
std::vector<OpMetadata>& metadata() { return metadata_; }
const std::vector<OpMetadata>& metadata() const { return metadata_; }
int64_t SubgroupReplicationDim() const {
auto it = absl::c_find(subgroup_types_, OpSharding::REPLICATED);
if (it != subgroup_types_.end()) {
return (it - subgroup_types_.begin()) + TiledDataRank();
}
if (replicate_on_last_tile_dim_) {
return tile_assignment_.num_dimensions() - 1;
}
return -1;
}
int64_t SubgroupManualDim() const {
auto it = absl::c_find(subgroup_types_, OpSharding::MANUAL);
if (it != subgroup_types_.end()) {
return (it - subgroup_types_.begin()) + TiledDataRank();
}
return -1;
}
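// Rank of the tiled data, i.e. the tile assignment rank minus the trailing
// replication and subgroup dimensions.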
int64_t TiledDataRank() const {
CHECK(IsTiled());
int64_t rank = tile_assignment_.num_dimensions();
if (ReplicateOnLastTileDim()) {
rank--;
}
rank -= subgroup_types_.size();
return rank;
}
int64_t TiledDataRankLeaf() const {
DCHECK(!IsTuple());
CHECK(IsTiledLeaf());
int64_t rank = tile_assignment_.num_dimensions();
if (ReplicateOnLastTileDim()) {
rank--;
}
rank -= subgroup_types_.size();
return rank;
}
static int64_t RequiredLeaves(const Shape& shape);
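// Shard-group annotation: instructions with the same group id are constrained
// together, either to the exact same sharding (shard_as) or to a similar one
// (shard_like). A group id of -1 (NotShardGroup) means no constraint.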
struct ShardGroup {
ShardGroup(int64_t shard_group_id, bool shard_as, bool shard_like)
: shard_group_id(shard_group_id),
shard_as(shard_as),
shard_like(shard_like) {}
bool operator==(const ShardGroup& rhs) const {
return shard_group_id == rhs.shard_group_id && shard_as == rhs.shard_as &&
shard_like == rhs.shard_like;
}
std::string ToString() const {
std::ostringstream result;
if (shard_as) {
result << "shard_as " << shard_group_id;
} else if (shard_like) {
result << "shard_like " << shard_group_id;
}
return result.str();
}
int64_t shard_group_id = 0;
bool shard_as;
bool shard_like;
};
static ShardGroup NotShardGroup() {
return ShardGroup(
-1,
false,
false);
}
static ShardGroup ShardAs(int64_t shard_group_id) {
return ShardGroup(shard_group_id,
true,
false);
}
static ShardGroup ShardLike(int64_t shard_group_id) {
return ShardGroup(shard_group_id,
false,
true);
}
HloSharding& SetShardGroup(const ShardGroup& shard_group) {
shard_group_ = shard_group;
return *this;
}
HloSharding& SetShardGroupFromProto(const OpSharding& proto) {
ShardGroup shard_group = NotShardGroup();
if (proto.is_shard_group()) {
if (proto.shard_group_type() == OpSharding::AS) {
shard_group = ShardAs(proto.shard_group_id());
} else {
shard_group = ShardLike(proto.shard_group_id());
}
}
SetShardGroup(shard_group);
return *this;
}
HloSharding& ClearShardGroup() {
shard_group_ = NotShardGroup();
return *this;
}
const ShardGroup& GetShardGroup() const { return shard_group_; }
private:
explicit HloSharding(bool manual, bool replicated, bool unknown,
absl::Span<const OpMetadata> metadata)
: metadata_(metadata.begin(), metadata.end()),
replicated_(replicated),
maximal_(replicated),
tuple_(false),
manual_(manual),
unknown_(unknown),
replicate_on_last_tile_dim_(false) {}
explicit HloSharding(int64_t device_id, absl::Span<const OpMetadata> metadata)
: tile_assignment_(device_id),
metadata_(metadata.begin(), metadata.end()),
replicated_(false),
maximal_(true),
tuple_(false),
manual_(false),
unknown_(false),
replicate_on_last_tile_dim_(false) {}
explicit HloSharding(TileAssignment tile_assignment,
bool replicate_on_last_tile_dim,
absl::Span<const OpMetadata> metadata = {})
: tile_assignment_(std::move(tile_assignment)),
metadata_(metadata.begin(), metadata.end()),
replicated_(false),
maximal_(false),
tuple_(false),
manual_(false),
unknown_(false),
replicate_on_last_tile_dim_(replicate_on_last_tile_dim) {}
explicit HloSharding(TileAssignment tile_assignment,
absl::Span<const OpSharding::Type> subgroup_types,
absl::Span<const OpMetadata> metadata = {})
: tile_assignment_(std::move(tile_assignment)),
metadata_(metadata.begin(), metadata.end()),
subgroup_types_(subgroup_types.begin(), subgroup_types.end()),
replicated_(false),
maximal_(false),
tuple_(false),
manual_(false),
unknown_(false),
replicate_on_last_tile_dim_(false) {}
explicit HloSharding(std::vector<HloSharding> tuple_shardings)
: tuple_elements_(std::move(tuple_shardings)),
replicated_(false),
maximal_(false),
tuple_(true),
manual_(false),
unknown_(false),
replicate_on_last_tile_dim_(false) {}
explicit HloSharding(const HloSharding& other, TileAssignment tile_assignment)
: tile_assignment_(std::move(tile_assignment)),
tuple_elements_(other.tuple_elements_),
metadata_(other.metadata_),
subgroup_types_(other.subgroup_types_),
replicated_(other.replicated_),
maximal_(other.maximal_),
tuple_(other.tuple_),
manual_(other.manual_),
unknown_(other.unknown_),
replicate_on_last_tile_dim_(other.replicate_on_last_tile_dim_) {
CHECK(tile_assignment_ == other.tile_assignment_)
<< tile_assignment_.ToString() << " v.s. "
<< other.tile_assignment_.ToString();
}
friend class HloShardingTestHelper;
absl::Status CheckLeafCount(const Shape& shape) const;
absl::Status ValidateTuple(const Shape& shape,
std::optional<int64_t> num_devices) const;
absl::Status ValidateNonTuple(const Shape& shape,
std::optional<int64_t> num_devices) const;
TileAssignment tile_assignment_;
std::vector<HloSharding> tuple_elements_;
std::vector<OpMetadata> metadata_;
std::vector<OpSharding::Type> subgroup_types_;
bool replicated_ : 1;
bool maximal_ : 1;
bool tuple_ : 1;
bool manual_ : 1;
bool unknown_ : 1;
bool replicate_on_last_tile_dim_ : 1;
ShardGroup shard_group_ = NotShardGroup();
};
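// Example (illustrative): constructing and querying a simple tiled sharding.
//   HloSharding tiled = HloSharding::IotaTile({2, 2});  // 2x2 device grid
//   CHECK(tiled.IsTiled());
//   CHECK_EQ(tiled.NumTiles(), 4);
//   HloSharding replicated = HloSharding::Replicate();
//   CHECK(replicated.IsReplicated());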
std::ostream& operator<<(std::ostream& out, const HloSharding& sharding);
}
#endif
#include "xla/hlo/ir/hlo_sharding.h"
#include <algorithm>
#include <array>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <optional>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_op_metadata.h"
#include "xla/overflow_util.h"
#include "xla/printer.h"
#include "xla/status_macros.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/protobuf.h"
namespace xla {
namespace {
using absl::StrCat;
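// Tries to rewrite the iota tile assignment described by (dims, perm) so that
// its minor-most `group_size` elements form a contiguous, sorted group of
// dimensions, splitting at most one dimension if required. On success the
// adjusted dimensions and permutation are written to `new_dims`/`new_perm` and
// the function returns true; otherwise it returns false.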
bool GroupMinorIotaDimsSorted(absl::Span<const int64_t> dims,
absl::Span<const int> perm, int64_t group_size,
absl::InlinedVector<int64_t, 6>& new_dims,
absl::InlinedVector<int, 6>& new_perm) {
DCHECK_GT(group_size, 1);
int grouped_dims = 0;
std::optional<std::pair<int, int64_t>> split_dim_and_size;
for (int i = perm.size() - 1; i >= 0; --i) {
const int dim = perm[i];
const int64_t dim_size = dims[dim];
if (dim_size <= group_size) {
if (group_size % dim_size != 0) {
return false;
}
group_size /= dim_size;
++grouped_dims;
} else {
if (dim_size % group_size != 0) {
return false;
}
split_dim_and_size.emplace(dim, dim_size / group_size);
++grouped_dims;
group_size = 1;
break;
}
}
if (!split_dim_and_size) {
new_dims.assign(dims.begin(), dims.end());
new_perm.assign(perm.begin(), perm.end());
std::stable_sort(new_perm.end() - grouped_dims, new_perm.end());
return true;
}
new_dims.resize(dims.size() + 1);
new_perm.resize(perm.size() + 1);
const int split_i = split_dim_and_size->first;
for (int i = 0; i < split_i; ++i) {
new_dims[i] = dims[i];
}
new_dims[split_i] = split_dim_and_size->second;
new_dims[split_i + 1] = dims[split_i] / split_dim_and_size->second;
for (int i = split_i + 2; i < new_perm.size(); ++i) {
new_dims[i] = dims[i - 1];
}
int perm_split = 0;
for (int i = 0; i < perm.size(); ++i) {
const int perm_dim = perm[i];
new_perm[i] = perm_dim <= split_i ? perm_dim : (perm_dim + 1);
if (perm_dim == split_i) {
perm_split = i;
break;
}
}
new_perm[perm_split + 1] = new_perm[perm_split] + 1;
for (int i = perm_split + 2; i < new_perm.size(); ++i) {
const int perm_dim = perm[i - 1];
new_perm[i] = perm_dim <= split_i ? perm_dim : (perm_dim + 1);
}
std::stable_sort(new_perm.end() - grouped_dims, new_perm.end());
return true;
}
}
HloSharding HloSharding::AssignDevice(int64_t device_id,
absl::Span<const OpMetadata> metadata) {
return HloSharding(device_id, metadata);
}
HloSharding HloSharding::Tile1D(const Shape& input_shape, int64_t num_tiles,
absl::Span<const OpMetadata> metadata) {
CHECK_EQ(1, input_shape.rank());
CHECK_GT(num_tiles, 1);
#include "xla/hlo/ir/hlo_sharding.h"
#include <algorithm>
#include <set>
#include <sstream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>
#include "absl/hash/hash.h"
#include "xla/protobuf_util.h"
#include "xla/service/hlo_parser.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla_data.pb.h"
namespace xla {
namespace {
Array<int64_t> MakeArray(absl::Span<const int64_t> dimensions,
absl::Span<const int64_t> contents) {
Array<int64_t> a(dimensions);
std::copy(contents.begin(), contents.end(), a.begin());
return a;
}
OpMetadata GetMetadata(const std::string& op_name) {
OpMetadata metadata;
metadata.set_op_name(op_name);
return metadata;
}
std::vector<OpMetadata> SingleMetadata() { return {GetMetadata("a")}; }
std::vector<OpMetadata> ListMetadata() {
return {GetMetadata("b"), GetMetadata("c")};
}
class HloShardingTest : public HloTestBase {};
TEST_F(HloShardingTest, Replicate) {
HloSharding sharding = HloSharding::Replicate();
EXPECT_TRUE(sharding.IsReplicated());
EXPECT_TRUE(sharding.IsTileMaximal());
EXPECT_TRUE(sharding.UsesDevice(0));
EXPECT_TRUE(sharding.UsesDevice(65535));
HloSharding other = HloSharding::Replicate();
EXPECT_EQ(other, sharding);
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4}),
2));
EXPECT_FALSE(sharding.HasUniqueDevice());
}
TEST_F(HloShardingTest, DevicePlacement) {
HloSharding sharding = HloSharding::AssignDevice(5);
EXPECT_FALSE(sharding.IsReplicated());
EXPECT_TRUE(sharding.IsTileMaximal());
EXPECT_FALSE(sharding.UsesDevice(0));
EXPECT_TRUE(sharding.UsesDevice(5));
EXPECT_EQ(5, sharding.GetUniqueDevice());
HloSharding other = HloSharding::Replicate();
EXPECT_NE(other, sharding);
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4}),
6));
EXPECT_IS_NOT_OK(
sharding.Validate(ShapeUtil::MakeShape(U32, {4}), 5));
ShapeTree<HloSharding> shape_tree =
sharding.GetAsShapeTree(ShapeUtil::MakeShape(U32, {4}));
EXPECT_EQ(shape_tree.element({}), sharding);
EXPECT_TRUE(shape_tree.IsLeaf({}));
}
TEST_F(HloShardingTest, ProtoRoundTrip) {
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
auto* tiled = proto.add_tuple_shardings();
tiled->set_type(OpSharding::OTHER);
tiled->add_tile_assignment_devices(0);
tiled->add_tile_assignment_devices(1);
tiled->add_tile_assignment_dimensions(1);
tiled->add_tile_assignment_dimensions(2);
*tiled->add_metadata() = GetMetadata("a");
*tiled->add_metadata() = GetMetadata("b");
auto* replicated = proto.add_tuple_shardings();
replicated->set_type(OpSharding::REPLICATED);
*replicated->add_metadata() = GetMetadata("c");
auto* manual = proto.add_tuple_shardings();
manual->set_type(OpSharding::MANUAL);
HloSharding sharding = HloSharding::FromProto(proto).value();
EXPECT_TRUE(protobuf_util::ProtobufEquals(proto, sharding.ToProto()));
}
TEST_F(HloShardingTest, IotaProtoRoundTrip) {
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
auto* tiled = proto.add_tuple_shardings();
tiled->set_type(OpSharding::OTHER);
tiled->add_tile_assignment_dimensions(6);
tiled->add_tile_assignment_dimensions(1);
tiled->add_iota_reshape_dims(3);
tiled->add_iota_reshape_dims(2);
tiled->add_iota_transpose_perm(1);
tiled->add_iota_transpose_perm(0);
*tiled->add_metadata() = GetMetadata("a");
*tiled->add_metadata() = GetMetadata("b");
auto* replicated = proto.add_tuple_shardings();
replicated->set_type(OpSharding::REPLICATED);
*replicated->add_metadata() = GetMetadata("c");
auto* manual = proto.add_tuple_shardings();
manual->set_type(OpSharding::MANUAL);
HloSharding sharding = HloSharding::FromProto(proto).value();
EXPECT_TRUE(protobuf_util::ProtobufEquals(proto, sharding.ToProto()));
}
TEST_F(HloShardingTest, Tile) {
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 0, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(F32, {4, 6}),
4));
}
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4, 6}),
2));
}
{
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
EXPECT_IS_NOT_OK(sharding.Validate(ShapeUtil::MakeShape(U32, {4, 6}),
5));
}
{
Shape shape = ShapeUtil::MakeShape(U32, {4, 5});
HloSharding sharding = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
EXPECT_IS_OK(sharding.Validate(ShapeUtil::MakeShape(F32, {3, 5}),
4));
EXPECT_EQ(0, sharding.DeviceForTileIndex({0, 0}));
EXPECT_EQ(3, sharding.DeviceForTileIndex({0, 1}));
EXPECT_EQ(2, sharding.DeviceForTileIndex({1, 0}));
EXPECT_EQ(1, sharding.DeviceForTileIndex({1, 1}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 0),
(std::vector<int64_t>{0, 0}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 3),
(std::vector<int64_t>{0, 3}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 2),
(std::vector<int64_t>{2, 0}));
EXPECT_EQ(sharding.TileOffsetForDevice(shape, 1),
(std::vector<int64_t>{2, 3}));
EXPECT_FALSE(sharding.HasUniqueDevice());
}
}
TEST_F(HloShardingTest, V1V2TileEquivalence) {
{
HloSharding v1 = HloSharding::Tile(MakeArray({2, 2}, {0, 1, 2, 3}));
HloSharding v2 = HloSharding::IotaTile({2, 2});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::Tile(MakeArray({2, 2}, {0, 2, 1, 3}));
HloSharding v2 = HloSharding::IotaTile({2, 2}, {2, 2}, {1, 0});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Tile(MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}));
HloSharding v2 = HloSharding::IotaTile({2, 2, 2}, {2, 2, 2}, {2, 0, 1});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, V1V2PartialTileEquivalence) {
{
HloSharding v1 = HloSharding::PartialTile(MakeArray({2, 2}, {0, 1, 2, 3}));
HloSharding v2 = HloSharding::PartialTile(
TileAssignment((absl::Span<const int64_t>){2, 2}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::PartialTile(MakeArray({2, 2}, {0, 2, 1, 3}));
HloSharding v2 =
HloSharding::PartialTile(TileAssignment({2, 2}, {2, 2}, {1, 0}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 = HloSharding::PartialTile(
MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}));
HloSharding v2 = HloSharding::PartialTile(
TileAssignment({2, 2, 2}, {2, 2, 2}, {2, 0, 1}));
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, V1V2SubgroupEquivalence) {
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2}, {0, 1, 2, 3}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 =
HloSharding::Subgroup(TileAssignment((absl::Span<const int64_t>){2, 2}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2}, {0, 2, 1, 3}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 =
HloSharding::Subgroup(TileAssignment({2, 2}, {2, 2}, {1, 0}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
{
HloSharding v1 =
HloSharding::Subgroup(MakeArray({2, 2, 2}, {0, 2, 4, 6, 1, 3, 5, 7}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
HloSharding v2 =
HloSharding::Subgroup(TileAssignment({2, 2, 2}, {2, 2, 2}, {2, 0, 1}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_EQ(v1, v2);
EXPECT_EQ(absl::HashOf(v1), absl::HashOf(v2));
}
}
TEST_F(HloShardingTest, EmptySingleTuple) {
HloSharding sharding = HloSharding::SingleTuple(ShapeUtil::MakeTupleShape({}),
HloSharding::AssignDevice(0));
EXPECT_TRUE(sharding.ExtractSingleSharding());
}
TEST_F(HloShardingTest, EmptySingleTupleIsNotShardGroup) {
HloSharding sharding = HloSharding::SingleTuple(ShapeUtil::MakeTupleShape({}),
HloSharding::AssignDevice(0));
EXPECT_FALSE(sharding.IsShardGroup());
EXPECT_FALSE(sharding.IsShardAs());
EXPECT_FALSE(sharding.IsShardLike());
}
TEST_F(HloShardingTest, NestedTuple) {
Shape nested_tuple_shape = ShapeUtil::MakeTupleShape({
ShapeUtil::MakeShape(F32, {}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3})}),
ShapeUtil::MakeShape(F32, {4, 6}),
});
HloSharding tiled_sharding = HloSharding::Tile(Array<int64_t>({{0, 1}}));
OpSharding proto;
proto.set_type(OpSharding::TUPLE);
*proto.add_tuple_shardings() = HloSharding::Replicate().ToProto();
*proto.add_tuple_shardings() = HloSharding::AssignDevice(0).ToProto();
*proto.add_tuple_shardings() = tiled_sharding.ToProto();
HloSharding tuple_sharding = HloSharding::FromProto(proto).value();
ShapeTree<HloSharding> shape_tree =
tuple_sharding.GetAsShapeTree(nested_tuple_shape);
EXPECT_EQ(shape_tree.element({0}), HloSharding::Replicate());
EXPECT_EQ(shape_tree.element({1, 0}), HloSharding::AssignDevice(0));
EXPECT_EQ(shape_tree.element({2}), tiled_sharding);
EXPECT_IS_OK(tuple_sharding.Validate(nested_tuple_shape, 2));
EXPECT_IS_NOT_OK(tuple_sharding.Validate(ShapeUtil::MakeTupleShape({}),
5));
EXPECT_IS_NOT_OK(tuple_sharding.Validate(ShapeUtil::MakeShape(F32, {}),
5));
}
TEST_F(HloShardingTest, NormalizeTrivialSubgroupToManual) {
HloSharding sharding =
HloSharding::Subgroup(MakeArray({1, 2, 1}, {0, 1}),
{OpSharding::MANUAL, OpSharding::REPLICATED});
EXPECT_TRUE(sharding.IsManual());
}
TEST_F(HloShardingTest, Hash) {
auto hash_compare_equal = [](const HloSharding& a, const HloSharding& b) {
if (absl::HashOf(a) != absl::HashOf(b)) {
return false;
}
return a == b;
};
{
HloSharding sharding1 = HloSharding::Replicate();
HloSharding sharding2 = HloSharding::Replicate();
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::AssignDevice(1);
HloSharding sharding2 = HloSharding::AssignDevice(1);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::AssignDevice(1);
HloSharding sharding2 = HloSharding::AssignDevice(2);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
HloSharding sharding2 = HloSharding::Tile(MakeArray({2, 2}, {0, 3, 2, 1}));
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
HloSharding sharding1 = HloSharding::IotaTile({3, 4});
HloSharding sharding2 = HloSharding::Tile(
MakeArray({3, 4}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}));
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
HloSharding default_sharding = HloSharding::Replicate();
{
ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),
default_sharding);
HloSharding sharding1 = HloSharding::Replicate();
HloSharding sharding2 = HloSharding::Tuple(shape_tree);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree(ShapeUtil::MakeTupleShape({}),
default_sharding);
HloSharding sharding1 = HloSharding::Tuple(shape_tree);
HloSharding sharding2 = HloSharding::Tuple(shape_tree);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree1(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree1.mutable_element({0}) = HloSharding::Replicate();
ShapeTree<HloSharding> shape_tree2(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree2.mutable_element({0}) = HloSharding::AssignDevice(0);
HloSharding sharding1 = HloSharding::Tuple(shape_tree1);
HloSharding sharding2 = HloSharding::Tuple(shape_tree2);
EXPECT_FALSE(hash_compare_equal(sharding1, sharding2));
}
{
ShapeTree<HloSharding> shape_tree1(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree1.mutable_element({0}) = HloSharding::AssignDevice(0);
ShapeTree<HloSharding> shape_tree2(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {4})}),
default_sharding);
*shape_tree2.mutable_element({0}) = HloSharding::AssignDevice(0);
HloSharding sharding1 = HloSharding::Tuple(shape_tree1);
HloSharding sharding2 = HloSharding::Tuple(shape_tree2);
EXPECT_TRUE(hash_compare_equal(sharding1, sharding2));
}
}
using ShardingWithMetadataParamType =
std::tuple<std::vector<OpMetadata>, std::string>;
TEST_F(HloShardingTest, ToStringReplicatedTest) {
HloSharding sharding = HloSharding::Replicate();
EXPECT_EQ(sharding.ToString(), "{replicated}");
}
class HloReplicateShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloReplicateShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::Replicate(std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(false), "{replicated}");
EXPECT_EQ(sharding.ToString(true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloReplicateShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{replicated}"),
std::make_tuple(SingleMetadata(),
"{replicated metadata={op_name=\"a\"}}"),
std::make_tuple(
ListMetadata(),
"{replicated metadata={{op_name=\"b\"}, {op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringAssignDeviceTest) {
HloSharding sharding = HloSharding::AssignDevice(7);
EXPECT_EQ(sharding.ToString(), "{maximal device=7}");
}
class HloAssignDeviceShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloAssignDeviceShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::AssignDevice(7, std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(false),
"{maximal device=7}");
EXPECT_EQ(sharding.ToString(true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloAssignDeviceShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{maximal device=7}"),
std::make_tuple(SingleMetadata(),
"{maximal device=7 metadata={op_name=\"a\"}}"),
std::make_tuple(
ListMetadata(),
"{maximal device=7 metadata={{op_name=\"b\"}, {op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringTiledTest) {
HloSharding sharding =
HloSharding::Tile(Array3D<int64_t>({{{2, 3}}, {{5, 7}}}));
EXPECT_EQ(sharding.ToString(), "{devices=[2,1,2]2,3,5,7}");
}
TEST_F(HloShardingTest, ToStringIotaTiledTest) {
HloSharding sharding = HloSharding::IotaTile({3, 4}, {2, 2, 3}, {2, 1, 0});
EXPECT_EQ(sharding.ToString(), "{devices=[3,4]<=[2,2,3]T(2,1,0)}");
}
class HloTiledShardingWithMetadataTest
: public ::testing::TestWithParam<ShardingWithMetadataParamType> {};
TEST_P(HloTiledShardingWithMetadataTest, ToStringTest) {
HloSharding sharding = HloSharding::Tile(
Array3D<int64_t>({{{2, 3}}, {{5, 7}}}), std::get<0>(GetParam()));
EXPECT_EQ(sharding.ToString(false),
"{devices=[2,1,2]2,3,5,7}");
EXPECT_EQ(sharding.ToString(true),
std::get<1>(GetParam()));
}
INSTANTIATE_TEST_SUITE_P(
ToString, HloTiledShardingWithMetadataTest,
::testing::Values(
std::make_tuple(std::vector<OpMetadata>(), "{devices=[2,1,2]2,3,5,7}"),
std::make_tuple(SingleMetadata(),
"{devices=[2,1,2]2,3,5,7 metadata={op_name=\"a\"}}"),
std::make_tuple(ListMetadata(),
"{devices=[2,1,2]2,3,5,7 metadata={{op_name=\"b\"}, "
"{op_name=\"c\"}}}")));
TEST_F(HloShardingTest, ToStringTupleTest) {
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(), HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3)});
EXPECT_EQ(sharding.ToString(),
"{{replicated}, {devices=[1,2]3,5}, {maximal device=3}}");
}
TEST_F(HloShardingTest, ToStringTupleWithMetadataTest) {
auto metadata = SingleMetadata();
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate({GetMetadata("d")}),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, {GetMetadata("e")})});
EXPECT_EQ(sharding.ToString(false),
"{{replicated}, {devices=[1,2]3,5}, {maximal device=3}}");
EXPECT_EQ(sharding.ToString(true),
"{{replicated metadata={op_name=\"d\"}}, {devices=[1,2]3,5}, "
"{maximal device=3 metadata={op_name=\"e\"}}}");
}
TEST_F(HloShardingTest, OstreamTest) {
HloSharding sharding =
HloSharding::Tile(Array4D<int64_t>({{{{0, 1}, {2, 3}}}}));
std::ostringstream oss;
oss << sharding;
EXPECT_EQ(oss.str(), "{devices=[1,1,2,2]0,1,2,3}");
}
class HloParseShardingWithMetadataTest
: public ::testing::TestWithParam<std::vector<OpMetadata>> {};
TEST_P(HloParseShardingWithMetadataTest, ParseHloString) {
auto check = [](const HloSharding& sharding) {
TF_ASSERT_OK_AND_ASSIGN(
auto parsed_sharding,
ParseSharding(sharding.ToString(true)));
EXPECT_EQ(sharding, parsed_sharding);
};
check(HloSharding::Replicate(GetParam()));
check(HloSharding::AssignDevice(2, GetParam()));
check(HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}}), GetParam()));
check(HloSharding::Tuple(ShapeUtil::MakeTupleShape({}),
{HloSharding::Replicate(GetParam())}));
{
auto tuple_shape =
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 1, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 7})});
check(HloSharding::Tuple(
tuple_shape,
{HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}})),
HloSharding::Replicate(GetParam()), HloSharding::AssignDevice(1)}));
}
{
auto tuple_shape = ShapeUtil::MakeTupleShape(
{ShapeUtil::MakeShape(F32, {3, 1, 5, 7}),
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5, 7}),
ShapeUtil::MakeShape(F32, {3, 7})})});
std::vector<HloSharding> leaf_shardings = {
HloSharding::Tile(Array4D<int64_t>({{{{0}, {1}}}})),
HloSharding::Replicate(), HloSharding::AssignDevice(1, GetParam())};
ShapeTree<HloSharding> sharding_tree(tuple_shape, HloSharding::Replicate());
auto it = leaf_shardings.begin();
for (auto& index_to_sharding : sharding_tree.leaves()) {
index_to_sharding.second = *it++;
}
check(HloSharding::Tuple(sharding_tree));
}
}
INSTANTIATE_TEST_SUITE_P(ParseHloString, HloParseShardingWithMetadataTest,
::testing::Values(std::vector<OpMetadata>(),
SingleMetadata(), ListMetadata()));
TEST_F(HloShardingTest, WithMetadataNoOverwrite) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_new_metadata =
sharding.WithMetadata(SingleMetadata(), false);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata().front(), SingleMetadata().front()));
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), false);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding.metadata().front(), sharding_new_metadata.metadata().front()));
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, SingleMetadata())});
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), false);
EXPECT_TRUE(sharding_new_metadata.metadata().empty());
ASSERT_TRUE(sharding_new_metadata.IsTuple());
ASSERT_EQ(sharding_new_metadata.tuple_elements().size(), 3);
ASSERT_EQ(sharding_new_metadata.tuple_elements()[0].metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[0].metadata().front(),
SingleMetadata().front()));
ASSERT_EQ(sharding_new_metadata.tuple_elements()[1].metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[1].metadata()[i],
ListMetadata()[i]));
}
ASSERT_EQ(sharding_new_metadata.tuple_elements()[2].metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.tuple_elements()[2].metadata().front(),
SingleMetadata().front()));
}
}
TEST_F(HloShardingTest, WithMetadataOverwrite) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_new_metadata =
sharding.WithMetadata(SingleMetadata(), true);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 1);
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata().front(), SingleMetadata().front()));
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), true);
ASSERT_EQ(sharding_new_metadata.metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(
sharding_new_metadata.metadata()[i], ListMetadata()[i]));
}
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, SingleMetadata())});
auto sharding_new_metadata =
sharding.WithMetadata(ListMetadata(), true);
EXPECT_TRUE(sharding_new_metadata.metadata().empty());
ASSERT_TRUE(sharding_new_metadata.IsTuple());
ASSERT_EQ(sharding_new_metadata.tuple_elements().size(), 3);
for (const auto& sub_sharding : sharding_new_metadata.tuple_elements()) {
ASSERT_EQ(sub_sharding.metadata().size(), 2);
for (int i = 0; i < 2; ++i) {
EXPECT_TRUE(protobuf_util::ProtobufEquals(sub_sharding.metadata()[i],
ListMetadata()[i]));
}
}
}
}
TEST_F(HloShardingTest, WithoutMetadata) {
{
HloSharding sharding = HloSharding::Replicate();
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
}
{
HloSharding sharding = HloSharding::AssignDevice(7, SingleMetadata());
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
}
{
HloSharding sharding = HloSharding::Tuple(
ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {3, 5}),
ShapeUtil::MakeShape(U32, {7, 25}),
ShapeUtil::MakeShape(S32, {9, 11})}),
{HloSharding::Replicate(SingleMetadata()),
HloSharding::Tile(Array2D<int64_t>({{3, 5}})),
HloSharding::AssignDevice(3, ListMetadata())});
auto sharding_no_metadata = sharding.WithoutMetadata();
EXPECT_TRUE(sharding_no_metadata.metadata().empty());
ASSERT_TRUE(sharding_no_metadata.IsTuple());
EXPECT_EQ(sharding_no_metadata.tuple_elements().size(), 3);
for (const auto& sub_sharding : sharding_no_metadata.tuple_elements()) {
EXPECT_TRUE(sub_sharding.metadata().empty());
}
}
}
}
}
#ifndef XLA_HLO_IR_HLO_DFS_REACHABILITY_H_
#define XLA_HLO_IR_HLO_DFS_REACHABILITY_H_
#include <cstddef>
#include <memory>
#include "llvm/ADT/DenseMap.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
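// Reachability oracle over a single computation. Instead of materializing a
// full transitive-closure bitmap, it records each instruction's post-order
// index and answers queries with an on-demand DFS, which keeps construction
// cheap for large computations.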
class HloDfsReachability {
public:
bool IsPresent(const HloInstruction* instruction) const;
bool IsReachable(const HloInstruction* from, const HloInstruction* to) const;
bool IsConnected(const HloInstruction* a, const HloInstruction* b) const;
static std::unique_ptr<HloDfsReachability> Build(
const HloComputation* computation);
private:
llvm::DenseMap<const HloInstruction*, size_t> instruction_to_idx_;
};
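// Example (illustrative):
//   auto reachability = HloDfsReachability::Build(computation);
//   if (reachability->IsReachable(producer, consumer)) {
//     // `consumer` transitively depends on `producer`.
//   }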
}
#endif
#include "xla/hlo/ir/hlo_dfs_reachability.h"
#include <cstddef>
#include <memory>
#include <vector>
#include "absl/algorithm/container.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
namespace xla {
bool HloDfsReachability::IsPresent(const HloInstruction* instruction) const {
return instruction_to_idx_.contains(instruction);
}
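// Depth-first search backwards from `to` through operands and control
// predecessors, looking for `from`. Because instructions are numbered in post
// order, any node with an index smaller than `from`'s cannot depend on `from`
// and is pruned; the visited set is a bit vector over the index range between
// the two instructions.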
bool HloDfsReachability::IsReachable(const HloInstruction* from,
const HloInstruction* to) const {
if (from == to) {
return true;
}
if (to->operand_count() == 0 && to->control_predecessors().empty()) {
return false;
}
size_t target_node_idx = instruction_to_idx_.at(from);
size_t dfs_root_idx = instruction_to_idx_.at(to);
if (dfs_root_idx < target_node_idx) {
return false;
}
llvm::SmallVector<const HloInstruction*> stack{to};
llvm::BitVector visited_idxs(1 + (dfs_root_idx - target_node_idx));
visited_idxs.set(dfs_root_idx - target_node_idx);
auto check_and_enqueue = [&](const HloInstruction* instr) {
if (instr == from) {
return true;
}
size_t instr_idx = instruction_to_idx_.at(instr);
if (instr_idx < target_node_idx) {
return false;
}
size_t visited_idx = instr_idx - target_node_idx;
if (visited_idxs.test(visited_idx)) {
return false;
}
visited_idxs.set(visited_idx);
stack.push_back(instr);
return false;
};
while (!stack.empty()) {
const HloInstruction* instr = stack.pop_back_val();
if (absl::c_any_of(instr->operands(), check_and_enqueue) ||
absl::c_any_of(instr->control_predecessors(), check_and_enqueue)) {
return true;
}
}
return false;
}
bool HloDfsReachability::IsConnected(const HloInstruction* a,
const HloInstruction* b) const {
return IsReachable(a, b) || IsReachable(b, a);
}
std::unique_ptr<HloDfsReachability> HloDfsReachability::Build(
const HloComputation* computation) {
auto res = std::make_unique<HloDfsReachability>();
HloComputation::ChannelDependencies empty_channel_dependencies;
std::vector<HloInstruction*> instructions =
computation->MakeInstructionPostOrder(empty_channel_dependencies);
res->instruction_to_idx_.reserve(instructions.size());
for (size_t i = 0; i < instructions.size(); ++i) {
res->instruction_to_idx_[instructions[i]] = i;
}
return res;
}
}
#include "xla/hlo/ir/hlo_dfs_reachability.h"
#include <cstddef>
#include <set>
#include <string_view>
#include "absl/random/random.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
class HloDfsReachabilityTest : public HloTestBase {};
TEST_F(HloDfsReachabilityTest, NonTrivialReachability) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32, HloOpcode::kAdd, constant1, constant2));
auto negate = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kNegate, constant2));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, negate));
auto mul = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kMultiply, add, exp));
auto copy = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kCopy, exp));
auto module = CreateNewVerifiedModule();
auto computation =
module->AddEntryComputation(builder.Build(mul));
TF_CHECK_OK(add->AddControlDependencyTo(exp));
auto reachability = HloDfsReachability::Build(computation);
EXPECT_TRUE(reachability->IsReachable(constant1, constant1));
EXPECT_FALSE(reachability->IsReachable(constant1, constant2));
EXPECT_TRUE(reachability->IsReachable(constant1, add));
EXPECT_FALSE(reachability->IsReachable(constant1, negate));
EXPECT_TRUE(reachability->IsReachable(constant1, exp));
EXPECT_TRUE(reachability->IsReachable(constant1, mul));
EXPECT_TRUE(reachability->IsReachable(constant1, copy));
EXPECT_FALSE(reachability->IsReachable(constant2, constant1));
EXPECT_TRUE(reachability->IsReachable(constant2, constant2));
EXPECT_TRUE(reachability->IsReachable(constant2, add));
EXPECT_TRUE(reachability->IsReachable(constant2, negate));
EXPECT_TRUE(reachability->IsReachable(constant2, exp));
EXPECT_TRUE(reachability->IsReachable(constant2, mul));
EXPECT_TRUE(reachability->IsReachable(constant2, copy));
EXPECT_FALSE(reachability->IsReachable(exp, constant1));
EXPECT_FALSE(reachability->IsReachable(exp, constant2));
EXPECT_FALSE(reachability->IsReachable(exp, add));
EXPECT_FALSE(reachability->IsReachable(exp, negate));
EXPECT_TRUE(reachability->IsReachable(exp, exp));
EXPECT_TRUE(reachability->IsReachable(exp, mul));
EXPECT_TRUE(reachability->IsReachable(exp, copy));
EXPECT_FALSE(reachability->IsReachable(mul, constant1));
EXPECT_FALSE(reachability->IsReachable(mul, constant2));
EXPECT_FALSE(reachability->IsReachable(mul, add));
EXPECT_FALSE(reachability->IsReachable(mul, negate));
EXPECT_FALSE(reachability->IsReachable(mul, exp));
EXPECT_TRUE(reachability->IsReachable(mul, mul));
EXPECT_FALSE(reachability->IsReachable(mul, copy));
EXPECT_TRUE(reachability->IsConnected(constant1, copy));
EXPECT_TRUE(reachability->IsConnected(copy, constant1));
EXPECT_FALSE(reachability->IsConnected(negate, add));
EXPECT_FALSE(reachability->IsConnected(add, negate));
}
TEST_F(HloDfsReachabilityTest, ChannelReachability) {
const Shape shape = ShapeUtil::MakeShape(F32, {5, 7});
HloComputation::Builder builder("ChannelReachability");
auto param = builder.AddInstruction(
HloInstruction::CreateParameter(0, shape, "param"));
auto token0 = builder.AddInstruction(HloInstruction::CreateToken());
auto send =
builder.AddInstruction(HloInstruction::CreateSend(param, token0, 1));
auto send_done = builder.AddInstruction(HloInstruction::CreateSendDone(send));
auto token1 = builder.AddInstruction(HloInstruction::CreateToken());
auto recv =
builder.AddInstruction(HloInstruction::CreateRecv(shape, token1, 1));
auto recv_done = builder.AddInstruction(HloInstruction::CreateRecvDone(recv));
auto module = CreateNewVerifiedModule();
module->mutable_config().set_use_spmd_partitioning(false);
module->mutable_config().set_static_device_assignment(DeviceAssignment(1, 2));
auto computation = module->AddEntryComputation(builder.Build(recv_done));
auto reachability = HloDfsReachability::Build(computation);
EXPECT_FALSE(reachability->IsReachable(param, recv_done));
EXPECT_FALSE(reachability->IsReachable(send, recv));
EXPECT_FALSE(reachability->IsReachable(send_done, recv));
}
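// Benchmark helper: builds a linear chain of `size` unary instructions so that
// reachability construction and queries can be timed on long dependency chains.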
class HloDfsReachabilityBenchmark {
public:
HloDfsReachabilityBenchmark(int size, std::string_view name) : name_(name) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
auto builder = HloComputation::Builder(name);
HloInstruction* constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.0f)));
HloInstruction* prev = constant;
for (int i = 1; i < size; ++i) {
prev = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32, HloOpcode::kExp, prev));
}
HloModuleConfig hlo_config;
module_ = std::make_unique<HloModule>(name_, hlo_config);
computation_ =
module_->AddEntryComputation(builder.Build(prev));
}
std::unique_ptr<HloDfsReachability> Build() {
return HloDfsReachability::Build(computation_);
}
const HloComputation* computation() { return computation_; }
private:
std::unique_ptr<HloModule> module_;
HloComputation* computation_;
const std::string name_;
};
void BM_HloDfsReachabilityBuild(benchmark::State& state) {
int num_nodes = state.range(0);
HloDfsReachabilityBenchmark bm(num_nodes, state.name());
while (state.KeepRunningBatch(num_nodes)) {
benchmark::DoNotOptimize(bm.Build());
}
}
void BM_HloDfsReachabilityCheck(benchmark::State& state) {
size_t size = state.range(0);
HloDfsReachabilityBenchmark bm(size, state.name());
auto reachability = bm.Build();
auto instrs = bm.computation()->MakeInstructionPostOrder();
size_t i = 0;
for (auto s : state) {
size_t from = i % size;
size_t to = (++i + size / 2) % size;
reachability->IsReachable(instrs[from], instrs[to]);
}
}
#define BM_ARGS Arg(1)->Arg(64)->Arg(128)->Arg(256)->Range(512, 256 * 1024)
BENCHMARK(BM_HloDfsReachabilityBuild)->BM_ARGS;
BENCHMARK(BM_HloDfsReachabilityCheck)->BM_ARGS;
}
}
#ifndef XLA_HLO_IR_HLO_MODULE_H_
#define XLA_HLO_IR_HLO_MODULE_H_
#include <atomic>
#include <functional>
#include <memory>
#include <optional>
#include <random>
#include <string>
#include <string_view>
#include <utility>
#include <variant>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_input_output_alias_config.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module_metadata.h"
#include "xla/hlo/ir/hlo_schedule.h"
#include "xla/iterator_util.h"
#include "xla/printer.h"
#include "xla/service/compilation_environments.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/name_uniquer.h"
#include "xla/xla.pb.h"
#include "tsl/lib/gtl/iterator_range.h"
#include "tsl/platform/logging.h"
namespace xla {
using LayoutCanonicalizationCallback =
std::function<absl::StatusOr<std::pair<std::vector<Shape>, Shape>>(
const HloModule& module)>;
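// Copy-on-write wrapper used for the module configuration: the wrapped value
// can either be uniquely owned and mutated in place, or shared read-only with
// other modules, in which case the first mutation makes a private deep copy.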
template <typename T>
class CopyOnWrite {
public:
static_assert(!std::is_const_v<T>);
explicit CopyOnWrite(
std::variant<std::unique_ptr<T>, std::shared_ptr<const T>> ptr)
: ownership_(std::move(ptr)), ptr_([&]() -> decltype(ptr_) {
if (auto* owned = std::get_if<std::unique_ptr<T>>(&ownership_)) {
return owned->get();
}
return std::get<std::shared_ptr<const T>>(ownership_).get();
}()) {}
const T& get() const { return *ptr_; }
T& get_mutable() {
if (auto* owned = std::get_if<std::unique_ptr<T>>(&ownership_)) {
return **owned;
}
auto& shared = std::get<std::shared_ptr<const T>>(ownership_);
DeepCopyToNewUnique(T(*shared));
return const_cast<T&>(*ptr_);
}
void set(T&& value) {
if (auto* owned = std::get_if<std::unique_ptr<T>>(&ownership_)) {
**owned = std::forward<T>(value);
} else {
DeepCopyToNewUnique(std::forward<T>(value));
}
}
const std::shared_ptr<const T>& FreezeAndShare() const {
if (auto* owned = std::get_if<std::unique_ptr<T>>(&ownership_)) {
ownership_ = std::shared_ptr<const T>(std::move(*owned));
}
return std::get<std::shared_ptr<const T>>(ownership_);
}
private:
void DeepCopyToNewUnique(T&& value) {
auto owned = std::make_unique<T>(std::forward<T>(value));
ptr_ = owned.get();
ownership_ = std::move(owned);
}
mutable std::variant<std::unique_ptr<T>, std::shared_ptr<const T>> ownership_;
const T* ptr_;
};
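// Usage sketch (editor's note, not part of the upstream header): CopyOnWrite
// serves reads without copying and defers a deep copy until the first mutation
// after the value has been frozen and shared. std::string stands in for the
// wrapped type purely for illustration; HloModule instantiates this with
// HloModuleConfig.
//
//   CopyOnWrite<std::string> cow(std::make_unique<std::string>("abc"));
//   const std::string& ro = cow.get();                   // no copy
//   std::shared_ptr<const std::string> frozen = cow.FreezeAndShare();
//   cow.get_mutable().append("def");                     // deep-copies first;
//                                                        // *frozen is still "abc"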
class HloModule {
public:
HloModule(const std::string& name, HloModuleConfig config);
HloModule(const std::string& name, HloModuleConfig config,
std::unique_ptr<CompilationEnvironments> comp_envs);
HloModule(const std::string& name,
std::variant<std::unique_ptr<HloModuleConfig>,
std::shared_ptr<const HloModuleConfig>>
config,
std::unique_ptr<CompilationEnvironments> comp_envs);
virtual ~HloModule() = default;
HloComputation* AddEntryComputation(
std::unique_ptr<HloComputation> computation);
HloComputation* AddEntryComputationWithLayouts(
std::unique_ptr<HloComputation> computation);
void ReplaceEntryComputation(HloComputation* entry_computation);
HloComputation* AddEmbeddedComputation(
std::unique_ptr<HloComputation> computation);
absl::Status RemoveEmbeddedComputation(HloComputation* to_remove);
absl::Status RemoveUnusedComputations();
void MarkFusionDuplications(
const absl::flat_hash_map<HloComputation*, HloComputation*>&
replacements);
void ReplaceComputations(
const absl::flat_hash_map<HloComputation*, HloComputation*>&
replacements);
const std::string& name() const { return name_; }
void set_name(std::string name) { name_ = std::move(name); }
void MoveComputationsFrom(HloModule* module, bool make_names_unique = false);
std::unique_ptr<HloModule> Clone(const std::string& suffix = "clone") const;
std::unique_ptr<HloModule> Clone(const HloModuleConfig& config,
const std::string& suffix = "clone") const;
std::unique_ptr<HloModule> Clone(
std::shared_ptr<const HloModuleConfig> config,
const std::string& suffix = "clone") const;
HloComputation* DeepCloneComputation(HloComputation* computation,
HloCloneContext* context = nullptr);
HloComputation* entry_computation() const {
CHECK_NE(nullptr, entry_computation_);
return entry_computation_;
}
bool has_entry_computation() const { return entry_computation_ != nullptr; }
const Shape& result_shape() const {
CHECK_NE(nullptr, entry_computation_);
return entry_computation()->root_instruction()->shape();
}
ComputationLayout compute_computation_layout() const {
    return ComputationLayout(entry_computation()->ComputeProgramShape(),
                             /*ignore_layouts=*/false);
}
ComputationLayout* mutable_entry_computation_layout() {
return config_.get_mutable().mutable_entry_computation_layout();
}
const ComputationLayout& entry_computation_layout() const {
return config_.get().entry_computation_layout();
}
void set_frontend_attributes(FrontendAttributes frontend_attributes) {
frontend_attributes_ = std::move(frontend_attributes);
}
void add_frontend_attributes(FrontendAttributes frontend_attributes) {
frontend_attributes_.mutable_map()->insert(
frontend_attributes.map().begin(), frontend_attributes.map().end());
}
const FrontendAttributes& frontend_attributes() const {
return frontend_attributes_;
}
void set_use_auto_spmd_partitioning(bool use) {
use_auto_spmd_partitioning_ = use;
}
bool use_auto_spmd_partitioning() const {
return use_auto_spmd_partitioning_;
}
void set_layout_canonicalization_callback(
LayoutCanonicalizationCallback callback) {
layout_canonicalization_callback_ = std::move(callback);
}
LayoutCanonicalizationCallback layout_canonicalization_callback() const {
return layout_canonicalization_callback_;
}
template <typename H>
friend H AbslHashValue(H h, const HloModule& module) {
h = H::combine(std::move(h), module.entry_computation_layout());
auto computations = module.MakeComputationSorted();
for (auto* computation : computations) {
h = H::combine(std::move(h), *computation);
}
return H::combine(std::move(h), computations.size());
}
tsl::gtl::iterator_range<UnwrappingIterator<
std::vector<std::unique_ptr<HloComputation>>::const_iterator>>
computations() const {
return {MakeUnwrappingIterator(computations_.begin()),
MakeUnwrappingIterator(computations_.end())};
}
tsl::gtl::iterator_range<UnwrappingIterator<
std::vector<std::unique_ptr<HloComputation>>::iterator>>
computations() {
return {MakeUnwrappingIterator(computations_.begin()),
MakeUnwrappingIterator(computations_.end())};
}
tsl::gtl::iterator_range<FilteringUnwrappingIterator<
std::vector<std::unique_ptr<HloComputation>>::const_iterator,
std::function<bool(const HloComputation*)>>>
computations(
const absl::flat_hash_set<absl::string_view>& execution_threads) const {
std::function<bool(const HloComputation*)> pred =
[execution_threads](const HloComputation* computation) {
if (execution_threads.empty()) {
return true;
}
return execution_threads.contains(computation->execution_thread());
};
return MakeFilteringUnwrappingIteratorRange(computations_.begin(),
computations_.end(), pred);
}
HloComputation* GetComputationWithName(absl::string_view name);
int64_t computation_count() const { return computations_.size(); }
HloComputation* mutable_computation(int64_t idx) {
CHECK(idx >= 0 && idx < computations_.size());
return computations_[idx].get();
}
int64_t instruction_count() const;
void Cleanup() {
for (auto& comp : computations_) {
comp->Cleanup();
}
}
std::vector<HloComputation*> MakeComputationPostOrder() const {
return MakeComputationPostOrder({});
}
std::vector<HloComputation*> MakeComputationPostOrder(
const absl::flat_hash_set<absl::string_view>& execution_threads) const;
std::vector<HloComputation*> MakeComputationPostOrder(
const absl::flat_hash_set<absl::string_view>& execution_threads,
const absl::flat_hash_set<HloComputation*>& allow_list) const;
std::vector<HloComputation*> MakeComputationSorted() const {
return MakeComputationSorted({});
}
std::vector<HloComputation*> MakeComputationSorted(
const absl::flat_hash_set<absl::string_view>& execution_threads) const;
std::vector<HloComputation*> MakeNonfusionComputations() const {
return MakeNonfusionComputations({});
}
std::vector<HloComputation*> MakeNonfusionComputations(
const absl::flat_hash_set<absl::string_view>& execution_threads) const;
std::vector<HloComputation*> MakeNonfusionComputationsSorted() const {
return MakeNonfusionComputationsSorted({});
}
std::vector<HloComputation*> MakeNonfusionComputationsSorted(
const absl::flat_hash_set<absl::string_view>& execution_threads) const;
HloModuleConfig& mutable_config() { return config_.get_mutable(); }
const HloModuleConfig& config() const { return config_.get(); }
void set_config(HloModuleConfig config) { config_.set(std::move(config)); }
const std::shared_ptr<const HloModuleConfig>& shared_config() const {
return config_.FreezeAndShare();
}
bool is_dynamic() const { return is_dynamic_; }
void set_is_dynamic(bool is_dynamic) { is_dynamic_ = is_dynamic; }
void Print(Printer* printer) const {
return Print(printer, HloPrintOptions::Default());
}
void Print(Printer* printer, const HloPrintOptions& options) const;
std::string ToString() const { return ToString(HloPrintOptions::Default()); }
std::string ToString(const HloPrintOptions& options) const;
absl::Cord ToCord() const { return ToCord(HloPrintOptions::Default()); }
absl::Cord ToCord(const HloPrintOptions& options) const;
HloModuleProto ToProto() const;
static absl::StatusOr<std::unique_ptr<HloModule>> CreateFromProto(
const HloModuleProto& proto, const HloModuleConfig& module_config,
bool prohibit_empty_literal = true);
HloModuleProtoWithConfig ToProtoWithConfig() const;
static absl::StatusOr<std::unique_ptr<HloModule>> CreateFromProtoWithConfig(
const HloModuleProtoWithConfig& proto,
bool prohibit_empty_literal = true);
static absl::StatusOr<HloModuleConfig> CreateModuleConfigFromProto(
const HloModuleProto& module, const DebugOptions& debug_options,
const ExecutionOptions* execution_options = nullptr);
static absl::StatusOr<HloModuleConfig> CreateModuleConfigFromShape(
const ProgramShape& program_shape, const DebugOptions& debug_options,
const ExecutionOptions* execution_options = nullptr);
HloInstruction* OutlineExpressionFromComputation(
absl::Span<HloInstruction* const> instructions_to_outline,
const std::string& outlined_computation_name,
HloComputation* computation);
uint64_t RandomNew64() const;
NameUniquer& instruction_name_uniquer() { return instruction_name_uniquer_; }
int NewUniqueInstructionId() {
int result = next_unique_id_;
next_unique_id_++;
return result;
}
HloInputOutputAliasConfig& input_output_alias_config() {
return input_output_alias_config_;
}
const HloInputOutputAliasConfig& input_output_alias_config() const {
return input_output_alias_config_;
}
void set_input_output_alias_config(HloInputOutputAliasConfig config) {
input_output_alias_config_ = std::move(config);
}
HloBufferDonorConfig& buffer_donor_config() { return buffer_donor_config_; }
const HloBufferDonorConfig& buffer_donor_config() const {
return buffer_donor_config_;
}
void set_buffer_donor_config(HloBufferDonorConfig config) {
buffer_donor_config_ = std::move(config);
}
int unique_id() const { return unique_id_; }
absl::Status set_schedule(HloSchedule schedule);
void clear_schedule() { schedule_.reset(); }
bool has_schedule() const { return schedule_.has_value(); }
const HloSchedule& schedule() const { return *schedule_; }
HloSchedule& schedule() { return *schedule_; }
HloComputation* AddComputationAndUnifyNamesAndIds(
std::unique_ptr<HloComputation> computation, bool is_entry) {
computation->ClearUniqueIdInternal();
for (auto* instruction : computation->instructions()) {
instruction->ClearUniqueIdInternal();
}
    return AddComputationInternal(std::move(computation), is_entry,
                                  /*uniquify_identifiers=*/true,
                                  /*preserve_entry_layouts=*/true);
}
void SetAndUniquifyInstrName(HloInstruction* instr, absl::string_view name) {
instr->SetAndSanitizeName(name);
instr->UniquifyName(&instruction_name_uniquer_);
}
void SetAndUniquifyComputationName(HloComputation* computation,
absl::string_view name) {
computation->SetAndSanitizeName(name);
computation->UniquifyName(&computation_name_uniquer_);
}
absl::Status CheckUniqueNamesAndIdsForComputationsAndInstructions() const;
bool has_spmd_parameters_shardings() const {
return spmd_parameters_shardings_.has_value();
}
const std::vector<HloSharding>& spmd_parameters_shardings() const {
CHECK(spmd_parameters_shardings_.has_value());
return *spmd_parameters_shardings_;
}
void set_spmd_parameters_shardings(
const std::vector<HloSharding>& shardings) {
spmd_parameters_shardings_ = shardings;
}
bool has_spmd_output_sharding() const {
return spmd_output_sharding_.has_value();
}
const HloSharding& spmd_output_sharding() const {
CHECK(spmd_output_sharding_.has_value());
return *spmd_output_sharding_;
}
void set_spmd_output_sharding(const HloSharding& sharding) {
spmd_output_sharding_ = sharding;
}
struct CrossProgramPrefetchInfo {
int64_t parameter;
ShapeIndex index;
std::optional<int64_t> alt_memory_offset;
};
void AddCrossProgramPrefetch(
int64_t parameter, const ShapeIndex& index,
std::optional<int64_t> alt_memory_offset = std::nullopt) {
cross_program_prefetches_.emplace_back(
CrossProgramPrefetchInfo{parameter, index, alt_memory_offset});
}
absl::Status SetCrossProgramPrefetchOffset(int64_t prefetch_index,
int64_t offset) {
TF_RET_CHECK(prefetch_index < cross_program_prefetches_.size());
auto& [parameter, index, optional_offset] =
cross_program_prefetches_[prefetch_index];
TF_RET_CHECK(!optional_offset.has_value());
optional_offset = offset;
return absl::OkStatus();
}
absl::Span<const CrossProgramPrefetchInfo> CrossProgramPrefetches() const {
return cross_program_prefetches_;
}
const HloModuleMetadata& metadata() const { return metadata_; }
HloModuleMetadata* metadata() { return &metadata_; }
void MoveMetadataToModule(HloModule* module) {
module->metadata_ = std::move(metadata_);
}
int64_t profile_version() const { return profile_version_; }
void set_profile_version(int64_t profile_version) {
profile_version_ = profile_version;
}
void add_profile_info(const HloModuleProto::ProfileInfo& profile_info) {
profile_info_list_.push_back(profile_info);
}
void set_profile_info(
const std::vector<HloModuleProto::ProfileInfo>& profile_info) {
profile_info_list_ = profile_info;
}
const std::vector<HloModuleProto::ProfileInfo>& profile_info() const {
return profile_info_list_;
}
void set_autofdo_profile_key(HloModuleProto::ProfileType profile_type,
absl::string_view profile_key) {
autofdo_profile_keys_[profile_type] = std::string(profile_key);
}
void set_autofdo_profile_keys(
const absl::flat_hash_map<HloModuleProto::ProfileType, std::string>&
profile_keys) {
for (const auto& [profile_type, profile_key] : profile_keys) {
autofdo_profile_keys_[profile_type] = profile_key;
}
}
const absl::flat_hash_map<HloModuleProto::ProfileType, std::string>&
autofdo_profile_keys() const {
return autofdo_profile_keys_;
}
bool has_module_autofdo_profiles() const {
return !autofdo_profile_keys_.empty();
}
void set_relative_speedup(double relative_speedup) {
relative_speedup_ = relative_speedup;
}
void set_autofdo_fingerprint(absl::string_view fingerprint) {
autofdo_fingerprint_ = std::string(fingerprint);
}
std::string autofdo_fingerprint() const { return autofdo_fingerprint_; }
CompilationEnvironments& comp_envs() const { return *comp_envs_; }
std::string GetFingerprint128(const HloPrintOptions& options =
HloPrintOptions::ModuleFingerprint()) const;
struct StackFrame {
std::string_view file_name;
std::string_view function_name;
int line = 0;
int column = 0;
int parent_frame_id = 0;
bool empty() const {
return line == 0 && column == 0 && file_name.empty() &&
function_name.empty();
}
};
StackFrame get_stack_frame(int id) const;
private:
HloComputation* AddComputationInternal(
std::unique_ptr<HloComputation> computation, bool is_entry,
bool uniquify_identifiers, bool preserve_entry_layouts);
std::string name_;
CopyOnWrite<HloModuleConfig> config_;
HloComputation* entry_computation_ = nullptr;
std::vector<std::unique_ptr<HloComputation>> computations_;
mutable std::mt19937_64 rng_{42};
mutable absl::Mutex rng_mutex_;
NameUniquer computation_name_uniquer_{"."};
NameUniquer instruction_name_uniquer_{"."};
int next_unique_id_ = 0;
static std::atomic<int> next_unique_module_id_;
const int unique_id_;
std::optional<HloSchedule> schedule_;
HloInputOutputAliasConfig input_output_alias_config_;
HloBufferDonorConfig buffer_donor_config_;
FrontendAttributes frontend_attributes_;
std::optional<std::vector<HloSharding>> spmd_parameters_shardings_;
std::optional<HloSharding> spmd_output_sharding_;
std::vector<CrossProgramPrefetchInfo> cross_program_prefetches_;
HloModuleMetadata metadata_;
bool is_dynamic_ = false;
int64_t profile_version_ = 0;
std::vector<HloModuleProto::ProfileInfo> profile_info_list_;
double relative_speedup_;
std::string autofdo_fingerprint_;
absl::flat_hash_map<HloModuleProto::ProfileType, std::string>
autofdo_profile_keys_;
bool use_auto_spmd_partitioning_ = false;
LayoutCanonicalizationCallback layout_canonicalization_callback_;
std::unique_ptr<CompilationEnvironments> comp_envs_ =
std::make_unique<CompilationEnvironments>();
std::optional<StackFrameIndexProto> stack_frame_index_;
};
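// Usage sketch (editor's note, not part of the upstream header): a minimal
// module with a single constant-returning entry computation, mirroring what the
// unit tests below build via HloTestBase helpers.
//
//   HloModule module("example", HloModuleConfig());
//   auto builder = HloComputation::Builder("entry");
//   builder.AddInstruction(
//       HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
//   HloComputation* entry = module.AddEntryComputation(builder.Build());
//   std::string text = module.ToString();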
}
#endif | #include "xla/hlo/ir/hlo_module.h"
#include <algorithm>
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include <gtest/gtest.h>
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/literal.h"
#include "xla/service/computation_placer.h"
#include "xla/service/hlo_memory_scheduler.h"
#include "xla/service/test_compilation_environment.pb.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/xla.pb.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/lib/strings/proto_serialization.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
std::unique_ptr<tsl::protobuf::Message> ProcessNewEnv(
std::unique_ptr<tsl::protobuf::Message> msg) {
std::unique_ptr<test::TestCompilationEnvironment1> env(
tensorflow::down_cast<test::TestCompilationEnvironment1*>(msg.release()));
if (!env) {
env = std::make_unique<test::TestCompilationEnvironment1>();
env->set_some_flag(100);
}
return env;
}
namespace {
namespace op = ::xla::testing::opcode_matchers;
class HloModuleTest : public HloTestBase {
protected:
static void SetUpTestSuite() {
CompilationEnvironments::RegisterProcessNewEnvFn(
test::TestCompilationEnvironment1::descriptor(), ProcessNewEnv);
}
std::unique_ptr<HloComputation> CreateConstantComputation() {
auto builder = HloComputation::Builder("Constant");
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
return builder.Build();
}
std::unique_ptr<HloComputation> CreateCallComputation(
absl::Span<HloComputation* const> computations) {
auto builder = HloComputation::Builder("Call");
for (auto computation : computations) {
builder.AddInstruction(
HloInstruction::CreateCall(r0f32_, {}, computation));
}
return builder.Build();
}
Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
};
TEST_F(HloModuleTest, OneComputationPostOrder) {
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(CreateConstantComputation());
EXPECT_THAT(module->MakeComputationPostOrder(),
::testing::ElementsAre(computation));
}
TEST_F(HloModuleTest, TwoComputationsPostOrder) {
auto module = CreateNewVerifiedModule();
auto computation1 = module->AddEntryComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateConstantComputation());
EXPECT_THAT(module->MakeComputationPostOrder(),
::testing::UnorderedElementsAre(computation1, computation2));
EXPECT_EQ(computation1->name(), "Constant");
EXPECT_EQ(computation2->name(), "Constant.1");
}
TEST_F(HloModuleTest, CloneTest) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
auto env = std::make_unique<test::TestCompilationEnvironment1>();
env->set_some_flag(10);
TF_ASSERT_OK(module->comp_envs().AddEnv(std::move(env)));
auto post_order = module->MakeComputationPostOrder();
auto cloned_module = module->Clone("copy");
auto post_order_copied = cloned_module->MakeComputationPostOrder();
EXPECT_EQ(cloned_module->comp_envs()
.GetEnv<test::TestCompilationEnvironment1>()
.some_flag(),
10);
EXPECT_EQ(post_order.size(), post_order_copied.size());
for (auto origin = post_order.begin(), copied = post_order_copied.begin();
origin != post_order.end() && copied != post_order_copied.end();
++origin, ++copied) {
EXPECT_EQ(absl::StrCat((*origin)->name(), ".copy"), (*copied)->name());
}
}
TEST_F(HloModuleTest, CloneFrontendAttributes) {
auto module = CreateNewVerifiedModule();
FrontendAttributes frontend_attributes;
frontend_attributes.mutable_map()->emplace("attribute1", "attribute1_value");
module->set_frontend_attributes(frontend_attributes);
std::unique_ptr<HloModule> clone = module->Clone();
bool areEqual = std::equal(
frontend_attributes.map().begin(), frontend_attributes.map().end(),
clone->frontend_attributes().map().begin(),
[](const auto& kv1, const auto& kv2) {
return kv1.first == kv2.first && kv1.second == kv2.second;
});
EXPECT_TRUE(areEqual);
}
TEST_F(HloModuleTest, CloneHasFusion) {
auto module = CreateNewVerifiedModule();
HloComputation* fused_computation;
{
auto b = HloComputation::Builder("Fused");
auto x = b.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "x"));
b.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, x, x));
fused_computation = module->AddEmbeddedComputation(b.Build());
}
{
auto b = HloComputation::Builder("Entry");
auto input = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.0f)));
b.AddInstruction(
HloInstruction::CreateFusion(r0f32_, HloInstruction::FusionKind::kInput,
{input}, fused_computation));
module->AddEntryComputation(b.Build());
}
auto post_order = module->MakeComputationPostOrder();
auto cloned_module = module->Clone("copy");
auto post_order_copied = cloned_module->MakeComputationPostOrder();
EXPECT_EQ(post_order.size(), post_order_copied.size());
for (auto origin = post_order.begin(), copied = post_order_copied.begin();
origin != post_order.end() && copied != post_order_copied.end();
++origin, ++copied) {
if ((*origin)->name() == "Fused") {
EXPECT_EQ(absl::StrCat((*origin)->name(), ".clone"), (*copied)->name());
} else {
EXPECT_EQ(absl::StrCat((*origin)->name(), ".copy"), (*copied)->name());
}
}
}
TEST_F(HloModuleTest, CloneCustomCallComputationToApply) {
const char* const hlo_string = R"(
HloModule a_module
add_s32 {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY entry () -> s32[] {
%c1 = s32[] constant(1)
%c2 = s32[] constant(2)
ROOT %custom-call =
s32[] custom-call(s32[] %c1, %c2),
custom_call_target="foo",
backend_config="this string is opaque",
to_apply=add_s32
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
std::unique_ptr<HloModule> cloned_module = module->Clone();
HloComputation* cloned_computation =
cloned_module->GetComputationWithName("add_s32.clone");
HloInstruction* cloned_custom_call =
cloned_module->entry_computation()->GetInstructionWithName("custom-call");
EXPECT_TRUE(cloned_computation->IsCustomCallComputation());
EXPECT_EQ(cloned_computation->CustomCallInstruction(), cloned_custom_call);
}
TEST_F(HloModuleTest, CloneCustomCallComputationCalledComputations) {
const char* const hlo_string = R"(
HloModule a_module
add_s32_0 {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
add_s32_1 {
lhs = s32[] parameter(0)
rhs = s32[] parameter(1)
ROOT add = s32[] add(lhs, rhs)
}
ENTRY entry () -> s32[] {
%c1 = s32[] constant(1)
%c2 = s32[] constant(2)
ROOT %custom-call =
s32[] custom-call(s32[] %c1, %c2),
custom_call_target="foo",
backend_config="this string is opaque",
called_computations={%add_s32_0, %add_s32_1}
})";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
std::unique_ptr<HloModule> cloned_module = module->Clone();
HloComputation* cloned_computation_0 =
cloned_module->GetComputationWithName("add_s32_0.clone");
HloComputation* cloned_computation_1 =
cloned_module->GetComputationWithName("add_s32_1.clone");
HloInstruction* cloned_custom_call =
cloned_module->entry_computation()->GetInstructionWithName("custom-call");
EXPECT_TRUE(cloned_computation_0->IsCustomCallComputation());
EXPECT_EQ(cloned_computation_0->CustomCallInstruction(), cloned_custom_call);
EXPECT_TRUE(cloned_computation_1->IsCustomCallComputation());
EXPECT_EQ(cloned_computation_1->CustomCallInstruction(), cloned_custom_call);
}
TEST_F(HloModuleTest, CloneFusionComputation) {
const char* const hlo_string = R"(
HloModule a_module
fused_computation () -> s32[] {
ROOT %result = s32[] parameter(0)
}
ENTRY main {
%c = s32[] constant(1)
ROOT %fusion = s32[] fusion(%c), kind=kLoop, calls=fused_computation
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnVerifiedModule(hlo_string));
std::unique_ptr<HloModule> cloned_module = module->Clone();
HloComputation* cloned_computation =
cloned_module->GetComputationWithName("fused_computation.clone");
HloInstruction* cloned_fusion_instr =
cloned_module->entry_computation()->GetInstructionWithName("fusion");
EXPECT_TRUE(cloned_computation->IsFusionComputation());
EXPECT_EQ(cloned_computation->FusionInstruction(), cloned_fusion_instr);
}
TEST_F(HloModuleTest, DiamondComputationsPostOrder) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation4 = module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
auto post_order = module->MakeComputationPostOrder();
EXPECT_THAT(post_order,
::testing::UnorderedElementsAre(computation1, computation2,
computation3, computation4));
EXPECT_EQ(post_order.back(), computation4);
EXPECT_EQ(post_order.front(), computation1);
}
TEST_F(HloModuleTest, LargeConstantToString) {
auto module = CreateNewVerifiedModule();
auto builder = HloComputation::Builder("Constant");
std::vector<float> values(16, 42.0);
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<float>(values)));
module->AddEntryComputation(builder.Build());
EXPECT_EQ(
"HloModule LargeConstantToString, "
"entry_computation_layout={()->f32[16]{0}}\n\nENTRY %Constant () -> "
"f32[16] {\n ROOT %constant = f32[16]{0} constant({...})\n}\n\n",
module->ToString(HloPrintOptions().set_print_large_constants(false)));
EXPECT_EQ(
"HloModule LargeConstantToString, "
"entry_computation_layout={()->f32[16]{0}}\n\nENTRY %Constant () -> "
"f32[16] {\n ROOT %constant = f32[16]{0} constant({42, 42, 42, 42, 42, "
"42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42})\n}\n\n",
module->ToString(HloPrintOptions().set_print_large_constants(true)));
}
TEST_F(HloModuleTest, UniqueModuleId) {
auto module_a = CreateNewVerifiedModule();
auto module_b = CreateNewVerifiedModule();
EXPECT_NE(module_a->unique_id(), module_b->unique_id());
}
TEST_F(HloModuleTest, ProtoSerializationWithoutSchedule) {
const std::string text = R"(
HloModule axpy_module
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%x = f32[2,4]{1,0} parameter(1)
%y = f32[2,4]{1,0} parameter(2)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_FALSE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(
auto module_copy,
HloModule::CreateFromProto(module->ToProto(), module->config()));
ASSERT_FALSE(module_copy->has_schedule());
}
TEST_F(HloModuleTest, ProtoSerializationWithSchedule) {
const std::string text = R"(
HloModule axpy_module, is_scheduled=true
ENTRY %axpy.v5 (alpha: f32[], x: f32[2,4], y: f32[2,4]) -> f32[2,4] {
%alpha = f32[] parameter(0)
%x = f32[2,4]{1,0} parameter(1)
%y = f32[2,4]{1,0} parameter(2)
%broadcast = f32[2,4]{1,0} broadcast(f32[] %alpha), dimensions={}
%multiply = f32[2,4]{1,0} multiply(f32[2,4]{1,0} %broadcast, f32[2,4]{1,0} %x)
ROOT %add = f32[2,4]{1,0} add(f32[2,4]{1,0} %multiply, f32[2,4]{1,0} %y)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(
auto module_copy,
HloModule::CreateFromProto(module->ToProto(), module->config()));
ASSERT_TRUE(module_copy->has_schedule());
TF_ASSERT_OK(module_copy->schedule().Verify());
EXPECT_EQ(module_copy->schedule().sequences().size(), 1);
ASSERT_TRUE(module_copy->schedule().is_computation_scheduled(
module_copy->entry_computation()));
EXPECT_THAT(
module_copy->schedule()
.sequence(module_copy->entry_computation())
.instructions(),
::testing::ElementsAre(op::Parameter(), op::Parameter(), op::Parameter(),
op::Broadcast(), op::Multiply(), op::Add()));
}
TEST_F(HloModuleTest, ProtoSerializationPreservesIds) {
const std::string text =
R"(HloModule ReduceR3ToR2_module
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY ReduceR3ToR2.v3 {
input = f32[8,16,256]{2,1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{1,0} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
HloComputation* entry = module->entry_computation();
HloInstruction* root = entry->root_instruction();
HloComputation* reduction = root->to_apply();
HloComputation* reduction_clone =
module->AddEmbeddedComputation(reduction->Clone());
root->set_to_apply(reduction_clone);
TF_ASSERT_OK(module->RemoveEmbeddedComputation(reduction));
HloInstruction* negate = entry->AddInstruction(
HloInstruction::CreateUnary(root->shape(), HloOpcode::kNegate, root));
entry->set_root_instruction(negate);
auto size_fn = [](const BufferValue& buffer) {
return ShapeUtil::ByteSizeOf(buffer.shape());
};
HloMemoryScheduler scheduler(size_fn);
TF_ASSERT_OK(scheduler.Run(module.get()).status());
ASSERT_TRUE(module->has_schedule());
TF_ASSERT_OK_AND_ASSIGN(
auto module_copy,
HloModule::CreateFromProto(module->ToProto(), module->config()));
EXPECT_NE(module->unique_id(), module_copy->unique_id());
auto computation_copy = module_copy->computations();
auto computation_copy_it = computation_copy.begin();
for (const HloComputation* computation_orig : module->computations()) {
const HloComputation* computation_copy = *computation_copy_it++;
EXPECT_EQ(computation_orig->unique_id(), computation_copy->unique_id())
<< absl::StrFormat(
"ID of original computation %s != ID of deserialized "
"computation %s: %d != %d",
computation_orig->name(), computation_copy->name(),
computation_orig->unique_id(), computation_copy->unique_id());
auto instruction_copy_it = computation_copy->instructions().begin();
for (const HloInstruction* instruction_orig :
computation_orig->instructions()) {
const HloInstruction* instruction_copy = *instruction_copy_it++;
EXPECT_EQ(instruction_orig->unique_id(), instruction_copy->unique_id())
<< absl::StrFormat(
"ID of original instruction %s != ID of deserialized "
"instruction %s: %d != %d",
instruction_orig->name(), instruction_copy->name(),
instruction_orig->unique_id(), instruction_copy->unique_id());
}
}
int next_id = module_copy->NewUniqueInstructionId();
for (const HloComputation* computation : module_copy->computations()) {
for (const HloInstruction* instruction : computation->instructions()) {
EXPECT_GT(next_id, instruction->unique_id());
}
}
}
TEST_F(HloModuleTest, VerifyReplaceComputationsWithReduceScatter) {
const std::string text = R"(
HloModule reduce-scatter
%sum (a: f32[], b: f32[]) -> f32[] {
%a = f32[] parameter(0)
%b = f32[] parameter(1)
ROOT %add = f32[] add(f32[] a, f32[] b)
}
ENTRY main {
%param = f32[16,8,128]{2,1,0} parameter(0)
ROOT %rs = f32[4,8,128]{2,1,0} reduce-scatter(f32[16,8,128]{2,1,0} %param), replica_groups={}, to_apply=%sum, dimensions={0}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
HloComputation* new_comp;
{
auto b = HloComputation::Builder("Fused");
auto p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p0"));
auto p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "p1"));
b.AddInstruction(HloInstruction::CreateBinary(
ShapeUtil::MakeShape(F32, {}), HloOpcode::kMultiply, p0, p1));
new_comp = module->AddEmbeddedComputation(b.Build());
}
HloComputation* entry = module->entry_computation();
HloInstruction* root = entry->root_instruction();
EXPECT_EQ(root->to_apply()->root_instruction()->opcode(), HloOpcode::kAdd);
absl::flat_hash_map<HloComputation*, HloComputation*> replacement;
replacement[root->to_apply()] = new_comp;
module->ReplaceComputations(replacement);
EXPECT_EQ(root->to_apply(), new_comp);
}
TEST_F(HloModuleTest, VerifyReplaceComputationsWithSortOp) {
const std::string text = R"(
HloModule sort
compare {
p.0.lhs = f32[] parameter(0)
p.0.rhs = f32[] parameter(1)
p.1.lhs = f32[] parameter(2)
p.1.rhs = f32[] parameter(3)
ROOT lt = pred[] compare(p.0.lhs, p.0.rhs), direction=LT
}
ENTRY top {
p.0 = f32[32] parameter(0)
p.1 = f32[32] parameter(1)
ROOT %sort.148.1589 = (f32[32], f32[32]) sort(p.0, p.1), dimensions={0}, to_apply=compare
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(text));
HloComputation* new_comp;
{
auto b = HloComputation::Builder("Fused");
auto p0 =
b.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "p0"));
auto p1 =
b.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "p1"));
b.AddInstruction(HloInstruction::CreateParameter(2, r0f32_, "p2"));
b.AddInstruction(HloInstruction::CreateParameter(3, r0f32_, "p3"));
b.AddInstruction(HloInstruction::CreateCompare(
ShapeUtil::MakeShape(PRED, {}), p0, p1, ComparisonDirection::kGt));
new_comp = module->AddEmbeddedComputation(b.Build());
}
HloComputation* entry = module->entry_computation();
HloInstruction* root = entry->root_instruction();
EXPECT_EQ(root->to_apply()->root_instruction()->opcode(),
HloOpcode::kCompare);
EXPECT_EQ(root->to_apply()->root_instruction()->comparison_direction(),
ComparisonDirection::kLt);
absl::flat_hash_map<HloComputation*, HloComputation*> replacement;
replacement[root->to_apply()] = new_comp;
module->ReplaceComputations(replacement);
EXPECT_EQ(root->to_apply(), new_comp);
}
TEST_F(HloModuleTest, OneComputationAllAllowed) {
auto module = CreateNewVerifiedModule();
auto computation = module->AddEntryComputation(CreateConstantComputation());
absl::flat_hash_set<HloComputation*> allowList = {computation};
EXPECT_THAT(
module->MakeComputationPostOrder({}, allowList),
::testing::ElementsAre(computation));
}
TEST_F(HloModuleTest, OneComputationAllFiltered) {
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(CreateConstantComputation());
absl::flat_hash_set<HloComputation*> allowList = {};
module->MakeComputationPostOrder({}, allowList);
EXPECT_THAT(
module->MakeComputationPostOrder({}, allowList),
::testing::IsEmpty());
}
TEST_F(HloModuleTest, DiamondComputationsPostOrderAllAllowed) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation4 = module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
absl::flat_hash_set<HloComputation*> allowList = {computation1, computation2,
computation3, computation4};
auto post_order =
module->MakeComputationPostOrder({}, allowList);
EXPECT_THAT(post_order,
::testing::UnorderedElementsAre(computation1, computation2,
computation3, computation4));
EXPECT_EQ(post_order.back(), computation4);
EXPECT_EQ(post_order.front(), computation1);
}
TEST_F(HloModuleTest, DiamondComputationsPostOrderMiddleFiltered) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation4 = module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
absl::flat_hash_set<HloComputation*> allowList = {computation1, computation4};
auto post_order =
module->MakeComputationPostOrder({}, allowList);
EXPECT_THAT(post_order,
::testing::UnorderedElementsAre(computation1, computation4));
}
TEST_F(HloModuleTest, DiamondComputationsPostOrderAllFiltered) {
auto module = CreateNewVerifiedModule();
auto computation1 =
module->AddEmbeddedComputation(CreateConstantComputation());
auto computation2 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
auto computation3 =
module->AddEmbeddedComputation(CreateCallComputation({computation1}));
module->AddEntryComputation(
CreateCallComputation({computation2, computation3}));
absl::flat_hash_set<HloComputation*> allowList = {};
auto post_order =
module->MakeComputationPostOrder({}, allowList);
EXPECT_THAT(
module->MakeComputationPostOrder({}, allowList),
::testing::IsEmpty());
}
TEST_F(HloModuleTest, TwoComputationsFilterexecution_threads) {
HloComputation::Builder builder(TestName());
constexpr char kParallelThreadName[] = "parallel_thread";
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* main_thread_computation = module->AddEntryComputation(builder.Build());
TF_ASSERT_OK_AND_ASSIGN(
auto* async_done,
main_thread_computation->CreateAsyncInstructions(
add, {ShapeUtil::MakeScalarShape(U32)}, kParallelThreadName));
auto* parallel_thread_computation = async_done->async_wrapped_computation();
EXPECT_THAT(
module->MakeComputationPostOrder({HloInstruction::kMainExecutionThread}),
::testing::ElementsAre(main_thread_computation));
EXPECT_THAT(module->MakeComputationPostOrder(),
::testing::ElementsAre(parallel_thread_computation,
main_thread_computation));
EXPECT_THAT(module->MakeComputationPostOrder({kParallelThreadName}),
::testing::ElementsAre(parallel_thread_computation));
int num_all_computations = 0;
for ([[maybe_unused]] const HloComputation* comp :
module->computations({})) {
++num_all_computations;
}
EXPECT_EQ(num_all_computations, 2);
int num_main_computations = 0;
for (const HloComputation* comp :
module->computations({HloInstruction::kMainExecutionThread})) {
++num_main_computations;
EXPECT_EQ(comp->execution_thread(), HloInstruction::kMainExecutionThread);
}
EXPECT_EQ(num_main_computations, 1);
int num_parallel_computations = 0;
for (const HloComputation* comp :
module->computations({kParallelThreadName})) {
++num_parallel_computations;
EXPECT_EQ(comp->execution_thread(), kParallelThreadName);
}
EXPECT_EQ(num_parallel_computations, 1);
}
TEST_F(HloModuleTest, HloModuleWithConfigSerializationEquality) {
const std::string computation_text =
R"(HloModule ReduceR3ToR2_module
add_F32.v3 {
lhs = f32[] parameter(0)
rhs = f32[] parameter(1)
ROOT add = f32[] add(lhs, rhs)
}
ENTRY ReduceR3ToR2.v3 {
input = f32[8,16,256]{2,1,0} parameter(0)
constant = f32[] constant(0)
ROOT reduce = f32[8,16]{1,0} reduce(input, constant), dimensions={2}, to_apply=add_F32.v3
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module,
ParseAndReturnVerifiedModule(computation_text));
xla::HloModuleProtoWithConfig proto = module->ToProtoWithConfig();
std::string serialized_module;
ASSERT_TRUE(tsl::SerializeToStringDeterministic(proto, &serialized_module));
std::string original_debug_str = proto.DebugString();
RecordProperty("serialized_module", original_debug_str);
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> reconstructed_module,
HloModule::CreateFromProtoWithConfig(proto));
xla::HloModuleProtoWithConfig reconstructed_module_proto =
reconstructed_module->ToProtoWithConfig();
google::protobuf::util::MessageDifferencer diff;
diff.set_message_field_comparison(
google::protobuf::util::MessageDifferencer::EQUIVALENT);
auto module_descriptor = HloModuleProto::GetDescriptor();
auto unique_id_field = module_descriptor->FindFieldByName("id");
diff.IgnoreField(unique_id_field);
EXPECT_TRUE(diff.Compare(proto, reconstructed_module_proto));
}
static ShardableValueUpdatePairProto MakeShardPair(int offset) {
ShardableValueUpdatePairProto pear;
pear.set_input_parameter_number(offset + 1);
for (int64_t i = 0; i < 5; ++i) {
pear.add_parameter_shape_index(offset + i);
}
for (int64_t j = 0; j < 3; ++j) {
pear.add_output_shape_index(offset + j);
}
return pear;
}
static HloModuleConfigProto::BoolList MakeOneHotBoolList(unsigned num_vals,
unsigned hot_idx) {
HloModuleConfigProto::BoolList list;
for (unsigned i = 0; i < num_vals; ++i) {
list.add_vals(i == hot_idx);
}
return list;
}
static absl::StatusOr<HloModuleConfigProto> MakeTestModuleConfigProto() {
HloModuleConfigProto proto;
proto.set_seed(0xdeadbeef);
proto.set_launch_id(0xfeed100);
proto.set_replica_count(3);
proto.set_num_partitions(2);
for (int x = 0; x < 6; ++x) {
proto.add_param_requires_broadcast_via_collectives(x & 1);
}
proto.set_use_spmd_partitioning(true);
proto.set_use_auto_spmd_partitioning(true);
for (unsigned x = 0; x < 4; ++x) {
proto.add_auto_spmd_partitioning_mesh_ids(10 - x);
proto.add_auto_spmd_partitioning_mesh_ids(x);
}
proto.set_deduplicate_hlo(true);
proto.set_intra_op_parallelism_threads(42);
proto.set_device_type("Google Test framework");
*proto.mutable_debug_options() = DefaultDebugOptionsIgnoringFlags();
{
DeviceAssignmentProto device_assignment_proto;
    DeviceAssignment device_assignment(/*replica_count=*/3,
                                       /*computation_count=*/2);
device_assignment.Serialize(&device_assignment_proto);
proto.mutable_static_device_assignment()->Swap(&device_assignment_proto);
}
for (int k = 0; k < 3; ++k) {
*proto.add_shardable_value_update_pairs() = MakeShardPair(k);
}
proto.set_alias_passthrough_params(true);
proto.set_content_aware_computation_sorting(true);
proto.set_fusion_config_collection(HloModuleConfigProto::PER_NODE);
for (int idx = 0; idx < 4; ++idx) {
bool reverse = (idx & 1) == 0;
*proto.add_fusion_config() =
MakeOneHotBoolList(6, (reverse) ? 6 - idx : idx);
}
for (int idx = 0; idx < 4; ++idx) {
HloModuleConfigProto::Int64List int_list;
for (int x = 1; x <= 3; ++x) {
int_list.add_vals(x * x * idx);
}
proto.mutable_dot_config()->insert(
{absl::StrCat("Node", idx, "dot"), std::move(int_list)});
}
for (int idx | 2,182 |
#ifndef XLA_HLO_IR_DYNAMIC_PARAMETER_BINDING_H_
#define XLA_HLO_IR_DYNAMIC_PARAMETER_BINDING_H_
#include <cstdint>
#include <functional>
#include <optional>
#include <ostream>
#include <string>
#include <utility>
#include "absl/container/flat_hash_map.h"
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/shape_util.h"
namespace xla {
class HloModule;
class DynamicParameterBinding {
public:
struct DynamicSizeParameter {
int64_t parameter_num;
ShapeIndex parameter_index;
};
struct DynamicDimension {
int64_t parameter_num;
ShapeIndex parameter_index;
int64_t dimension;
template <typename H>
friend H AbslHashValue(H h, const DynamicDimension& m) {
return H::combine(std::move(h), m.parameter_num, m.parameter_index,
m.dimension);
}
friend bool operator==(const DynamicDimension& lhs,
const DynamicDimension& rhs) {
return lhs.parameter_num == rhs.parameter_num &&
lhs.parameter_index == rhs.parameter_index &&
lhs.dimension == rhs.dimension;
}
};
absl::Status Bind(const DynamicSizeParameter& dynamic_parameter,
const DynamicDimension& dynamic_dimension);
std::optional<DynamicSizeParameter> GetBinding(
const DynamicDimension& dynamic_dimension) const;
using BindingFn =
std::function<absl::Status(const DynamicSizeParameter& dynamic_parameter,
const DynamicDimension& dynamic_dimension)>;
absl::Status ForEachBinding(BindingFn fn) const;
std::string ToString() const;
absl::Status Verify(const HloComputation& computation) const;
bool empty() const { return bindings_.empty(); }
private:
absl::flat_hash_map<DynamicDimension, DynamicSizeParameter> bindings_;
};
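// Usage sketch (editor's note, not part of the upstream header): bind scalar
// parameter 0 as the runtime size of dimension 0 of parameter 1, then query it.
//
//   DynamicParameterBinding binding;
//   absl::Status s = binding.Bind(
//       DynamicParameterBinding::DynamicSizeParameter{/*parameter_num=*/0,
//                                                     /*parameter_index=*/{}},
//       DynamicParameterBinding::DynamicDimension{/*parameter_num=*/1,
//                                                 /*parameter_index=*/{},
//                                                 /*dimension=*/0});
//   auto size_param = binding.GetBinding(
//       DynamicParameterBinding::DynamicDimension{1, {}, 0});  // -> param 0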
std::ostream& operator<<(std::ostream& out,
const DynamicParameterBinding& binding);
}
#endif
#include "xla/hlo/ir/dynamic_parameter_binding.h"
#include <optional>
#include <ostream>
#include <string>
#include <vector>
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "absl/strings/str_join.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
namespace xla {
absl::Status DynamicParameterBinding::Bind(
const DynamicSizeParameter& dynamic_parameter,
const DynamicDimension& dynamic_dimension) {
auto result = bindings_.emplace(dynamic_dimension, dynamic_parameter);
TF_RET_CHECK(result.second);
return absl::OkStatus();
}
std::optional<DynamicParameterBinding::DynamicSizeParameter>
DynamicParameterBinding::GetBinding(
const DynamicDimension& dynamic_dimension) const {
auto param_iter = bindings_.find(dynamic_dimension);
if (param_iter == bindings_.end()) {
return std::nullopt;
}
return param_iter->second;
}
std::string DynamicParameterBinding::ToString() const {
std::vector<std::string> pieces;
pieces.push_back("DynamicParameterBinding: ");
for (const auto& binding : bindings_) {
const DynamicDimension& dynamic_dimension = binding.first;
const DynamicSizeParameter& dynamic_param = binding.second;
pieces.push_back(absl::StrFormat(
" -- Input param number %lld at %s has dim %lld as dynamic"
" dimension, which is represented by param number %lld at "
"%s",
dynamic_dimension.parameter_num,
dynamic_dimension.parameter_index.ToString(),
dynamic_dimension.dimension, dynamic_param.parameter_num,
dynamic_param.parameter_index.ToString()));
}
return absl::StrJoin(pieces, "\n");
}
absl::Status DynamicParameterBinding::ForEachBinding(BindingFn fn) const {
for (const auto& binding : bindings_) {
TF_RETURN_IF_ERROR(fn(binding.second, binding.first));
}
return absl::OkStatus();
}
absl::Status DynamicParameterBinding::Verify(
const HloComputation& computation) const {
return ForEachBinding([&](const DynamicSizeParameter& dynamic_parameter,
const DynamicDimension& dynamic_dimension)
-> absl::Status {
TF_RET_CHECK(dynamic_parameter.parameter_num >= 0 &&
dynamic_parameter.parameter_num <
computation.num_parameters());
TF_RET_CHECK(dynamic_dimension.parameter_num <
computation.num_parameters());
TF_RET_CHECK(ShapeUtil::IndexIsValid(
computation.parameter_instruction(dynamic_parameter.parameter_num)
->shape(),
dynamic_parameter.parameter_index));
TF_RET_CHECK(ShapeUtil::IndexIsValid(
computation.parameter_instruction(dynamic_dimension.parameter_num)
->shape(),
dynamic_dimension.parameter_index));
TF_RET_CHECK(
dynamic_dimension.dimension <
ShapeUtil::GetSubshape(
computation.parameter_instruction(dynamic_dimension.parameter_num)
->shape(),
dynamic_dimension.parameter_index)
.rank());
return absl::OkStatus();
});
}
std::ostream& operator<<(std::ostream& out,
const DynamicParameterBinding& binding) {
out << binding.ToString();
return out;
}
} | #include "xla/hlo/ir/dynamic_parameter_binding.h"
#include <memory>
#include <optional>
#include <string>
#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/shape_util.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using DynamicParameterBindingTest = HloTestBase;
TEST_F(DynamicParameterBindingTest, SimpleBinding) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
a = f32[] parameter(0)
b = f32[10] parameter(1)
ROOT root = (f32[], f32[10]) tuple(%a, %b)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {}},
DynamicParameterBinding::DynamicDimension{1, {}, 0}));
auto test = [&](const DynamicParameterBinding& binding) {
std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
        binding.GetBinding(DynamicParameterBinding::DynamicDimension{
            /*parameter_num=*/1, /*parameter_index=*/{}, /*dimension=*/0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
TEST_F(DynamicParameterBindingTest, TupleBinding) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[10]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[10] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[10]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 0}));
auto test = [&](const DynamicParameterBinding& binding) {
std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
        binding.GetBinding(DynamicParameterBinding::DynamicDimension{
            /*parameter_num=*/0, /*parameter_index=*/{1}, /*dimension=*/0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({0}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
TEST_F(DynamicParameterBindingTest, TupleBindingWithMultiDimension) {
const std::string module_str = R"(
HloModule TEST
ENTRY main {
param = (f32[], f32[10, 10]) parameter(0)
gte1 = f32[] get-tuple-element(%param), index=0
gte2 = f32[10, 10] get-tuple-element(%param), index=1
ROOT root = (f32[], f32[10, 10]) tuple(%gte1, %gte2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(module_str));
DynamicParameterBinding binding;
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 0}));
TF_EXPECT_OK(
binding.Bind(DynamicParameterBinding::DynamicSizeParameter{0, {0}},
DynamicParameterBinding::DynamicDimension{0, {1}, 1}));
auto test = [&](const DynamicParameterBinding& binding) {
std::optional<DynamicParameterBinding::DynamicSizeParameter> param =
        binding.GetBinding(DynamicParameterBinding::DynamicDimension{
            /*parameter_num=*/0, /*parameter_index=*/{1}, /*dimension=*/0});
EXPECT_TRUE(param);
EXPECT_EQ(param->parameter_num, 0);
EXPECT_EQ(param->parameter_index, ShapeIndex({0}));
std::optional<DynamicParameterBinding::DynamicSizeParameter> param2 =
        binding.GetBinding(DynamicParameterBinding::DynamicDimension{
            /*parameter_num=*/0, /*parameter_index=*/{1}, /*dimension=*/0});
EXPECT_TRUE(param2);
EXPECT_EQ(param2->parameter_num, 0);
EXPECT_EQ(param2->parameter_index, ShapeIndex({0}));
TF_EXPECT_OK(binding.Verify(*module->entry_computation()));
};
test(binding);
}
}
} | 2,183 |
#ifndef XLA_HLO_IR_HLO_INSTRUCTION_H_
#define XLA_HLO_IR_HLO_INSTRUCTION_H_
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iosfwd>
#include <map>
#include <memory>
#include <optional>
#include <ostream>
#include <set>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/base/attributes.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/backend_config.h"
#include "xla/hlo/ir/collective_device_list.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_domain_metadata.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/ir/hlo_sharding.h"
#include "xla/hlo/ir/ptrvec.h"
#include "xla/layout.h"
#include "xla/literal.h"
#include "xla/printer.h"
#include "xla/service/hlo.pb.h"
#include "xla/service/mapped_ptr_container_sorter.h"
#include "xla/service/name_uniquer.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/gtl/iterator_range.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/protobuf.h"
namespace xla {
class HloComputation;
class HloModule;
absl::string_view PrintName(absl::string_view name, bool print_ids);
class HloPrintOptions {
public:
enum class PrintSubcomputationMode {
kOff,
kNameOnly,
kFullBodies,
kNonSequentialBodies,
};
constexpr HloPrintOptions()
: print_operand_index_annotation_interval_(5),
print_subcomputation_mode_(PrintSubcomputationMode::kNameOnly),
indent_amount_(0),
print_large_constants_(false),
print_only_essential_constants_(false),
print_metadata_(true),
print_metadata_only_op_name_(false),
print_backend_config_(true),
print_infeed_outfeed_config_(true),
compact_operands_(false),
include_layout_in_shapes_(true),
print_result_shape_(true),
print_operand_shape_(true),
print_operand_names_(true),
print_program_shape_(true),
print_percent_(true),
print_control_dependencies_(true),
canonicalize_instruction_names_(false),
is_in_nested_computation_(false),
print_ids_(true),
canonicalize_computations_(false),
print_extra_attributes_(true),
syntax_sugar_async_ops_(true),
print_name_after_closing_brace_(false) {}
static const HloPrintOptions& Default() {
ABSL_CONST_INIT static const HloPrintOptions options;
return options;
}
static HloPrintOptions ShortParsable() {
return HloPrintOptions()
.set_print_large_constants(true)
.set_print_subcomputation_mode(PrintSubcomputationMode::kNameOnly)
.set_print_metadata(false)
.set_print_backend_config(false)
.set_print_operand_shape(false)
.set_print_operand_index_annotation_interval(0)
.set_print_program_shape(false)
.set_print_percent(false)
.set_print_control_dependencies(false);
}
static HloPrintOptions Canonical() {
return HloPrintOptions()
.set_print_subcomputation_mode(PrintSubcomputationMode::kFullBodies)
.set_print_metadata(false)
.set_print_backend_config(false)
.set_compact_operands(false)
.set_print_operand_names(false)
.set_print_operand_shape(true)
.set_print_operand_index_annotation_interval(0)
.set_print_program_shape(false)
.set_print_percent(false)
.set_print_control_dependencies(false)
.set_canonicalize_instruction_names(true);
}
static HloPrintOptions Fingerprint() {
return Canonical()
.set_print_infeed_outfeed_config(false)
.set_print_only_essential_constants(true)
.set_print_ids(false)
.set_canonicalize_computations(true);
}
static HloPrintOptions ModuleFingerprint() {
return Fingerprint()
.set_print_operand_shape(false);
}
HloPrintOptions& set_print_large_constants(bool value) {
print_large_constants_ = value;
return *this;
}
HloPrintOptions& set_print_only_essential_constants(bool value) {
print_only_essential_constants_ = value;
return *this;
}
HloPrintOptions& set_print_subcomputation_mode(
PrintSubcomputationMode value) {
print_subcomputation_mode_ = value;
return *this;
}
HloPrintOptions& set_print_metadata(bool value) {
print_metadata_ = value;
return *this;
}
HloPrintOptions& set_print_metadata_only_op_name(bool value) {
print_metadata_only_op_name_ = value;
return *this;
}
HloPrintOptions& set_print_backend_config(bool value) {
print_backend_config_ = value;
return *this;
}
HloPrintOptions& set_print_infeed_outfeed_config(bool value) {
print_infeed_outfeed_config_ = value;
return *this;
}
HloPrintOptions& set_print_result_shape(bool value) {
print_result_shape_ = value;
return *this;
}
HloPrintOptions& set_print_operand_shape(bool value) {
print_operand_shape_ = value;
return *this;
}
HloPrintOptions& set_print_operand_index_annotation_interval(int64_t value) {
print_operand_index_annotation_interval_ = value;
return *this;
}
HloPrintOptions& set_print_operand_names(bool value) {
print_operand_names_ = value;
return *this;
}
HloPrintOptions& set_print_ids(bool value) {
print_ids_ = value;
return *this;
}
HloPrintOptions& set_print_extra_attributes(bool value) {
print_extra_attributes_ = value;
return *this;
}
HloPrintOptions& set_print_program_shape(bool value) {
print_program_shape_ = value;
return *this;
}
HloPrintOptions& set_print_percent(bool value) {
print_percent_ = value;
return *this;
}
HloPrintOptions& set_print_control_dependencies(bool value) {
print_control_dependencies_ = value;
return *this;
}
HloPrintOptions& set_syntax_sugar_async_ops(bool value) {
syntax_sugar_async_ops_ = value;
return *this;
}
HloPrintOptions& set_compact_operands(bool value) {
compact_operands_ = value;
return *this;
}
HloPrintOptions& set_include_layout_in_shapes(bool value) {
include_layout_in_shapes_ = value;
return *this;
}
HloPrintOptions& set_canonicalize_instruction_names(bool value) {
canonicalize_instruction_names_ = value;
return *this;
}
HloPrintOptions& set_canonicalize_computations(bool value) {
canonicalize_computations_ = value;
return *this;
}
HloPrintOptions& set_indent_amount(int value) {
indent_amount_ = value;
return *this;
}
HloPrintOptions& set_is_in_nested_computation(bool value) {
is_in_nested_computation_ = value;
return *this;
}
HloPrintOptions& set_print_name_after_closing_brace(bool value) {
print_name_after_closing_brace_ = value;
return *this;
}
bool print_large_constants() const { return print_large_constants_; }
bool print_only_essential_constants() const {
return print_only_essential_constants_;
}
PrintSubcomputationMode print_subcomputation_mode() const {
return print_subcomputation_mode_;
}
bool print_metadata() const { return print_metadata_; }
bool print_metadata_only_op_name() const {
return print_metadata_only_op_name_;
}
bool print_backend_config() const { return print_backend_config_; }
bool print_infeed_outfeed_config() const {
return print_infeed_outfeed_config_;
}
bool compact_operands() const { return compact_operands_; }
bool include_layout_in_shapes() const { return include_layout_in_shapes_; }
bool print_result_shape() const { return print_result_shape_; }
bool print_operand_shape() const { return print_operand_shape_; }
bool print_operand_names() const { return print_operand_names_; }
int64_t print_operand_index_annotation_interval() const {
return print_operand_index_annotation_interval_;
}
bool print_ids() const { return print_ids_; }
bool print_program_shape() const { return print_program_shape_; }
bool print_percent() const { return print_percent_; }
bool print_control_dependencies() const {
return print_control_dependencies_;
}
bool print_extra_attributes() const { return print_extra_attributes_; }
bool syntax_sugar_async_ops() const { return syntax_sugar_async_ops_; }
bool canonicalize_instruction_names() const {
return canonicalize_instruction_names_;
}
bool canonicalize_computations() const { return canonicalize_computations_; }
int indent_amount() const { return indent_amount_; }
  bool is_in_nested_computation() const { return is_in_nested_computation_; }
  bool print_name_after_closing_brace() const {
    return print_name_after_closing_brace_;
  }
private:
int64_t print_operand_index_annotation_interval_;
PrintSubcomputationMode print_subcomputation_mode_;
int indent_amount_;
bool print_large_constants_;
bool print_only_essential_constants_;
bool print_metadata_;
bool print_metadata_only_op_name_;
bool print_backend_config_;
bool print_infeed_outfeed_config_;
bool compact_operands_;
bool include_layout_in_shapes_;
bool print_result_shape_;
bool print_operand_shape_;
bool print_operand_names_;
bool print_program_shape_;
bool print_percent_;
bool print_control_dependencies_;
bool canonicalize_instruction_names_;
bool is_in_nested_computation_;
bool print_ids_;
bool canonicalize_computations_;
bool print_extra_attributes_;
bool syntax_sugar_async_ops_;
bool print_name_after_closing_brace_;
};
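// Maps instruction unique ids to stable "tmp_<n>" names so that canonical
// printing (see the canonicalize_* options above) does not depend on the
// original instruction names. Illustrative sketch only; `hlo` and `other` are
// hypothetical instructions:
//
//   CanonicalNameMap names;
//   names.Reserve(2);
//   const std::string& a = names.LookupOrInsert(hlo->unique_id());    // "tmp_0"
//   const std::string& b = names.LookupOrInsert(other->unique_id());  // "tmp_1"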
class CanonicalNameMap {
public:
const std::string& LookupOrInsert(int unique_id) {
std::string& canonical_name = canonical_name_map_[unique_id];
if (canonical_name.empty()) {
absl::StrAppend(&canonical_name, "tmp_", canonical_name_map_.size() - 1);
}
return canonical_name;
}
void Reserve(size_t size) { canonical_name_map_.reserve(size); }
private:
absl::flat_hash_map<int, std::string> canonical_name_map_;
};
class HloInstruction;
class HloInstructionInfo {
public:
HloInstruction* get() const { return inst_; }
HloInstruction& operator*() { return *inst_; }
HloInstruction* operator->() { return inst_; }
const HloInstruction& operator*() const { return *inst_; }
const HloInstruction* operator->() const { return inst_; }
HloOpcode opcode() const { return opcode_; }
HloInstruction* inst() const { return inst_; }
private:
friend class HloComputation;
HloOpcode opcode_;
HloInstruction* inst_;
};
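// Specialization that lets the mapped-pointer container sorter utilities
// extract the underlying HloInstruction pointer from an HloInstructionInfo
// wrapper.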
namespace mapped_ptr_container_sorter_internal {
template <typename T>
struct PtrGetter<const HloInstructionInfo&, const T*> {
static const T* Get(const HloInstructionInfo& p) { return p.get(); }
};
}
using HloInstructionList = std::vector<HloInstructionInfo>;
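// Input iterator over an HloInstructionList that skips entries whose
// instruction pointer is null (slots left behind when instructions are removed
// from a computation). Both the constructor and operator++ advance past null
// slots, so iteration only ever yields live instructions.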
template <typename UnderlyingList>
class HloInstructionIteratorBase {
public:
using iterator_category = std::input_iterator_tag;
using value_type = HloInstructionInfo;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;
HloInstructionIteratorBase(UnderlyingList* list, int begin_index,
int end_index)
: list_(list), current_(begin_index), end_index_(end_index) {
if (current_ < end_index_ && (*list_)[current_].inst() == nullptr) {
++*this;
}
}
HloInstruction* get() const { return (*list_)[current_].inst(); }
auto operator*() -> HloInstructionInfo { return (*list_)[current_]; }
HloInstructionIteratorBase& operator++() {
int next = current_;
do {
++next;
} while (next < end_index_ && (*list_)[next].inst() == nullptr);
current_ = next;
return *this;
}
HloInstructionIteratorBase operator++(int) {
HloInstructionIteratorBase temp(list_, current_, end_index_);
operator++();
return temp;
}
friend bool operator==(const HloInstructionIteratorBase& a,
const HloInstructionIteratorBase& b) {
return a.current_ == b.current_;
}
friend bool operator!=(const HloInstructionIteratorBase& a,
const HloInstructionIteratorBase& b) {
return !(a == b);
}
private:
UnderlyingList* list_;
int current_;
int end_index_;
};
using HloInstructionIterator = HloInstructionIteratorBase<HloInstructionList>;
using HloInstructionConstIterator =
HloInstructionIteratorBase<const HloInstructionList>;
template <typename WrappedIter>
class HloInstructionUnwrappingIteratorBase {
public:
using iterator_category = std::input_iterator_tag;
using value_type = HloInstruction*;
using difference_type = ptrdiff_t;
using pointer = value_type*;
using reference = value_type&;
explicit HloInstructionUnwrappingIteratorBase(WrappedIter iter)
: iter_(std::move(iter)) {}
auto operator*() -> value_type { return iter_.get(); }
HloInstructionUnwrappingIteratorBase& operator++() {
++iter_;
return *this;
}
HloInstructionUnwrappingIteratorBase operator++(int) {
HloInstructionUnwrappingIteratorBase temp(iter_);
operator++();
return temp;
}
friend bool operator==(const HloInstructionUnwrappingIteratorBase& a,
const HloInstructionUnwrappingIteratorBase& b) {
return a.iter_ == b.iter_;
}
friend bool operator!=(const HloInstructionUnwrappingIteratorBase& a,
const HloInstructionUnwrappingIteratorBase& b) {
return !(a == b);
}
private:
WrappedIter iter_;
};
using HloInstructionUnwrappingIterator =
HloInstructionUnwrappingIteratorBase<HloInstructionIterator>;
using HloInstructionUnwrappingConstIterator =
HloInstructionUnwrappingIteratorBase<HloInstructionConstIterator>;
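// An HLO instruction: the atomic unit of the high-level IR. Every instruction
// has an opcode, a result shape and operands, is created through one of the
// static Create* factories below, and is owned by an HloComputation.
// Illustrative sketch (`shape` is a hypothetical Shape):
//
//   auto p = HloInstruction::CreateParameter(0, shape, "p0");
//   auto neg = HloInstruction::CreateUnary(shape, HloOpcode::kNegate, p.get());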
class HloInstruction {
public:
enum class FusionKind {
kLoop,
kInput,
kOutput,
kCustom,
};
inline static constexpr char kMainExecutionThread[] = "main";
inline static constexpr char kHostThread[] = "host";
virtual ~HloInstruction() { DetachFromOperandsAndUsers(); }
void DetachFromOperandsAndUsers();
HloInstruction* AddInstruction(
std::unique_ptr<HloInstruction> derived_instruction);
static absl::StatusOr<std::unique_ptr<HloInstruction>> CreateFromProto(
const HloInstructionProto& proto,
const absl::flat_hash_map<int64_t, HloInstruction*>& instruction_map,
const absl::flat_hash_map<int64_t, HloComputation*>& computation_map = {},
bool prohibit_empty_literal = true);
static std::unique_ptr<HloInstruction> CreateParameter(
int64_t parameter_number, const Shape& shape, absl::string_view name);
static std::unique_ptr<HloInstruction> CreateConstant(Literal literal);
static std::unique_ptr<HloInstruction> CreateIota(const Shape& shape,
int64_t iota_dimension);
static std::unique_ptr<HloInstruction> CreateTopK(const Shape& shape,
HloInstruction* input,
int64_t k, bool largest);
static std::unique_ptr<HloInstruction> CreateGetTupleElement(
const Shape& shape, HloInstruction* operand, int64_t index);
static std::unique_ptr<HloInstruction> CreateGetTupleElement(
HloInstruction* operand, int64_t index);
static std::unique_ptr<HloInstruction> CreateRng(
const Shape& shape, RandomDistribution distribution,
absl::Span<HloInstruction* const> parameters);
static std::unique_ptr<HloInstruction> CreateRngBitGenerator(
const Shape& shape, HloInstruction* state, RandomAlgorithm algorithm);
static std::unique_ptr<HloInstruction> CreateRngGetAndUpdateState(
const Shape& shape, int64_t delta);
static std::unique_ptr<HloInstruction> CreateUnary(const Shape& shape,
HloOpcode opcode,
HloInstruction* operand);
static std::unique_ptr<HloInstruction> CreateBinary(const Shape& shape,
HloOpcode opcode,
HloInstruction* lhs,
HloInstruction* rhs);
static std::unique_ptr<HloInstruction> CreateTernary(const Shape& shape,
HloOpcode opcode,
HloInstruction* lhs,
HloInstruction* rhs,
HloInstruction* ehs);
static std::unique_ptr<HloInstruction> CreateVariadic(
const Shape& shape, HloOpcode opcode,
absl::Span<HloInstruction* const> operands);
static std::unique_ptr<HloInstruction> CreateMap(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* map_computation);
static std::unique_ptr<HloInstruction> CreateConvolve(
const Shape& shape, HloInstruction* lhs, HloInstruction* rhs,
int64_t feature_group_count, int64_t batch_group_count,
const Window& window,
const ConvolutionDimensionNumbers& dimension_numbers,
const PrecisionConfig& precision_config);
static std::unique_ptr<HloInstruction> CreateFft(
const Shape& shape, HloInstruction* operand, FftType fft_type,
absl::Span<const int64_t> fft_length);
static std::unique_ptr<HloInstruction> CreateAsyncStart(
const Shape& shape, absl::Span<HloInstruction* const> operands,
HloComputation* async_computation,
absl::string_view async_execution_thread = kMainExecutionThread);
static std::unique_ptr<HloInstruction> CreateAsyncUpdate(
const Shape& shape, HloInstruction* operand);
static std::unique_ptr<HloInstruction> CreateAsyncDone(
const Shape& shape, HloInstruction* operand);
static std::unique_ptr<HloInstruction> CreateCopyStart(
const Shape& shape, HloInstruction* operand,
std::optional<int> cross_program_prefetch_index = std::nullopt);
static std::un | #include "xla/hlo/ir/hlo_instruction.h"
#include <optional>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/literal.h"
#include "xla/protobuf_util.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/pattern_matcher_gmock.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/test_helpers.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "xla/window_util.h"
#include "xla/xla_data.pb.h"
#include "tsl/lib/core/status_test_util.h"
namespace xla {
namespace {
namespace m = ::xla::match;
using ::testing::ElementsAre;
using ::testing::UnorderedElementsAre;
class HloInstructionTest : public HloTestBase {
protected:
Shape r0f32_ = ShapeUtil::MakeShape(F32, {});
};
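// DFS visitor used by the tests below. For every node it visits it records the
// operand and user counts observed at visit time, and it checks that operands
// are always visited before their users.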
class OpAndUserCollectingVisitor : public DfsHloVisitorWithDefault {
public:
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
return Unimplemented("not implemented %s",
HloOpcodeString(hlo_instruction->opcode()));
}
absl::Status HandleParameter(HloInstruction* parameter) override {
EXPECT_FALSE(count_.contains(parameter));
count_[parameter] = GetCountsForNode(parameter);
return absl::OkStatus();
}
absl::Status HandleConstant(HloInstruction* constant) override {
EXPECT_FALSE(count_.contains(constant));
count_[constant] = GetCountsForNode(constant);
return absl::OkStatus();
}
absl::Status HandleAdd(HloInstruction* add) override {
auto lhs = add->operand(0);
auto rhs = add->operand(1);
EXPECT_FALSE(count_.contains(add));
EXPECT_TRUE(count_.contains(lhs));
EXPECT_TRUE(count_.contains(rhs));
count_[add] = GetCountsForNode(add);
return absl::OkStatus();
}
absl::Status HandleNegate(HloInstruction* negate) override {
auto operand = negate->operand(0);
EXPECT_FALSE(count_.contains(negate));
EXPECT_TRUE(count_.contains(operand));
count_[negate] = GetCountsForNode(negate);
return absl::OkStatus();
}
absl::Status HandleMap(HloInstruction* map) override {
EXPECT_FALSE(count_.contains(map));
for (HloInstruction* arg : map->operands()) {
EXPECT_TRUE(count_.contains(arg));
}
count_[map] = GetCountsForNode(map);
return absl::OkStatus();
}
absl::Status HandleReduce(HloInstruction* reduce) override {
auto arg = reduce->operand(0);
auto init_value = reduce->operand(1);
EXPECT_FALSE(count_.contains(reduce));
EXPECT_TRUE(count_.contains(arg));
EXPECT_TRUE(count_.contains(init_value));
count_[reduce] = GetCountsForNode(reduce);
return absl::OkStatus();
}
int64_t NumOperands(const HloInstruction* node) {
auto count_iterator = count_.find(node);
EXPECT_NE(count_.end(), count_iterator);
return count_iterator->second.operand_count;
}
int64_t NumUsers(const HloInstruction* node) {
auto count_iterator = count_.find(node);
EXPECT_NE(count_.end(), count_iterator);
return count_iterator->second.user_count;
}
private:
struct NumOpsAndUsers {
int64_t operand_count;
int64_t user_count;
};
NumOpsAndUsers GetCountsForNode(const HloInstruction* node) {
NumOpsAndUsers counts{node->operand_count(), node->user_count()};
return counts;
}
absl::flat_hash_map<const HloInstruction*, NumOpsAndUsers> count_;
};
TEST_F(HloInstructionTest, BasicProperties) {
auto parameter = HloInstruction::CreateParameter(1, r0f32_, "foo");
EXPECT_EQ(HloOpcode::kParameter, parameter->opcode());
EXPECT_TRUE(ShapeUtil::IsScalarWithElementType(parameter->shape(), F32));
EXPECT_FALSE(ShapeUtil::IsScalarWithElementType(parameter->shape(), S32));
  EXPECT_EQ(0, parameter->operand_count());
}
TEST_F(HloInstructionTest, UserWithTwoOperands) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_THAT(add->operands(), UnorderedElementsAre(foo, bar));
EXPECT_THAT(foo->users(), UnorderedElementsAre(add));
EXPECT_THAT(bar->users(), UnorderedElementsAre(add));
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(2, visitor.NumOperands(add));
EXPECT_EQ(0, visitor.NumUsers(add));
EXPECT_EQ(1, visitor.NumUsers(foo));
EXPECT_EQ(1, visitor.NumUsers(bar));
}
TEST_F(HloInstructionTest, MultipleUsers) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, foo->user_count());
EXPECT_EQ(1, bar->user_count());
EXPECT_EQ(0, exp1->user_count());
EXPECT_EQ(0, exp2->user_count());
EXPECT_EQ(0, add->user_count());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(2, visitor.NumOperands(add));
EXPECT_EQ(3, visitor.NumUsers(foo));
}
TEST_F(HloInstructionTest, RepeatedUser) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(1, foo->user_count());
EXPECT_EQ(2, add->operand_count());
}
TEST_F(HloInstructionTest, MultipleUsersAndOperands) {
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "param1"));
auto c0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto addleft = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0, c0));
auto addright = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, c0, param1));
auto addtotal = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, addleft, addright));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(addtotal->Accept(&visitor));
EXPECT_EQ(2, visitor.NumUsers(c0));
EXPECT_EQ(2, visitor.NumOperands(addleft));
EXPECT_EQ(2, visitor.NumOperands(addright));
EXPECT_EQ(2, visitor.NumOperands(addtotal));
}
TEST_F(HloInstructionTest, MultipleUsersAndOperandsWithUnaryOps) {
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32_, "param0"));
auto param1 = builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32_, "param1"));
auto c0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto neg1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, c0));
auto addleft = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, param0, neg1));
auto addright = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, neg1, param1));
auto addtotal = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, addleft, addright));
auto neg2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kNegate, addtotal));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(neg2->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(c0));
EXPECT_EQ(2, visitor.NumUsers(neg1));
EXPECT_EQ(2, visitor.NumOperands(addleft));
EXPECT_EQ(2, visitor.NumOperands(addright));
EXPECT_EQ(2, visitor.NumOperands(addtotal));
EXPECT_EQ(1, visitor.NumOperands(neg2));
EXPECT_EQ(0, visitor.NumUsers(neg2));
}
TEST_F(HloInstructionTest, TrivialMap) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape f32a100x10 = ShapeUtil::MakeShape(F32, {100, 10});
auto module = CreateNewVerifiedModule();
auto embedded_builder = HloComputation::Builder("f32+1");
auto param = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto value = embedded_builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.0)));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, param, value));
auto add_f32 = module->AddEmbeddedComputation(embedded_builder.Build());
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto map = builder.AddInstruction(
HloInstruction::CreateMap(f32a100x10, {param0}, add_f32));
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(map->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(param0));
EXPECT_EQ(0, visitor.NumUsers(map));
EXPECT_EQ(1, visitor.NumOperands(map));
}
TEST_F(HloInstructionTest, TrivialReduce) {
Shape r0f32 = ShapeUtil::MakeShape(F32, {});
Shape f32v100 = ShapeUtil::MakeShape(F32, {100});
Shape f32a100x10 = ShapeUtil::MakeShape(F32, {100, 10});
auto embedded_builder = HloComputation::Builder("f32+f32");
auto paramx = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(0, r0f32, "x"));
auto paramy = embedded_builder.AddInstruction(
HloInstruction::CreateParameter(1, r0f32, "y"));
embedded_builder.AddInstruction(
HloInstruction::CreateBinary(r0f32, HloOpcode::kAdd, paramx, paramy));
auto module = CreateNewVerifiedModule();
auto add_f32 = module->AddEmbeddedComputation(embedded_builder.Build());
HloComputation::Builder builder(TestName());
auto param0 = builder.AddInstruction(
HloInstruction::CreateParameter(0, f32a100x10, "p"));
auto const0 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.0f)));
builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
  auto reduce = builder.AddInstruction(
      HloInstruction::CreateReduce(f32v100, param0, const0,
                                   /*dimensions_to_reduce=*/{1}, add_f32));
module->AddEntryComputation(builder.Build());
OpAndUserCollectingVisitor visitor;
ASSERT_IS_OK(reduce->Accept(&visitor));
EXPECT_EQ(1, visitor.NumUsers(param0));
EXPECT_EQ(1, visitor.NumUsers(const0));
EXPECT_EQ(0, visitor.NumUsers(reduce));
EXPECT_EQ(2, visitor.NumOperands(reduce));
}
TEST_F(HloInstructionTest, ReplaceUseInBinaryOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto add_foofoo = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
add_foobar, add_foofoo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_EQ(1, bar->user_count());
ASSERT_IS_OK(foo->ReplaceUseWith(add_foofoo, bar));
EXPECT_EQ(1, foo->user_count());
EXPECT_EQ(2, bar->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(add_foobar));
EXPECT_THAT(add_foobar->operands(), ElementsAre(foo, bar));
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, add_foofoo));
EXPECT_THAT(add_foobar->operands(), ElementsAre(foo, bar));
EXPECT_THAT(add_foofoo->operands(), ElementsAre(bar, bar));
}
TEST_F(HloInstructionTest, ReplaceUseInVariadicOp) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto baz =
builder.AddInstruction(HloInstruction::CreateParameter(2, r0f32_, "baz"));
auto tuple =
builder.AddInstruction(HloInstruction::CreateTuple({foo, bar, baz, foo}));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(tuple, add_foobar));
ASSERT_IS_OK(foo->ReplaceUseWith(tuple, bar));
EXPECT_THAT(foo->users(), UnorderedElementsAre(add_foobar));
EXPECT_THAT(tuple->operands(), ElementsAre(bar, bar, baz, bar));
}
TEST_F(HloInstructionTest, ReplaceUseInUnaryOp) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kLog, foo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(exp, log));
EXPECT_EQ(0, bar->user_count());
ASSERT_IS_OK(foo->ReplaceUseWith(exp, bar));
EXPECT_EQ(1, foo->user_count());
EXPECT_THAT(foo->users(), UnorderedElementsAre(log));
EXPECT_THAT(log->operands(), ElementsAre(foo));
EXPECT_EQ(1, bar->user_count());
EXPECT_EQ(*bar->users().begin(), exp);
EXPECT_EQ(1, exp->operands().size());
EXPECT_EQ(*exp->operands().begin(), bar);
}
TEST_F(HloInstructionTest, ReplaceAllUsesWithInBinaryOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto add_foofoo = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, foo));
builder.AddInstruction(HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd,
add_foobar, add_foofoo));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(2, foo->user_count());
EXPECT_EQ(1, bar->user_count());
ASSERT_IS_OK(foo->ReplaceAllUsesWith(bar));
EXPECT_EQ(0, foo->user_count());
EXPECT_EQ(2, bar->user_count());
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, add_foofoo));
}
TEST_F(HloInstructionTest, ReplaceAllUsesInMultipleOps) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto bar =
builder.AddInstruction(HloInstruction::CreateParameter(1, r0f32_, "bar"));
auto add_foobar = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, foo, bar));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto tuple = builder.AddInstruction(HloInstruction::CreateTuple({foo, bar}));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
EXPECT_EQ(3, foo->user_count());
EXPECT_EQ(2, bar->user_count());
ASSERT_IS_OK(foo->ReplaceAllUsesWith(bar));
EXPECT_EQ(0, foo->user_count());
EXPECT_EQ(3, bar->user_count());
EXPECT_THAT(bar->users(), UnorderedElementsAre(add_foobar, exp, tuple));
}
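// Visitor that records nodes both from DefaultAction and from Postprocess so
// the tests below can check that every visited node is post-processed, in the
// same order and exactly once.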
class NodeCollectorAndPostProcessor : public DfsHloVisitorWithDefault {
public:
NodeCollectorAndPostProcessor() {}
absl::Status Postprocess(HloInstruction* hlo) override {
post_processed_nodes_.push_back(hlo);
return absl::OkStatus();
}
absl::Status DefaultAction(HloInstruction* hlo_instruction) override {
visited_nodes_.push_back(hlo_instruction);
return absl::OkStatus();
}
const std::vector<const HloInstruction*>& visited_nodes() {
return visited_nodes_;
}
const std::vector<const HloInstruction*>& post_processed_nodes() {
return post_processed_nodes_;
}
private:
std::vector<const HloInstruction*> visited_nodes_;
std::vector<const HloInstruction*> post_processed_nodes_;
};
bool Distinct(const std::vector<const HloInstruction*>& vec) {
std::set<const HloInstruction*> distinct_nodes(vec.begin(), vec.end());
return distinct_nodes.size() == vec.size();
}
TEST_F(HloInstructionTest, PostProcessAllVisitedNodes) {
HloComputation::Builder builder(TestName());
auto foo =
builder.AddInstruction(HloInstruction::CreateParameter(0, r0f32_, "foo"));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, foo));
auto log = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kLog, foo));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, exp, log));
auto module = CreateNewVerifiedModule();
module->AddEntryComputation(builder.Build());
NodeCollectorAndPostProcessor visitor;
ASSERT_IS_OK(add->Accept(&visitor));
EXPECT_EQ(visitor.visited_nodes(), visitor.post_processed_nodes());
EXPECT_TRUE(Distinct(visitor.visited_nodes()));
}
TEST_F(HloInstructionTest, PostProcessAllVisitedNodesMultiComputation) {
const std::string& hlo_string = R"(
HloModule axpy_module
calculate_alpha {
c.1 = f32[] constant(1)
c.2 = f32[] constant(2)
c.3 = f32[] add(c.1, c.2)
c.4 = f32[] constant(4)
ROOT ret = f32[] multiply(c.4, c.3)
}
ENTRY axpy_computation {
p.0 = f32[10] parameter(0)
p.1 = f32[10] parameter(1)
add.0 = f32[10] add(p.0, p.1)
alpha = f32[] call(), to_apply=calculate_alpha
broadcast = f32[10] broadcast(alpha), dimensions={}
p.2 = f32[10] parameter(2)
y = f32[10] multiply(broadcast, p.2)
x = f32[10] subtract(y, add.0)
p.3 = f32[10] parameter(3)
ROOT add.1 = f32[10] add(x, p.3)
}
)";
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
ParseAndReturnVerifiedModule(hlo_string));
HloInstruction* add1 = FindInstruction(module.get(), "add.1");
EXPECT_EQ(add1, module->entry_computation()->root_instruction());
NodeCollectorAndPostProcessor visitor;
  ASSERT_IS_OK(add1->Accept(&visitor, /*call_finish_visit=*/true,
                            /*ignore_control_predecessors=*/false,
                            /*cross_computation=*/true));
EXPECT_EQ(visitor.visited_nodes(), visitor.post_processed_nodes());
EXPECT_TRUE(Distinct(visitor.visited_nodes()));
}
TEST_F(HloInstructionTest, SingletonFusionOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, BinaryFusionOp) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{add}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant1, constant2));
EXPECT_THAT(constant1->users(), ElementsAre(fusion));
EXPECT_THAT(constant2->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, ChainFusionOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp3, exp2, exp1}, HloInstruction::FusionKind::kLoop);
EXPECT_THAT(fusion->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(fusion));
}
TEST_F(HloInstructionTest, PreserveMetadataInFusionAndClone) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
OpMetadata metadata;
metadata.set_op_name("tf_op");
exp1->set_metadata(metadata);
exp2->set_metadata(metadata);
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* fusion = computation->CreateFusionInstruction(
{exp2, exp1}, HloInstruction::FusionKind::kLoop);
EXPECT_TRUE(protobuf_util::ProtobufEquals(metadata, fusion->metadata()));
EXPECT_TRUE(protobuf_util::ProtobufEquals(
metadata, fusion->fused_expression_root()->metadata()));
EXPECT_TRUE(protobuf_util::ProtobufEquals(
metadata, fusion->fused_expression_root()->operand(0)->metadata()));
std::string new_name = "foobarfoo";
auto cloned = fusion->CloneWithNewOperands(fusion->shape(), {}, new_name);
EXPECT_TRUE(protobuf_util::ProtobufEquals(metadata, fusion->metadata()));
size_t index = cloned->name().rfind(new_name);
EXPECT_TRUE(index != std::string::npos);
}
TEST_F(HloInstructionTest, BinaryCallOp) {
HloComputation::Builder builder(TestName());
auto constant1 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto constant2 = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(42.1f)));
auto add = builder.AddInstruction(HloInstruction::CreateBinary(
r0f32_, HloOpcode::kAdd, constant1, constant2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({add});
EXPECT_THAT(call->operands(), ElementsAre(constant1, constant2));
EXPECT_THAT(constant1->users(), ElementsAre(call));
EXPECT_THAT(constant2->users(), ElementsAre(call));
}
TEST_F(HloInstructionTest, ChainCallOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({exp3, exp2, exp1});
EXPECT_THAT(call->operands(), ElementsAre(constant));
EXPECT_THAT(constant->users(), ElementsAre(call));
}
TEST_F(HloInstructionTest, MultiOutputCallOp) {
HloComputation::Builder builder(TestName());
auto constant = builder.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(1.1f)));
auto exp1 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto exp2 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp1));
auto exp3 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, exp2));
auto exp4 = builder.AddInstruction(
HloInstruction::CreateUnary(r0f32_, HloOpcode::kExp, constant));
auto add = builder.AddInstruction(
HloInstruction::CreateBinary(r0f32_, HloOpcode::kAdd, exp3, exp4));
auto module = CreateNewVerifiedModule();
auto* computation = module->AddEntryComputation(builder.Build());
auto* call = computation->CreateCallInstruction({exp3, exp2, exp1});
  call->AppendInstructionIntoCalledComputation(exp4, /*add_output=*/true);
EXPECT_THAT(call->operands(), ElementsAre(constant));
EXPECT_EQ(add->operand(0)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_THAT(add->operand(0)->operands(), ElementsAre(call));
EXPECT_EQ(add->operand(1)->opcode(), HloOpcode::kGetTupleElement);
EXPECT_THAT(add->operand(1)->operands(), ElementsAre(call));
}
TEST_F(HloInstructionTest, AsyncOp) {
HloComputation::Builder builder(TestName());
auto | 2,184 |
#ifndef XLA_HLO_EVALUATOR_HLO_EVALUATOR_H_
#define XLA_HLO_EVALUATOR_HLO_EVALUATOR_H_
#include "absl/log/check.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "Eigen/Core"
#include "xla/comparison_util.h"
#include "xla/hlo/ir/dfs_hlo_visitor.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/shape.h"
#include "xla/status_macros.h"
#include "tsl/platform/errors.h"
#define _USE_MATH_DEFINES
#include <complex>
#include <cstdint>
#include <functional>
#include <memory>
#include <optional>
#include <random>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/node_hash_map.h"
#include "absl/status/statusor.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape_util.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
namespace xla {
struct ParsedStaticWhileLoop {
int64_t trip_count = -1;
int64_t induction_var_index = -1;
int64_t induction_var_init_value = -1;
int64_t step_size = -1;
int64_t loop_bound = -1;
};
struct ParsedWhileLoop {
std::optional<ParsedStaticWhileLoop> static_while_loop;
bool is_dynamic() const { return !static_while_loop.has_value(); }
};
constexpr ParsedWhileLoop kParsedDynamicWhileLoop = ParsedWhileLoop();
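// Attempts to pattern-match `while_op` as a counted loop. Returns std::nullopt
// when the instruction cannot be analyzed, a ParsedWhileLoop with
// static_while_loop filled in when the trip count and induction variable can
// be derived statically, and kParsedDynamicWhileLoop when the loop is
// recognized but its bounds are data dependent.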
std::optional<ParsedWhileLoop> PatternMatchParseWhileLoop(
const HloInstruction* while_op);
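// Evaluates HLO on the host and produces the result as a Literal. A minimal
// sketch of the typical flow, where `module`, `arg0` and `arg1` are
// hypothetical:
//
//   HloEvaluator evaluator;
//   absl::StatusOr<Literal> result =
//       evaluator.Evaluate(*module->entry_computation(), {&arg0, &arg1});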
class HloEvaluator : public ConstDfsHloVisitorWithDefault {
public:
explicit HloEvaluator(int64_t max_loop_iterations = -1);
virtual std::unique_ptr<HloEvaluator> CreateEmbedded(
int64_t max_loop_iterations) {
auto result = std::make_unique<HloEvaluator>(max_loop_iterations);
result->set_custom_call_handler(custom_call_handler_);
return result;
}
virtual void OnEvaluateComputation(const HloComputation& computation) {}
absl::StatusOr<Literal> Evaluate(
const HloModule& module, absl::Span<const Literal* const> arg_literals) {
return Evaluate(*module.entry_computation(), arg_literals);
}
template <typename Dummy = void>
absl::StatusOr<Literal> Evaluate(const HloModule& module,
absl::Span<const Literal> arg_literals) {
return Evaluate(*module.entry_computation(), arg_literals);
}
absl::StatusOr<Literal> Evaluate(
const HloComputation& computation,
absl::Span<const Literal* const> arg_literals);
template <typename Dummy = void>
absl::StatusOr<Literal> Evaluate(const HloComputation& computation,
absl::Span<const Literal> arg_literals) {
std::vector<const Literal*> arg_literal_ptrs;
for (const auto& l : arg_literals) {
arg_literal_ptrs.push_back(&l);
}
return Evaluate(computation, arg_literal_ptrs);
}
absl::StatusOr<Literal> Evaluate(
const HloInstruction* instruction,
bool recursively_evaluate_nonconstant_operands = false);
bool TryEvaluate(const HloInstruction* instruction, Literal* result,
bool recursively_evaluate_nonconstant_operands = false);
absl::StatusOr<Literal> EvaluateWithSubstitutions(
const HloInstruction* instruction,
const absl::flat_hash_map<const HloInstruction*, const Literal*>&
substitutions);
absl::StatusOr<Literal> EvaluateElementwiseBinaryOp(HloOpcode opcode,
const Literal& lhs,
const Literal& rhs);
absl::StatusOr<Literal> EvaluateElementwiseUnaryOp(HloOpcode opcode,
const Literal& operand);
absl::StatusOr<Literal> EvaluateElementwiseTernaryOp(HloOpcode opcode,
const Literal& lhs,
const Literal& rhs,
const Literal& ehs);
absl::StatusOr<Literal> EvaluateElementwiseCompareOp(
ComparisonDirection direction, const Literal& lhs, const Literal& rhs);
absl::StatusOr<Literal> EvaluateDotOp(const DotDimensionNumbers& dim_numbers,
const PrecisionConfig& precision_config,
const Literal& lhs, const Literal& rhs);
void set_dynamic_dimension_inference(
DynamicDimensionInference* dynamic_dimension_inference) {
dynamic_dimension_inference_ = dynamic_dimension_inference;
}
DynamicDimensionInference* dynamic_dimension_inference() {
return dynamic_dimension_inference_;
}
void set_use_fast_path(bool value) { use_fast_path_ = value; }
void set_reduce_use_fast_path(bool value) { use_fast_path_reduce_ = value; }
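  // Hook used to evaluate kCustomCall instructions, which the evaluator cannot
  // interpret by itself. Purely illustrative handler that forwards its first
  // operand (`evaluator` is a hypothetical instance):
  //
  //   evaluator.set_custom_call_handler(
  //       [](const HloInstruction* custom_call,
  //          absl::Span<const Literal*> operands) -> absl::StatusOr<Literal> {
  //         return operands[0]->Clone();
  //       });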
using CustomCallHandler = std::function<absl::StatusOr<Literal>(
const HloInstruction* custom_call, absl::Span<const Literal*> operands)>;
void set_custom_call_handler(CustomCallHandler handler) {
custom_call_handler_ = std::move(handler);
}
using TraceMACHandler = std::function<void(
int64_t result_index, int64_t lhs_index, int64_t rhs_index)>;
void set_trace_mac_handler(TraceMACHandler handler) {
trace_mac_handler_ = std::move(handler);
}
static std::unique_ptr<Array2D<Eigen::half>> MatmulArray2D(
const Array2D<Eigen::half>& lhs, const Array2D<Eigen::half>& rhs);
static std::unique_ptr<Array2D<float>> MatmulArray2D(
const Array2D<float>& lhs, const Array2D<float>& rhs);
static std::unique_ptr<Array2D<double>> MatmulArray2D(
const Array2D<double>& lhs, const Array2D<double>& rhs);
static std::unique_ptr<Array2D<std::complex<float>>> MatmulArray2D(
const Array2D<std::complex<float>>& lhs,
const Array2D<std::complex<float>>& rhs);
static std::unique_ptr<Array2D<std::complex<double>>> MatmulArray2D(
const Array2D<std::complex<double>>& lhs,
const Array2D<std::complex<double>>& rhs);
static std::unique_ptr<Array2D<int32_t>> MatmulArray2D(
const Array2D<int32_t>& lhs, const Array2D<int32_t>& rhs);
protected:
absl::Status EvaluateInternal(
const HloInstruction* instruction, const ShapeIndex& shape_index = {},
bool recursively_evaluate_nonconstant_operands = false);
absl::Status EvaluateParameterFromCallerArgument(
const HloInstruction* parameter, const ShapeIndex& shape_index);
std::vector<int64_t> GetS64Indices(
absl::Span<HloInstruction* const> start_indices);
static DimensionVector MakeDimMultipliers(const Shape& shape);
template <typename ReturnT, typename ElementwiseT>
friend class HloEvaluatorTypedVisitor;
absl::Status DefaultAction(const HloInstruction* hlo) override {
return hlo->Visit(typed_visitors_[hlo->shape().element_type()].get());
}
absl::Status Preprocess(const HloInstruction* hlo) override;
absl::Status Postprocess(const HloInstruction* hlo) override;
absl::Status HandleBitcast(const HloInstruction* bitcast) override;
absl::Status HandleBitcastConvert(const HloInstruction* convert) override;
absl::Status HandleGetDimensionSize(
const HloInstruction* get_dimension_size) override;
absl::Status HandleSetDimensionSize(
const HloInstruction* set_dimension_size) override;
absl::Status HandleParameter(const HloInstruction* parameter) override;
absl::Status HandleInfeed(const HloInstruction* infeed) override;
absl::Status HandleConstant(const HloInstruction* constant) override;
absl::Status HandleConcatenate(const HloInstruction* concatenate) override;
absl::Status HandleReshape(const HloInstruction* reshape) override;
absl::Status HandleTranspose(const HloInstruction* transpose) override;
absl::Status HandleIsFinite(const HloInstruction* is_finite) override;
absl::Status HandleCompare(const HloInstruction* compare) override;
absl::Status HandleTuple(const HloInstruction* tuple) override;
absl::Status HandleFft(const HloInstruction* fft) override;
absl::Status HandleGather(const HloInstruction* gather) override;
absl::Status HandleScatter(const HloInstruction* hlo) override;
absl::Status HandleGetTupleElement(
const HloInstruction* get_tuple_element) override;
absl::Status HandleAsyncStart(const HloInstruction* async_start) override;
absl::Status HandleAsyncUpdate(const HloInstruction* async_update) override;
absl::Status HandleAsyncDone(const HloInstruction* async_done) override;
absl::Status HandleCopy(const HloInstruction* copy) override;
absl::Status HandleCopyStart(const HloInstruction* copy_start) override;
absl::Status HandleCopyDone(const HloInstruction* copy_done) override;
absl::Status HandleConditional(const HloInstruction* conditional) override;
absl::Status HandleConvert(const HloInstruction* convert) override;
absl::Status HandleCall(const HloInstruction* call) override;
absl::Status HandleDynamicSlice(const HloInstruction* dynamic_slice) override;
absl::Status HandleDynamicUpdateSlice(const HloInstruction* dus) override;
absl::Status HandleFusion(const HloInstruction* fusion) override;
absl::Status HandleWhile(const HloInstruction* while_hlo) override;
absl::Status HandleSelect(const HloInstruction* select) override;
absl::Status HandleBroadcast(const HloInstruction* broadcast) override;
absl::Status HandleAfterAll(const HloInstruction* after_all) override;
absl::Status HandleAddDependency(
const HloInstruction* add_dependency) override;
absl::Status HandleReverse(const HloInstruction* reverse) override;
absl::Status HandleSelectAndScatter(
const HloInstruction* select_and_scatter) override;
absl::Status HandleSlice(const HloInstruction* slice) override;
absl::Status HandleSort(const HloInstruction* sort) override;
absl::Status HandleStochasticConvert(
const HloInstruction* stochastic_convert) override;
absl::Status HandleReal(const HloInstruction* real) override;
absl::Status HandleImag(const HloInstruction* imag) override;
absl::Status HandleComplex(const HloInstruction* complex) override;
absl::Status HandleReduce(const HloInstruction* hlo) override;
absl::Status HandleReduceWindow(const HloInstruction* hlo) override;
absl::Status HandleMap(const HloInstruction* map) override;
absl::Status HandleCustomCall(const HloInstruction* custom_call) override;
absl::Status HandleBatchNormGrad(
const HloInstruction* batch_norm_grad) override {
return Unimplemented("BatchNormGrad HLO is unsupported by the evaluator.");
}
absl::Status HandleBatchNormInference(
const HloInstruction* batch_norm_inference) override {
return Unimplemented(
"BatchNormInference HLO is unsupported by the evaluator.");
}
absl::Status HandleBatchNormTraining(
const HloInstruction* batch_norm_training) override {
return Unimplemented(
"BatchNormTraining HLO is unsupported by the evaluator.");
}
absl::Status HandleOutfeed(const HloInstruction* outfeed) override {
return Unimplemented("Outfeed HLO is unsupported by the evaluator.");
}
const Literal& GetEvaluatedLiteralFor(const HloInstruction* hlo) {
if (hlo->IsConstant()) {
return hlo->literal();
}
if (hlo->opcode() == HloOpcode::kParameter && !arg_literals_.empty()) {
return *arg_literals_.at(hlo->parameter_number());
}
auto it = evaluated_.find(hlo);
CHECK(it != evaluated_.end())
<< "could not find evaluated value for: " << hlo->ToString();
return it->second;
}
bool IsAlreadyEvaluated(const HloInstruction* hlo,
const ShapeIndex& shape_index = {}) {
if (hlo->IsConstant()) {
return true;
}
if (hlo->opcode() == HloOpcode::kParameter && !arg_literals_.empty()) {
return true;
}
auto it = evaluated_.find(hlo);
if (it == evaluated_.end()) {
return false;
}
return it->second.IsDetermined(shape_index);
}
absl::node_hash_map<const HloInstruction*, Literal> evaluated_;
ShapeIndex visitor_shape_index_;
bool enable_partial_evaluation_ = false;
std::unique_ptr<CallGraph> call_graph_cache_;
std::unique_ptr<TuplePointsToAnalysis> tuple_points_to_analysis_cache_;
bool use_fast_path_ = false;
bool use_fast_path_reduce_ = true;
private:
template <typename ReturnT, typename NativeT>
static absl::StatusOr<Literal> ElementWiseUnaryOpImpl(
const HloInstruction* instruction,
const std::function<ReturnT(NativeT)>& unary_op,
const Literal& operand_literal) {
const Shape& shape = instruction->shape();
const auto* operand = instruction->operand(0);
TF_RET_CHECK(ShapeUtil::SameDimensions(shape, operand->shape()));
Literal result(shape);
TF_RETURN_IF_ERROR(result.PopulateParallel<ReturnT>(
[&](absl::Span<const int64_t> multi_index, int) {
return unary_op(operand_literal.Get<NativeT>(multi_index));
}));
return std::move(result);
}
std::unique_ptr<ConstDfsHloVisitor> typed_visitors_[PrimitiveType_ARRAYSIZE];
std::vector<const Literal*> arg_literals_;
int64_t max_loop_iterations_ = 0;
uint64_t seed_ = 0;
std::minstd_rand0 engine_;
DynamicDimensionInference* dynamic_dimension_inference_ = nullptr;
CustomCallHandler custom_call_handler_;
TraceMACHandler trace_mac_handler_;
HloEvaluator(const HloEvaluator&) = delete;
HloEvaluator& operator=(const HloEvaluator&) = delete;
};
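// Standalone float matmul helper mirroring HloEvaluator::MatmulArray2D above.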
std::unique_ptr<Array2D<float>> MatmulArray2D(const Array2D<float>& lhs,
const Array2D<float>& rhs);
}
#endif
#include "xla/hlo/evaluator/hlo_evaluator.h"
#include <algorithm>
#include <atomic>
#include <cmath>
#include <complex>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <random>
#include <string>
#include <utility>
#include <variant>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/base/internal/endian.h"
#include "absl/cleanup/cleanup.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/inlined_vector.h"
#include "absl/functional/function_ref.h"
#include "absl/memory/memory.h"
#include "absl/numeric/bits.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "Eigen/Core"
#include "xla/array2d.h"
#include "xla/comparison_util.h"
#include "xla/hlo/evaluator/hlo_evaluator_typed_visitor.h"
#include "xla/hlo/ir/dfs_hlo_visitor_with_default.h"
#include "xla/hlo/ir/hlo_casting_utils.h"
#include "xla/hlo/ir/hlo_clone_context.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_instructions.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/hlo/utils/hlo_query.h"
#include "xla/index_util.h"
#include "xla/layout.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/primitive_util.h"
#include "xla/service/call_graph.h"
#include "xla/service/compilation_environments.h"
#include "xla/service/cpu/runtime_single_threaded_matmul.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/logical_buffer.h"
#include "xla/service/pattern_matcher.h"
#include "xla/service/shape_inference.h"
#include "xla/service/tuple_points_to_analysis.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/status_macros.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/cpu_info.h"
#include "tsl/platform/env.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/logging.h"
#include "tsl/platform/status.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using primitive_util::NativeTypeOf;
template <typename OperandT>
absl::StatusOr<Literal> Compare(const Shape& shape, Comparison comparison,
LiteralSlice lhs_literal,
LiteralSlice rhs_literal) {
auto populate = [&](auto compare_op) -> absl::StatusOr<Literal> {
Literal result(shape);
TF_RETURN_IF_ERROR(result.PopulateParallel<bool>(
        [&](absl::Span<const int64_t> multi_index, int /*thread_id*/) {
auto lhs = lhs_literal.Get<OperandT>(multi_index);
auto rhs = rhs_literal.Get<OperandT>(multi_index);
if constexpr (is_specialized_floating_point_v<OperandT>) {
if (comparison.IsTotalOrder()) {
return compare_op(ToSignMagnitude(lhs), ToSignMagnitude(rhs));
}
}
return compare_op(lhs, rhs);
}));
return std::move(result);
};
switch (comparison.GetDirection()) {
case ComparisonDirection::kEq:
return populate([](auto lhs, auto rhs) { return lhs == rhs; });
case ComparisonDirection::kNe:
return populate([](auto lhs, auto rhs) { return lhs != rhs; });
case ComparisonDirection::kGe:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs >= rhs; });
}
break;
case ComparisonDirection::kGt:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs > rhs; });
}
break;
case ComparisonDirection::kLe:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs <= rhs; });
}
break;
case ComparisonDirection::kLt:
if constexpr (!is_complex_v<OperandT>) {
return populate([](auto lhs, auto rhs) { return lhs < rhs; });
}
break;
}
LOG(FATAL) << "unhandled direction for conversion to Comparison: "
<< comparison.ToString();
}
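// Evaluates `instruction` with a fresh evaluator, recursively folding its
// non-constant operands, and returns the first element of the result as a
// bool; std::nullopt if evaluation fails.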
std::optional<bool> GetInstructionStaticValueAsBool(
const HloInstruction* instruction) {
HloEvaluator evaluator;
  absl::StatusOr<Literal> static_value = evaluator.Evaluate(
      instruction, /*recursively_evaluate_nonconstant_operands=*/true);
if (static_value.ok()) {
return static_value->GetFirstElement<bool>();
}
return std::nullopt;
}
template <PrimitiveType kType>
struct PopulateParallelImpl {
using NativeT = NativeTypeOf<kType>;
static absl::Status Run(
Literal& literal,
absl::FunctionRef<Literal(absl::Span<const int64_t>, int)>
literal_generator) {
return literal.PopulateParallel<NativeT>(
[&literal_generator](absl::Span<const int64_t> output_index,
int thread_id) {
return literal_generator(output_index, thread_id)
.template Get<NativeT>({});
});
}
};
template <PrimitiveType kType>
struct PopulateImpl {
using NativeT = NativeTypeOf<kType>;
static absl::Status Run(
Literal& literal,
absl::FunctionRef<Literal(absl::Span<const int64_t>)> literal_generator) {
return literal.Populate<NativeT>(
[&literal_generator](absl::Span<const int64_t> output_index) {
return literal_generator(output_index).template Get<NativeT>({});
});
}
};
template <template <PrimitiveType> typename Trait, typename F>
absl::Status Apply(Literal& literal, F&& literal_generator) {
return primitive_util::PrimitiveTypeSwitch<absl::Status>(
[&, literal_generator = std::forward<F>(literal_generator)](
auto primitive_type_constant) -> absl::Status {
if constexpr (primitive_util::IsArrayType(primitive_type_constant)) {
return Trait<primitive_type_constant>::Run(
literal, std::move(literal_generator));
}
LOG(FATAL) << "Unhandled primitive type "
<< literal.shape().element_type();
},
literal.shape().element_type());
}
constexpr absl::string_view kEvalErrorDetailUrl = "EvalErrorDetailUrl";
enum class EvalErrorDetail : uint32_t {
kDynamicValueDependence = 0,
};
std::optional<EvalErrorDetail> ParseEvalErrorDetail(const absl::Status& error) {
auto error_detail = error.GetPayload(kEvalErrorDetailUrl);
  // Missing or empty payload: no parsable detail.
  if (!error_detail.has_value() || error_detail->empty()) {
return std::nullopt;
}
return static_cast<EvalErrorDetail>(
absl::little_endian::Load32(error_detail->Flatten().data()));
}
absl::Status MakeEvalErrorDueToParamOrInfeed(
const HloInstruction& eval_instruction) {
absl::Status error = absl::FailedPreconditionError(absl::StrCat(
"Failed to evaluate instruction (", eval_instruction.name(),
") since it depends on infeed or parameters to its parent computation (",
eval_instruction.parent()->name(), ")."));
std::string error_payload;
error_payload.resize(sizeof(EvalErrorDetail));
absl::little_endian::Store32(
const_cast<char*>(error_payload.data()),
static_cast<uint32_t>(EvalErrorDetail::kDynamicValueDependence));
error.SetPayload(kEvalErrorDetailUrl, absl::Cord(error_payload));
return error;
}
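// Result of trying to reduce a value to a single integer: static_value holds
// the concrete value when evaluation succeeded, and is empty (is_dynamic())
// when the value depends on dynamic inputs such as parameters or infeed.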
struct DynamicOrStaticInteger {
std::optional<int64_t> static_value;
bool is_dynamic() const { return !static_value.has_value(); }
std::string ToString() const {
return is_dynamic() ? std::string("DYNAMIC") : absl::StrCat(*static_value);
}
};
std::optional<DynamicOrStaticInteger> GetInstructionValueAsInteger(
const HloInstruction* instruction) {
HloEvaluator evaluator;
  absl::StatusOr<Literal> static_value = evaluator.Evaluate(
      instruction, /*recursively_evaluate_nonconstant_operands=*/true);
if (static_value.ok()) {
if (instruction->shape().element_type() == PrimitiveType::PRED) {
return DynamicOrStaticInteger{
static_cast<int64_t>(static_value->GetFirstElement<bool | #include "xla/hlo/evaluator/hlo_evaluator.h"
#include <array>
#include <complex>
#include <cstdint>
#include <initializer_list>
#include <limits>
#include <memory>
#include <numeric>
#include <optional>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/container/flat_hash_set.h"
#include "absl/log/check.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/types/span.h"
#include "xla/array2d.h"
#include "xla/array3d.h"
#include "xla/array4d.h"
#include "xla/client/xla_builder.h"
#include "xla/comparison_util.h"
#include "xla/debug_options_flags.h"
#include "xla/error_spec.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/layout_util.h"
#include "xla/literal.h"
#include "xla/literal_util.h"
#include "xla/permutation_util.h"
#include "xla/primitive_util.h"
#include "xla/service/dynamic_dimension_inference.h"
#include "xla/service/hlo_element_type_converter.h"
#include "xla/service/hlo_module_config.h"
#include "xla/service/shape_inference.h"
#include "xla/shape.h"
#include "xla/shape_util.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/tests/literal_test_util.h"
#include "xla/tests/test_utils.h"
#include "xla/types.h"
#include "xla/util.h"
#include "xla/xla_data.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
#include "tsl/platform/test.h"
#include "tsl/platform/test_benchmark.h"
namespace xla {
namespace {
static std::array<bool, 2> use_bf16_params{true, false};
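// Fixture that builds small HLO graphs into m_ and interprets them with
// evaluator_. When constructed with use_bfloat16 set, F32 computations are
// first rewritten to BF16 via HloElementTypeConverter before evaluation.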
class HloEvaluatorTest : public HloTestBase {
public:
HloEvaluatorTest() : use_bfloat16_(false) { InitializeFftData(); }
absl::StatusOr<Literal> Evaluate(
absl::Span<const Literal* const> arg_literals = {}) {
if (use_bfloat16_) {
HloElementTypeConverter(F32, BF16).Run(m_.get()).value();
}
return evaluator_.Evaluate(*m_->entry_computation(), arg_literals);
}
Literal EvaluateWithModule(
HloModule* module, absl::Span<const Literal* const> arg_literals = {}) {
if (use_bfloat16_) {
      HloElementTypeConverter(F32, BF16).Run(module).value();
}
return evaluator_.Evaluate(*module->entry_computation(), arg_literals)
.value();
}
void TestUnaryOp(HloOpcode opcode, Literal expected, Literal input,
float aabs = 0) {
HloComputation::Builder b(TestName());
auto c1 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
b.AddInstruction(HloInstruction::CreateUnary(expected.shape(), opcode, c1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto element_type = expected.shape().element_type();
if (element_type == F32 || element_type == F64) {
ErrorSpec error(aabs);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, error));
} else {
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
}
void TestBinaryOp(HloOpcode opcode, Literal expected, Literal lhs,
Literal rhs) {
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(lhs)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(rhs)));
b.AddInstruction(
HloInstruction::CreateBinary(expected.shape(), opcode, c1, c2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
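  // Builds opcode(src0, src1, src2) from three constants, evaluates, and
  // checks the result for exact equality with `expected`.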
void TestTernaryOp(HloOpcode opcode, Literal expected, Literal src0,
Literal src1, Literal src2) {
HloComputation::Builder b(TestName());
auto operand0 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src0)));
auto operand1 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src1)));
auto operand2 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(src2)));
b.AddInstruction(HloInstruction::CreateTernary(
expected.shape(), opcode, operand0, operand1, operand2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestEvaluateInstruction(HloInstruction* instruction,
const Literal& expected) {
TF_ASSERT_OK_AND_ASSIGN(Literal result, evaluator_.Evaluate(instruction));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestEvaluationFailure(HloInstruction* instruction) {
absl::StatusOr<Literal> result = evaluator_.Evaluate(instruction);
EXPECT_TRUE(!result.ok());
}
void TestRecursivelyEvaluateInstruction(HloInstruction* instruction,
const Literal& expected) {
TF_ASSERT_OK_AND_ASSIGN(
Literal result,
evaluator_.Evaluate(
instruction,
true));
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
void TestRecursiveEvaluationFailure(HloInstruction* instruction) {
absl::StatusOr<Literal> result = evaluator_.Evaluate(
instruction, true);
EXPECT_TRUE(!result.ok());
}
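  // Builds a scalar F32 computation returning max(lhs, rhs), used as the
  // reduction function in the reduce-window tests.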
std::unique_ptr<HloComputation> MaxComputationScalarF32() {
HloComputation::Builder max_computation("max");
Shape scalar_shape = ShapeUtil::MakeShape(F32, {});
auto param_lhs = max_computation.AddInstruction(
HloInstruction::CreateParameter(0, scalar_shape, "lhs"));
auto param_rhs = max_computation.AddInstruction(
HloInstruction::CreateParameter(1, scalar_shape, "rhs"));
max_computation.AddInstruction(HloInstruction::CreateBinary(
scalar_shape, HloOpcode::kMaximum, param_lhs, param_rhs));
return max_computation.Build();
}
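  // Runs a 2D max reduce-window over a 4x4 iota input with the given window
  // size, padding, stride, and dilations, and compares against `expected`.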
void ReduceWindowMaxIotaTest(int window_size, int padding, int stride,
int window_dilation, int base_dilation,
const Literal& expected) {
HloComputation::Builder b(TestName());
auto arg_array = std::make_unique<Array2D<float>>(4, 4);
arg_array->FillIota(0);
auto arg_literal = LiteralUtil::CreateR2FromArray2D<float>(*arg_array);
HloInstruction* arg_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(arg_literal)));
auto init_value = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0.f)));
auto max_func = m_->AddEmbeddedComputation(MaxComputationScalarF32());
Window window;
WindowDimension dim;
dim.set_size(window_size);
dim.set_stride(stride);
dim.set_padding_low(padding);
dim.set_padding_high(padding);
dim.set_window_dilation(window_dilation);
dim.set_base_dilation(base_dilation);
*window.add_dimensions() = dim;
*window.add_dimensions() = dim;
int dim0 = expected.shape().dimensions(0);
int dim1 = expected.shape().dimensions(1);
Shape shape = ShapeUtil::MakeShape(F32, {dim0, dim1});
b.AddInstruction(HloInstruction::CreateReduceWindow(
shape, arg_instruction, init_value, window, max_func));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
protected:
explicit HloEvaluatorTest(bool use_bfloat16) : use_bfloat16_(use_bfloat16) {
InitializeFftData();
}
void InitializeFftData();
HloEvaluator evaluator_;
const bool use_bfloat16_;
std::unique_ptr<HloModule> m_ = CreateNewVerifiedModule();
ErrorSpec fft_error_ = ErrorSpec(1e-4, 1e-5);
Literal fft_c64x2x4x8_;
Literal fft_c64x2x4x8_1d_;
Literal fft_c64x2x4x8_2d_;
Literal fft_c64x2x4x8_3d_;
};
class HloEvaluatorBf16Test : public ::testing::WithParamInterface<bool>,
public HloEvaluatorTest {
protected:
HloEvaluatorBf16Test() : HloEvaluatorTest(GetParam()) {}
};
INSTANTIATE_TEST_SUITE_P(HloEvaluatorTest_Instantiation, HloEvaluatorBf16Test,
::testing::ValuesIn(use_bf16_params));
TEST_P(HloEvaluatorBf16Test, DoesClamp) {
auto low = LiteralUtil::CreateR2<float>({{0.f, 2.f}, {2.f, 4.f}});
auto value = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
auto high = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
Shape shape = low.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{0, 4}, {2, 4}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DoesClampInt64) {
auto ones = [](int bits) { return (int64_t{1} << bits) - 1; };
auto low =
LiteralUtil::CreateR2<int64_t>({{0, ones(54)}, {ones(54), ones(58)}});
auto value = LiteralUtil::CreateR2<int64_t>({{0, ones(56)}, {0, ones(58)}});
auto high = LiteralUtil::CreateR2<int64_t>(
{{ones(54), ones(55)}, {ones(56), ones(58)}});
Shape shape = low.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected =
LiteralUtil::CreateR2<int64_t>({{0, ones(55)}, {ones(54), ones(58)}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DISABLED_DoesClampSpecialBroadcast) {
auto low = LiteralUtil::CreateR0<float>(0.f);
auto value = LiteralUtil::CreateR2<float>({{-1.f, 0.f}, {1.f, 2.f}});
auto high = LiteralUtil::CreateR0<float>(1.f);
Shape shape = value.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(low)));
auto c2 = b.AddInstruction(HloInstruction::CreateConstant(std::move(value)));
auto c3 = b.AddInstruction(HloInstruction::CreateConstant(std::move(high)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kClamp, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {1, 1}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, DoesSelect) {
auto pred = LiteralUtil::CreateR2<bool>({{true, false}, {false, true}});
auto on_true = LiteralUtil::CreateR2<float>({{2.f, 4.f}, {4.f, 4.f}});
auto on_false = LiteralUtil::CreateR2<float>({{0.f, 5.f}, {0.f, 4.f}});
Shape shape = on_true.shape();
HloComputation::Builder b(TestName());
auto c1 = b.AddInstruction(HloInstruction::CreateConstant(std::move(pred)));
auto c2 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(on_true)));
auto c3 =
b.AddInstruction(HloInstruction::CreateConstant(std::move(on_false)));
b.AddInstruction(
HloInstruction::CreateTernary(shape, HloOpcode::kSelect, c1, c2, c3));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
auto expected = LiteralUtil::CreateR2<float>({{2, 5}, {0, 4}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DoesAdd) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-96, 8}});
TestBinaryOp(HloOpcode::kAdd, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_P(HloEvaluatorBf16Test, DoesAnd) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{0, 0}, {4, 4}});
TestBinaryOp(HloOpcode::kAnd, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesOr) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-100, 4}});
TestBinaryOp(HloOpcode::kOr, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesXor) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{3, 4}, {-104, 0}});
TestBinaryOp(HloOpcode::kXor, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesMultiply) {
auto lhs = LiteralUtil::CreateR2<int32_t>({{-1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int32_t>(
{{std::numeric_limits<int32_t>::min(), 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int32_t>(
{{std::numeric_limits<int32_t>::min(), 0}, {-400, 16}});
TestBinaryOp(HloOpcode::kMultiply, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesDivideInt64) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{0, 0}, {-25, 1}});
TestBinaryOp(HloOpcode::kDivide, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesClampS64) {
auto low = LiteralUtil::CreateR1<int64_t>(
{-8616761059752331528LL, 6780561065411491190LL, -8616761059752331528LL});
auto value = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491190LL, 6780561065411491180LL, 4241131823772864090LL});
auto high = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491180LL, 8616761059752331528LL, 3832151243857508051LL});
auto expected = LiteralUtil::CreateR1<int64_t>(
{-6780561065411491190LL, 6780561065411491190LL, 3832151243857508051LL});
TestTernaryOp(HloOpcode::kClamp, std::move(expected), std::move(low),
std::move(value), std::move(high));
}
TEST_P(HloEvaluatorBf16Test, DoesDivideDouble) {
auto lhs = LiteralUtil::CreateR2<double>({{1.0, 0.0}, {-100.0, 4.0}});
auto rhs = LiteralUtil::CreateR2<double>({{2.2, 4.0}, {4.0, 4.0}});
auto expected =
LiteralUtil::CreateR2<double>({{0.45454545454545453, 0}, {-25, 1}});
TestBinaryOp(HloOpcode::kDivide, std::move(expected), std::move(lhs),
std::move(rhs));
}
TEST_F(HloEvaluatorTest, DoesAbsR2) {
auto operand = LiteralUtil::CreateR2<int64_t>({{1, -20}, {-100, 4}});
auto expected = LiteralUtil::CreateR2<int64_t>({{1, 20}, {100, 4}});
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesAbsR0) {
auto operand = LiteralUtil::CreateR0<float>(-1.0f);
auto expected = LiteralUtil::CreateR0<float>(1.0f);
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesAbsR1WithZeroSize) {
auto operand = LiteralUtil::CreateR1<float>({});
auto expected = LiteralUtil::CreateR1<float>({});
TestUnaryOp(HloOpcode::kAbs, std::move(expected), std::move(operand));
}
TEST_F(HloEvaluatorTest, DoesAbsC128) {
auto x = LiteralUtil::CreateR0<complex128>({1, 2});
auto expected_real = LiteralUtil::CreateR0<double>(2.23607);
TestUnaryOp(HloOpcode::kAbs, std::move(expected_real), std::move(x), 3e-06);
}
TEST_F(HloEvaluatorTest, DoesNegateR2) {
auto operand = LiteralUtil::CreateR2<int32_t>(
{{0, std::numeric_limits<int32_t>::min()}, {-1, 4}});
auto expected = LiteralUtil::CreateR2<int32_t>(
{{0, std::numeric_limits<int>::min()}, {1, -4}});
TestUnaryOp(HloOpcode::kNegate, std::move(expected), std::move(operand));
}
TEST_P(HloEvaluatorBf16Test, DoesCosR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{1, -1}, {-1, 1}});
TestUnaryOp(HloOpcode::kCos, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_P(HloEvaluatorBf16Test, DoesSinR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}});
TestUnaryOp(HloOpcode::kSin, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_P(HloEvaluatorBf16Test, DoesTanR2) {
auto operand = LiteralUtil::CreateR2<float>({{0, M_PI}, {-M_PI, 2 * M_PI}});
auto expected = LiteralUtil::CreateR2<float>({{0, 0}, {0, 0}});
TestUnaryOp(HloOpcode::kTan, std::move(expected), std::move(operand),
use_bfloat16_ ? 0.031250 : 9.5367431640625E-7);
}
TEST_F(HloEvaluatorTest, DoesNotR2) {
auto operand =
LiteralUtil::CreateR2<int32_t>({{0, std::numeric_limits<int>::min()},
{-1, std::numeric_limits<int>::max()}});
auto expected =
LiteralUtil::CreateR2<int32_t>({{-1, std::numeric_limits<int>::max()},
{0, std::numeric_limits<int>::min()}});
TestUnaryOp(HloOpcode::kNot, std::move(expected), std::move(operand));
}
TEST_F(HloEvaluatorTest, DoesRealC128) {
auto x = LiteralUtil::CreateR1<complex128>({{1, 0}, {-100, 4}});
auto expected_real = LiteralUtil::CreateR1<double>({1, -100});
TestUnaryOp(HloOpcode::kReal, std::move(expected_real), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesImagC128) {
auto x = LiteralUtil::CreateR1<complex128>({{1, 0}, {-100, 4}});
auto expected_imag = LiteralUtil::CreateR1<double>({0, 4});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_P(HloEvaluatorBf16Test, DoesImagF32AndBf16) {
auto x = LiteralUtil::CreateR1<float>({1, -100});
auto expected_imag = LiteralUtil::CreateR1<float>({0, 0});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesImagF64) {
auto x = LiteralUtil::CreateR1<double>({1, -100});
auto expected_imag = LiteralUtil::CreateR1<double>({0, 0});
TestUnaryOp(HloOpcode::kImag, std::move(expected_imag), std::move(x));
}
TEST_F(HloEvaluatorTest, DoesTraverseInstructions) {
auto lhs = LiteralUtil::CreateR2<int64_t>({{1, 0}, {-100, 4}});
auto rhs = LiteralUtil::CreateR2<int64_t>({{2, 4}, {4, 4}});
auto rhs2 = LiteralUtil::CreateR2<int64_t>({{1, -20}, {-100, 4}});
std::vector<const Literal*> args = {&lhs, &rhs, &rhs2};
Shape shape = ShapeUtil::MakeShape(S64, {2, 2});
HloComputation::Builder b(TestName());
auto param_lhs =
b.AddInstruction(HloInstruction::CreateParameter(0, shape, "lhs"));
auto param_rhs =
b.AddInstruction(HloInstruction::CreateParameter(1, shape, "rhs"));
auto lhs_instruction = b.AddInstruction(HloInstruction::CreateBinary(
shape, HloOpcode::kAdd, param_lhs, param_rhs));
auto param_rhs2 =
b.AddInstruction(HloInstruction::CreateParameter(2, shape, "rhs2"));
b.AddInstruction(HloInstruction::CreateBinary(shape, HloOpcode::kAdd,
lhs_instruction, param_rhs2));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate(args));
auto expected = LiteralUtil::CreateR2<int64_t>({{4, -16}, {-196, 12}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, DoesReshape) {
HloComputation::Builder b(TestName());
const int64_t dimensions[] = {11, 8, 7, 5, 9};
TF_ASSERT_OK_AND_ASSIGN(auto literal,
LiteralUtil::CreateRandomLiteral<F32>(
ShapeUtil::MakeShape(F32, dimensions), 0.0, 1.0));
auto literal_clone = literal.Clone();
HloInstruction* literal_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(literal)));
Shape shape = ShapeUtil::MakeShape(F32, {8, 7, 11, 9, 5});
const int64_t permutation[] = {1, 2, 0, 4, 3};
b.AddInstruction(
HloInstruction::CreateTranspose(shape, literal_instruction, permutation));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
using NativeT = typename primitive_util::PrimitiveTypeToNative<F32>::type;
result.EachCell<NativeT>(
[&](absl::Span<const int64_t> indices, NativeT value) {
std::vector<int64_t> rindexes = PermuteInverse(indices, permutation);
EXPECT_NEAR(value, literal_clone.Get<NativeT>(rindexes), 0.031250);
});
}
TEST_F(HloEvaluatorTest, DoesBroadcast) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}, {5, 6}});
auto output_literal = LiteralUtil::CreateR3<int32_t>(
{{{1, 2}, {3, 4}, {5, 6}}, {{1, 2}, {3, 4}, {5, 6}}});
HloInstruction* literal_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateBroadcast(
output_literal.shape(), literal_instruction, {1, 2}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
EXPECT_TRUE(LiteralTestUtil::Equal(result, output_literal));
}
TEST_F(HloEvaluatorTest, DoesBroadcastScalar) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR0<int32_t>(111);
auto output_literal = LiteralUtil::CreateR2<int32_t>(
{{111, 111}, {111, 111}, {111, 111}, {111, 111}, {111, 111}, {111, 111}});
HloInstruction* literal_instruction = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateBroadcast(
output_literal.shape(), literal_instruction,
{}));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate({}));
EXPECT_TRUE(LiteralTestUtil::Equal(result, output_literal));
}
TEST_F(HloEvaluatorTest, DoesConcatenateSimple) {
HloComputation::Builder b(TestName());
HloInstruction* operand1 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int64_t>({{-1, -2}, {100, 200}})));
HloInstruction* operand2 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR2<int64_t>({{-2, -3}, {-100, -200}})));
std::vector<HloInstruction*> operands = {operand1, operand2};
Shape shape = ShapeUtil::MakeShape(S64, {4, 2});
b.AddInstruction(HloInstruction::CreateConcatenate(shape, operands, 0));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<int64_t>(
{{-1, -2}, {100, 200}, {-2, -3}, {-100, -200}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_F(HloEvaluatorTest, ConcatenateHandlesShapeWithZeroElement) {
HloComputation::Builder b(TestName());
HloInstruction* operand1 = b.AddInstruction(HloInstruction::CreateConstant(
LiteralUtil::CreateR1<int64_t>({100, 200})));
HloInstruction* operand2 = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR1<int64_t>({})));
std::vector<HloInstruction*> operands = {operand1, operand2};
Shape shape = ShapeUtil::MakeShape(S64, {2});
b.AddInstruction(HloInstruction::CreateConcatenate(shape, operands, 0));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR1<int64_t>({100, 200});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, ConvertWithSameLayout) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2<int32_t>({{1, 2}, {3, 4}, {5, 6}});
auto expected =
LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}});
ASSERT_TRUE(LayoutUtil::LayoutsInShapesEqual(input_literal.shape(),
expected.shape()));
HloInstruction* constant = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateConvert(expected.shape(), constant));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
TEST_P(HloEvaluatorBf16Test, ConvertWithDifferentLayout) {
HloComputation::Builder b(TestName());
auto input_literal = LiteralUtil::CreateR2WithLayout<int32_t>(
{{1, 2}, {3, 4}, {5, 6}}, LayoutUtil::MakeLayout({0, 1}));
auto expected = LiteralUtil::CreateR2WithLayout<float>(
{{1.0, 2.0}, {3.0, 4.0}, {5.0, 6.0}}, LayoutUtil::MakeLayout({1, 0}));
ASSERT_FALSE(LayoutUtil::LayoutsInShapesEqual(input_literal.shape(),
expected.shape()));
HloInstruction* constant = b.AddInstruction(
HloInstruction::CreateConstant(std::move(input_literal)));
b.AddInstruction(HloInstruction::CreateConvert(expected.shape(), constant));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
EXPECT_TRUE(LiteralTestUtil::Equal(result, expected));
}
PaddingConfig CreatePaddingConfig(
std::initializer_list<std::array<int64_t, 3>> padding_dimensions) {
PaddingConfig padding_config;
for (auto& paddings_per_dim : padding_dimensions) {
auto dimension = padding_config.add_dimensions();
dimension->set_edge_padding_low(paddings_per_dim[0]);
dimension->set_edge_padding_high(paddings_per_dim[1]);
dimension->set_interior_padding(paddings_per_dim[2]);
}
return padding_config;
}
TEST_F(HloEvaluatorTest, Pad2DIntegerArrayWithZeroDimension) {
auto operand = LiteralUtil::CreateR2<int32_t>({{}, {}});
HloComputation::Builder b(TestName());
auto operand_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(operand)));
constexpr int32_t kPadValue = 10;
auto pad_value = LiteralUtil::CreateR0<int32_t>(kPadValue);
auto padding_value_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(pad_value)));
auto padding_config = CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}});
Shape shape = ShapeUtil::MakeShape(S32, {5, 2});
b.AddInstruction(HloInstruction::CreatePad(
shape, operand_instruction, padding_value_instruction, padding_config));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected = LiteralUtil::CreateR2<int32_t>(
{{10, 10}, {10, 10}, {10, 10}, {10, 10}, {10, 10}});
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, Pad4DFloatArrayWithInteriorPadding) {
HloComputation::Builder b(TestName());
Array4D<float> input_array(3, 2, 1, 1, {1, 2, 3, 4, 5, 6});
auto input = LiteralUtil::CreateR4FromArray4D<float>(input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
constexpr float kPadValue = 1.5;
auto pad_value = LiteralUtil::CreateR0<float>(kPadValue);
HloInstruction* pad_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(pad_value)));
Shape shape = ShapeUtil::MakeShape(F32, {8, 5, 1, 1});
auto r4_padding_on_dim0_dim1 =
CreatePaddingConfig({{{1, 0, 2}}, {{0, 2, 1}}, {{0, 0, 0}}, {{0, 0, 0}}});
b.AddInstruction(HloInstruction::CreatePad(
shape, input_instruction, pad_instruction, r4_padding_on_dim0_dim1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = std::make_unique<Array4D<float>>(8, 5, 1, 1);
expected_array->Fill(kPadValue);
(*expected_array)(1, 0, 0, 0) = 1.0f;
(*expected_array)(1, 2, 0, 0) = 2.0f;
(*expected_array)(4, 0, 0, 0) = 3.0f;
(*expected_array)(4, 2, 0, 0) = 4.0f;
(*expected_array)(7, 0, 0, 0) = 5.0f;
(*expected_array)(7, 2, 0, 0) = 6.0f;
auto expected = LiteralUtil::CreateR4FromArray4D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Equal(expected, result));
}
TEST_P(HloEvaluatorBf16Test, NegativePadding2D) {
HloComputation::Builder b(TestName());
auto input_array = std::make_unique<Array2D<float>>(4, 3);
input_array->FillUnique(1.0f);
auto input = LiteralUtil::CreateR2FromArray2D<float>(*input_array);
HloInstruction* input_instruction =
b.AddInstruction(HloInstruction::CreateConstant(std::move(input)));
auto pad_value_instruction = b.AddInstruction(
HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(2.718f)));
auto r2_padding_on_dim0_dim1 =
CreatePaddingConfig({{{-1, -2, 0}}, {{-2, 4, 0}}});
Shape shape = ShapeUtil::MakeShape(F32, {1, 5});
b.AddInstruction(HloInstruction::CreatePad(shape, input_instruction,
pad_value_instruction,
r2_padding_on_dim0_dim1));
m_->AddEntryComputation(b.Build());
TF_ASSERT_OK_AND_ASSIGN(Literal result, Evaluate());
auto expected_array = std::make_unique<Array2D<float>>(1, 5);
(*expected_array)(0, 0) = 7.0f;
(*expected_array)(0, 1) = 2.718f;
(*expected_array)(0, 2) = 2.718f;
(*expected_array)(0, 3) = 2.718f;
(*expected_array)(0, 4) = 2.718f;
auto expected = LiteralUtil::CreateR2FromArray2D<float>(*expected_array);
EXPECT_TRUE(LiteralTestUtil::Near(expected, result, ErrorSpec(0.031250)));
}
TEST_P(HloEvaluatorBf16Tes | 2,185 |
#ifndef XLA_HLO_TRANSFORMS_HLO_CONSTANT_SPLITTER_H_
#define XLA_HLO_TRANSFORMS_HLO_CONSTANT_SPLITTER_H_
#include "absl/container/flat_hash_set.h"
#include "absl/functional/function_ref.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/hlo_pass_interface.h"
namespace xla {
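// Splits constants (and, optionally, whole constant expressions) that feed
// multiple users into per-user copies. `extra_constraints` further filters
// which constants are eligible as seeds.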
class HloConstantSplitter : public HloModulePass {
public:
explicit HloConstantSplitter(
bool split_expressions = false,
absl::FunctionRef<bool(const HloInstruction*)> extra_constraints =
[](const HloInstruction* instruction) { return true; })
: split_expressions_(split_expressions),
extra_constraints_(extra_constraints) {}
absl::string_view name() const override { return "hlo-constant-splitter"; }
using HloPassInterface::Run;
absl::StatusOr<bool> Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) override;
private:
bool split_expressions_;
absl::FunctionRef<bool(const HloInstruction*)> extra_constraints_;
};
}
#endif
#include "xla/hlo/transforms/hlo_constant_splitter.h"
#include <iterator>
#include <utility>
#include <vector>
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/inlined_vector.h"
#include "absl/log/check.h"
#include "absl/log/log.h"
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
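// A "constant" seed is a kConstant, or a kIota when whole expressions are
// being split.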
bool IsSupportedConstant(const HloInstruction* instruction,
bool split_expressions) {
return instruction->opcode() == HloOpcode::kConstant ||
(split_expressions && instruction->opcode() == HloOpcode::kIota);
}
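// Instructions that may participate in a duplicated constant expression:
// anything side-effect-free that is elementwise, a broadcast, or a slice.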
bool IsSupportedConstantExpression(const HloInstruction* instruction) {
if (instruction->HasSideEffect()) {
return false;
}
if (instruction->IsElementwise()) {
return true;
}
switch (instruction->opcode()) {
case HloOpcode::kBroadcast:
case HloOpcode::kSlice:
return true;
default:
return false;
}
}
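// Clones the constant expression rooted at `to_clone` (visiting its operands
// in post order) and rewires `user` to consume the clone instead of the
// original. Returns true if anything was rewritten.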
absl::StatusOr<bool> DuplicateConstantExpressionPerUser(
HloComputation* computation, HloInstruction* to_clone,
HloInstruction* user) {
absl::InlinedVector<std::pair<const HloInstruction*, int>, 8> worklist(
1, std::make_pair(to_clone, 0));
absl::InlinedVector<const HloInstruction*, 8> to_clone_vec;
absl::flat_hash_set<const HloInstruction*> visited;
bool changed = false;
VLOG(10) << "Duplicating: " << to_clone->ToString() << " for user "
<< user->ToString();
while (!worklist.empty()) {
auto& [to_clone_i, index] = worklist.back();
if (index >= to_clone_i->operand_count()) {
to_clone_vec.push_back(to_clone_i);
worklist.pop_back();
continue;
}
int64_t prev_idx = index++;
if (visited.insert(to_clone_i->operands()[prev_idx]).second) {
VLOG(10) << "Adding operand to worklist: "
<< to_clone_i->operands()[prev_idx]->ToString();
worklist.push_back(std::make_pair(to_clone_i->operands()[prev_idx], 0));
}
}
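  // `to_clone_vec` now lists the expression's instructions in post order;
  // clone each one, wiring operands to their already-cloned counterparts.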
absl::flat_hash_map<const HloInstruction*, HloInstruction*>
cloned_instructions_map;
for (auto* i : to_clone_vec) {
absl::InlinedVector<HloInstruction*, 4> new_operand_vector;
for (auto* op : i->operands()) {
auto it = cloned_instructions_map.find(op);
CHECK(it != cloned_instructions_map.end())
<< "Expected already cloned instruction for operand: "
<< op->ToString() << " Instruction to clone: " << i->ToString();
new_operand_vector.push_back(it->second);
}
HloInstruction* cloned_instr = computation->AddInstruction(
i->CloneWithNewOperands(i->shape(), new_operand_vector));
cloned_instructions_map[i] = cloned_instr;
if (i == to_clone) {
TF_RETURN_IF_ERROR(to_clone->ReplaceUseWith(user, cloned_instr));
changed = true;
}
}
return changed;
}
}
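// Per computation: collect seed constants, optionally grow them into whole
// constant expressions, then duplicate shared ones per user.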
absl::StatusOr<bool> HloConstantSplitter::Run(
HloModule* module,
const absl::flat_hash_set<absl::string_view>& execution_threads) {
bool changed = false;
for (HloComputation* computation : module->computations(execution_threads)) {
absl::flat_hash_set<HloInstruction*> constants_set;
std::vector<HloInstruction*> constants_list;
std::vector<HloInstruction*> worklist;
for (HloInstruction* instruction :
computation->MakeInstructionPostOrder()) {
VLOG(10) << "Considering: " << instruction->ToString();
if (IsSupportedConstant(instruction, split_expressions_) &&
extra_constraints_(instruction)) {
VLOG(10) << "Adding to constant list: " << instruction->ToString();
constants_set.insert(instruction);
constants_list.push_back(instruction);
}
}
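    // Grow the set of constant expressions to a fixed point: any instruction
    // whose operands are all already known constants joins the set, and (when
    // splitting expressions) its supported users are examined next.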
int64_t previous_total_constants = 0;
while (constants_list.size() != previous_total_constants) {
VLOG(10) << "Previous total: " << previous_total_constants
<< " current constants: " << constants_list.size();
previous_total_constants = constants_list.size();
worklist.clear();
worklist.insert(worklist.end(), constants_list.begin(),
constants_list.end());
while (!worklist.empty()) {
auto* i = worklist.back();
worklist.pop_back();
bool is_constant = true;
for (auto* ops : i->operands()) {
if (!constants_set.contains(ops)) {
is_constant = false;
break;
}
}
if (is_constant) {
if (constants_set.insert(i).second) {
constants_list.push_back(i);
}
if (split_expressions_) {
for (auto* u : i->users()) {
if (IsSupportedConstantExpression(u) &&
!constants_set.contains(u)) {
worklist.push_back(u);
}
}
}
}
}
}
if (VLOG_IS_ON(5)) {
VLOG(5) << "For computation: " << computation->ToString();
for (HloInstruction* instruction : constants_list) {
VLOG(5) << "Is a constant: " << instruction->ToString();
}
}
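    // Clone shared constants and constant expressions so that each selected
    // user gets its own private copy of the expression.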
for (HloInstruction* instruction : constants_list) {
if (IsSupportedConstant(instruction, split_expressions_) &&
instruction->user_count() <= 1) {
continue;
}
absl::InlinedVector<HloInstruction*, 8> users;
users.reserve(instruction->user_count());
for (HloInstruction* user : instruction->users()) {
if (instruction->opcode() == HloOpcode::kConstant ||
!constants_set.contains(user)) {
users.push_back(user);
}
}
for (auto* u : users) {
TF_ASSIGN_OR_RETURN(bool duplicated, DuplicateConstantExpressionPerUser(
computation, instruction, u));
changed |= duplicated;
}
}
}
return changed;
}
} | #include "xla/hlo/transforms/hlo_constant_splitter.h"
#include <cstdint>
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/hlo_dce.h"
#include "xla/service/hlo_parser.h"
#include "xla/test.h"
#include "xla/tests/hlo_test_base.h"
#include "xla/util.h"
#include "tsl/lib/core/status_test_util.h"
#include "tsl/platform/statusor.h"
namespace xla {
namespace {
using HloConstantSplitterTest = HloTestBase;
TEST_F(HloConstantSplitterTest, SplitConstants) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant = f32[] constant(94.1934)
add1 = f32[] add(constant, gte0)
add2 = f32[] add(constant, gte1)
ROOT root = (f32[], f32[], f32[]) tuple(constant, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
TF_ASSERT_OK(HloConstantSplitter().Run(module.get()).status());
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kConstant) {
EXPECT_LE(instruction->user_count(), 1);
}
}
}
}
TEST_F(HloConstantSplitterTest, OnlySplitConstantsAllowedBySeedConstraints) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant1 = f32[] constant(1)
add0 = f32[] add(constant1, gte0)
add1 = f32[] add(constant1, add0)
constant2 = f32[] constant(2)
add2 = f32[] multiply(constant2, gte1)
ROOT root = (f32[], f32[], f32[]) tuple(constant2, add1, add2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
TF_ASSERT_OK(HloConstantSplitter( false,
[](const HloInstruction* instruction) {
return instruction->name() != "constant1";
})
.Run(module.get())
.status());
for (HloComputation* computation : module->computations()) {
for (HloInstruction* instruction : computation->instructions()) {
if (instruction->opcode() == HloOpcode::kConstant &&
instruction->name() != "constant1") {
EXPECT_LE(instruction->user_count(), 1);
}
}
}
const HloInstruction* constant1 = FindInstruction(module.get(), "constant1");
ASSERT_NE(constant1, nullptr);
EXPECT_EQ(constant1->user_count(), 2);
}
TEST_F(HloConstantSplitterTest, PreservingConstantsWithZeroUsers) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
param = (f32[], f32[]) parameter(0),
sharding={{maximal device=0}, {maximal device=0}}
gte0 = f32[] get-tuple-element(param), index=0
gte1 = f32[] get-tuple-element(param), index=1
constant1 = f32[] constant(94.1934)
constant2 = f32[] constant(9.1934)
ROOT root = (f32[], f32[]) tuple(gte0, gte1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter();
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_FALSE(status_or.value());
}
TEST_F(HloConstantSplitterTest, SplittingExpressionsWithBroadcast) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
gte0 = f32[1024] parameter(0)
gte1 = f32[1024] parameter(1)
constant1 = f32[1024] iota(), iota_dimension=0
constant2 = f32[] constant(9.1934)
constant3 = f32[] constant(0.0)
constant4 = f32[] constant(1.0)
b = f32[1024] broadcast(constant2), dimensions={}
b2 = f32[1024] broadcast(constant3), dimensions={}
b3 = f32[1024] broadcast(constant4), dimensions={}
cmp = pred[1024] compare(constant1, b), direction=LT
s = f32[1024] select(cmp, b2, b3)
a1 = f32[1024] add(s, gte0)
a2 = f32[1024] add(s, gte1)
ROOT root = (f32[1024], f32[1024]) tuple(a1, a2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_TRUE(status_or.value());
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
XLA_VLOG_LINES(1, module->entry_computation()->ToString());
EXPECT_EQ(module->entry_computation()->instruction_count(), 23);
}
TEST_F(HloConstantSplitterTest, SplittingExpressionsWithSlice) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
iota.0 = u32[64] iota(), iota_dimension=0
slice.0 = u32[32] slice(iota.0), slice={[0:32]}
broadcast.0 = u32[16,32] broadcast(slice.0), dimensions={1}
broadcast.1 = u32[32,32] broadcast(slice.0), dimensions={1}
p.0 = u32[16,32] parameter(0)
p.1 = u32[32,32] parameter(1)
add.0 = u32[16,32] add(p.0, broadcast.0)
add.1 = u32[32,32] add(p.1, broadcast.1)
ROOT root = (u32[16,32], u32[32,32]) tuple(add.0, add.1)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const auto status_or = HloTestBase::RunHloPass(&pass, module.get());
TF_ASSERT_OK(status_or.status());
EXPECT_TRUE(status_or.value());
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
XLA_VLOG_LINES(1, module->entry_computation()->ToString());
EXPECT_EQ(module->entry_computation()->instruction_count(), 11);
}
TEST_F(HloConstantSplitterTest, NoSplittingSideEffectExpressions) {
const char* module_str = R"(
HloModule test_module
ENTRY entry_computation {
gte0 = f32[1024] parameter(0)
gte1 = f32[1024] parameter(1)
constant1 = f32[1024] iota(), iota_dimension=0
constant2 = f32[] constant(9.1934)
constant3 = f32[] constant(0.0)
constant4 = f32[] constant(0.0)
constant5 = f32[] constant(1.0)
b = f32[1024] broadcast(constant2), dimensions={}
b2 = f32[1024] broadcast(constant3), dimensions={}
rng = f32[] rng(constant4, constant5), distribution=rng_uniform
b3 = f32[1024] broadcast(rng), dimensions={}
cmp = pred[1024] compare(constant1, b), direction=LT
s = f32[1024] select(cmp, b2, b3)
a1 = f32[1024] add(s, gte0)
a2 = f32[1024] add(s, gte1)
ROOT root = (f32[1024], f32[1024]) tuple(a1, a2)
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
const int64_t count_before = module->entry_computation()->instruction_count();
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pass, module.get()));
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
const int64_t count_after_dce =
module->entry_computation()->instruction_count();
EXPECT_TRUE(changed);
EXPECT_EQ(count_before, count_after_dce);
int64_t rng_count = 0;
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kRng) {
rng_count++;
}
}
EXPECT_EQ(rng_count, 1);
}
TEST_F(HloConstantSplitterTest, InstructionsWithOneUser) {
const char* module_str = R"(
HloModule test_module, entry_computation_layout={(f32[1024]{0:T(512)})->f32[1024]{0:T(512)}}
reduce.add {
a = f32[] parameter(0)
b = f32[] parameter(1)
ROOT add = f32[] add(a, b)
}
ENTRY entry_computation {
constant1 = f32[] constant(1.1)
b1 = f32[1024]{0} broadcast(constant1), dimensions={}
iota.1 = f32[1024]{0} iota(), iota_dimension=0
add.1 = f32[1024]{0} add(b1, iota.1)
p0 = f32[1024]{0} parameter(0), sharding={devices=[4]0,1,2,3}
custom-call.0 = f32[256]{0} custom-call(p0), custom_call_target="SPMDFullToShardShape", sharding={manual}
constant0 = f32[] constant(0)
reduce.1 = f32[] reduce(custom-call.0, constant0), dimensions={0}, to_apply=reduce.add
b3 = f32[1024]{0} broadcast(reduce.1), dimensions={}
add.2 = f32[1024]{0} add(add.1, b3)
custom-call.1 = f32[4096]{0} custom-call(add.2), custom_call_target="SPMDShardToFullShape", sharding={devices=[4]0,1,2,3}
reshape = f32[4,1024]{1,0} reshape(custom-call.1)
reduce.2 = f32[1024]{0} reduce(reshape, constant0), dimensions={0}, to_apply=reduce.add
iota.2 = f32[1024]{0} iota(), iota_dimension=0
mul = f32[1024]{0} multiply(b1, iota.2)
ROOT sub = f32[1024]{0} subtract(reduce.2, mul), sharding={devices=[4]0,1,2,3}
}
)";
TF_ASSERT_OK_AND_ASSIGN(auto module,
ParseAndReturnUnverifiedModule(module_str));
HloConstantSplitter pass = HloConstantSplitter(true);
TF_ASSERT_OK_AND_ASSIGN(bool changed,
HloTestBase::RunHloPass(&pass, module.get()));
EXPECT_TRUE(changed);
int64_t broadcast_count_before_dce = 0, broadcast_count_after_dce = 0;
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kBroadcast) {
broadcast_count_before_dce++;
}
}
EXPECT_EQ(broadcast_count_before_dce, 4);
HloDCE dce;
TF_ASSERT_OK(dce.Run(module.get()).status());
for (HloInstruction* instruction :
module->entry_computation()->instructions()) {
if (instruction->opcode() == HloOpcode::kBroadcast) {
broadcast_count_after_dce++;
}
}
EXPECT_EQ(broadcast_count_after_dce, 3);
}
}
} | 2,186 |